container: add support for uploading to registries
Add a new generic container registry client via a new `container` package. Use this to create a command line utility as well as a new upload target for container registries. The code uses the github.com/containers/* projects and packages, which are also used by skopeo, podman et al., to interact with container registries. One of the dependencies is `proglottis/gpgme`, which uses cgo to bind libgpgme, so we have to add the corresponding devel package to the BuildRequires as well as install it on CI. Checks will follow later via an integration test.
parent d136a075bc
commit 986f076276
955 changed files with 164203 additions and 2549 deletions

.github/workflows/tests.yml (vendored): 7 changes
@@ -20,8 +20,9 @@ jobs:
     steps:
       # krb5-devel is needed to test internal/upload/koji package
       # gcc is needed to build the mock dnf-json binary for the unit tests
+      # gpgme-devel is needed for container upload dependencies
       - name: Install build and test dependencies
-        run: dnf -y install krb5-devel gcc git-core go
+        run: dnf -y install krb5-devel gcc git-core go gpgme-devel

       - name: Check out code into the Go module directory
         uses: actions/checkout@v3

@@ -79,6 +80,10 @@ jobs:
       - name: Install kerberos devel package
         run: sudo apt-get install -y libkrb5-dev

+      # This is needed for the container upload dependencies
+      - name: Install libgpgme devel package
+        run: sudo apt-get install -y libgpgme-dev
+
       - name: Run golangci-lint
         run: $(go env GOPATH)/bin/golangci-lint run --timeout 5m0s
cmd/osbuild-upload-container/main.go (new file): 76 lines

@@ -0,0 +1,76 @@
package main

import (
	"context"
	"flag"
	"fmt"
	"os"
	"os/user"
	"path/filepath"

	"github.com/osbuild/osbuild-composer/internal/container"
)

func main() {
	var filename string
	var destination string
	var username string
	var password string
	var tag string
	var ignoreTls bool

	flag.StringVar(&filename, "container", "", "path to the oci-archive to upload (required)")
	flag.StringVar(&destination, "destination", "", "destination to upload to (required)")
	flag.StringVar(&tag, "tag", "", "destination tag to use for the container")
	flag.StringVar(&username, "username", "", "username to use for registry")
	flag.StringVar(&password, "password", "", "password to use for registry")
	flag.BoolVar(&ignoreTls, "ignore-tls", false, "ignore tls verification for destination")
	flag.Parse()

	if filename == "" || destination == "" {
		flag.Usage()
		os.Exit(1)
	}

	absPath, err := filepath.Abs(filename)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		return
	}

	fmt.Println("Container to upload is:", filename)

	client, err := container.NewClient(destination)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error creating the upload client: %v\n", err)
		os.Exit(1)
	}

	if password != "" {
		if username == "" {
			u, err := user.Current()
			if err != nil {
				fmt.Fprintf(os.Stderr, "error looking up current user: %v\n", err)
				os.Exit(1)
			}
			username = u.Username
		}
		client.SetCredentials(username, password)
	}

	client.TlsVerify = !ignoreTls

	ctx := context.Background()

	from := fmt.Sprintf("oci-archive://%s", absPath)

	digest, err := client.UploadImage(ctx, from, tag)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error uploading: %v\n", err)
		os.Exit(1)
	}

	fmt.Printf("upload done; destination manifest: %s\n", digest.String())
}
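With that, a local OCI archive can be pushed straight from the command line, for example (hypothetical paths and registry): osbuild-upload-container -container ./image.oci-archive -destination registry.example.org/osbuild/demo -tag latest -username user -password secret. If -password is given without -username, the current user's name is used, and -ignore-tls skips certificate verification for registries with self-signed certificates.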
@@ -12,6 +12,7 @@ import (
 	"path"
 	"strings"

+	"github.com/osbuild/osbuild-composer/internal/container"
 	"github.com/osbuild/osbuild-composer/internal/upload/oci"

 	"github.com/google/uuid"

@@ -813,6 +814,38 @@ func (impl *OSBuildJobImpl) Run(job worker.Job) error {
 			)
 			osbuildJobResult.Success = true
 			osbuildJobResult.UploadStatus = "success"
+
+		case *target.ContainerTargetOptions:
+			destination := args.Targets[0].ImageName
+
+			logWithId.Printf("[container] ⬆ Uploading the image to %s", destination)
+
+			ctx := context.Background()
+			client, err := container.NewClient(destination)
+			if err != nil {
+				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidConfig, err.Error())
+				return nil
+			}
+
+			client.Auth.Username = options.Username
+			client.Auth.Password = options.Password
+
+			if options.TlsVerify != nil {
+				client.TlsVerify = *options.TlsVerify
+			}
+
+			sourcePath := path.Join(outputDirectory, exportPath, options.Filename)
+
+			// TODO: get the container type from the metadata of the osbuild job
+			sourceRef := fmt.Sprintf("oci-archive:%s", sourcePath)
+
+			digest, err := client.UploadImage(ctx, sourceRef, "")
+			if err != nil {
+				osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, err.Error())
+				return nil
+			}
+			logWithId.Printf("[container] 🎉 Image uploaded (%s)!", digest.String())
+
 		default:
 			osbuildJobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorInvalidTarget, fmt.Sprintf("invalid target type: %s", args.Targets[0].Name))
 			return nil
go.mod: 9 changes

@@ -14,7 +14,9 @@ require (
 	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
 	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
 	github.com/BurntSushi/toml v1.1.0
-	github.com/aws/aws-sdk-go v1.44.43
+	github.com/aws/aws-sdk-go v1.44.4
+	github.com/containers/common v0.48.0
+	github.com/containers/image/v5 v5.21.1
 	github.com/coreos/go-semver v0.3.0
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
 	github.com/deepmap/oapi-codegen v1.8.2

@@ -22,7 +24,6 @@ require (
 	github.com/go-openapi/swag v0.21.1 // indirect
 	github.com/gobwas/glob v0.2.3
 	github.com/golang-jwt/jwt/v4 v4.4.1
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/google/go-cmp v0.5.8
 	github.com/google/uuid v1.3.0
 	github.com/gophercloud/gophercloud v0.24.0

@@ -33,6 +34,7 @@ require (
 	github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
 	github.com/labstack/echo/v4 v4.7.2
 	github.com/labstack/gommon v0.3.1
+	github.com/opencontainers/go-digest v1.0.0
 	github.com/openshift-online/ocm-sdk-go v0.1.266
 	github.com/oracle/oci-go-sdk/v54 v54.0.0
 	github.com/prometheus/client_golang v1.12.1

@@ -44,9 +46,8 @@ require (
 	github.com/vmware/govmomi v0.28.0
 	golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
+	golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
 	google.golang.org/api v0.75.0
 	google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
 	gopkg.in/ini.v1 v1.66.4
 )
internal/container/client.go (new file): 201 lines

@@ -0,0 +1,201 @@
// Package container implements a client for a container
// registry. It can be used to upload container images.
package container

import (
	"context"
	"fmt"
	"io"
	"os"
	"strings"

	_ "github.com/containers/image/v5/docker/archive"
	_ "github.com/containers/image/v5/oci/archive"
	_ "github.com/containers/image/v5/oci/layout"

	"github.com/containers/common/pkg/retry"
	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

const (
	DefaultUserAgent  = "osbuild-composer/1.0"
	DefaultPolicyPath = "/etc/containers/policy.json"
)

type Credentials struct {
	Username string
	Password string
}

// A Client interacts with the given Target object at a
// container registry, e.g. for uploading an image to it.
// All mentioned defaults are only set when using the
// NewClient constructor.
type Client struct {
	Target reference.Named // the target object to interact with
	Auth   Credentials     // credentials to use

	ReportWriter io.Writer // used for writing status reports, defaults to os.Stdout

	PrecomputeDigests bool // precompute digests in order to avoid unnecessary uploads
	MaxRetries        int  // how often to retry HTTP requests

	UserAgent string // user agent string to use for requests, defaults to DefaultUserAgent
	TlsVerify bool   // verify TLS certificates; set to false for an insecure connection

	// internal state
	policy *signature.Policy
	sysCtx *types.SystemContext
}

// NewClient constructs a new Client for target with default options.
// It will add the "latest" tag if target does not contain one.
func NewClient(target string) (*Client, error) {

	ref, err := reference.ParseNormalizedNamed(target)
	if err != nil {
		return nil, fmt.Errorf("failed to parse '%s': %w", target, err)
	}

	var policy *signature.Policy
	if _, err := os.Stat(DefaultPolicyPath); err == nil {
		policy, err = signature.NewPolicyFromFile(DefaultPolicyPath)
		if err != nil {
			return nil, err
		}
	} else {
		policy = &signature.Policy{
			Default: []signature.PolicyRequirement{
				signature.NewPRInsecureAcceptAnything(),
			},
		}
	}

	if err != nil {
		return nil, err
	}

	client := Client{
		Target: reference.TagNameOnly(ref),

		ReportWriter:      os.Stdout,
		PrecomputeDigests: true,

		UserAgent: DefaultUserAgent,
		TlsVerify: true,

		sysCtx: &types.SystemContext{
			RegistriesDirPath:        "",
			SystemRegistriesConfPath: "",
			BigFilesTemporaryDir:     "/var/tmp",
		},
		policy: policy,
	}

	return &client, nil
}

// SetCredentials sets the username and password for the Client.
func (cl *Client) SetCredentials(username, password string) {
	cl.Auth.Username = username
	cl.Auth.Password = password
}

func parseImageName(name string) (types.ImageReference, error) {

	parts := strings.SplitN(name, ":", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid image name '%s'", name)
	}

	transport := transports.Get(parts[0])
	if transport == nil {
		return nil, fmt.Errorf("unknown transport '%s'", parts[0])
	}

	return transport.ParseReference(parts[1])
}

// UploadImage takes a container image located at from and uploads it
// to the Target of the Client. If tag is set, i.e. not the empty string,
// it will replace any previously set tag or digest of the target.
// Returns the digest of the manifest that was written to the server.
func (cl *Client) UploadImage(ctx context.Context, from, tag string) (digest.Digest, error) {

	targetCtx := *cl.sysCtx
	targetCtx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!cl.TlsVerify)
	targetCtx.DockerRegistryPushPrecomputeDigests = cl.PrecomputeDigests

	targetCtx.DockerAuthConfig = &types.DockerAuthConfig{
		Username: cl.Auth.Username,
		Password: cl.Auth.Password,
	}

	policyContext, err := signature.NewPolicyContext(cl.policy)
	if err != nil {
		return "", err
	}

	srcRef, err := parseImageName(from)
	if err != nil {
		return "", fmt.Errorf("invalid source name '%s': %w", from, err)
	}

	target := cl.Target

	if tag != "" {
		target = reference.TrimNamed(target)
		target, err = reference.WithTag(target, tag)
		if err != nil {
			return "", fmt.Errorf("error creating reference with tag '%s': %w", tag, err)
		}
	}

	destRef, err := docker.NewReference(target)
	if err != nil {
		return "", err
	}

	retryOpts := retry.RetryOptions{
		MaxRetry: cl.MaxRetries,
	}

	var manifestDigest digest.Digest

	err = retry.RetryIfNecessary(ctx, func() error {
		manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
			RemoveSignatures:      false,
			SignBy:                "",
			SignPassphrase:        "",
			ReportWriter:          cl.ReportWriter,
			SourceCtx:             cl.sysCtx,
			DestinationCtx:        &targetCtx,
			ForceManifestMIMEType: "",
			ImageListSelection:    copy.CopyAllImages,
			PreserveDigests:       false,
		})

		if err != nil {
			return err
		}

		manifestDigest, err = manifest.Digest(manifestBytes)

		return err
	}, &retryOpts)

	if err != nil {
		return "", err
	}

	return manifestDigest, nil
}
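The sequence above (NewClient, optionally SetCredentials, then UploadImage) is the package's whole public surface. A minimal sketch of a caller, assuming it lives inside the osbuild-composer module (the package is internal, so it cannot be imported from elsewhere); the registry, credentials, and archive path are placeholders:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/osbuild/osbuild-composer/internal/container"
)

func main() {
	// Hypothetical destination; NewClient normalizes the name and adds
	// the "latest" tag since none is given here.
	client, err := container.NewClient("registry.example.org/osbuild/demo")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	client.SetCredentials("user", "secret") // placeholder credentials
	client.MaxRetries = 3                   // retry transient registry errors

	// Push a local OCI archive; the explicit tag replaces whatever tag
	// the destination reference carried.
	digest, err := client.UploadImage(context.Background(), "oci-archive:/tmp/image.tar", "v1")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("manifest digest:", digest.String())
}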
internal/target/container_target.go (new file): 17 lines

@@ -0,0 +1,17 @@
package target

type ContainerTargetOptions struct {
	Filename  string `json:"filename"`
	Reference string `json:"reference"`

	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`

	TlsVerify *bool `json:"tls_verify,omitempty"`
}

func (ContainerTargetOptions) isTargetOptions() {}

func NewContainerTarget(options *ContainerTargetOptions) *Target {
	return newTarget("org.osbuild.container", options)
}
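For illustration, assembling the new target from inside the module might look like the sketch below; all values are placeholders, and setting the destination via the Target's ImageName field mirrors what the worker reads back in the hunk earlier:

package example

import "github.com/osbuild/osbuild-composer/internal/target"

// newDemoContainerTarget shows how the new target type could be assembled;
// it is a hypothetical helper, not part of the commit.
func newDemoContainerTarget() *target.Target {
	verify := true
	t := target.NewContainerTarget(&target.ContainerTargetOptions{
		Filename:  "container.tar", // image file produced by the osbuild job
		Reference: "registry.example.org/osbuild/demo:latest",
		Username:  "user",
		Password:  "secret",
		TlsVerify: &verify,
	})
	t.ImageName = "registry.example.org/osbuild/demo" // destination the worker uploads to
	return t
}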
@@ -83,6 +83,8 @@ func UnmarshalTargetOptions(targetName string, rawOptions json.RawMessage) (Targ
 		options = new(VMWareTargetOptions)
 	case "org.osbuild.oci":
 		options = new(OCITargetOptions)
+	case "org.osbuild.container":
+		options = new(ContainerTargetOptions)
 	default:
 		return nil, errors.New("unexpected target name")
 	}
@@ -97,6 +97,15 @@ type ociUploadSettings struct {

 func (ociUploadSettings) isUploadSettings() {}

+type containerUploadSettings struct {
+	Username string `json:"username,omitempty"`
+	Password string `json:"password,omitempty"`
+
+	TlsVerify *bool `json:"tls_verify,omitempty"`
+}
+
+func (containerUploadSettings) isUploadSettings() {}
+
 type uploadRequest struct {
 	Provider  string `json:"provider"`
 	ImageName string `json:"image_name"`

@@ -134,6 +143,8 @@ func (u *uploadRequest) UnmarshalJSON(data []byte) error {
 		// While the API still accepts provider type "generic.s3", the request is handled
 		// in the same way as for a request with provider type "aws.s3"
 		settings = new(awsS3UploadSettings)
+	case "container":
+		settings = new(containerUploadSettings)
 	default:
 		return errors.New("unexpected provider name")
 	}

@@ -322,6 +333,15 @@ func uploadRequestToTarget(u uploadRequest, imageType distro.ImageType) *target.
 			Namespace:   options.Namespace,
 			Compartment: options.Compartment,
 		}
+	case *containerUploadSettings:
+		t.Name = "org.osbuild.container"
+		t.Options = &target.ContainerTargetOptions{
+			Username: options.Username,
+			Password: options.Password,
+
+			Filename:  imageType.Filename(),
+			TlsVerify: options.TlsVerify,
+		}
 	}

 	return &t
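Putting the weldr side together: a compose upload request for the new provider carries these settings as JSON. The sketch below prints the expected shape; it mirrors the unexported types above, and the top-level "settings" field name is an assumption, since the surrounding struct is not shown in this diff:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors of the unexported weldr types, for illustration only.
type containerUploadSettings struct {
	Username  string `json:"username,omitempty"`
	Password  string `json:"password,omitempty"`
	TlsVerify *bool  `json:"tls_verify,omitempty"`
}

type uploadRequest struct {
	Provider  string                  `json:"provider"`
	ImageName string                  `json:"image_name"`
	Settings  containerUploadSettings `json:"settings"` // field name assumed
}

func main() {
	verify := false
	req := uploadRequest{
		Provider:  "container",
		ImageName: "registry.example.org/osbuild/demo", // hypothetical destination
		Settings: containerUploadSettings{
			Username:  "user",
			Password:  "secret",
			TlsVerify: &verify, // becomes client.TlsVerify in the worker
		},
	}
	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
}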
@@ -39,6 +39,9 @@ BuildRequires: systemd
 BuildRequires:  krb5-devel
 BuildRequires:  python3-docutils
 BuildRequires:  make
+# Build requirements of the 'proglottis/gpgme' package
+BuildRequires:  gpgme-devel
+BuildRequires:  libassuan-devel
 %if 0%{?fedora}
 BuildRequires:  systemd-rpm-macros
 BuildRequires:  git
|||
1
vendor/github.com/Microsoft/go-winio/.gitignore
generated
vendored
Normal file
1
vendor/github.com/Microsoft/go-winio/.gitignore
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
*.exe
|
||||
1
vendor/github.com/Microsoft/go-winio/CODEOWNERS
generated
vendored
Normal file
1
vendor/github.com/Microsoft/go-winio/CODEOWNERS
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
* @microsoft/containerplat
|
||||
22
vendor/github.com/Microsoft/go-winio/LICENSE
generated
vendored
Normal file
22
vendor/github.com/Microsoft/go-winio/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Microsoft
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
37
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
Normal file
37
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
# go-winio [](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
|
||||
|
||||
This repository contains utilities for efficiently performing Win32 IO operations in
|
||||
Go. Currently, this is focused on accessing named pipes and other file handles, and
|
||||
for using named pipes as a net transport.
|
||||
|
||||
This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
|
||||
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
|
||||
newer operating systems. This is similar to the implementation of network sockets in Go's net
|
||||
package.
|
||||
|
||||
Please see the LICENSE file for licensing information.
|
||||
|
||||
## Contributing
|
||||
|
||||
This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA)
|
||||
declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
|
||||
|
||||
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR
|
||||
appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
|
||||
|
||||
We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves
|
||||
or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can
|
||||
attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
|
||||
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
|
||||
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
||||
|
||||
|
||||
|
||||
## Special Thanks
|
||||
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
|
||||
for another named pipe implementation.
|
||||
280
vendor/github.com/Microsoft/go-winio/backup.go
generated
vendored
Normal file
280
vendor/github.com/Microsoft/go-winio/backup.go
generated
vendored
Normal file
|
|
@ -0,0 +1,280 @@
|
|||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
)
|
||||
|
||||
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
|
||||
|
||||
const (
|
||||
BackupData = uint32(iota + 1)
|
||||
BackupEaData
|
||||
BackupSecurity
|
||||
BackupAlternateData
|
||||
BackupLink
|
||||
BackupPropertyData
|
||||
BackupObjectId
|
||||
BackupReparseData
|
||||
BackupSparseBlock
|
||||
BackupTxfsData
|
||||
)
|
||||
|
||||
const (
|
||||
StreamSparseAttributes = uint32(8)
|
||||
)
|
||||
|
||||
const (
|
||||
WRITE_DAC = 0x40000
|
||||
WRITE_OWNER = 0x80000
|
||||
ACCESS_SYSTEM_SECURITY = 0x1000000
|
||||
)
|
||||
|
||||
// BackupHeader represents a backup stream of a file.
|
||||
type BackupHeader struct {
|
||||
Id uint32 // The backup stream ID
|
||||
Attributes uint32 // Stream attributes
|
||||
Size int64 // The size of the stream in bytes
|
||||
Name string // The name of the stream (for BackupAlternateData only).
|
||||
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
|
||||
}
|
||||
|
||||
type win32StreamId struct {
|
||||
StreamId uint32
|
||||
Attributes uint32
|
||||
Size uint64
|
||||
NameSize uint32
|
||||
}
|
||||
|
||||
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
|
||||
// of BackupHeader values.
|
||||
type BackupStreamReader struct {
|
||||
r io.Reader
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
|
||||
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
|
||||
return &BackupStreamReader{r, 0}
|
||||
}
|
||||
|
||||
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
|
||||
// it was not completely read.
|
||||
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
if r.bytesLeft > 0 {
|
||||
if s, ok := r.r.(io.Seeker); ok {
|
||||
// Make sure Seek on io.SeekCurrent sometimes succeeds
|
||||
// before trying the actual seek.
|
||||
if _, err := s.Seek(0, io.SeekCurrent); err == nil {
|
||||
if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.bytesLeft = 0
|
||||
}
|
||||
}
|
||||
if _, err := io.Copy(ioutil.Discard, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var wsi win32StreamId
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr := &BackupHeader{
|
||||
Id: wsi.StreamId,
|
||||
Attributes: wsi.Attributes,
|
||||
Size: int64(wsi.Size),
|
||||
}
|
||||
if wsi.NameSize != 0 {
|
||||
name := make([]uint16, int(wsi.NameSize/2))
|
||||
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Name = syscall.UTF16ToString(name)
|
||||
}
|
||||
if wsi.StreamId == BackupSparseBlock {
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Size -= 8
|
||||
}
|
||||
r.bytesLeft = hdr.Size
|
||||
return hdr, nil
|
||||
}
|
||||
|
||||
// Read reads from the current backup stream.
|
||||
func (r *BackupStreamReader) Read(b []byte) (int, error) {
|
||||
if r.bytesLeft == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if int64(len(b)) > r.bytesLeft {
|
||||
b = b[:r.bytesLeft]
|
||||
}
|
||||
n, err := r.r.Read(b)
|
||||
r.bytesLeft -= int64(n)
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
} else if r.bytesLeft == 0 && err == nil {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
|
||||
type BackupStreamWriter struct {
|
||||
w io.Writer
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
|
||||
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
|
||||
return &BackupStreamWriter{w, 0}
|
||||
}
|
||||
|
||||
// WriteHeader writes the next backup stream header and prepares for calls to Write().
|
||||
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
|
||||
if w.bytesLeft != 0 {
|
||||
return fmt.Errorf("missing %d bytes", w.bytesLeft)
|
||||
}
|
||||
name := utf16.Encode([]rune(hdr.Name))
|
||||
wsi := win32StreamId{
|
||||
StreamId: hdr.Id,
|
||||
Attributes: hdr.Attributes,
|
||||
Size: uint64(hdr.Size),
|
||||
NameSize: uint32(len(name) * 2),
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
// Include space for the int64 block offset
|
||||
wsi.Size += 8
|
||||
}
|
||||
if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(name) != 0 {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.bytesLeft = hdr.Size
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes to the current backup stream.
|
||||
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
|
||||
if w.bytesLeft < int64(len(b)) {
|
||||
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
|
||||
}
|
||||
n, err := w.w.Write(b)
|
||||
w.bytesLeft -= int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
|
||||
type BackupFileReader struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
|
||||
// Read will attempt to read the security descriptor of the file.
|
||||
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
|
||||
r := &BackupFileReader{f, includeSecurity, 0}
|
||||
return r
|
||||
}
|
||||
|
||||
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
|
||||
func (r *BackupFileReader) Read(b []byte) (int, error) {
|
||||
var bytesRead uint32
|
||||
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(r.f)
|
||||
if bytesRead == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return int(bytesRead), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileReader. It does not close
|
||||
// the underlying file.
|
||||
func (r *BackupFileReader) Close() error {
|
||||
if r.ctx != 0 {
|
||||
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
|
||||
runtime.KeepAlive(r.f)
|
||||
r.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
|
||||
type BackupFileWriter struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
|
||||
// Write() will attempt to restore the security descriptor from the stream.
|
||||
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
|
||||
w := &BackupFileWriter{f, includeSecurity, 0}
|
||||
return w
|
||||
}
|
||||
|
||||
// Write restores a portion of the file using the provided backup stream.
|
||||
func (w *BackupFileWriter) Write(b []byte) (int, error) {
|
||||
var bytesWritten uint32
|
||||
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(w.f)
|
||||
if int(bytesWritten) != len(b) {
|
||||
return int(bytesWritten), errors.New("not all bytes could be written")
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileWriter. It does not
|
||||
// close the underlying file.
|
||||
func (w *BackupFileWriter) Close() error {
|
||||
if w.ctx != 0 {
|
||||
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
|
||||
runtime.KeepAlive(w.f)
|
||||
w.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
|
||||
// or restore privileges have been acquired.
|
||||
//
|
||||
// If the file opened was a directory, it cannot be used with Readdir().
|
||||
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
|
||||
winPath, err := syscall.UTF16FromString(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "open", Path: path, Err: err}
|
||||
return nil, err
|
||||
}
|
||||
return os.NewFile(uintptr(h), path), nil
|
||||
}
|
||||
137
vendor/github.com/Microsoft/go-winio/ea.go
generated
vendored
Normal file
137
vendor/github.com/Microsoft/go-winio/ea.go
generated
vendored
Normal file
|
|
@ -0,0 +1,137 @@
|
|||
package winio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
type fileFullEaInformation struct {
|
||||
NextEntryOffset uint32
|
||||
Flags uint8
|
||||
NameLength uint8
|
||||
ValueLength uint16
|
||||
}
|
||||
|
||||
var (
|
||||
fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
|
||||
|
||||
errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
|
||||
errEaNameTooLarge = errors.New("extended attribute name too large")
|
||||
errEaValueTooLarge = errors.New("extended attribute value too large")
|
||||
)
|
||||
|
||||
// ExtendedAttribute represents a single Windows EA.
|
||||
type ExtendedAttribute struct {
|
||||
Name string
|
||||
Value []byte
|
||||
Flags uint8
|
||||
}
|
||||
|
||||
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
|
||||
var info fileFullEaInformation
|
||||
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
}
|
||||
|
||||
nameOffset := fileFullEaInformationSize
|
||||
nameLen := int(info.NameLength)
|
||||
valueOffset := nameOffset + int(info.NameLength) + 1
|
||||
valueLen := int(info.ValueLength)
|
||||
nextOffset := int(info.NextEntryOffset)
|
||||
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
|
||||
err = errInvalidEaBuffer
|
||||
return
|
||||
}
|
||||
|
||||
ea.Name = string(b[nameOffset : nameOffset+nameLen])
|
||||
ea.Value = b[valueOffset : valueOffset+valueLen]
|
||||
ea.Flags = info.Flags
|
||||
if info.NextEntryOffset != 0 {
|
||||
nb = b[info.NextEntryOffset:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
|
||||
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
|
||||
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
|
||||
for len(b) != 0 {
|
||||
ea, nb, err := parseEa(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
eas = append(eas, ea)
|
||||
b = nb
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
|
||||
if int(uint8(len(ea.Name))) != len(ea.Name) {
|
||||
return errEaNameTooLarge
|
||||
}
|
||||
if int(uint16(len(ea.Value))) != len(ea.Value) {
|
||||
return errEaValueTooLarge
|
||||
}
|
||||
entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
|
||||
withPadding := (entrySize + 3) &^ 3
|
||||
nextOffset := uint32(0)
|
||||
if !last {
|
||||
nextOffset = withPadding
|
||||
}
|
||||
info := fileFullEaInformation{
|
||||
NextEntryOffset: nextOffset,
|
||||
Flags: ea.Flags,
|
||||
NameLength: uint8(len(ea.Name)),
|
||||
ValueLength: uint16(len(ea.Value)),
|
||||
}
|
||||
|
||||
err := binary.Write(buf, binary.LittleEndian, &info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write([]byte(ea.Name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = buf.WriteByte(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write(ea.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
|
||||
// buffer for use with BackupWrite, ZwSetEaFile, etc.
|
||||
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
for i := range eas {
|
||||
last := false
|
||||
if i == len(eas)-1 {
|
||||
last = true
|
||||
}
|
||||
|
||||
err := writeEa(&buf, &eas[i], last)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
329
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
Normal file
329
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
Normal file
|
|
@ -0,0 +1,329 @@
|
|||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
|
||||
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
|
||||
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
|
||||
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
|
||||
//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
|
||||
|
||||
type atomicBool int32
|
||||
|
||||
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
|
||||
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
|
||||
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
|
||||
func (b *atomicBool) swap(new bool) bool {
|
||||
var newInt int32
|
||||
if new {
|
||||
newInt = 1
|
||||
}
|
||||
return atomic.SwapInt32((*int32)(b), newInt) == 1
|
||||
}
|
||||
|
||||
const (
|
||||
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
|
||||
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
|
||||
)
|
||||
|
||||
var (
|
||||
ErrFileClosed = errors.New("file has already been closed")
|
||||
ErrTimeout = &timeoutError{}
|
||||
)
|
||||
|
||||
type timeoutError struct{}
|
||||
|
||||
func (e *timeoutError) Error() string { return "i/o timeout" }
|
||||
func (e *timeoutError) Timeout() bool { return true }
|
||||
func (e *timeoutError) Temporary() bool { return true }
|
||||
|
||||
type timeoutChan chan struct{}
|
||||
|
||||
var ioInitOnce sync.Once
|
||||
var ioCompletionPort syscall.Handle
|
||||
|
||||
// ioResult contains the result of an asynchronous IO operation
|
||||
type ioResult struct {
|
||||
bytes uint32
|
||||
err error
|
||||
}
|
||||
|
||||
// ioOperation represents an outstanding asynchronous Win32 IO
|
||||
type ioOperation struct {
|
||||
o syscall.Overlapped
|
||||
ch chan ioResult
|
||||
}
|
||||
|
||||
func initIo() {
|
||||
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ioCompletionPort = h
|
||||
go ioCompletionProcessor(h)
|
||||
}
|
||||
|
||||
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
|
||||
// It takes ownership of this handle and will close it if it is garbage collected.
|
||||
type win32File struct {
|
||||
handle syscall.Handle
|
||||
wg sync.WaitGroup
|
||||
wgLock sync.RWMutex
|
||||
closing atomicBool
|
||||
socket bool
|
||||
readDeadline deadlineHandler
|
||||
writeDeadline deadlineHandler
|
||||
}
|
||||
|
||||
type deadlineHandler struct {
|
||||
setLock sync.Mutex
|
||||
channel timeoutChan
|
||||
channelLock sync.RWMutex
|
||||
timer *time.Timer
|
||||
timedout atomicBool
|
||||
}
|
||||
|
||||
// makeWin32File makes a new win32File from an existing file handle
|
||||
func makeWin32File(h syscall.Handle) (*win32File, error) {
|
||||
f := &win32File{handle: h}
|
||||
ioInitOnce.Do(initIo)
|
||||
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.readDeadline.channel = make(timeoutChan)
|
||||
f.writeDeadline.channel = make(timeoutChan)
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
|
||||
// If we return the result of makeWin32File directly, it can result in an
|
||||
// interface-wrapped nil, rather than a nil interface value.
|
||||
f, err := makeWin32File(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// closeHandle closes the resources associated with a Win32 handle
|
||||
func (f *win32File) closeHandle() {
|
||||
f.wgLock.Lock()
|
||||
// Atomically set that we are closing, releasing the resources only once.
|
||||
if !f.closing.swap(true) {
|
||||
f.wgLock.Unlock()
|
||||
// cancel all IO and wait for it to complete
|
||||
cancelIoEx(f.handle, nil)
|
||||
f.wg.Wait()
|
||||
// at this point, no new IO can start
|
||||
syscall.Close(f.handle)
|
||||
f.handle = 0
|
||||
} else {
|
||||
f.wgLock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes a win32File.
|
||||
func (f *win32File) Close() error {
|
||||
f.closeHandle()
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsClosed checks if the file has been closed
|
||||
func (f *win32File) IsClosed() bool {
|
||||
return f.closing.isSet()
|
||||
}
|
||||
|
||||
// prepareIo prepares for a new IO operation.
|
||||
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
|
||||
func (f *win32File) prepareIo() (*ioOperation, error) {
|
||||
f.wgLock.RLock()
|
||||
if f.closing.isSet() {
|
||||
f.wgLock.RUnlock()
|
||||
return nil, ErrFileClosed
|
||||
}
|
||||
f.wg.Add(1)
|
||||
f.wgLock.RUnlock()
|
||||
c := &ioOperation{}
|
||||
c.ch = make(chan ioResult)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// ioCompletionProcessor processes completed async IOs forever
|
||||
func ioCompletionProcessor(h syscall.Handle) {
|
||||
for {
|
||||
var bytes uint32
|
||||
var key uintptr
|
||||
var op *ioOperation
|
||||
err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
|
||||
if op == nil {
|
||||
panic(err)
|
||||
}
|
||||
op.ch <- ioResult{bytes, err}
|
||||
}
|
||||
}
|
||||
|
||||
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
|
||||
// the operation has actually completed.
|
||||
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
|
||||
if err != syscall.ERROR_IO_PENDING {
|
||||
return int(bytes), err
|
||||
}
|
||||
|
||||
if f.closing.isSet() {
|
||||
cancelIoEx(f.handle, &c.o)
|
||||
}
|
||||
|
||||
var timeout timeoutChan
|
||||
if d != nil {
|
||||
d.channelLock.Lock()
|
||||
timeout = d.channel
|
||||
d.channelLock.Unlock()
|
||||
}
|
||||
|
||||
var r ioResult
|
||||
select {
|
||||
case r = <-c.ch:
|
||||
err = r.err
|
||||
if err == syscall.ERROR_OPERATION_ABORTED {
|
||||
if f.closing.isSet() {
|
||||
err = ErrFileClosed
|
||||
}
|
||||
} else if err != nil && f.socket {
|
||||
// err is from Win32. Query the overlapped structure to get the winsock error.
|
||||
var bytes, flags uint32
|
||||
err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
|
||||
}
|
||||
case <-timeout:
|
||||
cancelIoEx(f.handle, &c.o)
|
||||
r = <-c.ch
|
||||
err = r.err
|
||||
if err == syscall.ERROR_OPERATION_ABORTED {
|
||||
err = ErrTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// runtime.KeepAlive is needed, as c is passed via native
|
||||
// code to ioCompletionProcessor, c must remain alive
|
||||
// until the channel read is complete.
|
||||
runtime.KeepAlive(c)
|
||||
return int(r.bytes), err
|
||||
}
|
||||
|
||||
// Read reads from a file handle.
|
||||
func (f *win32File) Read(b []byte) (int, error) {
|
||||
c, err := f.prepareIo()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.wg.Done()
|
||||
|
||||
if f.readDeadline.timedout.isSet() {
|
||||
return 0, ErrTimeout
|
||||
}
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
|
||||
n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
|
||||
runtime.KeepAlive(b)
|
||||
|
||||
// Handle EOF conditions.
|
||||
if err == nil && n == 0 && len(b) != 0 {
|
||||
return 0, io.EOF
|
||||
} else if err == syscall.ERROR_BROKEN_PIPE {
|
||||
return 0, io.EOF
|
||||
} else {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
// Write writes to a file handle.
|
||||
func (f *win32File) Write(b []byte) (int, error) {
|
||||
c, err := f.prepareIo()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.wg.Done()
|
||||
|
||||
if f.writeDeadline.timedout.isSet() {
|
||||
return 0, ErrTimeout
|
||||
}
|
||||
|
||||
var bytes uint32
|
||||
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
|
||||
n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
|
||||
runtime.KeepAlive(b)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (f *win32File) SetReadDeadline(deadline time.Time) error {
|
||||
return f.readDeadline.set(deadline)
|
||||
}
|
||||
|
||||
func (f *win32File) SetWriteDeadline(deadline time.Time) error {
|
||||
return f.writeDeadline.set(deadline)
|
||||
}
|
||||
|
||||
func (f *win32File) Flush() error {
|
||||
return syscall.FlushFileBuffers(f.handle)
|
||||
}
|
||||
|
||||
func (f *win32File) Fd() uintptr {
|
||||
return uintptr(f.handle)
|
||||
}
|
||||
|
||||
func (d *deadlineHandler) set(deadline time.Time) error {
|
||||
d.setLock.Lock()
|
||||
defer d.setLock.Unlock()
|
||||
|
||||
if d.timer != nil {
|
||||
if !d.timer.Stop() {
|
||||
<-d.channel
|
||||
}
|
||||
d.timer = nil
|
||||
}
|
||||
d.timedout.setFalse()
|
||||
|
||||
select {
|
||||
case <-d.channel:
|
||||
d.channelLock.Lock()
|
||||
d.channel = make(chan struct{})
|
||||
d.channelLock.Unlock()
|
||||
default:
|
||||
}
|
||||
|
||||
if deadline.IsZero() {
|
||||
return nil
|
||||
}
|
||||
|
||||
timeoutIO := func() {
|
||||
d.timedout.setTrue()
|
||||
close(d.channel)
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
duration := deadline.Sub(now)
|
||||
if deadline.After(now) {
|
||||
// Deadline is in the future, set a timer to wait
|
||||
d.timer = time.AfterFunc(duration, timeoutIO)
|
||||
} else {
|
||||
// Deadline is in the past. Cancel all pending IO now.
|
||||
timeoutIO()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
73
vendor/github.com/Microsoft/go-winio/fileinfo.go
generated
vendored
Normal file
73
vendor/github.com/Microsoft/go-winio/fileinfo.go
generated
vendored
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// FileBasicInfo contains file access time and file attributes information.
|
||||
type FileBasicInfo struct {
|
||||
CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
|
||||
FileAttributes uint32
|
||||
pad uint32 // padding
|
||||
}
|
||||
|
||||
// GetFileBasicInfo retrieves times and attributes for a file.
|
||||
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
|
||||
bi := &FileBasicInfo{}
|
||||
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return bi, nil
|
||||
}
|
||||
|
||||
// SetFileBasicInfo sets times and attributes for a file.
|
||||
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
|
||||
if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
|
||||
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return nil
|
||||
}
|
||||
|
||||
// FileStandardInfo contains extended information for the file.
|
||||
// FILE_STANDARD_INFO in WinBase.h
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
|
||||
type FileStandardInfo struct {
|
||||
AllocationSize, EndOfFile int64
|
||||
NumberOfLinks uint32
|
||||
DeletePending, Directory bool
|
||||
}
|
||||
|
||||
// GetFileStandardInfo retrieves ended information for the file.
|
||||
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
|
||||
si := &FileStandardInfo{}
|
||||
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return si, nil
|
||||
}
|
||||
|
||||
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
|
||||
// unique on a system.
|
||||
type FileIDInfo struct {
|
||||
VolumeSerialNumber uint64
|
||||
FileID [16]byte
|
||||
}
|
||||
|
||||
// GetFileID retrieves the unique (volume, file ID) pair for a file.
|
||||
func GetFileID(f *os.File) (*FileIDInfo, error) {
|
||||
fileID := &FileIDInfo{}
|
||||
if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
|
||||
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
|
||||
}
|
||||
runtime.KeepAlive(f)
|
||||
return fileID, nil
|
||||
}
|
||||
8
vendor/github.com/Microsoft/go-winio/go.mod
generated
vendored
Normal file
8
vendor/github.com/Microsoft/go-winio/go.mod
generated
vendored
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
module github.com/Microsoft/go-winio
|
||||
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/sirupsen/logrus v1.7.0
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
|
||||
)
|
||||
11
vendor/github.com/Microsoft/go-winio/go.sum
generated
vendored
Normal file
11
vendor/github.com/Microsoft/go-winio/go.sum
generated
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
316
vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
Normal file
316
vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
Normal file
|
|
@ -0,0 +1,316 @@
|
|||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Microsoft/go-winio/pkg/guid"
|
||||
)
|
||||
|
||||
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
|
||||
|
||||
const (
|
||||
afHvSock = 34 // AF_HYPERV
|
||||
|
||||
socketError = ^uintptr(0)
|
||||
)
|
||||
|
||||
// An HvsockAddr is an address for a AF_HYPERV socket.
|
||||
type HvsockAddr struct {
|
||||
VMID guid.GUID
|
||||
ServiceID guid.GUID
|
||||
}
|
||||
|
||||
type rawHvsockAddr struct {
|
||||
Family uint16
|
||||
_ uint16
|
||||
VMID guid.GUID
|
||||
ServiceID guid.GUID
|
||||
}
|
||||
|
||||
// Network returns the address's network name, "hvsock".
|
||||
func (addr *HvsockAddr) Network() string {
|
||||
return "hvsock"
|
||||
}
|
||||
|
||||
func (addr *HvsockAddr) String() string {
|
||||
return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
|
||||
}
|
||||
|
||||
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
|
||||
func VsockServiceID(port uint32) guid.GUID {
|
||||
g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
|
||||
g.Data1 = port
|
||||
return g
|
||||
}
|
||||
|
||||
func (addr *HvsockAddr) raw() rawHvsockAddr {
|
||||
return rawHvsockAddr{
|
||||
Family: afHvSock,
|
||||
VMID: addr.VMID,
|
||||
ServiceID: addr.ServiceID,
|
||||
}
|
||||
}
|
||||
|
||||
func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
|
||||
addr.VMID = raw.VMID
|
||||
addr.ServiceID = raw.ServiceID
|
||||
}
|
||||
|
||||
// HvsockListener is a socket listener for the AF_HYPERV address family.
|
||||
type HvsockListener struct {
|
||||
sock *win32File
|
||||
addr HvsockAddr
|
||||
}
|
||||
|
||||
// HvsockConn is a connected socket of the AF_HYPERV address family.
|
||||
type HvsockConn struct {
|
||||
sock *win32File
|
||||
local, remote HvsockAddr
|
||||
}
|
||||
|
||||
func newHvSocket() (*win32File, error) {
|
||||
fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("socket", err)
|
||||
}
|
||||
f, err := makeWin32File(fd)
|
||||
if err != nil {
|
||||
syscall.Close(fd)
|
||||
return nil, err
|
||||
}
|
||||
f.socket = true
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// ListenHvsock listens for connections on the specified hvsock address.
|
||||
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
|
||||
l := &HvsockListener{addr: *addr}
|
||||
sock, err := newHvSocket()
|
||||
if err != nil {
|
||||
return nil, l.opErr("listen", err)
|
||||
    }
    sa := addr.raw()
    err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
    if err != nil {
        return nil, l.opErr("listen", os.NewSyscallError("socket", err))
    }
    err = syscall.Listen(sock.handle, 16)
    if err != nil {
        return nil, l.opErr("listen", os.NewSyscallError("listen", err))
    }
    return &HvsockListener{sock: sock, addr: *addr}, nil
}

func (l *HvsockListener) opErr(op string, err error) error {
    return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
}

// Addr returns the listener's network address.
func (l *HvsockListener) Addr() net.Addr {
    return &l.addr
}

// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
    sock, err := newHvSocket()
    if err != nil {
        return nil, l.opErr("accept", err)
    }
    defer func() {
        if sock != nil {
            sock.Close()
        }
    }()
    c, err := l.sock.prepareIo()
    if err != nil {
        return nil, l.opErr("accept", err)
    }
    defer l.sock.wg.Done()

    // AcceptEx, per documentation, requires an extra 16 bytes per address.
    const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
    var addrbuf [addrlen * 2]byte

    var bytes uint32
    err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
    _, err = l.sock.asyncIo(c, nil, bytes, err)
    if err != nil {
        return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
    }
    conn := &HvsockConn{
        sock: sock,
    }
    conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
    conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
    sock = nil
    return conn, nil
}

// Close closes the listener, causing any pending Accept calls to fail.
func (l *HvsockListener) Close() error {
    return l.sock.Close()
}

/* Need to finish ConnectEx handling
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
    sock, err := newHvSocket()
    if err != nil {
        return nil, err
    }
    defer func() {
        if sock != nil {
            sock.Close()
        }
    }()
    c, err := sock.prepareIo()
    if err != nil {
        return nil, err
    }
    defer sock.wg.Done()
    var bytes uint32
    err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
    _, err = sock.asyncIo(ctx, c, nil, bytes, err)
    if err != nil {
        return nil, err
    }
    conn := &HvsockConn{
        sock:   sock,
        remote: *addr,
    }
    sock = nil
    return conn, nil
}
*/

func (conn *HvsockConn) opErr(op string, err error) error {
    return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
}

func (conn *HvsockConn) Read(b []byte) (int, error) {
    c, err := conn.sock.prepareIo()
    if err != nil {
        return 0, conn.opErr("read", err)
    }
    defer conn.sock.wg.Done()
    buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
    var flags, bytes uint32
    err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
    n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
    if err != nil {
        if _, ok := err.(syscall.Errno); ok {
            err = os.NewSyscallError("wsarecv", err)
        }
        return 0, conn.opErr("read", err)
    } else if n == 0 {
        err = io.EOF
    }
    return n, err
}

func (conn *HvsockConn) Write(b []byte) (int, error) {
    t := 0
    for len(b) != 0 {
        n, err := conn.write(b)
        if err != nil {
            return t + n, err
        }
        t += n
        b = b[n:]
    }
    return t, nil
}

func (conn *HvsockConn) write(b []byte) (int, error) {
    c, err := conn.sock.prepareIo()
    if err != nil {
        return 0, conn.opErr("write", err)
    }
    defer conn.sock.wg.Done()
    buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
    var bytes uint32
    err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
    n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
    if err != nil {
        if _, ok := err.(syscall.Errno); ok {
            err = os.NewSyscallError("wsasend", err)
        }
        return 0, conn.opErr("write", err)
    }
    return n, err
}

// Close closes the socket connection, failing any pending read or write calls.
func (conn *HvsockConn) Close() error {
    return conn.sock.Close()
}

func (conn *HvsockConn) IsClosed() bool {
    return conn.sock.IsClosed()
}

func (conn *HvsockConn) shutdown(how int) error {
    if conn.IsClosed() {
        return ErrFileClosed
    }

    err := syscall.Shutdown(conn.sock.handle, how)
    if err != nil {
        return os.NewSyscallError("shutdown", err)
    }
    return nil
}

// CloseRead shuts down the read end of the socket, preventing future read operations.
func (conn *HvsockConn) CloseRead() error {
    err := conn.shutdown(syscall.SHUT_RD)
    if err != nil {
        return conn.opErr("close", err)
    }
    return nil
}

// CloseWrite shuts down the write end of the socket, preventing future write operations and
// notifying the other endpoint that no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
    err := conn.shutdown(syscall.SHUT_WR)
    if err != nil {
        return conn.opErr("close", err)
    }
    return nil
}

// LocalAddr returns the local address of the connection.
func (conn *HvsockConn) LocalAddr() net.Addr {
    return &conn.local
}

// RemoteAddr returns the remote address of the connection.
func (conn *HvsockConn) RemoteAddr() net.Addr {
    return &conn.remote
}

// SetDeadline implements the net.Conn SetDeadline method.
func (conn *HvsockConn) SetDeadline(t time.Time) error {
    conn.SetReadDeadline(t)
    conn.SetWriteDeadline(t)
    return nil
}

// SetReadDeadline implements the net.Conn SetReadDeadline method.
func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
    return conn.sock.SetReadDeadline(t)
}

// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
    return conn.sock.SetWriteDeadline(t)
}
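As a rough usage sketch (not part of this commit): a server normally obtains the listener above through the package's hvsock constructor, addressed by VM and service GUIDs. The names assumed below (ListenHvsock, HvsockAddr, VsockServiceID) are defined earlier in this vendored hvsock.go, outside the excerpt shown here, so treat their exact shapes as assumptions for this vendored revision.

package main

import (
    "fmt"

    winio "github.com/Microsoft/go-winio"
    "github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
    // Assumed API: HvsockAddr{VMID, ServiceID} and VsockServiceID(port) come
    // from the earlier, unshown part of this vendored file.
    addr := &winio.HvsockAddr{
        VMID:      guid.GUID{}, // zero GUID as a stand-in; a real caller supplies the partition ID
        ServiceID: winio.VsockServiceID(4000),
    }
    l, err := winio.ListenHvsock(addr)
    if err != nil {
        fmt.Println("listen:", err)
        return
    }
    defer l.Close()

    conn, err := l.Accept() // the Accept shown above; yields a net.Conn backed by HvsockConn
    if err != nil {
        fmt.Println("accept:", err)
        return
    }
    defer conn.Close()
}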
517 vendor/github.com/Microsoft/go-winio/pipe.go generated vendored Normal file
@@ -0,0 +1,517 @@
// +build windows

package winio

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net"
    "os"
    "runtime"
    "syscall"
    "time"
    "unsafe"
)

//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl

type ioStatusBlock struct {
    Status, Information uintptr
}

type objectAttributes struct {
    Length             uintptr
    RootDirectory      uintptr
    ObjectName         *unicodeString
    Attributes         uintptr
    SecurityDescriptor *securityDescriptor
    SecurityQoS        uintptr
}

type unicodeString struct {
    Length        uint16
    MaximumLength uint16
    Buffer        uintptr
}

type securityDescriptor struct {
    Revision byte
    Sbz1     byte
    Control  uint16
    Owner    uintptr
    Group    uintptr
    Sacl     uintptr
    Dacl     uintptr
}

type ntstatus int32

func (status ntstatus) Err() error {
    if status >= 0 {
        return nil
    }
    return rtlNtStatusToDosError(status)
}

const (
    cERROR_PIPE_BUSY      = syscall.Errno(231)
    cERROR_NO_DATA        = syscall.Errno(232)
    cERROR_PIPE_CONNECTED = syscall.Errno(535)
    cERROR_SEM_TIMEOUT    = syscall.Errno(121)

    cSECURITY_SQOS_PRESENT = 0x100000
    cSECURITY_ANONYMOUS    = 0

    cPIPE_TYPE_MESSAGE = 4

    cPIPE_READMODE_MESSAGE = 2

    cFILE_OPEN   = 1
    cFILE_CREATE = 2

    cFILE_PIPE_MESSAGE_TYPE          = 1
    cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2

    cSE_DACL_PRESENT = 4
)

var (
    // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
    // This error should match net.errClosing since docker takes a dependency on its text.
    ErrPipeListenerClosed = errors.New("use of closed network connection")

    errPipeWriteClosed = errors.New("pipe has been closed for write")
)

type win32Pipe struct {
    *win32File
    path string
}

type win32MessageBytePipe struct {
    win32Pipe
    writeClosed bool
    readEOF     bool
}

type pipeAddress string

func (f *win32Pipe) LocalAddr() net.Addr {
    return pipeAddress(f.path)
}

func (f *win32Pipe) RemoteAddr() net.Addr {
    return pipeAddress(f.path)
}

func (f *win32Pipe) SetDeadline(t time.Time) error {
    f.SetReadDeadline(t)
    f.SetWriteDeadline(t)
    return nil
}

// CloseWrite closes the write side of a message pipe in byte mode.
func (f *win32MessageBytePipe) CloseWrite() error {
    if f.writeClosed {
        return errPipeWriteClosed
    }
    err := f.win32File.Flush()
    if err != nil {
        return err
    }
    _, err = f.win32File.Write(nil)
    if err != nil {
        return err
    }
    f.writeClosed = true
    return nil
}

// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
// they are used to implement CloseWrite().
func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
    if f.writeClosed {
        return 0, errPipeWriteClosed
    }
    if len(b) == 0 {
        return 0, nil
    }
    return f.win32File.Write(b)
}

// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
// mode pipe will return io.EOF, as will all subsequent reads.
func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
    if f.readEOF {
        return 0, io.EOF
    }
    n, err := f.win32File.Read(b)
    if err == io.EOF {
        // If this was the result of a zero-byte read, then
        // it is possible that the read was due to a zero-size
        // message. Since we are simulating CloseWrite with a
        // zero-byte message, ensure that all future Read() calls
        // also return EOF.
        f.readEOF = true
    } else if err == syscall.ERROR_MORE_DATA {
        // ERROR_MORE_DATA indicates that the pipe's read mode is message mode
        // and the message still has more bytes. Treat this as a success, since
        // this package presents all named pipes as byte streams.
        err = nil
    }
    return n, err
}

func (s pipeAddress) Network() string {
    return "pipe"
}

func (s pipeAddress) String() string {
    return string(s)
}

// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
    for {

        select {
        case <-ctx.Done():
            return syscall.Handle(0), ctx.Err()
        default:
            h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
            if err == nil {
                return h, nil
            }
            if err != cERROR_PIPE_BUSY {
                return h, &os.PathError{Err: err, Op: "open", Path: *path}
            }
            // Wait 10 msec and try again. This is a rather simplistic
            // view, as we always retry every 10 milliseconds.
            time.Sleep(10 * time.Millisecond)
        }
    }
}

// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then we use
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
    var absTimeout time.Time
    if timeout != nil {
        absTimeout = time.Now().Add(*timeout)
    } else {
        absTimeout = time.Now().Add(2 * time.Second)
    }
    ctx, _ := context.WithDeadline(context.Background(), absTimeout)
    conn, err := DialPipeContext(ctx, path)
    if err == context.DeadlineExceeded {
        return nil, ErrTimeout
    }
    return conn, err
}

// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
    return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
}

// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
// cancellation or timeout.
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
    var err error
    var h syscall.Handle
    h, err = tryDialPipe(ctx, &path, access)
    if err != nil {
        return nil, err
    }

    var flags uint32
    err = getNamedPipeInfo(h, &flags, nil, nil, nil)
    if err != nil {
        return nil, err
    }

    f, err := makeWin32File(h)
    if err != nil {
        syscall.Close(h)
        return nil, err
    }

    // If the pipe is in message mode, return a message byte pipe, which
    // supports CloseWrite().
    if flags&cPIPE_TYPE_MESSAGE != 0 {
        return &win32MessageBytePipe{
            win32Pipe: win32Pipe{win32File: f, path: path},
        }, nil
    }
    return &win32Pipe{win32File: f, path: path}, nil
}

type acceptResponse struct {
    f   *win32File
    err error
}

type win32PipeListener struct {
    firstHandle syscall.Handle
    path        string
    config      PipeConfig
    acceptCh    chan (chan acceptResponse)
    closeCh     chan int
    doneCh      chan int
}

func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
    path16, err := syscall.UTF16FromString(path)
    if err != nil {
        return 0, &os.PathError{Op: "open", Path: path, Err: err}
    }

    var oa objectAttributes
    oa.Length = unsafe.Sizeof(oa)

    var ntPath unicodeString
    if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
        return 0, &os.PathError{Op: "open", Path: path, Err: err}
    }
    defer localFree(ntPath.Buffer)
    oa.ObjectName = &ntPath

    // The security descriptor is only needed for the first pipe.
    if first {
        if sd != nil {
            len := uint32(len(sd))
            sdb := localAlloc(0, len)
            defer localFree(sdb)
            copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
            oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
        } else {
            // Construct the default named pipe security descriptor.
            var dacl uintptr
            if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
                return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
            }
            defer localFree(dacl)

            sdb := &securityDescriptor{
                Revision: 1,
                Control:  cSE_DACL_PRESENT,
                Dacl:     dacl,
            }
            oa.SecurityDescriptor = sdb
        }
    }

    typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
    if c.MessageMode {
        typ |= cFILE_PIPE_MESSAGE_TYPE
    }

    disposition := uint32(cFILE_OPEN)
    access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
    if first {
        disposition = cFILE_CREATE
        // By not asking for read or write access, the named pipe file system
        // will put this pipe into an initially disconnected state, blocking
        // client connections until the next call with first == false.
        access = syscall.SYNCHRONIZE
    }

    timeout := int64(-50 * 10000) // 50ms

    var (
        h    syscall.Handle
        iosb ioStatusBlock
    )
    err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
    if err != nil {
        return 0, &os.PathError{Op: "open", Path: path, Err: err}
    }

    runtime.KeepAlive(ntPath)
    return h, nil
}

func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
    h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
    if err != nil {
        return nil, err
    }
    f, err := makeWin32File(h)
    if err != nil {
        syscall.Close(h)
        return nil, err
    }
    return f, nil
}

func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
    p, err := l.makeServerPipe()
    if err != nil {
        return nil, err
    }

    // Wait for the client to connect.
    ch := make(chan error)
    go func(p *win32File) {
        ch <- connectPipe(p)
    }(p)

    select {
    case err = <-ch:
        if err != nil {
            p.Close()
            p = nil
        }
    case <-l.closeCh:
        // Abort the connect request by closing the handle.
        p.Close()
        p = nil
        err = <-ch
        if err == nil || err == ErrFileClosed {
            err = ErrPipeListenerClosed
        }
    }
    return p, err
}

func (l *win32PipeListener) listenerRoutine() {
    closed := false
    for !closed {
        select {
        case <-l.closeCh:
            closed = true
        case responseCh := <-l.acceptCh:
            var (
                p   *win32File
                err error
            )
            for {
                p, err = l.makeConnectedServerPipe()
                // If the connection was immediately closed by the client, try
                // again.
                if err != cERROR_NO_DATA {
                    break
                }
            }
            responseCh <- acceptResponse{p, err}
            closed = err == ErrPipeListenerClosed
        }
    }
    syscall.Close(l.firstHandle)
    l.firstHandle = 0
    // Notify Close() and Accept() callers that the handle has been closed.
    close(l.doneCh)
}

// PipeConfig contains configuration for the pipe listener.
type PipeConfig struct {
    // SecurityDescriptor contains a Windows security descriptor in SDDL format.
    SecurityDescriptor string

    // MessageMode determines whether the pipe is in byte or message mode. In either
    // case the pipe is read in byte mode by default. The only practical difference in
    // this implementation is that CloseWrite() is only supported for message mode pipes;
    // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
    // transferred to the reader (and returned as io.EOF in this implementation)
    // when the pipe is in message mode.
    MessageMode bool

    // InputBufferSize specifies the size of the input buffer, in bytes.
    InputBufferSize int32

    // OutputBufferSize specifies the size of the output buffer, in bytes.
    OutputBufferSize int32
}

// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
// The pipe must not already exist.
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
    var (
        sd  []byte
        err error
    )
    if c == nil {
        c = &PipeConfig{}
    }
    if c.SecurityDescriptor != "" {
        sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
        if err != nil {
            return nil, err
        }
    }
    h, err := makeServerPipeHandle(path, sd, c, true)
    if err != nil {
        return nil, err
    }
    l := &win32PipeListener{
        firstHandle: h,
        path:        path,
        config:      *c,
        acceptCh:    make(chan (chan acceptResponse)),
        closeCh:     make(chan int),
        doneCh:      make(chan int),
    }
    go l.listenerRoutine()
    return l, nil
}

func connectPipe(p *win32File) error {
    c, err := p.prepareIo()
    if err != nil {
        return err
    }
    defer p.wg.Done()

    err = connectNamedPipe(p.handle, &c.o)
    _, err = p.asyncIo(c, nil, 0, err)
    if err != nil && err != cERROR_PIPE_CONNECTED {
        return err
    }
    return nil
}

func (l *win32PipeListener) Accept() (net.Conn, error) {
    ch := make(chan acceptResponse)
    select {
    case l.acceptCh <- ch:
        response := <-ch
        err := response.err
        if err != nil {
            return nil, err
        }
        if l.config.MessageMode {
            return &win32MessageBytePipe{
                win32Pipe: win32Pipe{win32File: response.f, path: l.path},
            }, nil
        }
        return &win32Pipe{win32File: response.f, path: l.path}, nil
    case <-l.doneCh:
        return nil, ErrPipeListenerClosed
    }
}

func (l *win32PipeListener) Close() error {
    select {
    case l.closeCh <- 1:
        <-l.doneCh
    case <-l.doneCh:
    }
    return nil
}

func (l *win32PipeListener) Addr() net.Addr {
    return pipeAddress(l.path)
}
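For orientation, a minimal sketch of how the named-pipe API above fits together (not part of this commit): a server listens on a pipe path with ListenPipe and a client connects with DialPipe. ListenPipe, DialPipe, and PipeConfig are taken directly from the file above; the pipe name and the message-mode choice are illustrative, and the whole thing is Windows-only.

package main

import (
    "fmt"
    "time"

    winio "github.com/Microsoft/go-winio"
)

func main() {
    // Server side: create the pipe and serve one connection.
    l, err := winio.ListenPipe(`\\.\pipe\osbuild-demo`, &winio.PipeConfig{MessageMode: true})
    if err != nil {
        fmt.Println("listen:", err)
        return
    }
    defer l.Close()

    go func() {
        c, err := l.Accept()
        if err != nil {
            return
        }
        defer c.Close()
        c.Write([]byte("hello"))
    }()

    // Client side: dial with a 2-second timeout (the same default DialPipe
    // would use if passed nil).
    timeout := 2 * time.Second
    c, err := winio.DialPipe(`\\.\pipe\osbuild-demo`, &timeout)
    if err != nil {
        fmt.Println("dial:", err)
        return
    }
    defer c.Close()
    buf := make([]byte, 5)
    n, _ := c.Read(buf)
    fmt.Println(string(buf[:n])) // hello
}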
228 vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go generated vendored Normal file
@@ -0,0 +1,228 @@
// +build windows

// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
// and the Windows (mixed-endian) encoding. See here for details:
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
package guid

import (
    "crypto/rand"
    "crypto/sha1"
    "encoding"
    "encoding/binary"
    "fmt"
    "strconv"
)

// Variant specifies which GUID variant (or "type") the GUID is. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8

// The variants specified by RFC 4122.
const (
    // VariantUnknown specifies a GUID variant which does not conform to one of
    // the variant encodings specified in RFC 4122.
    VariantUnknown Variant = iota
    VariantNCS
    VariantRFC4122
    VariantMicrosoft
    VariantFuture
)

// Version specifies how the bits in the GUID were generated. For instance, a
// version 4 GUID is randomly generated, and a version 5 is generated from the
// hash of an input string.
type Version uint8

var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})

// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
    var b [16]byte
    if _, err := rand.Read(b[:]); err != nil {
        return GUID{}, err
    }

    g := FromArray(b)
    g.setVersion(4) // Version 4 means randomly generated.
    g.setVariant(VariantRFC4122)

    return g, nil
}

// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
// and the sample code treats it as a series of bytes, so we do the same here.
//
// Some implementations, such as those found on Windows, treat the name as a
// big-endian UTF16 stream of bytes. If that is desired, the string can be
// encoded as such before being passed to this function.
func NewV5(namespace GUID, name []byte) (GUID, error) {
    b := sha1.New()
    namespaceBytes := namespace.ToArray()
    b.Write(namespaceBytes[:])
    b.Write(name)

    a := [16]byte{}
    copy(a[:], b.Sum(nil))

    g := FromArray(a)
    g.setVersion(5) // Version 5 means generated from a string.
    g.setVariant(VariantRFC4122)

    return g, nil
}

func fromArray(b [16]byte, order binary.ByteOrder) GUID {
    var g GUID
    g.Data1 = order.Uint32(b[0:4])
    g.Data2 = order.Uint16(b[4:6])
    g.Data3 = order.Uint16(b[6:8])
    copy(g.Data4[:], b[8:16])
    return g
}

func (g GUID) toArray(order binary.ByteOrder) [16]byte {
    b := [16]byte{}
    order.PutUint32(b[0:4], g.Data1)
    order.PutUint16(b[4:6], g.Data2)
    order.PutUint16(b[6:8], g.Data3)
    copy(b[8:16], g.Data4[:])
    return b
}

// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
func FromArray(b [16]byte) GUID {
    return fromArray(b, binary.BigEndian)
}

// ToArray returns an array of 16 bytes representing the GUID in big-endian
// encoding.
func (g GUID) ToArray() [16]byte {
    return g.toArray(binary.BigEndian)
}

// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
func FromWindowsArray(b [16]byte) GUID {
    return fromArray(b, binary.LittleEndian)
}

// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
// encoding.
func (g GUID) ToWindowsArray() [16]byte {
    return g.toArray(binary.LittleEndian)
}

func (g GUID) String() string {
    return fmt.Sprintf(
        "%08x-%04x-%04x-%04x-%012x",
        g.Data1,
        g.Data2,
        g.Data3,
        g.Data4[:2],
        g.Data4[2:])
}

// FromString parses a string containing a GUID and returns the GUID. The only
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
// format.
func FromString(s string) (GUID, error) {
    if len(s) != 36 {
        return GUID{}, fmt.Errorf("invalid GUID %q", s)
    }
    if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
        return GUID{}, fmt.Errorf("invalid GUID %q", s)
    }

    var g GUID

    data1, err := strconv.ParseUint(s[0:8], 16, 32)
    if err != nil {
        return GUID{}, fmt.Errorf("invalid GUID %q", s)
    }
    g.Data1 = uint32(data1)

    data2, err := strconv.ParseUint(s[9:13], 16, 16)
    if err != nil {
        return GUID{}, fmt.Errorf("invalid GUID %q", s)
    }
    g.Data2 = uint16(data2)

    data3, err := strconv.ParseUint(s[14:18], 16, 16)
    if err != nil {
        return GUID{}, fmt.Errorf("invalid GUID %q", s)
    }
    g.Data3 = uint16(data3)

    for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
        v, err := strconv.ParseUint(s[x:x+2], 16, 8)
        if err != nil {
            return GUID{}, fmt.Errorf("invalid GUID %q", s)
        }
        g.Data4[i] = uint8(v)
    }

    return g, nil
}

func (g *GUID) setVariant(v Variant) {
    d := g.Data4[0]
    switch v {
    case VariantNCS:
        d = (d & 0x7f)
    case VariantRFC4122:
        d = (d & 0x3f) | 0x80
    case VariantMicrosoft:
        d = (d & 0x1f) | 0xc0
    case VariantFuture:
        d = (d & 0x0f) | 0xe0
    case VariantUnknown:
        fallthrough
    default:
        panic(fmt.Sprintf("invalid variant: %d", v))
    }
    g.Data4[0] = d
}

// Variant returns the GUID variant, as defined in RFC 4122.
func (g GUID) Variant() Variant {
    b := g.Data4[0]
    if b&0x80 == 0 {
        return VariantNCS
    } else if b&0xc0 == 0x80 {
        return VariantRFC4122
    } else if b&0xe0 == 0xc0 {
        return VariantMicrosoft
    } else if b&0xe0 == 0xe0 {
        return VariantFuture
    }
    return VariantUnknown
}

func (g *GUID) setVersion(v Version) {
    g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
}

// Version returns the GUID version, as defined in RFC 4122.
func (g GUID) Version() Version {
    return Version((g.Data3 & 0xF000) >> 12)
}

// MarshalText returns the textual representation of the GUID.
func (g GUID) MarshalText() ([]byte, error) {
    return []byte(g.String()), nil
}

// UnmarshalText takes the textual representation of a GUID, and unmarshals it
// into this GUID.
func (g *GUID) UnmarshalText(text []byte) error {
    g2, err := FromString(string(text))
    if err != nil {
        return err
    }
    *g = g2
    return nil
}
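As a rough sketch of the guid package above (not part of this commit): generate a random v4 GUID, round-trip it through its string form, and read back the version and variant bits. Note that in this vendored revision the file carries a `// +build windows` constraint, so the sketch only compiles for Windows targets.

package main

import (
    "fmt"

    "github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
    g, err := guid.NewV4()
    if err != nil {
        panic(err)
    }
    s := g.String() // "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
    g2, err := guid.FromString(s)
    if err != nil {
        panic(err)
    }
    // GUID is a comparable struct, so the round trip can be checked directly.
    fmt.Println(g == g2)                   // true
    fmt.Println(g.Version(), g.Variant())  // 4 2 (2 == VariantRFC4122)
}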
15 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go generated vendored Normal file
@@ -0,0 +1,15 @@
// +build !windows

package guid

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type as that is only available to builds
// targeted at `windows`. The representation matches that used by native Windows
// code.
type GUID struct {
    Data1 uint32
    Data2 uint16
    Data3 uint16
    Data4 [8]byte
}
10 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go generated vendored Normal file
@@ -0,0 +1,10 @@
package guid

import "golang.org/x/sys/windows"

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
203 vendor/github.com/Microsoft/go-winio/privilege.go generated vendored Normal file
@@ -0,0 +1,203 @@
// +build windows

package winio

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "runtime"
    "sync"
    "syscall"
    "unicode/utf16"

    "golang.org/x/sys/windows"
)

//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
//sys revertToSelf() (err error) = advapi32.RevertToSelf
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW

const (
    SE_PRIVILEGE_ENABLED = 2

    ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300

    SeBackupPrivilege   = "SeBackupPrivilege"
    SeRestorePrivilege  = "SeRestorePrivilege"
    SeSecurityPrivilege = "SeSecurityPrivilege"
)

const (
    securityAnonymous = iota
    securityIdentification
    securityImpersonation
    securityDelegation
)

var (
    privNames     = make(map[string]uint64)
    privNameMutex sync.Mutex
)

// PrivilegeError represents an error enabling privileges.
type PrivilegeError struct {
    privileges []uint64
}

func (e *PrivilegeError) Error() string {
    s := ""
    if len(e.privileges) > 1 {
        s = "Could not enable privileges "
    } else {
        s = "Could not enable privilege "
    }
    for i, p := range e.privileges {
        if i != 0 {
            s += ", "
        }
        s += `"`
        s += getPrivilegeName(p)
        s += `"`
    }
    return s
}

// RunWithPrivilege enables a single privilege for a function call.
func RunWithPrivilege(name string, fn func() error) error {
    return RunWithPrivileges([]string{name}, fn)
}

// RunWithPrivileges enables privileges for a function call.
func RunWithPrivileges(names []string, fn func() error) error {
    privileges, err := mapPrivileges(names)
    if err != nil {
        return err
    }
    runtime.LockOSThread()
    defer runtime.UnlockOSThread()
    token, err := newThreadToken()
    if err != nil {
        return err
    }
    defer releaseThreadToken(token)
    err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
    if err != nil {
        return err
    }
    return fn()
}

func mapPrivileges(names []string) ([]uint64, error) {
    var privileges []uint64
    privNameMutex.Lock()
    defer privNameMutex.Unlock()
    for _, name := range names {
        p, ok := privNames[name]
        if !ok {
            err := lookupPrivilegeValue("", name, &p)
            if err != nil {
                return nil, err
            }
            privNames[name] = p
        }
        privileges = append(privileges, p)
    }
    return privileges, nil
}

// EnableProcessPrivileges enables privileges globally for the process.
func EnableProcessPrivileges(names []string) error {
    return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
}

// DisableProcessPrivileges disables privileges globally for the process.
func DisableProcessPrivileges(names []string) error {
    return enableDisableProcessPrivilege(names, 0)
}

func enableDisableProcessPrivilege(names []string, action uint32) error {
    privileges, err := mapPrivileges(names)
    if err != nil {
        return err
    }

    p, _ := windows.GetCurrentProcess()
    var token windows.Token
    err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
    if err != nil {
        return err
    }

    defer token.Close()
    return adjustPrivileges(token, privileges, action)
}

func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
    var b bytes.Buffer
    binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
    for _, p := range privileges {
        binary.Write(&b, binary.LittleEndian, p)
        binary.Write(&b, binary.LittleEndian, action)
    }
    prevState := make([]byte, b.Len())
    reqSize := uint32(0)
    success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
    if !success {
        return err
    }
    if err == ERROR_NOT_ALL_ASSIGNED {
        return &PrivilegeError{privileges}
    }
    return nil
}

func getPrivilegeName(luid uint64) string {
    var nameBuffer [256]uint16
    bufSize := uint32(len(nameBuffer))
    err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
    if err != nil {
        return fmt.Sprintf("<unknown privilege %d>", luid)
    }

    var displayNameBuffer [256]uint16
    displayBufSize := uint32(len(displayNameBuffer))
    var langID uint32
    err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
    if err != nil {
        return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
    }

    return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
}

func newThreadToken() (windows.Token, error) {
    err := impersonateSelf(securityImpersonation)
    if err != nil {
        return 0, err
    }

    var token windows.Token
    err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
    if err != nil {
        rerr := revertToSelf()
        if rerr != nil {
            panic(rerr)
        }
        return 0, err
    }
    return token, nil
}

func releaseThreadToken(h windows.Token) {
    err := revertToSelf()
    if err != nil {
        panic(err)
    }
    h.Close()
}
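A hedged sketch of the privilege helpers above (not part of this commit): RunWithPrivilege pins the goroutine to an OS thread, impersonates, enables the named privilege on the thread token, runs the callback, and reverts. It is only meaningful on Windows with a token that actually holds the privilege.

package main

import (
    "fmt"

    winio "github.com/Microsoft/go-winio"
)

func main() {
    err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
        // Privileged work (e.g. BackupRead on an otherwise protected file)
        // would go here; the privilege is enabled only for this call.
        return nil
    })
    if err != nil {
        // A *winio.PrivilegeError is returned if the privilege could not be enabled.
        fmt.Println("privilege:", err)
    }
}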
128 vendor/github.com/Microsoft/go-winio/reparse.go generated vendored Normal file
@@ -0,0 +1,128 @@
package winio

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "strings"
    "unicode/utf16"
    "unsafe"
)

const (
    reparseTagMountPoint = 0xA0000003
    reparseTagSymlink    = 0xA000000C
)

type reparseDataBuffer struct {
    ReparseTag           uint32
    ReparseDataLength    uint16
    Reserved             uint16
    SubstituteNameOffset uint16
    SubstituteNameLength uint16
    PrintNameOffset      uint16
    PrintNameLength      uint16
}

// ReparsePoint describes a Win32 symlink or mount point.
type ReparsePoint struct {
    Target       string
    IsMountPoint bool
}

// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
// mount point reparse point.
type UnsupportedReparsePointError struct {
    Tag uint32
}

func (e *UnsupportedReparsePointError) Error() string {
    return fmt.Sprintf("unsupported reparse point %x", e.Tag)
}

// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
// or a mount point.
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
    tag := binary.LittleEndian.Uint32(b[0:4])
    return DecodeReparsePointData(tag, b[8:])
}

func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
    isMountPoint := false
    switch tag {
    case reparseTagMountPoint:
        isMountPoint = true
    case reparseTagSymlink:
    default:
        return nil, &UnsupportedReparsePointError{tag}
    }
    nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
    if !isMountPoint {
        nameOffset += 4
    }
    nameLength := binary.LittleEndian.Uint16(b[6:8])
    name := make([]uint16, nameLength/2)
    err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
    if err != nil {
        return nil, err
    }
    return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
}

func isDriveLetter(c byte) bool {
    return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
// mount point.
func EncodeReparsePoint(rp *ReparsePoint) []byte {
    // Generate an NT path and determine if this is a relative path.
    var ntTarget string
    relative := false
    if strings.HasPrefix(rp.Target, `\\?\`) {
        ntTarget = `\??\` + rp.Target[4:]
    } else if strings.HasPrefix(rp.Target, `\\`) {
        ntTarget = `\??\UNC\` + rp.Target[2:]
    } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
        ntTarget = `\??\` + rp.Target
    } else {
        ntTarget = rp.Target
        relative = true
    }

    // The paths must be NUL-terminated even though they are counted strings.
    target16 := utf16.Encode([]rune(rp.Target + "\x00"))
    ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))

    size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
    size += len(ntTarget16)*2 + len(target16)*2

    tag := uint32(reparseTagMountPoint)
    if !rp.IsMountPoint {
        tag = reparseTagSymlink
        size += 4 // Add room for symlink flags
    }

    data := reparseDataBuffer{
        ReparseTag:           tag,
        ReparseDataLength:    uint16(size),
        SubstituteNameOffset: 0,
        SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
        PrintNameOffset:      uint16(len(ntTarget16) * 2),
        PrintNameLength:      uint16((len(target16) - 1) * 2),
    }

    var b bytes.Buffer
    binary.Write(&b, binary.LittleEndian, &data)
    if !rp.IsMountPoint {
        flags := uint32(0)
        if relative {
            flags |= 1
        }
        binary.Write(&b, binary.LittleEndian, flags)
    }

    binary.Write(&b, binary.LittleEndian, ntTarget16)
    binary.Write(&b, binary.LittleEndian, target16)
    return b.Bytes()
}
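A hedged sketch of the reparse-point helpers above (not part of this commit): encode a symlink target into a REPARSE_DATA_BUFFER and decode it back. This is pure byte manipulation, so the round trip does not touch the filesystem; the decoder reads the print-form name, so the original target string comes back unchanged.

package main

import (
    "fmt"

    winio "github.com/Microsoft/go-winio"
)

func main() {
    rp := &winio.ReparsePoint{Target: `C:\Temp\target`, IsMountPoint: false}
    b := winio.EncodeReparsePoint(rp) // also stores the NT-form substitute name \??\C:\Temp\target
    rp2, err := winio.DecodeReparsePoint(b)
    if err != nil {
        panic(err)
    }
    fmt.Println(rp2.Target, rp2.IsMountPoint) // C:\Temp\target false
}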
98 vendor/github.com/Microsoft/go-winio/sd.go generated vendored Normal file
@@ -0,0 +1,98 @@
// +build windows

package winio

import (
    "syscall"
    "unsafe"
)

//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
//sys localFree(mem uintptr) = LocalFree
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength

const (
    cERROR_NONE_MAPPED = syscall.Errno(1332)
)

type AccountLookupError struct {
    Name string
    Err  error
}

func (e *AccountLookupError) Error() string {
    if e.Name == "" {
        return "lookup account: empty account name specified"
    }
    var s string
    switch e.Err {
    case cERROR_NONE_MAPPED:
        s = "not found"
    default:
        s = e.Err.Error()
    }
    return "lookup account " + e.Name + ": " + s
}

type SddlConversionError struct {
    Sddl string
    Err  error
}

func (e *SddlConversionError) Error() string {
    return "convert " + e.Sddl + ": " + e.Err.Error()
}

// LookupSidByName looks up the SID of an account by name.
func LookupSidByName(name string) (sid string, err error) {
    if name == "" {
        return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
    }

    var sidSize, sidNameUse, refDomainSize uint32
    err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
    if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
        return "", &AccountLookupError{name, err}
    }
    sidBuffer := make([]byte, sidSize)
    refDomainBuffer := make([]uint16, refDomainSize)
    err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
    if err != nil {
        return "", &AccountLookupError{name, err}
    }
    var strBuffer *uint16
    err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
    if err != nil {
        return "", &AccountLookupError{name, err}
    }
    sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
    localFree(uintptr(unsafe.Pointer(strBuffer)))
    return sid, nil
}

func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
    var sdBuffer uintptr
    err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
    if err != nil {
        return nil, &SddlConversionError{sddl, err}
    }
    defer localFree(sdBuffer)
    sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
    copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
    return sd, nil
}

func SecurityDescriptorToSddl(sd []byte) (string, error) {
    var sddl *uint16
    // The returned string length seems to include an arbitrary number of terminating NULs.
    // Don't use it.
    err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
    if err != nil {
        return "", err
    }
    defer localFree(uintptr(unsafe.Pointer(sddl)))
    return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
}
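A hedged sketch of the SDDL helpers above (not part of this commit): convert an SDDL string to a binary security descriptor and back. The conversion calls into advapi32, so this is Windows-only; the SDDL string, which grants generic-all to the Everyone SID, is purely illustrative.

package main

import (
    "fmt"

    winio "github.com/Microsoft/go-winio"
)

func main() {
    sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;WD)")
    if err != nil {
        panic(err) // a *winio.SddlConversionError on malformed SDDL
    }
    sddl, err := winio.SecurityDescriptorToSddl(sd)
    if err != nil {
        panic(err)
    }
    fmt.Println(sddl) // the normalized SDDL form produced by Windows
}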
3 vendor/github.com/Microsoft/go-winio/syscall.go generated vendored Normal file
@@ -0,0 +1,3 @@
package winio

//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
427 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go generated vendored Normal file
@@ -0,0 +1,427 @@
// Code generated by 'go generate'; DO NOT EDIT.

package winio

import (
    "syscall"
    "unsafe"

    "golang.org/x/sys/windows"
)

var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
    errnoERROR_IO_PENDING = 997
)

var (
    errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
    errERROR_EINVAL     error = syscall.EINVAL
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
    switch e {
    case 0:
        return errERROR_EINVAL
    case errnoERROR_IO_PENDING:
        return errERROR_IO_PENDING
    }
    // TODO: add more here, after collecting data on the common
    // error values see on Windows. (perhaps when running
    // all.bat?)
    return e
}

var (
    modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
    modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
    modntdll    = windows.NewLazySystemDLL("ntdll.dll")
    modws2_32   = windows.NewLazySystemDLL("ws2_32.dll")

    procAdjustTokenPrivileges                                = modadvapi32.NewProc("AdjustTokenPrivileges")
    procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
    procConvertSidToStringSidW                               = modadvapi32.NewProc("ConvertSidToStringSidW")
    procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
    procGetSecurityDescriptorLength                          = modadvapi32.NewProc("GetSecurityDescriptorLength")
    procImpersonateSelf                                      = modadvapi32.NewProc("ImpersonateSelf")
    procLookupAccountNameW                                   = modadvapi32.NewProc("LookupAccountNameW")
    procLookupPrivilegeDisplayNameW                          = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
    procLookupPrivilegeNameW                                 = modadvapi32.NewProc("LookupPrivilegeNameW")
    procLookupPrivilegeValueW                                = modadvapi32.NewProc("LookupPrivilegeValueW")
    procOpenThreadToken                                      = modadvapi32.NewProc("OpenThreadToken")
    procRevertToSelf                                         = modadvapi32.NewProc("RevertToSelf")
    procBackupRead                                           = modkernel32.NewProc("BackupRead")
    procBackupWrite                                          = modkernel32.NewProc("BackupWrite")
    procCancelIoEx                                           = modkernel32.NewProc("CancelIoEx")
    procConnectNamedPipe                                     = modkernel32.NewProc("ConnectNamedPipe")
    procCreateFileW                                          = modkernel32.NewProc("CreateFileW")
    procCreateIoCompletionPort                               = modkernel32.NewProc("CreateIoCompletionPort")
    procCreateNamedPipeW                                     = modkernel32.NewProc("CreateNamedPipeW")
    procGetCurrentThread                                     = modkernel32.NewProc("GetCurrentThread")
    procGetNamedPipeHandleStateW                             = modkernel32.NewProc("GetNamedPipeHandleStateW")
    procGetNamedPipeInfo                                     = modkernel32.NewProc("GetNamedPipeInfo")
    procGetQueuedCompletionStatus                            = modkernel32.NewProc("GetQueuedCompletionStatus")
    procLocalAlloc                                           = modkernel32.NewProc("LocalAlloc")
    procLocalFree                                            = modkernel32.NewProc("LocalFree")
    procSetFileCompletionNotificationModes                   = modkernel32.NewProc("SetFileCompletionNotificationModes")
    procNtCreateNamedPipeFile                                = modntdll.NewProc("NtCreateNamedPipeFile")
    procRtlDefaultNpAcl                                      = modntdll.NewProc("RtlDefaultNpAcl")
    procRtlDosPathNameToNtPathName_U                         = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
    procRtlNtStatusToDosErrorNoTeb                           = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
    procWSAGetOverlappedResult                               = modws2_32.NewProc("WSAGetOverlappedResult")
    procbind                                                 = modws2_32.NewProc("bind")
)

func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
    var _p0 uint32
    if releaseAll {
        _p0 = 1
    }
    r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
    success = r0 != 0
    if true {
        err = errnoErr(e1)
    }
    return
}

func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
    r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func convertSidToStringSid(sid *byte, str **uint16) (err error) {
    r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
    var _p0 *uint16
    _p0, err = syscall.UTF16PtrFromString(str)
    if err != nil {
        return
    }
    return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
}

func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
    r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func getSecurityDescriptorLength(sd uintptr) (len uint32) {
    r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
    len = uint32(r0)
    return
}

func impersonateSelf(level uint32) (err error) {
    r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
    var _p0 *uint16
    _p0, err = syscall.UTF16PtrFromString(accountName)
    if err != nil {
        return
    }
    return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}

func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
    r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
    var _p0 *uint16
    _p0, err = syscall.UTF16PtrFromString(systemName)
    if err != nil {
        return
    }
    return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}

func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
    r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
    var _p0 *uint16
    _p0, err = syscall.UTF16PtrFromString(systemName)
    if err != nil {
        return
    }
    return _lookupPrivilegeName(_p0, luid, buffer, size)
}

func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
    r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
    var _p0 *uint16
    _p0, err = syscall.UTF16PtrFromString(systemName)
    if err != nil {
        return
    }
    var _p1 *uint16
    _p1, err = syscall.UTF16PtrFromString(name)
    if err != nil {
        return
    }
    return _lookupPrivilegeValue(_p0, _p1, luid)
}

func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
    r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
    var _p0 uint32
    if openAsSelf {
        _p0 = 1
    }
    r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func revertToSelf() (err error) {
    r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
    var _p0 *byte
    if len(b) > 0 {
        _p0 = &b[0]
    }
    var _p1 uint32
    if abort {
        _p1 = 1
    }
    var _p2 uint32
    if processSecurity {
        _p2 = 1
    }
    r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
    var _p0 *byte
    if len(b) > 0 {
        _p0 = &b[0]
    }
    var _p1 uint32
    if abort {
        _p1 = 1
    }
    var _p2 uint32
    if processSecurity {
        _p2 = 1
    }
    r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
    r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
    r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
    if r1 == 0 {
        err = errnoErr(e1)
    }
    return
}

func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
    var _p0 *uint16
    _p0, err = syscall.UTF16PtrFromString(name)
    if err != nil {
        return
    }
    return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}

func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
    r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
    handle = syscall.Handle(r0)
    if handle == syscall.InvalidHandle {
        err = errnoErr(e1)
    }
    return
}

func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
    r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
    newport = syscall.Handle(r0)
    if newport == 0 {
        err = errnoErr(e1)
    }
    return
}

func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
    var _p0 *uint16
    _p0, err = syscall.UTF16PtrFromString(name)
    if err != nil {
        return
    }
    return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}

func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
    r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
    handle = syscall.Handle(r0)
    if handle == syscall.InvalidHandle {
        err = errnoErr(e1)
    }
    return
}

func getCurrentThread() (h syscall.Handle) {
    r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
    h = syscall.Handle(r0)
    return
}

func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
    r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0)
|
||||
ptr = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func localFree(mem uintptr) {
|
||||
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
|
||||
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
|
||||
status = ntstatus(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
|
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
|
||||
var _p0 uint32
|
||||
if wait {
|
||||
_p0 = 1
|
||||
}
|
||||
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
|
||||
if r1 == 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
|
||||
if r1 == socketError {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
3
vendor/github.com/VividCortex/ewma/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,3 @@
.DS_Store
.*.sw?
/coverage.txt
3
vendor/github.com/VividCortex/ewma/.whitesource
generated
vendored
Normal file
@@ -0,0 +1,3 @@
{
  "settingsInheritedFrom": "VividCortex/whitesource-config@master"
}
21
vendor/github.com/VividCortex/ewma/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
The MIT License

Copyright (c) 2013 VividCortex

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
145
vendor/github.com/VividCortex/ewma/README.md
generated
vendored
Normal file
@@ -0,0 +1,145 @@
# EWMA

[GoDoc](https://godoc.org/github.com/VividCortex/ewma)
[codecov](https://codecov.io/gh/VividCortex/ewma)
*(badge images not preserved in this listing)*

This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).

### Exponentially Weighted Moving Average

An exponentially weighted moving average is a way to continuously compute a type of
average for a series of numbers, as the numbers arrive. After a value in the series is
added to the average, its weight in the average decreases exponentially over time. This
biases the average towards more recent data. EWMAs are useful for several reasons, chiefly
their inexpensive computational and memory cost, as well as the fact that they represent
the recent central tendency of the series of values.

The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average
is biased towards recent history. The alpha must be between 0 and 1, and is typically
a fairly small number, such as 0.04. We will discuss the choice of alpha later.

The algorithm works thus, in pseudocode (a Go sketch follows the list):

1. Multiply the next number in the series by alpha.
2. Multiply the current value of the average by 1 minus alpha.
3. Add the result of steps 1 and 2, and store it as the new current value of the average.
4. Repeat for each number in the series.

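A minimal sketch of that update rule in Go (an illustration, not part of the vendored README; the names here are made up):

```go
package main

import "fmt"

// ewmaUpdate applies one step of the exponentially weighted moving average:
// newAvg = value*alpha + oldAvg*(1-alpha).
func ewmaUpdate(avg, value, alpha float64) float64 {
	return value*alpha + avg*(1-alpha)
}

func main() {
	series := []float64{300, 310, 290, 305, 295}
	alpha := 0.5
	avg := series[0] // initialize with the first value; see the strategies below
	for _, v := range series[1:] {
		avg = ewmaUpdate(avg, v, alpha)
	}
	fmt.Println(avg) // 298.125, a smoothed value near the recent samples
}
```
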
There are special-case behaviors for how to initialize the current value, and these vary
between implementations. One approach is to start with the first value in the series;
another is to average the first 10 or so values in the series using an arithmetic average,
and then begin the incremental updating of the average. Each method has pros and cons.

It may help to look at it pictorially. Suppose the series has five numbers, and we choose
alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300.

*(image not preserved in this listing: the input series)*

Now let's take the moving average of those numbers. First we set the average to the value
of the first number.

*(image not preserved in this listing: the average set to the first value)*

Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add
them to generate a new value.

*(image not preserved in this listing: the first incremental update)*

This continues until we are done.

*(image not preserved in this listing: the full series and its moving average)*

Notice how each of the values in the series decays by half each time a new value
is added, and the top of the bars in the lower portion of the image represents the
size of the moving average. It is a smoothed, or low-pass, average of the original
series.

For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia.

### Choosing Alpha

Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average)
that averages over the previous N samples. What is the average age of each sample? It is N/2.

Now suppose that you wish to construct an EWMA whose samples have the same average age. The formula
to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book
"Production and Operations Analysis" by Steven Nahmias.

So, for example, if you have a time-series with samples once per second, and you want to get the
moving average over the previous minute, you should use an alpha of .032786885. This, by the way,
is the constant alpha used for this repository's SimpleEWMA.

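To make that arithmetic concrete (again an illustration, not part of the vendored README): one sample per second over a one-minute window gives N = 60, so alpha = 2/(60+1):

```go
package main

import "fmt"

func main() {
	// One sample per second, one-minute window => N = 60 samples.
	n := 60.0
	alpha := 2 / (n + 1)
	fmt.Printf("%.9f\n", alpha) // prints 0.032786885, the constant mentioned above
}
```
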
### Implementations

This repository contains two implementations of the EWMA algorithm, with different properties.

The implementations all conform to the MovingAverage interface, and the constructor returns
that type.

Current implementations assume an implicit time interval of 1.0 between every sample added.
That is, the passage of time is treated as though it's the same as the arrival of samples.
If you need time-based decay when samples are not arriving precisely at set intervals, then
this package will not support your needs at present.

#### SimpleEWMA

A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA
for multiple reasons. It has no warm-up period and it uses a constant
decay. These properties let it use less memory. It will also behave
differently when it's equal to zero, which is assumed to mean
uninitialized, so if a value is likely to actually become zero over time,
then any non-zero value will cause a sharp jump instead of a small change.

#### VariableEWMA

Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory.
It also has a "warmup" time when you start adding values to it. It will report a value of 0.0
until you have added the required number of samples to it. It uses some memory to store the
number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA.

## Usage

### API Documentation

View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma).

```go
package main

import "github.com/VividCortex/ewma"

func main() {
	samples := [100]float64{
		4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
	}

	e := ewma.NewMovingAverage()  //=> Returns a SimpleEWMA if called without params
	a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)

	for _, f := range samples {
		e.Add(f)
		a.Add(f)
	}

	e.Value() //=> 13.577404704631077
	a.Value() //=> 1.5806140565521463e-12
}
```

## Contributing

We only accept pull requests for minor fixes or improvements. This includes:

* Small bug fixes
* Typos
* Documentation or comments

Please open issues to discuss new features. Pull requests for new features will be rejected,
so we recommend forking the repository and making changes in your fork for your use case.

## License

This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved.
It is licensed under the MIT license. Please see the LICENSE file for applicable license terms.
6
vendor/github.com/VividCortex/ewma/codecov.yml
generated
vendored
Normal file
@@ -0,0 +1,6 @@
coverage:
  status:
    project:
      default:
        threshold: 15%
    patch: off
126
vendor/github.com/VividCortex/ewma/ewma.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
// Package ewma implements exponentially weighted moving averages.
package ewma

// Copyright (c) 2013 VividCortex, Inc. All rights reserved.
// Please see the LICENSE file for applicable license terms.

const (
	// By default, we average over a one-minute period, which means the average
	// age of the metrics in the period is 30 seconds.
	AVG_METRIC_AGE float64 = 30.0

	// The formula for computing the decay factor from the average age comes
	// from "Production and Operations Analysis" by Steven Nahmias.
	DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)

	// For best results, the moving average should not be initialized to the
	// samples it sees immediately. The book "Production and Operations
	// Analysis" by Steven Nahmias suggests initializing the moving average to
	// the mean of the first 10 samples. Until the VariableEwma has seen this
	// many samples, it is not "ready" to be queried for the value of the
	// moving average. This adds some memory cost.
	WARMUP_SAMPLES uint8 = 10
)

// MovingAverage is the interface that computes a moving average over a time-
// series stream of numbers. The average may be over a window or exponentially
// decaying.
type MovingAverage interface {
	Add(float64)
	Value() float64
	Set(float64)
}

// NewMovingAverage constructs a MovingAverage that computes an average with the
// desired characteristics in the moving window or exponential decay. If no
// age is given, it constructs a default exponentially weighted implementation
// that consumes minimal memory. The age is related to the decay factor alpha
// by the formula given for the DECAY constant. It signifies the average age
// of the samples as time goes to infinity.
func NewMovingAverage(age ...float64) MovingAverage {
	if len(age) == 0 || age[0] == AVG_METRIC_AGE {
		return new(SimpleEWMA)
	}
	return &VariableEWMA{
		decay: 2 / (age[0] + 1),
	}
}

// A SimpleEWMA represents the exponentially weighted moving average of a
// series of numbers. It WILL have different behavior than the VariableEWMA
// for multiple reasons. It has no warm-up period and it uses a constant
// decay. These properties let it use less memory. It will also behave
// differently when it's equal to zero, which is assumed to mean
// uninitialized, so if a value is likely to actually become zero over time,
// then any non-zero value will cause a sharp jump instead of a small change.
// However, note that this takes a long time, and the value may just
// decay to a stable value that's close to zero, but which won't be mistaken
// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
type SimpleEWMA struct {
	// The current value of the average. After adding with Add(), this is
	// updated to reflect the average of all values seen thus far.
	value float64
}

// Add adds a value to the series and updates the moving average.
func (e *SimpleEWMA) Add(value float64) {
	if e.value == 0 { // this is a proxy for "uninitialized"
		e.value = value
	} else {
		e.value = (value * DECAY) + (e.value * (1 - DECAY))
	}
}

// Value returns the current value of the moving average.
func (e *SimpleEWMA) Value() float64 {
	return e.value
}

// Set sets the EWMA's value.
func (e *SimpleEWMA) Set(value float64) {
	e.value = value
}

// VariableEWMA represents the exponentially weighted moving average of a series of
// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory.
type VariableEWMA struct {
	// The multiplier factor by which the previous samples decay.
	decay float64
	// The current value of the average.
	value float64
	// The number of samples added to this instance.
	count uint8
}

// Add adds a value to the series and updates the moving average.
func (e *VariableEWMA) Add(value float64) {
	switch {
	case e.count < WARMUP_SAMPLES:
		e.count++
		e.value += value
	case e.count == WARMUP_SAMPLES:
		e.count++
		e.value = e.value / float64(WARMUP_SAMPLES)
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	default:
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	}
}

// Value returns the current value of the average, or 0.0 if the series hasn't
// warmed up yet.
func (e *VariableEWMA) Value() float64 {
	if e.count <= WARMUP_SAMPLES {
		return 0.0
	}

	return e.value
}

// Set sets the EWMA's value.
func (e *VariableEWMA) Set(value float64) {
	e.value = value
	if e.count <= WARMUP_SAMPLES {
		e.count = WARMUP_SAMPLES + 1
	}
}
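The warm-up behavior described in the comments above can be seen with a short driver (a sketch, not part of the commit; the age of 15 is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/VividCortex/ewma"
)

func main() {
	a := ewma.NewMovingAverage(15) // VariableEWMA with decay 2/(15+1)
	for i := 0; i < 10; i++ {
		a.Add(100)
	}
	fmt.Println(a.Value()) // 0: still warming up, count has not passed WARMUP_SAMPLES
	a.Add(100)
	fmt.Println(a.Value()) // 100: the warmed-up average is now reported
}
```
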
3
vendor/github.com/VividCortex/ewma/go.mod
generated
vendored
Normal file
@@ -0,0 +1,3 @@
module github.com/VividCortex/ewma

go 1.12
21
vendor/github.com/acarl005/stripansi/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2018 Andrew Carlson

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
30
vendor/github.com/acarl005/stripansi/README.md
generated
vendored
Normal file
@@ -0,0 +1,30 @@
Strip ANSI
==========

This Go package removes ANSI escape codes from strings.

Ideally, we would prevent these from appearing in any text we want to process.
However, sometimes this can't be helped, and we need to be able to deal with that noise.
This will use a regexp to remove those unwanted escape codes.


## Install

```sh
$ go get -u github.com/acarl005/stripansi
```

## Usage

```go
import (
	"fmt"
	"github.com/acarl005/stripansi"
)

func main() {
	msg := "\x1b[38;5;140m foo\x1b[0m bar"
	cleanMsg := stripansi.Strip(msg)
	fmt.Println(cleanMsg) // " foo bar"
}
```
13
vendor/github.com/acarl005/stripansi/stripansi.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
package stripansi

import (
	"regexp"
)

const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"

var re = regexp.MustCompile(ansi)

func Strip(str string) string {
	return re.ReplaceAllString(str, "")
}
1706
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
File diff suppressed because it is too large
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.44.43"
const SDKVersion = "1.44.4"
862
vendor/github.com/aws/aws-sdk-go/service/ec2/api.go
generated
vendored
File diff suppressed because it is too large
27
vendor/github.com/aws/aws-sdk-go/service/s3/api.go
generated
vendored
@@ -8113,9 +8113,8 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
// Rules
//
// You specify the lifecycle configuration in your request body. The lifecycle
// configuration is specified as XML consisting of one or more rules. An Amazon
// S3 Lifecycle configuration can have up to 1,000 rules. This limit is not
// adjustable. Each rule consists of the following:
// configuration is specified as XML consisting of one or more rules. Each rule
// consists of the following:
//
// * Filter identifying a subset of objects to which the rule applies. The
// filter can be based on a key name prefix, object tags, or a combination
@@ -10919,11 +10918,9 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
// Part numbers can be any number from 1 to 10,000, inclusive. A part number
// uniquely identifies a part and also defines its position within the object
// being created. If you upload a new part using the same part number that was
// used with a previous part, the previously uploaded part is overwritten.
//
// For information about maximum and minimum part sizes and other multipart
// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
// in the Amazon S3 User Guide.
// used with a previous part, the previously uploaded part is overwritten. Each
// part must be at least 5 MB in size, except the last part. There is no size
// limit on the last part of your multipart upload.
//
// To ensure that data is not corrupted when traversing the network, specify
// the Content-MD5 header in the upload part request. Amazon S3 checks the part
@@ -11071,8 +11068,8 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
// your request and a byte range by adding the request header x-amz-copy-source-range
// in your request.
//
// For information about maximum and minimum part sizes and other multipart
// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
// The minimum allowable part size for a multipart upload is 5 MB. For more
// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
// in the Amazon S3 User Guide.
//
// Instead of using an existing object as part data, you might use the UploadPart
@@ -29350,9 +29347,9 @@ type NoncurrentVersionExpiration struct {
NewerNoncurrentVersions *int64 `type:"integer"`

// Specifies the number of days an object is noncurrent before Amazon S3 can
// perform the associated action. The value must be a non-zero positive integer.
// For information about the noncurrent days calculations, see How Amazon S3
// Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
// perform the associated action. For information about the noncurrent days
// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
// in the Amazon S3 User Guide.
NoncurrentDays *int64 `type:"integer"`
}
@@ -29665,9 +29662,7 @@ type Object struct {
//
// * If an object is created by either the Multipart Upload or Part Copy
// operation, the ETag is not an MD5 digest, regardless of the method of
// encryption. If an object is larger than 16 MB, the Amazon Web Services
// Management Console will upload or copy that object as a Multipart Upload,
// and therefore the ETag will not be an MD5 digest.
// encryption.
ETag *string `type:"string"`

// The name that you assign to an object. You use the object key to retrieve
6
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
@@ -1279,12 +1279,6 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// No permissions are required for users to perform this operation. The purpose
// of the sts:GetSessionToken operation is to authenticate the user using MFA.
// You cannot use policies to control authentication operations. For more information,
// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
// in the IAM User Guide.
//
// Session Duration
//
// The GetSessionToken operation must be called by using the long-term Amazon
201
vendor/github.com/containers/common/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
108
vendor/github.com/containers/common/pkg/retry/retry.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
package retry

import (
	"context"
	"io"
	"math"
	"net"
	"net/url"
	"syscall"
	"time"

	"github.com/docker/distribution/registry/api/errcode"
	errcodev2 "github.com/docker/distribution/registry/api/v2"
	"github.com/hashicorp/go-multierror"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// RetryOptions defines the option to retry
type RetryOptions struct {
	MaxRetry int           // The number of times to possibly retry
	Delay    time.Duration // The delay to use between retries, if set
}

// RetryIfNecessary retries the operation in exponential backoff with the retryOptions
func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error {
	err := operation()
	for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ {
		delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
		if retryOptions.Delay != 0 {
			delay = retryOptions.Delay
		}
		logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, retryOptions.MaxRetry, err)
		select {
		case <-time.After(delay):
			break
		case <-ctx.Done():
			return err
		}
		err = operation()
	}
	return err
}

func isRetryable(err error) bool {
	err = errors.Cause(err)

	switch err {
	case nil:
		return false
	case context.Canceled, context.DeadlineExceeded:
		return false
	default: // continue
	}

	type unwrapper interface {
		Unwrap() error
	}

	switch e := err.(type) {

	case errcode.Error:
		switch e.Code {
		case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied,
			errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown:
			return false
		}
		return true
	case *net.OpError:
		return isRetryable(e.Err)
	case *url.Error: // This includes errors returned by the net/http client.
		if e.Err == io.EOF { // Happens when a server accepts a HTTP connection and sends EOF
			return true
		}
		return isRetryable(e.Err)
	case syscall.Errno:
		return isErrnoRetryable(e)
	case errcode.Errors:
		// if this error is a group of errors, process them all in turn
		for i := range e {
			if !isRetryable(e[i]) {
				return false
			}
		}
		return true
	case *multierror.Error:
		// if this error is a group of errors, process them all in turn
		for i := range e.Errors {
			if !isRetryable(e.Errors[i]) {
				return false
			}
		}
		return true
	case unwrapper: // Test this last, because various error types might implement .Unwrap()
		err = e.Unwrap()
		return isRetryable(err)
	}

	return false
}

func isErrnoRetryable(e error) bool {
	switch e {
	case syscall.ECONNREFUSED, syscall.EINTR, syscall.EAGAIN, syscall.EBUSY, syscall.ENETDOWN, syscall.ENETUNREACH, syscall.ENETRESET, syscall.ECONNABORTED, syscall.ECONNRESET, syscall.ETIMEDOUT, syscall.EHOSTDOWN, syscall.EHOSTUNREACH:
		return true
	}
	return isErrnoERESTART(e)
}
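A usage sketch for the retry helper above (an illustration, not part of the commit; the failing operation is a stand-in):

```go
package main

import (
	"context"
	"log"
	"syscall"
	"time"

	"github.com/containers/common/pkg/retry"
)

func main() {
	attempts := 0
	// A stand-in operation that fails twice with a retryable errno
	// (isRetryable classifies ECONNREFUSED as retryable) and then succeeds.
	// Plain, unclassified errors would not be retried.
	operation := func() error {
		attempts++
		if attempts < 3 {
			return syscall.ECONNREFUSED
		}
		return nil
	}
	// With Delay unset, the helper backs off exponentially (1s, 2s, 4s, ...);
	// a non-zero Delay is used verbatim between attempts.
	err := retry.RetryIfNecessary(context.Background(), operation, &retry.RetryOptions{
		MaxRetry: 4,
		Delay:    100 * time.Millisecond,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
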
9
vendor/github.com/containers/common/pkg/retry/retry_linux.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
package retry

import (
	"syscall"
)

func isErrnoERESTART(e error) bool {
	return e == syscall.ERESTART
}
8
vendor/github.com/containers/common/pkg/retry/retry_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
//go:build !linux
// +build !linux

package retry

func isErrnoERESTART(e error) bool {
	return false
}
189
vendor/github.com/containers/image/v5/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,189 @@

Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
1682
vendor/github.com/containers/image/v5/copy/copy.go
generated
vendored
Normal file
File diff suppressed because it is too large
62
vendor/github.com/containers/image/v5/copy/digesting_reader.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
package copy

import (
	"hash"
	"io"

	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

type digestingReader struct {
	source              io.Reader
	digester            digest.Digester
	hash                hash.Hash
	expectedDigest      digest.Digest
	validationFailed    bool
	validationSucceeded bool
}

// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
	var digester digest.Digester
	if err := expectedDigest.Validate(); err != nil {
		return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
	}
	digestAlgorithm := expectedDigest.Algorithm()
	if !digestAlgorithm.Available() {
		return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
	}
	digester = digestAlgorithm.Digester()

	return &digestingReader{
		source:           source,
		digester:         digester,
		hash:             digester.Hash(),
		expectedDigest:   expectedDigest,
		validationFailed: false,
	}, nil
}

func (d *digestingReader) Read(p []byte) (int, error) {
	n, err := d.source.Read(p)
	if n > 0 {
		if n2, err := d.hash.Write(p[:n]); n2 != n || err != nil {
			// Coverage: This should not happen, the hash.Hash interface requires
			// d.digest.Write to never return an error, and the io.Writer interface
			// requires n2 == len(input) if no error is returned.
			return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n)
		}
	}
	if err == io.EOF {
		actualDigest := d.digester.Digest()
		if actualDigest != d.expectedDigest {
			d.validationFailed = true
			return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
		}
		d.validationSucceeded = true
	}
	return n, err
}
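The digesting reader verifies a stream against an expected digest as a side effect of ordinary reads, with validation resolving only at EOF. A minimal in-package sketch of how such a wrapper could be exercised (the function name, payload, and use of "bytes"/"io" imports are illustrative assumptions, not part of the vendored file):

// Hypothetical sketch (would live in package copy, e.g. in a test).
func exampleDigestVerification() error {
	payload := []byte("hello")
	expected := digest.FromBytes(payload) // sha256 digest of the payload
	dr, err := newDigestingReader(bytes.NewReader(payload), expected)
	if err != nil {
		return err
	}
	// io.Copy reads until EOF; a mismatching stream would make Read fail there.
	if _, err := io.Copy(io.Discard, dr); err != nil {
		return err
	}
	// dr.validationSucceeded is now true.
	return nil
}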
24
vendor/github.com/containers/image/v5/copy/encrypt.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
package copy

import (
	"strings"

	"github.com/containers/image/v5/types"
)

// isOciEncrypted returns a bool indicating if a mediatype is encrypted
// This function will be moved to be part of OCI spec when adopted.
func isOciEncrypted(mediatype string) bool {
	return strings.HasSuffix(mediatype, "+encrypted")
}

// isEncrypted checks if an image is encrypted
func isEncrypted(i types.Image) bool {
	layers := i.LayerInfos()
	for _, l := range layers {
		if isOciEncrypted(l.MediaType) {
			return true
		}
	}
	return false
}
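The encryption check is purely a media-type suffix convention; the concrete media-type strings below are illustrative examples, not values taken from the diff:

isOciEncrypted("application/vnd.oci.image.layer.v1.tar+gzip+encrypted") // true
isOciEncrypted("application/vnd.oci.image.layer.v1.tar+gzip")           // false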
170
vendor/github.com/containers/image/v5/copy/manifest.go
generated
vendored
Normal file
@@ -0,0 +1,170 @@
package copy

import (
	"context"
	"strings"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location.
// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}

// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
type orderedSet struct {
	list     []string
	included map[string]struct{}
}

// newOrderedSet creates a correctly initialized orderedSet.
// [Sometimes it would be really nice if Golang had constructors…]
func newOrderedSet() *orderedSet {
	return &orderedSet{
		list:     []string{},
		included: map[string]struct{}{},
	}
}

// append adds s to the end of os, only if it is not included already.
func (os *orderedSet) append(s string) {
	if _, ok := os.included[s]; !ok {
		os.list = append(os.list, s)
		os.included[s] = struct{}{}
	}
}

// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest.
// Note that the conversion will only happen later, through ic.src.UpdatedImage
// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
// and a list of other possible alternatives, in order.
func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
	_, srcType, err := ic.src.Manifest(ctx)
	if err != nil { // This should have been cached?!
		return "", nil, errors.Wrap(err, "reading manifest")
	}
	normalizedSrcType := manifest.NormalizedMIMEType(srcType)
	if srcType != normalizedSrcType {
		logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
		srcType = normalizedSrcType
	}

	if forceManifestMIMEType != "" {
		destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
	}

	if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
		return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
	}
	supportedByDest := map[string]struct{}{}
	for _, t := range destSupportedManifestMIMETypes {
		if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) {
			supportedByDest[t] = struct{}{}
		}
	}

	// destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
	// So, build a list of types to try in order of decreasing preference.
	// FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct,
	// although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other.
	// In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types
	// and never attempt the other one.
	prioritizedTypes := newOrderedSet()

	// First of all, prefer to keep the original manifest unmodified.
	if _, ok := supportedByDest[srcType]; ok {
		prioritizedTypes.append(srcType)
	}
	if ic.cannotModifyManifestReason != "" {
		// We could also drop this check and have the caller
		// make the choice; it is already doing that to an extent, to improve error
		// messages. But it is nice to hide the “if we can't modify, do no conversion”
		// special case in here; the caller can then worry (or not) only about a good UI.
		logrus.Debugf("We can't modify the manifest, hoping for the best...")
		return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying?
	}

	// Then use our list of preferred types.
	for _, t := range preferredManifestMIMETypes {
		if _, ok := supportedByDest[t]; ok {
			prioritizedTypes.append(t)
		}
	}

	// Finally, try anything else the destination supports.
	for _, t := range destSupportedManifestMIMETypes {
		prioritizedTypes.append(t)
	}

	logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
	if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
		return "", nil, errors.New("Internal error: no candidate MIME types")
	}
	preferredType := prioritizedTypes.list[0]
	if preferredType != srcType {
		ic.manifestUpdates.ManifestMIMEType = preferredType
	} else {
		logrus.Debugf("... will first try using the original manifest unmodified")
	}
	return preferredType, prioritizedTypes.list[1:], nil
}

// isMultiImage returns true if img is a list of images
func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) {
	_, mt, err := img.Manifest(ctx)
	if err != nil {
		return false, err
	}
	return manifest.MIMETypeIsMultiImage(mt), nil
}

// determineListConversion takes the current MIME type of a list of manifests,
// the list of MIME types supported for a given destination, and a possible
// forced value, and returns the MIME type to which we should convert the list
// of manifests (regardless of whether we are converting to it or using it
// unmodified) and a slice of other list types which might be supported by the
// destination.
func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, []string, error) {
	// If there's no list of supported types, then anything we support is expected to be supported.
	if len(destSupportedMIMETypes) == 0 {
		destSupportedMIMETypes = manifest.SupportedListMIMETypes
	}
	// If we're forcing it, replace the list of supported types with the forced value.
	if forcedListMIMEType != "" {
		destSupportedMIMETypes = []string{forcedListMIMEType}
	}

	prioritizedTypes := newOrderedSet()
	// The first priority is the current type, if it's in the list, since that lets us avoid a
	// conversion that isn't strictly necessary.
	for _, t := range destSupportedMIMETypes {
		if t == currentListMIMEType {
			prioritizedTypes.append(currentListMIMEType)
			break
		}
	}
	// Pick out the other list types that we support.
	for _, t := range destSupportedMIMETypes {
		if manifest.MIMETypeIsMultiImage(t) {
			prioritizedTypes.append(t)
		}
	}

	logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
	if len(prioritizedTypes.list) == 0 {
		return "", nil, errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
	}
	selectedType := prioritizedTypes.list[0]
	otherSupportedTypes := prioritizedTypes.list[1:]
	if selectedType != currentListMIMEType {
		logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes)
	} else {
		logrus.Debugf("... will use the original manifest list type, and then try %v", otherSupportedTypes)
	}
	// Done.
	return selectedType, otherSupportedTypes, nil
}
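The candidate ordering above relies on orderedSet deduplicating while preserving insertion order, so a type that is both the source type and a preferred type is only tried once. A small in-package illustration (the MIME type values are examples):

prioritized := newOrderedSet()
prioritized.append("application/vnd.docker.distribution.manifest.v2+json")
prioritized.append("application/vnd.docker.distribution.manifest.v1+prettyjws")
prioritized.append("application/vnd.docker.distribution.manifest.v2+json") // ignored: already present
// prioritized.list is now [v2+json, v1+prettyjws], in insertion order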
148
vendor/github.com/containers/image/v5/copy/progress_bars.go
generated
vendored
Normal file
@@ -0,0 +1,148 @@
package copy

import (
	"context"
	"fmt"
	"io"

	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

// newProgressPool creates a *mpb.Progress.
// The caller must eventually call pool.Wait() after the pool will no longer be updated.
// NOTE: Every progress bar created within the progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done
// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
func (c *copier) newProgressPool() *mpb.Progress {
	return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
}

// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
func customPartialBlobDecorFunc(s decor.Statistics) string {
	if s.Total == 0 {
		pairFmt := "%.1f / %.1f (skipped: %.1f)"
		return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
	}
	pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
	percentage := 100.0 * float64(s.Refill) / float64(s.Total)
	return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
}

// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
type progressBar struct {
	*mpb.Bar
	originalSize int64 // or -1 if unknown
}

// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
// is io.Discard, the progress bar's output will be discarded.
//
// NOTE: Every progress bar created within a progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done
// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
//
// As a convention, most users of progress bars should call mark100PercentComplete on full success;
// by convention, we don't leave progress bars in partial state when fully done
// (even if we copied much less data than anticipated).
func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
	// shortDigestLen is the length of the digest used for blobs.
	const shortDigestLen = 12

	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
	// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
	maxPrefixLen := len("Copying blob ") + shortDigestLen
	if len(prefix) > maxPrefixLen {
		prefix = prefix[:maxPrefixLen]
	}

	// onComplete will replace prefix once the bar/spinner has completed
	onComplete = prefix + " " + onComplete

	// Use a normal progress bar when we know the size (i.e., size > 0).
	// Otherwise, use a spinner to indicate that something's happening.
	var bar *mpb.Bar
	if info.Size > 0 {
		if partial {
			bar = pool.AddBar(info.Size,
				mpb.BarFillerClearOnComplete(),
				mpb.PrependDecorators(
					decor.OnComplete(decor.Name(prefix), onComplete),
				),
				mpb.AppendDecorators(
					decor.Any(customPartialBlobDecorFunc),
				),
			)
		} else {
			bar = pool.AddBar(info.Size,
				mpb.BarFillerClearOnComplete(),
				mpb.PrependDecorators(
					decor.OnComplete(decor.Name(prefix), onComplete),
				),
				mpb.AppendDecorators(
					decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
				),
			)
		}
	} else {
		bar = pool.New(0,
			mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
			mpb.BarFillerClearOnComplete(),
			mpb.PrependDecorators(
				decor.OnComplete(decor.Name(prefix), onComplete),
			),
		)
	}
	if c.progressOutput == io.Discard {
		c.Printf("Copying %s %s\n", kind, info.Digest)
	}
	return &progressBar{
		Bar:          bar,
		originalSize: info.Size,
	}
}

// mark100PercentComplete marks the progress bar as 100% complete;
// it may do so by possibly advancing the current state if it is below the known total.
func (bar *progressBar) mark100PercentComplete() {
	if bar.originalSize > 0 {
		// We can't call bar.SetTotal even if we wanted to; the total can not be changed
		// after a progress bar is created with a definite total.
		bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
	} else {
		// -1 = unknown size
		// 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
		// size (possible at least in theory), in mpb, zero-sized progress bars are treated
		// as unknown size, in particular they are not configured to be marked as
		// complete on bar.Current() reaching bar.total (because that would happen already
		// when creating the progress bar).
		// That means that we are both _allowed_ to call SetTotal, and we _have to_.
		bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete.
	}
}

// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
// with the number of received bytes.
type blobChunkAccessorProxy struct {
	wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
	bar     *progressBar              // A progress bar updated with the number of bytes read so far
}

// GetBlobAt returns a sequential channel of readers that contain data for the requested
// blob chunks, and a channel that might get a single error value.
// The specified chunks must not overlap and must be sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
	rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
	if err == nil {
		total := int64(0)
		for _, c := range chunks {
			total += int64(c.Length)
		}
		s.bar.IncrInt64(total)
	}
	return rc, errs, err
}
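The pool/bar lifecycle documented above is easy to get wrong, so here is a hedged in-package sketch of the intended ordering: the deferred Abort runs before the deferred pool.Wait(), satisfying the "complete or abort before Wait" rule (the function name and the "done" label are illustrative assumptions):

// Hypothetical sketch of the documented lifecycle.
func exampleBarLifecycle(c *copier, blob types.BlobInfo) {
	pool := c.newProgressPool()
	defer pool.Wait() // runs last; safe because the bar below is aborted or completed first

	bar := c.createProgressBar(pool, false, blob, "blob", "done")
	defer bar.Abort(false) // a no-op if the bar has already completed

	// ... copy data, calling bar.IncrInt64(n) as bytes arrive ...
	bar.mark100PercentComplete() // on full success, per the convention above
}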
79
vendor/github.com/containers/image/v5/copy/progress_channel.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
package copy

import (
	"io"
	"time"

	"github.com/containers/image/v5/types"
)

// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval.
type progressReader struct {
	source       io.Reader
	channel      chan<- types.ProgressProperties
	interval     time.Duration
	artifact     types.BlobInfo
	lastUpdate   time.Time
	offset       uint64
	offsetUpdate uint64
}

// newProgressReader creates a new progress reader for:
// `source`:   The source when internally reading bytes
// `channel`:  The reporter channel to which the progress will be sent
// `interval`: The update interval to indicate how often the progress should update
// `artifact`: The blob metadata which is currently being progressed
func newProgressReader(
	source io.Reader,
	channel chan<- types.ProgressProperties,
	interval time.Duration,
	artifact types.BlobInfo,
) *progressReader {
	// The progress reader constructor informs the progress channel
	// that a new artifact will be read
	channel <- types.ProgressProperties{
		Event:    types.ProgressEventNewArtifact,
		Artifact: artifact,
	}
	return &progressReader{
		source:       source,
		channel:      channel,
		interval:     interval,
		artifact:     artifact,
		lastUpdate:   time.Now(),
		offset:       0,
		offsetUpdate: 0,
	}
}

// reportDone indicates to the internal channel that the progress has been
// finished
func (r *progressReader) reportDone() {
	r.channel <- types.ProgressProperties{
		Event:        types.ProgressEventDone,
		Artifact:     r.artifact,
		Offset:       r.offset,
		OffsetUpdate: r.offsetUpdate,
	}
}

// Read continuously reads bytes into the progress reader and reports the
// status via the internal channel
func (r *progressReader) Read(p []byte) (int, error) {
	n, err := r.source.Read(p)
	r.offset += uint64(n)
	r.offsetUpdate += uint64(n)

	// Fire the progress reader in the provided interval
	if time.Since(r.lastUpdate) > r.interval {
		r.channel <- types.ProgressProperties{
			Event:        types.ProgressEventRead,
			Artifact:     r.artifact,
			Offset:       r.offset,
			OffsetUpdate: r.offsetUpdate,
		}
		r.lastUpdate = time.Now()
		r.offsetUpdate = 0
	}
	return n, err
}
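Note that the constructor sends on the channel before returning, so with an unbuffered channel a receiver must already be draining it, or construction blocks. A hedged consumer sketch showing the event sequence (NewArtifact, periodic Read, Done); the function name and use of "fmt" are illustrative assumptions:

// Hypothetical in-package consumer sketch.
func exampleProgressConsumer(src io.Reader, blob types.BlobInfo) {
	ch := make(chan types.ProgressProperties)
	go func() { // start draining before newProgressReader sends the first event
		for p := range ch {
			switch p.Event {
			case types.ProgressEventNewArtifact:
				fmt.Println("starting", p.Artifact.Digest)
			case types.ProgressEventRead:
				fmt.Println("read", p.Offset, "bytes so far")
			case types.ProgressEventDone:
				fmt.Println("finished", p.Artifact.Digest)
			}
		}
	}()
	pr := newProgressReader(src, ch, time.Second, blob)
	_, _ = io.Copy(io.Discard, pr)
	pr.reportDone()
	close(ch)
}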
38
vendor/github.com/containers/image/v5/copy/sign.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
package copy

import (
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports"
	"github.com/pkg/errors"
)

// createSignature creates a new signature of manifest using keyIdentity.
func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) ([]byte, error) {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		return nil, errors.Wrap(err, "initializing GPG")
	}
	defer mech.Close()
	if err := mech.SupportsSigning(); err != nil {
		return nil, errors.Wrap(err, "Signing not supported")
	}

	if identity != nil {
		if reference.IsNameOnly(identity) {
			return nil, errors.Errorf("Sign identity must be a fully specified reference %s", identity)
		}
	} else {
		identity = c.dest.Reference().DockerReference()
		if identity == nil {
			return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
		}
	}

	c.Printf("Signing manifest\n")
	newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase})
	if err != nil {
		return nil, errors.Wrap(err, "creating signature")
	}
	return newSig, nil
}
56
vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
package explicitfilepath

import (
	"os"
	"path/filepath"

	"github.com/pkg/errors"
)

// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
// To do so, all elements of the input path must exist; as a special case, the final component may be
// a non-existent name (but not a symlink pointing to a non-existent name).
// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
func ResolvePathToFullyExplicit(path string) (string, error) {
	switch _, err := os.Lstat(path); {
	case err == nil:
		return resolveExistingPathToFullyExplicit(path)
	case os.IsNotExist(err):
		parent, file := filepath.Split(path)
		resolvedParent, err := resolveExistingPathToFullyExplicit(parent)
		if err != nil {
			return "", err
		}
		if file == "." || file == ".." {
			// Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well.
			// This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
			// We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
			// in the resulting path, and especially not at the end.
			return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
		}
		resolvedPath := filepath.Join(resolvedParent, file)
		// As a sanity check, ensure that there are no "." or ".." components.
		cleanedResolvedPath := filepath.Clean(resolvedPath)
		if cleanedResolvedPath != resolvedPath {
			// Coverage: This should never happen.
			return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
		}
		return resolvedPath, nil
	default: // err != nil, unrecognized
		return "", err
	}
}

// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit,
// but without the special case for missing final component.
func resolveExistingPathToFullyExplicit(path string) (string, error) {
	resolved, err := filepath.Abs(path)
	if err != nil {
		return "", err // Coverage: This can fail only if os.Getwd() fails.
	}
	resolved, err = filepath.EvalSymlinks(resolved)
	if err != nil {
		return "", err
	}
	return filepath.Clean(resolved), nil
}
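Since ResolvePathToFullyExplicit is exported, a short usage sketch may help; the input path, the printed result, and the "fmt"/"log" imports are illustrative assumptions:

// A relative path whose final component does not exist yet still resolves,
// as long as every parent element exists and symlinks can be evaluated.
resolved, err := explicitfilepath.ResolvePathToFullyExplicit("./images/new-dir")
if err != nil {
	log.Fatal(err) // e.g. a missing parent directory or a dangling symlink
}
fmt.Println(resolved) // e.g. /home/user/images/new-dir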
81
vendor/github.com/containers/image/v5/docker/archive/dest.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
package archive

import (
	"context"
	"io"

	"github.com/containers/image/v5/docker/internal/tarfile"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
)

type archiveImageDestination struct {
	*tarfile.Destination // Implements most of types.ImageDestination
	ref                  archiveReference
	archive              *tarfile.Writer // Should only be closed if writer != nil
	writer               io.Closer       // May be nil if the archive is shared
}

func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
	if ref.sourceIndex != -1 {
		return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
	}

	var archive *tarfile.Writer
	var writer io.Closer
	if ref.archiveWriter != nil {
		archive = ref.archiveWriter
		writer = nil
	} else {
		fh, err := openArchiveForWriting(ref.path)
		if err != nil {
			return nil, err
		}

		archive = tarfile.NewWriter(fh)
		writer = fh
	}
	tarDest := tarfile.NewDestination(sys, archive, ref.ref)
	if sys != nil && sys.DockerArchiveAdditionalTags != nil {
		tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
	}
	return &archiveImageDestination{
		Destination: tarDest,
		ref:         ref,
		archive:     archive,
		writer:      writer,
	}, nil
}

// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression {
	return types.Decompress
}

// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *archiveImageDestination) Reference() types.ImageReference {
	return d.ref
}

// Close removes resources associated with an initialized ImageDestination, if any.
func (d *archiveImageDestination) Close() error {
	if d.writer != nil {
		return d.writer.Close()
	}
	return nil
}

// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
	if d.writer != nil {
		return d.archive.Close()
	}
	return nil
}
120
vendor/github.com/containers/image/v5/docker/archive/reader.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
package archive

import (
	"github.com/containers/image/v5/docker/internal/tarfile"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
)

// Reader manages a single Docker archive, allows listing its contents and accessing
// individual images with less overhead than creating image references individually
// (because the archive is, if necessary, copied or decompressed only once).
type Reader struct {
	path    string // The original, user-specified path; not the maintained temporary file, if any
	archive *tarfile.Reader
}

// NewReader returns a Reader for path.
// The caller should call .Close() on the returned object.
func NewReader(sys *types.SystemContext, path string) (*Reader, error) {
	archive, err := tarfile.NewReaderFromFile(sys, path)
	if err != nil {
		return nil, err
	}
	return &Reader{
		path:    path,
		archive: archive,
	}, nil
}

// Close deletes temporary files associated with the Reader, if any.
func (r *Reader) Close() error {
	return r.archive.Close()
}

// NewReaderForReference creates a Reader from a Reader-independent imageReference, which must be from docker/archive.Transport,
// and a variant of imageReference that points at the same image within the reader.
// The caller should call .Close() on the returned Reader.
func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
	standalone, ok := ref.(archiveReference)
	if !ok {
		return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
	}
	if standalone.archiveReader != nil {
		return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
	}
	reader, err := NewReader(sys, standalone.path)
	if err != nil {
		return nil, nil, err
	}
	succeeded := false
	defer func() {
		if !succeeded {
			reader.Close()
		}
	}()
	readerRef, err := newReference(standalone.path, standalone.ref, standalone.sourceIndex, reader.archive, nil)
	if err != nil {
		return nil, nil, err
	}
	succeeded = true
	return reader, readerRef, nil
}

// List returns a set of references for images in the Reader,
// grouped by the image the references point to.
// The references are valid only until the Reader is closed.
func (r *Reader) List() ([][]types.ImageReference, error) {
	res := [][]types.ImageReference{}
	for imageIndex, image := range r.archive.Manifest {
		refs := []types.ImageReference{}
		for _, tag := range image.RepoTags {
			parsedTag, err := reference.ParseNormalizedNamed(tag)
			if err != nil {
				return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex)
			}
			nt, ok := parsedTag.(reference.NamedTagged)
			if !ok {
				return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String())
			}
			ref, err := newReference(r.path, nt, -1, r.archive, nil)
			if err != nil {
				return nil, errors.Wrapf(err, "creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
			}
			refs = append(refs, ref)
		}
		if len(refs) == 0 {
			ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
			if err != nil {
				return nil, errors.Wrapf(err, "creating a reference for manifest item @%d", imageIndex)
			}
			refs = append(refs, ref)
		}
		res = append(res, refs)
	}
	return res, nil
}

// ManifestTagsForReference returns the set of tags “matching” ref in reader, as strings
// (i.e. exposing the short names before normalization).
// The function reports an error if ref does not identify a single image.
// If ref contains a NamedTagged reference, only a single tag “matching” ref is returned;
// If ref contains a source index, or neither a NamedTagged nor a source index, all tags
// matching the image are returned.
// Almost all users should use List() or ImageReference.DockerReference() instead.
func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) {
	archiveRef, ok := ref.(archiveReference)
	if !ok {
		return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
	}
	manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex)
	if err != nil {
		return nil, err
	}
	if tagIndex != -1 {
		return []string{manifestItem.RepoTags[tagIndex]}, nil
	}
	return manifestItem.RepoTags, nil
}
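A hedged sketch of enumerating an archive with the exported API above; the tarball path and the "log"/"fmt" imports are illustrative assumptions, and error handling is shortened for brevity:

reader, err := archive.NewReader(nil, "/tmp/images.tar")
if err != nil {
	log.Fatal(err)
}
defer reader.Close() // the references below become invalid after this
refGroups, err := reader.List()
if err != nil {
	log.Fatal(err)
}
for _, refs := range refGroups { // one group per image in the archive
	for _, ref := range refs { // one reference per RepoTag (or @index if untagged)
		fmt.Println(transports.ImageName(ref))
	}
}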
42
vendor/github.com/containers/image/v5/docker/archive/src.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
package archive

import (
	"context"

	"github.com/containers/image/v5/docker/internal/tarfile"
	"github.com/containers/image/v5/types"
)

type archiveImageSource struct {
	*tarfile.Source // Implements most of types.ImageSource
	ref             archiveReference
}

// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
	var archive *tarfile.Reader
	var closeArchive bool
	if ref.archiveReader != nil {
		archive = ref.archiveReader
		closeArchive = false
	} else {
		a, err := tarfile.NewReaderFromFile(sys, ref.path)
		if err != nil {
			return nil, err
		}
		archive = a
		closeArchive = true
	}
	src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex)
	return &archiveImageSource{
		Source: src,
		ref:    ref,
	}, nil
}

// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (s *archiveImageSource) Reference() types.ImageReference {
	return s.ref
}
211
vendor/github.com/containers/image/v5/docker/archive/transport.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
package archive

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/containers/image/v5/docker/internal/tarfile"
	"github.com/containers/image/v5/docker/reference"
	ctrImage "github.com/containers/image/v5/image"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
)

func init() {
	transports.Register(Transport)
}

// Transport is an ImageTransport for local Docker archives.
var Transport = archiveTransport{}

type archiveTransport struct{}

func (t archiveTransport) Name() string {
	return "docker-archive"
}

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
	return ParseReference(reference)
}

// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
	// See the explanation in archiveReference.PolicyConfigurationIdentity.
	return errors.New(`docker-archive: does not support any scopes except the default "" one`)
}

// archiveReference is an ImageReference for Docker images.
type archiveReference struct {
	path string
	// May be nil to read the only image in an archive, or to create an untagged image.
	ref reference.NamedTagged
	// If not -1, a zero-based index of the image in the manifest. Valid only for sources.
	// Must not be set if ref is set.
	sourceIndex int
	// If not nil, must have been created from path (but archiveReader.path may point at a temporary
	// file, not necessarily path precisely).
	archiveReader *tarfile.Reader
	// If not nil, must have been created for path
	archiveWriter *tarfile.Writer
}

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
	if refString == "" {
		return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
	}

	parts := strings.SplitN(refString, ":", 2)
	path := parts[0]
	var nt reference.NamedTagged
	sourceIndex := -1

	if len(parts) == 2 {
		// A :tag or :@index was specified.
		if len(parts[1]) > 0 && parts[1][0] == '@' {
			i, err := strconv.Atoi(parts[1][1:])
			if err != nil {
				return nil, errors.Wrapf(err, "Invalid source index %s", parts[1])
			}
			if i < 0 {
				return nil, errors.Errorf("Invalid source index @%d: must not be negative", i)
			}
			sourceIndex = i
		} else {
			ref, err := reference.ParseNormalizedNamed(parts[1])
			if err != nil {
				return nil, errors.Wrapf(err, "docker-archive parsing reference")
			}
			ref = reference.TagNameOnly(ref)
			refTagged, isTagged := ref.(reference.NamedTagged)
			if !isTagged { // If ref contains a digest, TagNameOnly does not change it
				return nil, errors.Errorf("reference does not include a tag: %s", ref.String())
			}
			nt = refTagged
		}
	}

	return newReference(path, nt, sourceIndex, nil, nil)
}

// NewReference returns a Docker archive reference for a path and an optional reference.
func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, error) {
	return newReference(path, ref, -1, nil, nil)
}

// NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index.
func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) {
	return newReference(path, nil, sourceIndex, nil, nil)
}

// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
func newReference(path string, ref reference.NamedTagged, sourceIndex int,
	archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
	if strings.Contains(path, ":") {
		return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
	}
	if ref != nil && sourceIndex != -1 {
		return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index")
	}
	if _, isDigest := ref.(reference.Canonical); isDigest {
		return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String())
	}
	if sourceIndex != -1 && sourceIndex < 0 {
		return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex)
	}
	return archiveReference{
		path:          path,
		ref:           ref,
		sourceIndex:   sourceIndex,
		archiveReader: archiveReader,
		archiveWriter: archiveWriter,
	}, nil
}

func (ref archiveReference) Transport() types.ImageTransport {
	return Transport
}

// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
	switch {
	case ref.ref != nil:
		return fmt.Sprintf("%s:%s", ref.path, ref.ref.String())
	case ref.sourceIndex != -1:
		return fmt.Sprintf("%s:@%d", ref.path, ref.sourceIndex)
	default:
		return ref.path
	}
}

// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref archiveReference) DockerReference() reference.Named {
	return ref.ref
}

// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
// (i.e. various references with exactly the same semantics should return the same configuration identity)
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref archiveReference) PolicyConfigurationIdentity() string {
	// Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity.
	return ""
}

// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref archiveReference) PolicyConfigurationNamespaces() []string {
	// TODO
	return []string{}
}

// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
	src, err := newImageSource(ctx, sys, ref)
	if err != nil {
		return nil, err
	}
	return ctrImage.FromSource(ctx, sys, src)
}

// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
	return newImageSource(ctx, sys, ref)
}

// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
	return newImageDestination(sys, ref)
}

// DeleteImage deletes the named image from the registry, if supported.
func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
	// Not really supported, for safety reasons.
	return errors.New("Deleting images not implemented for docker-archive: images")
}
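The parsing logic above accepts three reference shapes; the sketch below spells them out (the paths and tag are examples only, and errors are ignored for brevity):

refOnly, _ := archive.ParseReference("/tmp/archive.tar")        // the only image in the archive
refTag, _ := archive.ParseReference("/tmp/archive.tar:busybox") // normalized to docker.io/library/busybox:latest
refIdx, _ := archive.ParseReference("/tmp/archive.tar:@0")      // zero-based index into the archive manifest
_, _, _ = refOnly, refTag, refIdx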
82
vendor/github.com/containers/image/v5/docker/archive/writer.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
package archive

import (
	"io"
	"os"

	"github.com/containers/image/v5/docker/internal/tarfile"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
)

// Writer manages a single in-progress Docker archive and allows adding images to it.
type Writer struct {
	path    string // The original, user-specified path; not the maintained temporary file, if any
	archive *tarfile.Writer
	writer  io.Closer
}

// NewWriter returns a Writer for path.
// The caller should call .Close() on the returned object.
func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
	fh, err := openArchiveForWriting(path)
	if err != nil {
		return nil, err
	}
	archive := tarfile.NewWriter(fh)

	return &Writer{
		path:    path,
		archive: archive,
		writer:  fh,
	}, nil
}

// Close writes all outstanding data about images to the archive, and
// releases state associated with the Writer, if any.
// No more images can be added after this is called.
func (w *Writer) Close() error {
	err := w.archive.Close()
	if err2 := w.writer.Close(); err2 != nil && err == nil {
		err = err2
	}
	return err
}

// NewReference returns an ImageReference that allows adding an image to Writer,
// with an optional reference.
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
	return newReference(w.path, destinationRef, -1, nil, w.archive)
}

// openArchiveForWriting opens path for writing a tar archive,
// making a few sanity checks.
func openArchiveForWriting(path string) (*os.File, error) {
	// path can be either a pipe or a regular file
	// in the case of a pipe, we require that we can open it for write
	// in the case of a regular file, we don't want to overwrite any pre-existing file
	// so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
	// only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
	fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, errors.Wrapf(err, "opening file %q", path)
	}
	succeeded := false
	defer func() {
		if !succeeded {
			fh.Close()
		}
	}()
	fhStat, err := fh.Stat()
	if err != nil {
		return nil, errors.Wrapf(err, "statting file %q", path)
	}

	if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
		return nil, errors.New("docker-archive doesn't support modifying existing images")
	}

	succeeded = true
	return fh, nil
}
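A hedged sketch of the Writer workflow: open a fresh archive, mint a destination reference for one tagged image, and close the archive when done. The output path, image name, and "log" import are illustrative assumptions; the actual copy step is schematic:

w, err := archive.NewWriter(nil, "/tmp/out.tar")
if err != nil {
	log.Fatal(err)
}
defer w.Close() // flushes the tar stream; no images can be added afterwards

named, _ := reference.ParseNormalizedNamed("example.com/app:v1")
tagged := named.(reference.NamedTagged) // safe here: the string carries a tag
destRef, err := w.NewReference(tagged)
if err != nil {
	log.Fatal(err)
}
// destRef can now be passed as the destination of an image copy operation.
_ = destRef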
23
vendor/github.com/containers/image/v5/docker/cache.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
package docker

import (
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/types"
)

// bicTransportScope returns a BICTransportScope appropriate for ref.
func bicTransportScope(ref dockerReference) types.BICTransportScope {
	// Blobs can be reused across the whole registry.
	return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
}

// newBICLocationReference returns a BICLocationReference appropriate for ref.
func newBICLocationReference(ref dockerReference) types.BICLocationReference {
	// Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
	return types.BICLocationReference{Opaque: ref.ref.Name()}
}

// parseBICLocationReference returns a repository for encoded lr.
func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
	return reference.ParseNormalizedNamed(lr.Opaque)
}
828
vendor/github.com/containers/image/v5/docker/docker_client.go
generated
vendored
Normal file
828
vendor/github.com/containers/image/v5/docker/docker_client.go
generated
vendored
Normal file
|
|
@ -0,0 +1,828 @@
|
|||
package docker

import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/containers/image/v5/version"
"github.com/containers/storage/pkg/homedir"
clientLib "github.com/docker/distribution/registry/client"
"github.com/docker/go-connections/tlsconfig"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

const (
dockerHostname = "docker.io"
dockerV1Hostname = "index.docker.io"
dockerRegistry = "registry-1.docker.io"

resolvedPingV2URL = "%s://%s/v2/"
resolvedPingV1URL = "%s://%s/v1/_ping"
tagsPath = "/v2/%s/tags/list"
manifestPath = "/v2/%s/manifests/%s"
blobsPath = "/v2/%s/blobs/%s"
blobUploadPath = "/v2/%s/blobs/uploads/"
extensionsSignaturePath = "/extensions/v2/%s/signatures/%s"

minimumTokenLifetimeSeconds = 60

extensionSignatureSchemaVersion = 2 // extensionSignature.Version
extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type

backoffNumIterations = 5
backoffInitialDelay = 2 * time.Second
backoffMaxDelay = 60 * time.Second
)

type certPath struct {
path string
absolute bool
}

var (
homeCertDir = filepath.FromSlash(".config/containers/certs.d")
perHostCertDirs = []certPath{
{path: "/etc/containers/certs.d", absolute: true},
{path: "/etc/docker/certs.d", absolute: true},
}

defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
)

// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
// extensionSignature represents a Docker image signature.
type extensionSignature struct {
Version int `json:"schemaVersion"` // Version specifies the schema version
Name string `json:"name"` // Name must be in "sha256:<digest>@signatureName" format
Type string `json:"type"` // Type is optional; if not set it will be defaulted to "AtomicImageV1"
Content []byte `json:"content"` // Content contains the signature
}

// extensionSignatureList represents a list of Docker image signatures.
type extensionSignatureList struct {
Signatures []extensionSignature `json:"signatures"`
}

type bearerToken struct {
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
expirationTime time.Time
}

// dockerClient is configuration for dealing with a single container registry.
type dockerClient struct {
// The following members are set by newDockerClient and do not change afterwards.
sys *types.SystemContext
registry string
userAgent string

// tlsClientConfig is set up by newDockerClient and will be used and updated
// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
tlsClientConfig *tls.Config
// The following members are not set by newDockerClient and must be set by callers if needed.
auth types.DockerAuthConfig
registryToken string
signatureBase signatureStorageBase
scope authScope

// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
client *http.Client
scheme string
challenges []challenge
supportsSignatures bool

// Private state for setupRequestAuth (key: string, value: bearerToken)
tokenCache sync.Map
// Private state for detectProperties:
detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
detectPropertiesError error // detectPropertiesError caches the initial error.
}

type authScope struct {
remoteName string
actions string
}

// sendAuth determines whether we need authentication for the v2 or v1 endpoint.
type sendAuth int

const (
// v2 endpoint with authentication.
v2Auth sendAuth = iota
// v1 endpoint with authentication.
// TODO: Get v1Auth working
// v1Auth
// no authentication, works for both v1 and v2.
noAuth
)

func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
token := new(bearerToken)
if err := json.Unmarshal(blob, &token); err != nil {
return nil, err
}
if token.Token == "" {
token.Token = token.AccessToken
}
if token.ExpiresIn < minimumTokenLifetimeSeconds {
token.ExpiresIn = minimumTokenLifetimeSeconds
logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
}
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
return token, nil
}
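newBearerTokenFromJSONBlob accepts both token spellings, enforces the minimum lifetime, and backfills issued_at before deriving the expiry. The helper itself is unexported; this is a sketch of the same normalization rules applied to a hypothetical token response:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// tokenBlob mirrors the wire format above; the expiry is derived, not parsed.
type tokenBlob struct {
	Token       string    `json:"token"`
	AccessToken string    `json:"access_token"`
	ExpiresIn   int       `json:"expires_in"`
	IssuedAt    time.Time `json:"issued_at"`
}

func main() {
	// OAuth2-style response: only access_token is set, expiry is very short.
	blob := []byte(`{"access_token": "abc", "expires_in": 10}`)
	var t tokenBlob
	if err := json.Unmarshal(blob, &t); err != nil {
		panic(err)
	}
	if t.Token == "" {
		t.Token = t.AccessToken // accept either field name
	}
	if t.ExpiresIn < 60 {
		t.ExpiresIn = 60 // enforce the minimum token lifetime
	}
	if t.IssuedAt.IsZero() {
		t.IssuedAt = time.Now().UTC() // registries may omit issued_at
	}
	expires := t.IssuedAt.Add(time.Duration(t.ExpiresIn) * time.Second)
	fmt.Println(t.Token, expires.After(time.Now())) // abc true
}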
// this is cloned from docker/go-connections because upstream docker has changed
// it and `make deps` here fails otherwise.
// We'll drop this once we upgrade to docker 1.13.x deps.
func serverDefault() *tls.Config {
return &tls.Config{
// Avoid fallback to SSL protocols < TLS1.0
MinVersion: tls.VersionTLS10,
PreferServerCipherSuites: true,
CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
}
}

// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
if sys != nil && sys.DockerCertPath != "" {
return sys.DockerCertPath, nil
}
if sys != nil && sys.DockerPerHostCertDirPath != "" {
return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil
}

var (
hostCertDir string
fullCertDirPath string
)

for _, perHostCertDir := range append([]certPath{{path: filepath.Join(homedir.Get(), homeCertDir), absolute: false}}, perHostCertDirs...) {
if sys != nil && sys.RootForImplicitAbsolutePaths != "" && perHostCertDir.absolute {
hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, perHostCertDir.path)
} else {
hostCertDir = perHostCertDir.path
}

fullCertDirPath = filepath.Join(hostCertDir, hostPort)
_, err := os.Stat(fullCertDirPath)
if err == nil {
break
}
if os.IsNotExist(err) {
continue
}
if os.IsPermission(err) {
logrus.Debugf("error accessing certs directory due to permissions: %v", err)
continue
}
return "", err
}
return fullCertDirPath, nil
}

// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry).
// "write" specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection).
// signatureBase is always set in the return value.
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
auth, err := config.GetCredentialsForRef(sys, ref.ref)
if err != nil {
return nil, errors.Wrapf(err, "getting username and password")
}

sigBase, err := SignatureStorageBaseURL(sys, ref, write)
if err != nil {
return nil, err
}

registry := reference.Domain(ref.ref)
client, err := newDockerClient(sys, registry, ref.ref.Name())
if err != nil {
return nil, err
}
client.auth = auth
if sys != nil {
client.registryToken = sys.DockerBearerRegistryToken
}
client.signatureBase = sigBase
client.scope.actions = actions
client.scope.remoteName = reference.Path(ref.ref)
return client, nil
}
// newDockerClient returns a new dockerClient instance for the given registry
// and reference. The reference is used to query the registry configuration
// and can either be a registry (e.g., "registry.com[:5000]") or a repository
// (e.g., "registry.com[:5000][/some/namespace]/repo").
// Please note that newDockerClient does not set all members of dockerClient
// (e.g., username and password); those must be set by callers if necessary.
func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
hostName := registry
if registry == dockerHostname {
registry = dockerRegistry
}
tlsClientConfig := serverDefault()

// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
// dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
// undocumented and may change if docker/docker changes.
certDir, err := dockerCertDir(sys, hostName)
if err != nil {
return nil, err
}
if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
return nil, err
}

// Check if TLS verification shall be skipped (default=false), which can
// be specified in the sysregistriesv2 configuration.
skipVerify := false
reg, err := sysregistriesv2.FindRegistry(sys, reference)
if err != nil {
return nil, errors.Wrapf(err, "loading registries")
}
if reg != nil {
if reg.Blocked {
return nil, fmt.Errorf("registry %s is blocked in %s or %s", reg.Prefix, sysregistriesv2.ConfigPath(sys), sysregistriesv2.ConfigDirPath(sys))
}
skipVerify = reg.Insecure
}
tlsClientConfig.InsecureSkipVerify = skipVerify

userAgent := defaultUserAgent
if sys != nil && sys.DockerRegistryUserAgent != "" {
userAgent = sys.DockerRegistryUserAgent
}

return &dockerClient{
sys: sys,
registry: registry,
userAgent: userAgent,
tlsClientConfig: tlsClientConfig,
}, nil
}
// CheckAuth validates the credentials by attempting to log into the registry.
// It returns an error if the HTTP request failed or if the status code received was 401.
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
client, err := newDockerClient(sys, registry, registry)
if err != nil {
return errors.Wrapf(err, "creating new docker client")
}
client.auth = types.DockerAuthConfig{
Username: username,
Password: password,
}

resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil)
if err != nil {
return err
}
defer resp.Body.Close()

return httpResponseToError(resp, "")
}
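Since CheckAuth is exported, callers can use it as a cheap login probe before attempting an upload; a minimal sketch against a hypothetical registry host:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	// Hypothetical credentials and registry host; CheckAuth performs a GET /v2/
	// with basic or bearer auth and maps a 401 response to an error.
	sys := &types.SystemContext{}
	err := docker.CheckAuth(context.Background(), sys, "user", "secret", "registry.example.com")
	if err != nil {
		fmt.Fprintf(os.Stderr, "login failed: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("credentials accepted")
}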
// SearchResult holds the information of each matching image.
// It matches the output returned by the v1 endpoint.
type SearchResult struct {
Name string `json:"name"`
Description string `json:"description"`
// StarCount states the number of stars the image has
StarCount int `json:"star_count"`
IsTrusted bool `json:"is_trusted"`
// IsAutomated states whether the image is an automated build
IsAutomated bool `json:"is_automated"`
// IsOfficial states whether the image is an official build
IsOfficial bool `json:"is_official"`
}

// SearchRegistry queries a registry for images that contain "image" in their name.
// The limit is the maximum number of results desired.
// Note: the limit value doesn't work with all registries;
// for example registry.access.redhat.com returns all the results without limiting them to the limit value.
func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
type V2Results struct {
// Repositories holds the results returned by the /v2/_catalog endpoint
Repositories []string `json:"repositories"`
}
type V1Results struct {
// Results holds the results returned by the /v1/search endpoint
Results []SearchResult `json:"results"`
}
v1Res := &V1Results{}

// Get credentials from the authfile for the underlying hostname.
// We can't use GetCredentialsForRef here because we want to search the whole registry.
auth, err := config.GetCredentials(sys, registry)
if err != nil {
return nil, errors.Wrapf(err, "getting username and password")
}

// The /v2/_catalog endpoint has been disabled for docker.io, therefore
// a call made to that endpoint will fail. So we use the v1 hostname
// for docker.io for simplicity of implementation and the fact that it
// returns search results.
hostname := registry
if registry == dockerHostname {
hostname = dockerV1Hostname
}

client, err := newDockerClient(sys, hostname, registry)
if err != nil {
return nil, errors.Wrapf(err, "creating new docker client")
}
client.auth = auth
if sys != nil {
client.registryToken = sys.DockerBearerRegistryToken
}

// Only try the v1 search endpoint if the search query is not empty. If it is
// empty, skip to the v2 endpoint.
if image != "" {
// set up the query values for the v1 endpoint
u := url.URL{
Path: "/v1/search",
}
q := u.Query()
q.Set("q", image)
q.Set("n", strconv.Itoa(limit))
u.RawQuery = q.Encode()

logrus.Debugf("trying to talk to v1 search endpoint")
resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp, ""))
} else {
if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
return nil, err
}
return v1Res.Results, nil
}
}
}

logrus.Debugf("trying to talk to v2 search endpoint")
searchRes := []SearchResult{}
path := "/v2/_catalog"
for len(searchRes) < limit {
resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err := httpResponseToError(resp, "")
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
v2Res := &V2Results{}
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
}

for _, repo := range v2Res.Repositories {
if len(searchRes) == limit {
break
}
if strings.Contains(repo, image) {
res := SearchResult{
Name: repo,
}
// bugzilla.redhat.com/show_bug.cgi?id=1976283
// If we have a full match, make sure it's listed as the first result.
// (Note there might be a full match we never see if we reach the result limit first.)
if repo == image {
searchRes = append([]SearchResult{res}, searchRes...)
} else {
searchRes = append(searchRes, res)
}
}
}

link := resp.Header.Get("Link")
if link == "" {
break
}
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
if err != nil {
return searchRes, err
}

// can be relative or absolute, but we only want the path (and I
// guess we're in trouble if it forwards to a new place...)
path = linkURL.Path
if linkURL.RawQuery != "" {
path += "?"
path += linkURL.RawQuery
}
}
return searchRes, nil
}
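A short usage sketch of the exported SearchRegistry; quay.io and the query string are placeholder values:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	// Ask the registry for up to 5 repositories whose name contains "fedora".
	// Which endpoint answers (v1 search or v2 catalog) depends on the registry.
	results, err := docker.SearchRegistry(context.Background(), &types.SystemContext{}, "quay.io", "fedora", 5)
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		fmt.Println(r.Name, "-", r.Description)
	}
}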
// makeRequest creates and executes an http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
if err := c.detectProperties(ctx); err != nil {
return nil, err
}

urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
url, err := url.Parse(urlString)
if err != nil {
return nil, err
}
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
}
// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it,
// silently falling back to fallbackDelay if the header is missing or invalid.
func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration {
after := res.Header.Get("Retry-After")
if after == "" {
return fallbackDelay
}
logrus.Debugf("Detected 'Retry-After' header %q", after)
// First, check if we have a numerical value.
if num, err := strconv.ParseInt(after, 10, 64); err == nil {
return time.Duration(num) * time.Second
}
// Second, check if we have an HTTP date.
// If the delta between the date and now is positive, use it.
// Otherwise, fall back to using the default exponential back off.
if t, err := http.ParseTime(after); err == nil {
delta := time.Until(t)
if delta > 0 {
return delta
}
logrus.Debugf("Retry-After date in the past, ignoring it")
return fallbackDelay
}
// If the header contents are bogus, fall back to using the default exponential back off.
logrus.Debugf("Invalid Retry-After format, ignoring it")
return fallbackDelay
}
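parseRetryAfter is unexported, but the two accepted header forms (a delay in seconds, or an HTTP date) are easy to see in a standalone re-implementation sketch:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// retryAfter re-implements the rules above for illustration only.
func retryAfter(h http.Header, fallback time.Duration) time.Duration {
	after := h.Get("Retry-After")
	if after == "" {
		return fallback
	}
	// Numeric form: a count of seconds.
	if d, err := time.ParseDuration(after + "s"); err == nil && d >= 0 {
		return d
	}
	// HTTP-date form: use the delta if it lies in the future.
	if t, err := http.ParseTime(after); err == nil {
		if delta := time.Until(t); delta > 0 {
			return delta
		}
	}
	return fallback
}

func main() {
	h := http.Header{}
	h.Set("Retry-After", "120")
	fmt.Println(retryAfter(h, 2*time.Second)) // 2m0s
	h.Set("Retry-After", time.Now().Add(30*time.Second).UTC().Format(http.TimeFormat))
	fmt.Println(retryAfter(h, 2*time.Second)) // roughly 30s
}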
// makeRequestToResolvedURL creates and executes an http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// In case of an HTTP 429 status code in the response, it may automatically retry a few times.
// TODO(runcom): too many arguments here, use a struct
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
delay := backoffInitialDelay
attempts := 0
for {
res, err := c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
attempts++
if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests; success or any other failure is returned to the caller immediately
stream != nil || // We can't retry with a body (which is not restartable in the general case)
attempts == backoffNumIterations {
return res, err
}
// close response body before retry or context done
res.Body.Close()

delay = parseRetryAfter(res, delay)
if delay > backoffMaxDelay {
delay = backoffMaxDelay
}
logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url.Redacted(), delay.Seconds())
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-time.After(delay):
// Nothing
}
delay = delay * 2 // exponential back off
}
}
// makeRequestToResolvedURLOnce creates and executes an http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// Note that no exponential back off is performed when receiving an HTTP 429 status code.
func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
req, err := http.NewRequestWithContext(ctx, method, url.String(), stream)
if err != nil {
return nil, err
}
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1; http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it.
req.ContentLength = streamLen
}
req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
for n, h := range headers {
for _, hh := range h {
req.Header.Add(n, hh)
}
}
req.Header.Add("User-Agent", c.userAgent)
if auth == v2Auth {
if err := c.setupRequestAuth(req, extraScope); err != nil {
return nil, err
}
}
logrus.Debugf("%s %s", method, url.Redacted())
res, err := c.client.Do(req)
if err != nil {
return nil, err
}
return res, nil
}
// We're using the challenges from the /v2/ ping response and not the one from the destination
// URL in this request because:
//
// 1) docker does that as well
// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
//
// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follow-ups
func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error {
if len(c.challenges) == 0 {
return nil
}
schemeNames := make([]string, 0, len(c.challenges))
for _, challenge := range c.challenges {
schemeNames = append(schemeNames, challenge.Scheme)
switch challenge.Scheme {
case "basic":
req.SetBasicAuth(c.auth.Username, c.auth.Password)
return nil
case "bearer":
registryToken := c.registryToken
if registryToken == "" {
cacheKey := ""
scopes := []authScope{c.scope}
if extraScope != nil {
// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
scopes = append(scopes, *extraScope)
}
var token bearerToken
t, inCache := c.tokenCache.Load(cacheKey)
if inCache {
token = t.(bearerToken)
}
if !inCache || time.Now().After(token.expirationTime) {
var (
t *bearerToken
err error
)
if c.auth.IdentityToken != "" {
t, err = c.getBearerTokenOAuth2(req.Context(), challenge, scopes)
} else {
t, err = c.getBearerToken(req.Context(), challenge, scopes)
}
if err != nil {
return err
}

token = *t
c.tokenCache.Store(cacheKey, token)
}
registryToken = token.Token
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken))
return nil
default:
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
}
}
logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", "))
return nil
}
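In the bearer case the client ends up requesting a token scoped as repository:<name>:<actions> from the challenge's realm. A sketch of how such a token-request URL is assembled, with a hypothetical realm, service, and repository names:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Values that would come from the WWW-Authenticate challenge on /v2/.
	realm := "https://auth.example.com/token" // hypothetical auth server
	service := "registry.example.com"

	params := url.Values{}
	params.Add("service", service)
	// One scope per repository the request needs to touch.
	params.Add("scope", fmt.Sprintf("repository:%s:%s", "library/busybox", "pull,push"))
	params.Add("scope", fmt.Sprintf("repository:%s:%s", "library/alpine", "pull"))

	// GET <realm>?service=...&scope=...&scope=... with basic auth yields the bearer token.
	fmt.Println(realm + "?" + params.Encode())
}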
func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge challenge,
scopes []authScope) (*bearerToken, error) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return nil, errors.Errorf("missing realm in bearer auth challenge")
}

authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil)
if err != nil {
return nil, err
}

// Make the form data required by the oauth2 authentication.
// More details here: https://docs.docker.com/registry/spec/auth/oauth/
params := authReq.URL.Query()
if service, ok := challenge.Parameters["service"]; ok && service != "" {
params.Add("service", service)
}
for _, scope := range scopes {
if scope.remoteName != "" && scope.actions != "" {
params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
}
}
params.Add("grant_type", "refresh_token")
params.Add("refresh_token", c.auth.IdentityToken)
params.Add("client_id", "containers/image")

authReq.Body = io.NopCloser(strings.NewReader(params.Encode()))
authReq.Header.Add("User-Agent", c.userAgent)
authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
res, err := c.client.Do(authReq)
if err != nil {
return nil, err
}
defer res.Body.Close()
if err := httpResponseToError(res, "Trying to obtain access token"); err != nil {
return nil, err
}

tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
if err != nil {
return nil, err
}

return newBearerTokenFromJSONBlob(tokenBlob)
}
func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
scopes []authScope) (*bearerToken, error) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return nil, errors.Errorf("missing realm in bearer auth challenge")
}

authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil)
if err != nil {
return nil, err
}

params := authReq.URL.Query()
if c.auth.Username != "" {
params.Add("account", c.auth.Username)
}

if service, ok := challenge.Parameters["service"]; ok && service != "" {
params.Add("service", service)
}

for _, scope := range scopes {
if scope.remoteName != "" && scope.actions != "" {
params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
}
}

authReq.URL.RawQuery = params.Encode()

if c.auth.Username != "" && c.auth.Password != "" {
authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
}
authReq.Header.Add("User-Agent", c.userAgent)

logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
res, err := c.client.Do(authReq)
if err != nil {
return nil, err
}
defer res.Body.Close()
if err := httpResponseToError(res, "Requesting bearer token"); err != nil {
return nil, err
}
tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
if err != nil {
return nil, err
}

return newBearerTokenFromJSONBlob(tokenBlob)
}
// detectPropertiesHelper performs the work of detectProperties, which executes
// it at most once.
func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
// We overwrite the TLS client's `InsecureSkipVerify` only if explicitly
// specified by the system context.
if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
}
tr := tlsclientconfig.NewTransport()
tr.TLSClientConfig = c.tlsClientConfig
c.client = &http.Client{Transport: tr}

ping := func(scheme string) error {
url, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry))
if err != nil {
return err
}
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err)
return err
}
defer resp.Body.Close()
logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
return httpResponseToError(resp, "")
}
c.challenges = parseAuthHeader(resp.Header)
c.scheme = scheme
c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
return nil
}
err := ping("https")
if err != nil && c.tlsClientConfig.InsecureSkipVerify {
err = ping("http")
}
if err != nil {
err = errors.Wrapf(err, "pinging container registry %s", c.registry)
if c.sys != nil && c.sys.DockerDisableV1Ping {
return err
}
// best effort to understand if we're talking to a V1 registry
pingV1 := func(scheme string) bool {
url, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry))
if err != nil {
return false
}
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
if err != nil {
logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err)
return false
}
defer resp.Body.Close()
logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
return false
}
return true
}
isV1 := pingV1("https")
if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
isV1 = pingV1("http")
}
if isV1 {
err = ErrV1NotSupported
}
}
return err
}
// detectProperties detects various properties of the registry.
// See the dockerClient documentation for members which are affected by this.
func (c *dockerClient) detectProperties(ctx context.Context) error {
c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
return c.detectPropertiesError
}
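detectProperties is a textbook sync.Once memoization of a fallible probe: the helper runs once, and every caller, concurrent or later, shares the cached result. A generic sketch of the pattern:

package main

import (
	"fmt"
	"sync"
)

type lazyProbe struct {
	once sync.Once
	err  error
}

// probe runs the expensive detection exactly once; all callers share the result.
func (p *lazyProbe) probe() error {
	p.once.Do(func() {
		fmt.Println("detecting...") // printed a single time
		p.err = nil                 // or the detection error, cached for everyone
	})
	return p.err
}

func main() {
	var p lazyProbe
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _ = p.probe() }()
	}
	wg.Wait()
}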
// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
defer res.Body.Close()

if res.StatusCode != http.StatusOK {
return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
}

body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
if err != nil {
return nil, err
}

var parsedBody extensionSignatureList
if err := json.Unmarshal(body, &parsedBody); err != nil {
return nil, errors.Wrapf(err, "decoding signature list")
}
return &parsedBody, nil
}
153 vendor/github.com/containers/image/v5/docker/docker_image.go generated vendored Normal file
@@ -0,0 +1,153 @@
package docker

import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)

// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods
// which are specific to Docker.
type Image struct {
types.ImageCloser
src *dockerImageSource
}

// newImage returns a new Image interface type after setting up
// a client to the registry hosting the given image.
// The caller must call .Close() on the returned Image.
func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
s, err := newImageSource(ctx, sys, ref)
if err != nil {
return nil, err
}
img, err := image.FromSource(ctx, sys, s)
if err != nil {
return nil, err
}
return &Image{ImageCloser: img, src: s}, nil
}

// SourceRefFullName returns a fully expanded name for the repository this image is in.
func (i *Image) SourceRefFullName() string {
return i.src.logicalRef.ref.Name()
}

// GetRepositoryTags lists all tags available in the repository. The tag
// provided inside the ImageReference will be ignored. (This is a
// backward-compatible shim method which calls the module-level
// GetRepositoryTags.)
func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
return GetRepositoryTags(ctx, i.src.c.sys, i.src.logicalRef)
}

// GetRepositoryTags lists all tags available in the repository. The tag
// provided inside the ImageReference will be ignored.
func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) {
dr, ok := ref.(dockerReference)
if !ok {
return nil, errors.Errorf("ref must be a dockerReference")
}

path := fmt.Sprintf(tagsPath, reference.Path(dr.ref))
client, err := newDockerClientFromRef(sys, dr, false, "pull")
if err != nil {
return nil, errors.Wrap(err, "failed to create client")
}

tags := make([]string, 0)

for {
res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
return nil, err
}
defer res.Body.Close()
if err := httpResponseToError(res, "fetching tags list"); err != nil {
return nil, err
}

var tagsHolder struct {
Tags []string
}
if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil {
return nil, err
}
tags = append(tags, tagsHolder.Tags...)

link := res.Header.Get("Link")
if link == "" {
break
}

linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
if err != nil {
return tags, err
}

// can be relative or absolute, but we only want the path (and I
// guess we're in trouble if it forwards to a new place...)
path = linkURL.Path
if linkURL.RawQuery != "" {
path += "?"
path += linkURL.RawQuery
}
}
return tags, nil
}
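GetRepositoryTags is exported and drives the /v2/<name>/tags/list pagination shown above; a minimal usage sketch:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func main() {
	// "//busybox" is a docker:// transport reference without the transport prefix.
	ref, err := docker.ParseReference("//busybox")
	if err != nil {
		panic(err)
	}
	tags, err := docker.GetRepositoryTags(context.Background(), &types.SystemContext{}, ref)
	if err != nil {
		panic(err)
	}
	for _, t := range tags {
		fmt.Println(t)
	}
}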
// GetDigest returns the image's digest.
// Use this to optimize and avoid use of an ImageSource based on the returned digest;
// if you are going to use an ImageSource anyway, it's more efficient to create it first
// and compute the digest from the value returned by GetManifest.
// NOTE: Implemented to avoid Docker Hub API limits; mirror configuration may be
// ignored (but may be implemented in the future).
func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) {
dr, ok := ref.(dockerReference)
if !ok {
return "", errors.Errorf("ref must be a dockerReference")
}

tagOrDigest, err := dr.tagOrDigest()
if err != nil {
return "", err
}

client, err := newDockerClientFromRef(sys, dr, false, "pull")
if err != nil {
return "", errors.Wrap(err, "failed to create client")
}

path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest)
headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes,
}

res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil)
if err != nil {
return "", err
}

defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return "", errors.Wrapf(registryHTTPResponseToError(res), "reading digest %s in %s", tagOrDigest, dr.ref.Name())
}

dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest"))
if err != nil {
return "", err
}

return dig, nil
}
704 vendor/github.com/containers/image/v5/docker/docker_image_dest.go generated vendored Normal file
@@ -0,0 +1,704 @@
package docker

import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/internal/uploadreader"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

type dockerImageDestination struct {
ref dockerReference
c *dockerClient
// State
manifestDigest digest.Digest // or "" if not yet known.
}

// newImageDestination creates a new ImageDestination for the specified image reference.
func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
c, err := newDockerClientFromRef(sys, ref, true, "pull,push")
if err != nil {
return nil, err
}
return &dockerImageDestination{
ref: ref,
c: c,
}, nil
}

// Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *dockerImageDestination) Reference() types.ImageReference {
return d.ref
}

// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dockerImageDestination) Close() error {
return nil
}

func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
mimeTypes := []string{
imgspecv1.MediaTypeImageManifest,
manifest.DockerV2Schema2MediaType,
imgspecv1.MediaTypeImageIndex,
manifest.DockerV2ListMediaType,
}
if d.c.sys == nil || !d.c.sys.DockerDisableDestSchema1MIMETypes {
mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType)
}
return mimeTypes
}

// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
if err := d.c.detectProperties(ctx); err != nil {
return err
}
switch {
case d.c.supportsSignatures:
return nil
case d.c.signatureBase != nil:
return nil
default:
return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and the lookaside configuration should not be empty")
}
}

func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.Compress
}

// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
// uploaded to the image destination, true otherwise.
func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
return true
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
}

// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }

func (c *sizeCounter) Write(p []byte) (n int, err error) {
c.size += int64(len(p))
return len(p), nil
}
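sizeCounter is combined with io.TeeReader in PutBlob below so the upload body can be measured as it streams to the registry. A self-contained sketch of that measuring pattern:

package main

import (
	"fmt"
	"io"
	"strings"
)

// countingWriter mirrors sizeCounter: it discards data but tracks its size.
type countingWriter struct{ size int64 }

func (c *countingWriter) Write(p []byte) (int, error) {
	c.size += int64(len(p))
	return len(p), nil
}

func main() {
	body := strings.NewReader("layer bytes go here")
	counter := &countingWriter{}
	// Everything read from tee is also written to counter as a side effect.
	tee := io.TeeReader(body, counter)
	n, err := io.Copy(io.Discard, tee) // stands in for the registry upload
	if err != nil {
		panic(err)
	}
	fmt.Println(n, counter.size) // 19 19
}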
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
return true
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests {
logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
defer cleanup()
stream = streamCopy
}

if inputInfo.Digest != "" {
// This should not really be necessary; at least the copy code calls TryReusingBlob automatically.
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, cache)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
return reusedInfo, nil
}
}

// FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "initiating layer upload to %s in %s", uploadPath, d.c.registry)
}
uploadLocation, err := res.Location()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "determining upload URL")
}

digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
sizeCounter := &sizeCounter{}
stream = io.TeeReader(stream, sizeCounter)

uploadLocation, err = func() (*url.URL, error) { // A scope for defer
uploadReader := uploadreader.NewUploadReader(stream)
// This error text should never be user-visible; we terminate only after makeRequestToResolvedURL
// returns, so there isn't a way for the error text to be provided to any of our callers.
defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
if err != nil {
logrus.Debugf("Error uploading layer chunked %v", err)
return nil, err
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
return nil, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer chunked")
}
uploadLocation, err := res.Location()
if err != nil {
return nil, errors.Wrap(err, "determining upload URL")
}
return uploadLocation, nil
}()
if err != nil {
return types.BlobInfo{}, err
}
blobDigest := digester.Digest()

// FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)

locationQuery := uploadLocation.Query()
locationQuery.Set("digest", blobDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer to %s", uploadLocation)
}

logrus.Debugf("Upload of layer %s complete", blobDigest)
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil
}
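PutBlob follows the registry push protocol: POST /v2/<name>/blobs/uploads/ opens a session, PATCH streams the bytes to the returned Location, and a final PUT with ?digest=<digest> commits the blob. A sketch of just the commit-URL handling, with a hypothetical session URL (the digest shown is the sha256 of the empty string):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Location header returned by the initial POST (hypothetical session ID).
	loc, err := url.Parse("https://registry.example.com/v2/library/busybox/blobs/uploads/abc123")
	if err != nil {
		panic(err)
	}
	// ... PATCH the blob bytes to loc ...

	// Commit: the same URL with the blob digest attached as a query parameter.
	q := loc.Query()
	q.Set("digest", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	loc.RawQuery = q.Encode()
	fmt.Println(loc.String()) // PUT this URL to finish the upload
}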
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)
if err != nil {
return false, -1, err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
logrus.Debugf("... already exists")
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
return false, -1, errors.Wrapf(registryHTTPResponseToError(res), "checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
default:
return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
}
}
// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
u := url.URL{
Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
RawQuery: url.Values{
"mount": {srcDigest.String()},
"from": {reference.Path(srcRepo)},
}.Encode(),
}
logrus.Debugf("Trying to mount %s", u.Redacted())
res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope)
if err != nil {
return err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusCreated:
logrus.Debugf("... mount OK")
return nil
case http.StatusAccepted:
// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
// Abort, and let the ultimate caller do an upload when it's ready, instead.
// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
uploadLocation, err := res.Location()
if err != nil {
return errors.Wrap(err, "determining upload URL after a mount attempt")
}
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted())
res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope)
if err != nil {
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
} else {
defer res2.Body.Close()
if res2.StatusCode != http.StatusNoContent {
logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode))
}
}
// Anyway, if canceling the upload fails, ignore it and return the more important error:
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
default:
logrus.Debugf("Error mounting, response %#v", *res)
return errors.Wrapf(registryHTTPResponseToError(res), "mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
}
}
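The mount request built above is POST /v2/<destination>/blobs/uploads/?mount=<digest>&from=<source>; a 201 means the registry mounted the blob, a 202 means it fell back to a fresh upload session. A sketch of the URL construction with hypothetical repository names:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Cross-repository mount request URL, per the distribution API.
	u := url.URL{
		Path: fmt.Sprintf("/v2/%s/blobs/uploads/", "myorg/dest-repo"),
		RawQuery: url.Values{
			"mount": {"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
			"from":  {"myorg/src-repo"},
		}.Encode(),
	}
	fmt.Println(u.String()) // POST this URL; check for 201 vs 202
}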
// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
// The caller must ensure info.Digest is set.
func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (bool, types.BlobInfo, error) {
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
if err != nil {
return false, types.BlobInfo{}, err
}
if exists {
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
}
return false, types.BlobInfo{}, nil
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}

// First, check whether the blob happens to already exist at the destination.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, cache)
if err != nil {
return false, types.BlobInfo{}, err
}
if haveBlob {
return true, reusedInfo, nil
}

// Then try reusing blobs from other locations.
bic := blobinfocache.FromBlobInfoCache(cache)
candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute)
for _, candidate := range candidates {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
} else {
logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
}

// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
}
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
continue
}

// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.

// Checking candidateRepo, and mounting from it, requires an
// expanded token scope.
extraScope := &authScope{
remoteName: reference.Path(candidateRepo),
actions: "pull",
}
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
// So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
// Even worse, docker/distribution does not actually reasonably implement canceling uploads
// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
// so, be a nice client and don't create unnecessary upload sessions on the server.
exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
if err != nil {
logrus.Debugf("... Failed: %v", err)
continue
}
if !exists {
// FIXME? Should we drop the blob from cache here (and elsewhere?)?
continue // logrus.Debug() already happened in blobExists
}
if candidateRepo.Name() != d.ref.ref.Name() {
if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
logrus.Debugf("... Mount failed: %v", err)
continue
}
}

bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))

compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
logrus.Debugf("... Failed: %v", err)
continue
}

return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil
}

return false, types.BlobInfo{}, nil
}
|
||||
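Seen from the caller's side, TryReusingBlob is the cheap first step of every layer push. A minimal sketch, assuming the caller already holds a types.ImageDestination and a types.BlobInfoCache from containers/image; pushLayer itself is a hypothetical helper, not part of this package:

package main

import (
	"context"
	"io"

	"github.com/containers/image/v5/types"
)

// pushLayer (hypothetical) tries the free path first: if the registry already
// has the blob, or can cross-repo mount it, skip the upload entirely.
func pushLayer(ctx context.Context, dest types.ImageDestination, cache types.BlobInfoCache, layer types.BlobInfo, stream io.Reader) (types.BlobInfo, error) {
	reused, info, err := dest.TryReusingBlob(ctx, layer, cache, true) // canSubstitute=true: any equivalent blob is acceptable
	if err != nil {
		return types.BlobInfo{}, err
	}
	if reused {
		return info, nil // info may describe a substituted or recompressed blob
	}
	return dest.PutBlob(ctx, stream, layer, cache, false) // fall back to a full upload
}
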
// PutManifest writes manifest to the destination.
// When the primary manifest is a manifest list, if instanceDigest is nil, we're saving the list
// itself, else instanceDigest contains a digest of the specific manifest instance to overwrite the
// manifest for; when the primary manifest is not a manifest list, instanceDigest should always be nil.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
	refTail := ""
	if instanceDigest != nil {
		// If the instanceDigest is provided, then use it as the refTail, because the reference,
		// whether it includes a tag or a digest, refers to the list as a whole, and not this
		// particular instance.
		refTail = instanceDigest.String()
		// Double-check that the manifest we've been given matches the digest we've been given.
		matches, err := manifest.MatchesDigest(m, *instanceDigest)
		if err != nil {
			return errors.Wrapf(err, "digesting manifest in PutManifest")
		}
		if !matches {
			manifestDigest, merr := manifest.Digest(m)
			if merr != nil {
				return errors.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%v attempting to compute it)", instanceDigest.String(), merr)
			}
			return errors.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
		}
	} else {
		// Compute the digest of the main manifest, or the list if it's a list, so that we
		// have a digest value to use if we're asked to save a signature for the manifest.
		digest, err := manifest.Digest(m)
		if err != nil {
			return err
		}
		d.manifestDigest = digest
		// The refTail should be either a digest (which we expect to match the value we just
		// computed) or a tag name.
		refTail, err = d.ref.tagOrDigest()
		if err != nil {
			return err
		}
	}

	path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)

	headers := map[string][]string{}
	mimeType := manifest.GuessMIMEType(m)
	if mimeType != "" {
		headers["Content-Type"] = []string{mimeType}
	}
	res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if !successStatus(res.StatusCode) {
		rawErr := registryHTTPResponseToError(res)
		err := errors.Wrapf(rawErr, "uploading manifest %s to %s", refTail, d.ref.ref.Name())
		if isManifestInvalidError(rawErr) {
			err = types.ManifestTypeRejectedError{Err: err}
		}
		return err
	}
	// An HTTP server may not be a registry at all, and just return 200 OK to everything
	// (in particular that can fairly easily happen after tearing down a website and
	// replacing it with a global 302 redirect to a new website, completely ignoring the
	// path in the request); in that case we could “succeed” uploading a whole image.
	// With docker/distribution we could rely on a Docker-Content-Digest header being present
	// (because docker/distribution/registry/client has been failing uploads if it was missing),
	// but that has been defined as explicitly optional by
	// https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers
	// So, just note the missing header in a debug log.
	if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 {
		logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry")
	}
	return nil
}

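The instanceDigest double-check above is a useful pattern on its own: never trust that manifest bytes and a caller-supplied digest agree. A minimal standalone sketch using the same containers/image helpers; verifyInstance is a hypothetical name:

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	digest "github.com/opencontainers/go-digest"
)

// verifyInstance mirrors the PutManifest check: the bytes we were handed must
// hash to the instance digest we were asked to write.
func verifyInstance(m []byte, want digest.Digest) error {
	matches, err := manifest.MatchesDigest(m, want)
	if err != nil {
		return fmt.Errorf("digesting manifest: %w", err)
	}
	if !matches {
		got, _ := manifest.Digest(m)
		return fmt.Errorf("manifest digest %q does not match requested instance %q", got, want)
	}
	return nil
}
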
// successStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
func successStatus(status int) bool {
	return status >= 200 && status <= 399
}

// isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error.
func isManifestInvalidError(err error) bool {
	errors, ok := err.(errcode.Errors)
	if !ok || len(errors) == 0 {
		return false
	}
	err = errors[0]
	ec, ok := err.(errcode.ErrorCoder)
	if !ok {
		return false
	}

	switch ec.ErrorCode() {
	// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
	case v2.ErrorCodeManifestInvalid:
		return true
	// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
	// when uploading to a tag (because it can’t find a matching tag inside the manifest)
	case v2.ErrorCodeTagInvalid:
		return true
	// ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
	// uploading an OCI manifest that is (correctly, according to the spec) missing
	// a top-level media type. See libpod issue #1719
	// FIXME: remove this case when ECR behavior is fixed
	case errcode.ErrorCodeUnsupported:
		return strings.Contains(err.Error(), "Invalid JSON syntax")
	default:
		return false
	}
}

// PutSignatures uploads a set of signatures to the relevant lookaside or API extension point.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to upload the signatures for (when
// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
	// Do not fail if we don’t really need to support signatures.
	if len(signatures) == 0 {
		return nil
	}
	if instanceDigest == nil {
		if d.manifestDigest == "" {
			// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
			return errors.Errorf("Unknown manifest digest, can't add signatures")
		}
		instanceDigest = &d.manifestDigest
	}

	if err := d.c.detectProperties(ctx); err != nil {
		return err
	}
	switch {
	case d.c.supportsSignatures:
		return d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest)
	case d.c.signatureBase != nil:
		return d.putSignaturesToLookaside(signatures, *instanceDigest)
	default:
		return errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
	}
}

// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil, for a manifest with manifestDigest.
func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, manifestDigest digest.Digest) error {
	// FIXME? This overwrites files one at a time, definitely not atomic.
	// A failure when updating signatures with a reordered copy could lose some of them.

	// Skip dealing with the manifest digest if not necessary.
	if len(signatures) == 0 {
		return nil
	}

	// NOTE: Keep this in sync with docs/signature-protocols.md!
	for i, signature := range signatures {
		url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
		err := d.putOneSignature(url, signature)
		if err != nil {
			return err
		}
	}
	// Remove any other signatures, if present.
	// We stop at the first missing signature; if a previous deleting loop aborted
	// prematurely, this may not clean up all of them, but one missing signature
	// is enough for dockerImageSource to stop looking for other signatures, so that
	// is sufficient.
	for i := len(signatures); ; i++ {
		url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
		missing, err := d.c.deleteOneSignature(url)
		if err != nil {
			return err
		}
		if missing {
			break
		}
	}

	return nil
}

// putOneSignature stores one signature to url.
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
	switch url.Scheme {
	case "file":
		logrus.Debugf("Writing to %s", url.Path)
		err := os.MkdirAll(filepath.Dir(url.Path), 0755)
		if err != nil {
			return err
		}
		err = os.WriteFile(url.Path, signature, 0644)
		if err != nil {
			return err
		}
		return nil

	case "http", "https":
		return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.Redacted())
	default:
		return errors.Errorf("Unsupported scheme when writing signature to %s", url.Redacted())
	}
}

// deleteOneSignature deletes a signature from url, if it exists.
// If it successfully determines that the signature does not exist, returns (true, nil)
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
	switch url.Scheme {
	case "file":
		logrus.Debugf("Deleting %s", url.Path)
		err := os.Remove(url.Path)
		if err != nil && os.IsNotExist(err) {
			return true, nil
		}
		return false, err

	case "http", "https":
		return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.Redacted())
	default:
		return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted())
	}
}

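Both the writer above and the reader in docker_image_src.go depend only on a shared naming convention: consecutive signature-<n> entries under a per-manifest location, with n starting at 1 and the first gap terminating the scan (see docs/signature-protocols.md). A sketch of that URL construction, assuming a base URL; the exact layout below is an illustration of the convention, not the package's private signatureStorageURL:

package main

import (
	"fmt"
	"net/url"

	digest "github.com/opencontainers/go-digest"
)

// signatureURL numbers signatures from 1, matching the loops above that probe
// signature-1, signature-2, ... until one is missing.
func signatureURL(base *url.URL, manifestDigest digest.Digest, index int) *url.URL {
	u := *base // shallow copy; we only change the path
	u.Path = fmt.Sprintf("%s@%s=%s/signature-%d", u.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
	return &u
}
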
// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension,
// for a manifest with manifestDigest.
func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, manifestDigest digest.Digest) error {
	// Skip dealing with the manifest digest, or reading the old state, if not necessary.
	if len(signatures) == 0 {
		return nil
	}

	// Because image signatures are a shared resource in Atomic Registry, the default upload
	// always adds signatures. Eventually we should also allow removing signatures,
	// but the X-Registry-Supports-Signatures API extension does not support that yet.

	existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, manifestDigest)
	if err != nil {
		return err
	}
	existingSigNames := map[string]struct{}{}
	for _, sig := range existingSignatures.Signatures {
		existingSigNames[sig.Name] = struct{}{}
	}

sigExists:
	for _, newSig := range signatures {
		for _, existingSig := range existingSignatures.Signatures {
			if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
				continue sigExists
			}
		}

		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
		var signatureName string
		for {
			randBytes := make([]byte, 16)
			n, err := rand.Read(randBytes)
			if err != nil || n != 16 {
				return errors.Wrapf(err, "generating random signature len %d", n)
			}
			signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
			if _, ok := existingSigNames[signatureName]; !ok {
				break
			}
		}
		sig := extensionSignature{
			Version: extensionSignatureSchemaVersion,
			Name:    signatureName,
			Type:    extensionSignatureTypeAtomic,
			Content: newSig,
		}
		body, err := json.Marshal(sig)
		if err != nil {
			return err
		}

		path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
		res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
		if err != nil {
			return err
		}
		defer res.Body.Close()
		if res.StatusCode != http.StatusCreated {
			logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
			return errors.Wrapf(registryHTTPResponseToError(res), "uploading signature to %s in %s", path, d.c.registry)
		}
	}

	return nil
}

// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
// original manifest list digest, if desired.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *dockerImageDestination) Commit(context.Context, types.UnparsedImage) error {
	return nil
}
771
vendor/github.com/containers/image/v5/docker/docker_image_src.go
generated vendored Normal file
@@ -0,0 +1,771 @@
package docker

import (
	"context"
	"fmt"
	"io"
	"mime"
	"mime/multipart"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
	"sync"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/iolimits"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

type dockerImageSource struct {
	logicalRef  dockerReference // The reference the user requested.
	physicalRef dockerReference // The actual reference we are accessing (possibly a mirror)
	c           *dockerClient
	// State
	cachedManifest         []byte // nil if not loaded yet
	cachedManifestMIMEType string // Only valid if cachedManifest != nil
}

// newImageSource creates a new ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
	registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
	if err != nil {
		return nil, errors.Wrapf(err, "loading registries configuration")
	}
	if registry == nil {
		// No configuration was found for the provided reference, so use the
		// equivalent of a default configuration.
		registry = &sysregistriesv2.Registry{
			Endpoint: sysregistriesv2.Endpoint{
				Location: ref.ref.String(),
			},
			Prefix: ref.ref.String(),
		}
	}

	// Check all endpoints for the manifest availability. If we find one that does
	// contain the image, it will be used for all future pull actions. Always try the
	// non-mirror original location last; this both transparently handles the case
	// of no mirrors configured, and ensures we return the error encountered when
	// accessing the upstream location if all endpoints fail.
	pullSources, err := registry.PullSourcesFromReference(ref.ref)
	if err != nil {
		return nil, err
	}
	type attempt struct {
		ref reference.Named
		err error
	}
	attempts := []attempt{}
	for _, pullSource := range pullSources {
		if sys != nil && sys.DockerLogMirrorChoice {
			logrus.Infof("Trying to access %q", pullSource.Reference)
		} else {
			logrus.Debugf("Trying to access %q", pullSource.Reference)
		}
		s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
		if err == nil {
			return s, nil
		}
		logrus.Debugf("Accessing %q failed: %v", pullSource.Reference, err)
		attempts = append(attempts, attempt{
			ref: pullSource.Reference,
			err: err,
		})
	}
	switch len(attempts) {
	case 0:
		return nil, errors.New("Internal error: newImageSource returned without trying any endpoint")
	case 1:
		return nil, attempts[0].err // If no mirrors are used, perfectly preserve the error type and add no noise.
	default:
		// Don’t just build a string, try to preserve the typed error.
		primary := &attempts[len(attempts)-1]
		extras := []string{}
		for i := 0; i < len(attempts)-1; i++ {
			// This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use.
			// The paired [] at least have some chance of being unambiguous.
			extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
		}
		return nil, errors.Wrapf(primary.err, "(Mirrors also failed: %s): %s", strings.Join(extras, "\n"), primary.ref.String())
	}
}

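The fallback strategy above (mirrors first, upstream last, first success wins, primary error type preserved) is worth seeing stripped of the registry plumbing. A minimal sketch under the assumption that endpoints are ordered the same way, with the non-mirror location last; tryEndpoints is a hypothetical helper:

package main

import (
	"fmt"
	"strings"
)

// tryEndpoints calls fn for each endpoint in order and stops at the first
// success. On total failure it returns the primary (last) endpoint's error,
// annotated with the mirror failures, mirroring newImageSource above.
func tryEndpoints(endpoints []string, fn func(string) error) error {
	if len(endpoints) == 0 {
		return fmt.Errorf("internal error: no endpoints to try")
	}
	mirrorErrs := []string{}
	var primaryErr error
	for i, ep := range endpoints {
		err := fn(ep)
		if err == nil {
			return nil
		}
		if i == len(endpoints)-1 {
			primaryErr = err // the upstream location; keep its error dominant
		} else {
			mirrorErrs = append(mirrorErrs, fmt.Sprintf("[%s: %v]", ep, err))
		}
	}
	if len(mirrorErrs) == 0 {
		return primaryErr // no mirrors configured: return the error unchanged
	}
	return fmt.Errorf("(mirrors also failed: %s): %w", strings.Join(mirrorErrs, " "), primaryErr)
}
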
// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource.
// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable.
// The caller must call .Close() on the returned ImageSource.
func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource) (*dockerImageSource, error) {
	physicalRef, err := newReference(pullSource.Reference)
	if err != nil {
		return nil, err
	}

	endpointSys := sys
	// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
	if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(physicalRef.ref) != reference.Domain(logicalRef.ref) {
		copy := *endpointSys
		copy.DockerAuthConfig = nil
		copy.DockerBearerRegistryToken = ""
		endpointSys = &copy
	}

	client, err := newDockerClientFromRef(endpointSys, physicalRef, false, "pull")
	if err != nil {
		return nil, err
	}
	client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure

	s := &dockerImageSource{
		logicalRef:  logicalRef,
		physicalRef: physicalRef,
		c:           client,
	}

	if err := s.ensureManifestIsLoaded(ctx); err != nil {
		return nil, err
	}
	return s, nil
}

// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (s *dockerImageSource) Reference() types.ImageReference {
	return s.logicalRef
}

// Close removes resources associated with an initialized ImageSource, if any.
func (s *dockerImageSource) Close() error {
	return nil
}

// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
func (s *dockerImageSource) SupportsGetBlobAt() bool {
	return true
}

// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
// to read the image's layers.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (s *dockerImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
	return nil, nil
}

// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
func simplifyContentType(contentType string) string {
	if contentType == "" {
		return contentType
	}
	mimeType, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		return ""
	}
	return mimeType
}

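simplifyContentType leans entirely on the standard library; its behaviour is easy to check in isolation:

package main

import (
	"fmt"
	"mime"
)

func main() {
	// Parameters are dropped, leaving just the media type.
	mt, params, err := mime.ParseMediaType("application/vnd.docker.distribution.manifest.v2+json; charset=utf-8")
	fmt.Println(mt, params, err)
	// Output: application/vnd.docker.distribution.manifest.v2+json map[charset:utf-8] <nil>
}
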
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
	if instanceDigest != nil {
		return s.fetchManifest(ctx, instanceDigest.String())
	}
	err := s.ensureManifestIsLoaded(ctx)
	if err != nil {
		return nil, "", err
	}
	return s.cachedManifest, s.cachedManifestMIMEType, nil
}

func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
	path := fmt.Sprintf(manifestPath, reference.Path(s.physicalRef.ref), tagOrDigest)
	headers := map[string][]string{
		"Accept": manifest.DefaultRequestedManifestMIMETypes,
	}
	res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
	if err != nil {
		return nil, "", err
	}
	logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return nil, "", errors.Wrapf(registryHTTPResponseToError(res), "reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
	}

	manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
	if err != nil {
		return nil, "", err
	}
	return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
}

// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
//
// ImageSource implementations are not required or expected to do any caching,
// but because our signatures are “attached” to the manifest digest,
// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil)
// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious
// signature verification failures when pulling while a tag is being updated.
func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
	if s.cachedManifest != nil {
		return nil
	}

	reference, err := s.physicalRef.tagOrDigest()
	if err != nil {
		return err
	}

	manblob, mt, err := s.fetchManifest(ctx, reference)
	if err != nil {
		return err
	}
	// We might validate manblob against the Docker-Content-Digest header here to protect against transport errors.
	s.cachedManifest = manblob
	s.cachedManifestMIMEType = mt
	return nil
}

// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
// This function can return a nil reader when no url is supported by this function. In this case, the caller
// should fall back to fetching the non-external blob (i.e. pull from the registry).
func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
	var (
		resp *http.Response
		err  error
	)
	if len(urls) == 0 {
		return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
	}
	for _, u := range urls {
		url, err := url.Parse(u)
		if err != nil || (url.Scheme != "http" && url.Scheme != "https") {
			continue // unsupported url. skip this url.
		}
		// NOTE: we must not authenticate on additional URLs as those
		// can be abused to leak credentials or tokens. Please
		// refer to CVE-2020-15157 for more information.
		resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
		if err == nil {
			if resp.StatusCode != http.StatusOK {
				err = errors.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
				logrus.Debug(err)
				resp.Body.Close()
				continue
			}
			break
		}
	}
	if resp == nil && err == nil {
		return nil, 0, nil // fallback to non-external blob
	}
	if err != nil {
		return nil, 0, err
	}
	return resp.Body, getBlobSize(resp), nil
}

func getBlobSize(resp *http.Response) int64 {
	size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
	if err != nil {
		size = -1
	}
	return size
}

// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
	return true
}

// splitHTTP200ResponseToPartial splits a 200 response into multiple streams as specified by the chunks
func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) {
	defer close(streams)
	defer close(errs)
	currentOffset := uint64(0)

	body = makeBufferedNetworkReader(body, 64, 16384)
	defer body.Close()
	for _, c := range chunks {
		if c.Offset != currentOffset {
			if c.Offset < currentOffset {
				errs <- fmt.Errorf("invalid chunk offset specified %v (expected >= %v)", c.Offset, currentOffset)
				break
			}
			toSkip := c.Offset - currentOffset
			if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
				errs <- err
				break
			}
			currentOffset += toSkip
		}
		s := signalCloseReader{
			closed:        make(chan interface{}),
			stream:        io.NopCloser(io.LimitReader(body, int64(c.Length))),
			consumeStream: true,
		}
		streams <- s

		// Wait until the stream is closed before going to the next chunk
		<-s.closed
		currentOffset += c.Length
	}
}

// handle206Response reads a 206 response and sends each part as a separate ReadCloser to the streams chan.
func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) {
	defer close(streams)
	defer close(errs)
	if !strings.HasPrefix(mediaType, "multipart/") {
		streams <- body
		return
	}
	boundary, found := params["boundary"]
	if !found {
		errs <- errors.Errorf("could not find boundary")
		body.Close()
		return
	}
	buffered := makeBufferedNetworkReader(body, 64, 16384)
	defer buffered.Close()
	mr := multipart.NewReader(buffered, boundary)
	for {
		p, err := mr.NextPart()
		if err != nil {
			if err != io.EOF {
				errs <- err
			}
			return
		}
		s := signalCloseReader{
			closed: make(chan interface{}),
			stream: p,
		}
		streams <- s
		// NextPart() cannot be called while the current part
		// is being read, so wait until it is closed
		<-s.closed
	}
}

// GetBlobAt returns a sequential channel of readers that contain data for the requested
// blob chunks, and a channel that might get a single error value.
// The specified chunks must not overlap and must be sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
	headers := make(map[string][]string)

	var rangeVals []string
	for _, c := range chunks {
		rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
	}

	headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))}

	if len(info.URLs) != 0 {
		return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt")
	}

	path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
	logrus.Debugf("Downloading %s", path)
	res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)
	if err != nil {
		return nil, nil, err
	}

	switch res.StatusCode {
	case http.StatusOK:
		// if the server replied with a 200 status code, convert the full body response to a series of
		// streams as it would have been done with 206.
		streams := make(chan io.ReadCloser)
		errs := make(chan error)
		go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks)
		return streams, errs, nil
	case http.StatusPartialContent:
		mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
		if err != nil {
			return nil, nil, err
		}

		streams := make(chan io.ReadCloser)
		errs := make(chan error)

		go handle206Response(streams, errs, res.Body, chunks, mediaType, params)
		return streams, errs, nil
	case http.StatusBadRequest:
		res.Body.Close()
		return nil, nil, private.BadPartialRequestError{Status: res.Status}
	default:
		err := httpResponseToError(res, "Error fetching partial blob")
		if err == nil {
			err = errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
		}
		res.Body.Close()
		return nil, nil, err
	}
}

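The Range header built above is plain RFC 7233 syntax; byte ranges are inclusive on both ends, which is why the end of each chunk is offset+length-1. A self-contained check of the same construction:

package main

import (
	"fmt"
	"strings"
)

type chunk struct{ Offset, Length uint64 }

// rangeHeader reproduces the header construction from GetBlobAt above.
func rangeHeader(chunks []chunk) string {
	vals := make([]string, 0, len(chunks))
	for _, c := range chunks {
		vals = append(vals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
	}
	return "bytes=" + strings.Join(vals, ",")
}

func main() {
	// Two chunks: bytes 0..1023 and 4096..8191.
	fmt.Println(rangeHeader([]chunk{{0, 1024}, {4096, 4096}}))
	// Output: bytes=0-1023,4096-8191
}
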
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
	if len(info.URLs) != 0 {
		r, s, err := s.getExternalBlob(ctx, info.URLs)
		if err != nil {
			return nil, 0, err
		} else if r != nil {
			return r, s, nil
		}
	}

	path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
	logrus.Debugf("Downloading %s", path)
	res, err := s.c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
	if err != nil {
		return nil, 0, err
	}
	if err := httpResponseToError(res, "Error fetching blob"); err != nil {
		res.Body.Close()
		return nil, 0, err
	}
	cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef))
	return res.Body, getBlobSize(res), nil
}

// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
	if err := s.c.detectProperties(ctx); err != nil {
		return nil, err
	}
	switch {
	case s.c.supportsSignatures:
		return s.getSignaturesFromAPIExtension(ctx, instanceDigest)
	case s.c.signatureBase != nil:
		return s.getSignaturesFromLookaside(ctx, instanceDigest)
	default:
		return nil, errors.Errorf("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
	}
}

// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference,
// or finally, from a fetched manifest.
func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) {
	if instanceDigest != nil {
		return *instanceDigest, nil
	}
	if digested, ok := s.physicalRef.ref.(reference.Digested); ok {
		d := digested.Digest()
		if d.Algorithm() == digest.Canonical {
			return d, nil
		}
	}
	if err := s.ensureManifestIsLoaded(ctx); err != nil {
		return "", err
	}
	return manifest.Digest(s.cachedManifest)
}

// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
	manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
	if err != nil {
		return nil, err
	}

	// NOTE: Keep this in sync with docs/signature-protocols.md!
	signatures := [][]byte{}
	for i := 0; ; i++ {
		url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
		signature, missing, err := s.getOneSignature(ctx, url)
		if err != nil {
			return nil, err
		}
		if missing {
			break
		}
		signatures = append(signatures, signature)
	}
	return signatures, nil
}

// getOneSignature downloads one signature from url.
// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) {
	switch url.Scheme {
	case "file":
		logrus.Debugf("Reading %s", url.Path)
		sig, err := os.ReadFile(url.Path)
		if err != nil {
			if os.IsNotExist(err) {
				return nil, true, nil
			}
			return nil, false, err
		}
		return sig, false, nil

	case "http", "https":
		logrus.Debugf("GET %s", url.Redacted())
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
		if err != nil {
			return nil, false, err
		}
		res, err := s.c.client.Do(req)
		if err != nil {
			return nil, false, err
		}
		defer res.Body.Close()
		if res.StatusCode == http.StatusNotFound {
			return nil, true, nil
		} else if res.StatusCode != http.StatusOK {
			return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode))
		}
		sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
		if err != nil {
			return nil, false, err
		}
		return sig, false, nil

	default:
		return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.Redacted())
	}
}

// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
	manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
	if err != nil {
		return nil, err
	}

	parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest)
	if err != nil {
		return nil, err
	}

	var sigs [][]byte
	for _, sig := range parsedBody.Signatures {
		if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
			sigs = append(sigs, sig.Content)
		}
	}
	return sigs, nil
}

// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
	// docker/distribution does not document what action should be used for deleting images.
	//
	// Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it.
	// quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included.
	// OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user).
	//
	// We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything".
	c, err := newDockerClientFromRef(sys, ref, true, "*")
	if err != nil {
		return err
	}

	headers := map[string][]string{
		"Accept": manifest.DefaultRequestedManifestMIMETypes,
	}
	refTail, err := ref.tagOrDigest()
	if err != nil {
		return err
	}
	getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
	get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil)
	if err != nil {
		return err
	}
	defer get.Body.Close()
	manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize)
	if err != nil {
		return err
	}
	switch get.StatusCode {
	case http.StatusOK:
	case http.StatusNotFound:
		return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
	default:
		return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
	}

	manifestDigest, err := manifest.Digest(manifestBody)
	if err != nil {
		return fmt.Errorf("computing manifest digest: %w", err)
	}
	deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest)

	// When retrieving the digest from a registry >= 2.3 use the following header:
	//   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
	delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil)
	if err != nil {
		return err
	}
	defer delete.Body.Close()

	body, err := iolimits.ReadAtMost(delete.Body, iolimits.MaxErrorBodySize)
	if err != nil {
		return err
	}
	if delete.StatusCode != http.StatusAccepted {
		return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
	}

	for i := 0; ; i++ {
		url := signatureStorageURL(c.signatureBase, manifestDigest, i)
		missing, err := c.deleteOneSignature(url)
		if err != nil {
			return err
		}
		if missing {
			break
		}
	}

	return nil
}

type bufferedNetworkReaderBuffer struct {
	data     []byte
	len      int
	consumed int
	err      error
}

type bufferedNetworkReader struct {
	stream      io.ReadCloser
	emptyBuffer chan *bufferedNetworkReaderBuffer
	readyBuffer chan *bufferedNetworkReaderBuffer
	terminate   chan bool
	current     *bufferedNetworkReaderBuffer
	mutex       sync.Mutex
	gotEOF      bool
}

// handleBufferedNetworkReader runs in a goroutine
func handleBufferedNetworkReader(br *bufferedNetworkReader) {
	defer close(br.readyBuffer)
	for {
		select {
		case b := <-br.emptyBuffer:
			b.len, b.err = br.stream.Read(b.data)
			br.readyBuffer <- b
			if b.err != nil {
				return
			}
		case <-br.terminate:
			return
		}
	}
}

func (n *bufferedNetworkReader) Close() error {
	close(n.terminate)
	close(n.emptyBuffer)
	return n.stream.Close()
}

func (n *bufferedNetworkReader) read(p []byte) (int, error) {
	if n.current != nil {
		copied := copy(p, n.current.data[n.current.consumed:n.current.len])
		n.current.consumed += copied
		if n.current.consumed == n.current.len {
			n.emptyBuffer <- n.current
			n.current = nil
		}
		if copied > 0 {
			return copied, nil
		}
	}
	if n.gotEOF {
		return 0, io.EOF
	}

	var b *bufferedNetworkReaderBuffer

	select {
	case b = <-n.readyBuffer:
		if b.err != nil {
			if b.err != io.EOF {
				return b.len, b.err
			}
			n.gotEOF = true
		}
		b.consumed = 0
		n.current = b
		return n.read(p)
	case <-n.terminate:
		return 0, io.EOF
	}
}

func (n *bufferedNetworkReader) Read(p []byte) (int, error) {
	n.mutex.Lock()
	defer n.mutex.Unlock()

	return n.read(p)
}

func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) *bufferedNetworkReader {
	br := bufferedNetworkReader{
		stream:      stream,
		emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
		readyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers),
		terminate:   make(chan bool),
	}

	go func() {
		handleBufferedNetworkReader(&br)
	}()

	for i := uint(0); i < nBuffers; i++ {
		b := bufferedNetworkReaderBuffer{
			data: make([]byte, bufferSize),
		}
		br.emptyBuffer <- &b
	}

	return &br
}

type signalCloseReader struct {
	closed        chan interface{}
	stream        io.ReadCloser
	consumeStream bool
}

func (s signalCloseReader) Read(p []byte) (int, error) {
	return s.stream.Read(p)
}

func (s signalCloseReader) Close() error {
	defer close(s.closed)
	if s.consumeStream {
		if _, err := io.Copy(io.Discard, s.stream); err != nil {
			s.stream.Close()
			return err
		}
	}
	return s.stream.Close()
}

168
vendor/github.com/containers/image/v5/docker/docker_transport.go
generated vendored Normal file
@@ -0,0 +1,168 @@
package docker

import (
	"context"
	"fmt"
	"strings"

	"github.com/containers/image/v5/docker/policyconfiguration"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
)

func init() {
	transports.Register(Transport)
}

// Transport is an ImageTransport for container registry-hosted images.
var Transport = dockerTransport{}

type dockerTransport struct{}

func (t dockerTransport) Name() string {
	return "docker"
}

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
	return ParseReference(reference)
}

// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
	// FIXME? We could be verifying the various character set and length restrictions
	// from docker/distribution/reference.regexp.go, but other than that there
	// are few semantically invalid strings.
	return nil
}

// dockerReference is an ImageReference for Docker images.
type dockerReference struct {
	ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
}

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
	if !strings.HasPrefix(refString, "//") {
		return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
	}
	ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
	if err != nil {
		return nil, err
	}
	ref = reference.TagNameOnly(ref)
	return NewReference(ref)
}

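ParseReference normalizes short names exactly like the docker CLI, including the implicit :latest tag. A quick usage example, assuming this vendored package is importable at its usual path:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
)

func main() {
	// Short names are normalized and tag-defaulted, so "//busybox"
	// resolves to docker.io/library/busybox:latest.
	ref, err := docker.ParseReference("//busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport()) // //busybox:latest
	fmt.Println(ref.DockerReference())       // docker.io/library/busybox:latest
}
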
// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) {
	return newReference(ref)
}

// newReference returns a dockerReference for a named reference.
func newReference(ref reference.Named) (dockerReference, error) {
	if reference.IsNameOnly(ref) {
		return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
	}
	// A github.com/distribution/reference value can have a tag and a digest at the same time!
	// The docker/distribution API does not really support that (we can’t ask for an image with a specific
	// tag and digest), so fail. This MAY be accepted in the future.
	// (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
	// the tag or the digest first?)
	_, isTagged := ref.(reference.NamedTagged)
	_, isDigested := ref.(reference.Canonical)
	if isTagged && isDigested {
		return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported")
	}

	return dockerReference{
		ref: ref,
	}, nil
}

func (ref dockerReference) Transport() types.ImageTransport {
	return Transport
}

// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dockerReference) StringWithinTransport() string {
	return "//" + reference.FamiliarString(ref.ref)
}

// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref dockerReference) DockerReference() reference.Named {
	return ref.ref
}

// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
// (i.e. various references with exactly the same semantics should return the same configuration identity)
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref dockerReference) PolicyConfigurationIdentity() string {
	res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
	if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
		panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
	}
	return res
}

// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref dockerReference) PolicyConfigurationNamespaces() []string {
	return policyconfiguration.DockerReferenceNamespaces(ref.ref)
}

// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
	return newImage(ctx, sys, ref)
}

// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
	return newImageSource(ctx, sys, ref)
}

// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
	return newImageDestination(sys, ref)
}

// DeleteImage deletes the named image from the registry, if supported.
func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
	return deleteImage(ctx, sys, ref)
}

// tagOrDigest returns a tag or digest from the reference.
func (ref dockerReference) tagOrDigest() (string, error) {
	if ref, ok := ref.ref.(reference.Canonical); ok {
		return ref.Digest().String(), nil
	}
	if ref, ok := ref.ref.(reference.NamedTagged); ok {
		return ref.Tag(), nil
	}
	// This should not happen, NewReference above refuses reference.IsNameOnly values.
	return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
}

61
vendor/github.com/containers/image/v5/docker/errors.go
generated vendored Normal file
@@ -0,0 +1,61 @@
package docker

import (
    "errors"
    "fmt"
    "net/http"

    "github.com/docker/distribution/registry/client"
    perrors "github.com/pkg/errors"
)

var (
    // ErrV1NotSupported is returned when we're trying to talk to a
    // docker V1 registry.
    ErrV1NotSupported = errors.New("can't talk to a V1 container registry")
    // ErrTooManyRequests is returned when the status code returned is 429
    ErrTooManyRequests = errors.New("too many requests to registry")
)

// ErrUnauthorizedForCredentials is returned when the status code returned is 401
type ErrUnauthorizedForCredentials struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
    Err error
}

func (e ErrUnauthorizedForCredentials) Error() string {
    return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error())
}

// httpResponseToError translates the http.Response into an error, possibly prefixing it with the supplied context. It returns
// nil if the response is not considered an error.
// NOTE: Almost all callers in this package should use registryHTTPResponseToError instead.
func httpResponseToError(res *http.Response, context string) error {
    switch res.StatusCode {
    case http.StatusOK:
        return nil
    case http.StatusTooManyRequests:
        return ErrTooManyRequests
    case http.StatusUnauthorized:
        err := client.HandleErrorResponse(res)
        return ErrUnauthorizedForCredentials{Err: err}
    default:
        if context != "" {
            context = context + ": "
        }
        return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
    }
}

// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
// registry
func registryHTTPResponseToError(res *http.Response) error {
    err := client.HandleErrorResponse(res)
    if e, ok := err.(*client.UnexpectedHTTPResponseError); ok {
        response := string(e.Response)
        if len(response) > 50 {
            response = response[:50] + "..."
        }
        err = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
    }
    return err
}
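Aside: a sketch of how a caller might branch on these exported errors. Only the docker.Err* names come from the file above; the helper and its messages are hypothetical:

package main

import (
    "errors"
    "fmt"

    "github.com/containers/image/v5/docker"
)

// describe classifies a registry error into a human-readable hint.
func describe(err error) string {
    var unauthorized docker.ErrUnauthorizedForCredentials
    switch {
    case errors.Is(err, docker.ErrTooManyRequests):
        return "rate limited by the registry, retry with backoff"
    case errors.As(err, &unauthorized):
        return "credentials rejected: " + unauthorized.Err.Error()
    default:
        return "unclassified: " + err.Error()
    }
}

func main() {
    fmt.Println(describe(docker.ErrTooManyRequests))
}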
200
vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
generated
vendored
Normal file
@@ -0,0 +1,200 @@
package tarfile

import (
    "bytes"
    "context"
    "encoding/json"
    "io"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/internal/iolimits"
    "github.com/containers/image/v5/internal/streamdigest"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/types"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
    archive  *Writer
    repoTags []reference.NamedTagged
    // Other state.
    config []byte
    sysCtx *types.SystemContext
}

// NewDestination returns a tarfile.Destination adding images to the specified Writer.
func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination {
    repoTags := []reference.NamedTagged{}
    if ref != nil {
        repoTags = append(repoTags, ref)
    }
    return &Destination{
        archive:  archive,
        repoTags: repoTags,
        sysCtx:   sys,
    }
}

// AddRepoTags adds the specified tags to the destination's repoTags.
func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
    d.repoTags = append(d.repoTags, tags...)
}

// SupportedManifestMIMETypes tells which manifest mime types the destination supports.
// If an empty slice or nil is returned, then any mime type can be tried to upload.
func (d *Destination) SupportedManifestMIMETypes() []string {
    return []string{
        manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
    }
}

// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures(ctx context.Context) error {
    return errors.Errorf("Storing signatures for docker tar files is not supported")
}

// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
    return false
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
    return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
    return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
    // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
    // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
    // much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
    return false
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
    // Ouch, we need to stream the blob into a temporary file just to determine the size.
    // When the layer is decompressed, we also have to generate the digest on uncompressed data.
    if inputInfo.Size == -1 || inputInfo.Digest == "" {
        logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
        streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
        if err != nil {
            return types.BlobInfo{}, err
        }
        defer cleanup()
        stream = streamCopy
        logrus.Debugf("... streaming done")
    }

    if err := d.archive.lock(); err != nil {
        return types.BlobInfo{}, err
    }
    defer d.archive.unlock()

    // Maybe the blob has been already sent
    ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
    if err != nil {
        return types.BlobInfo{}, err
    }
    if ok {
        return reusedInfo, nil
    }

    if isConfig {
        buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
        if err != nil {
            return types.BlobInfo{}, errors.Wrap(err, "reading Config file stream")
        }
        d.config = buf
        if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
            return types.BlobInfo{}, errors.Wrap(err, "writing Config file")
        }
    } else {
        if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
            return types.BlobInfo{}, err
        }
    }
    d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
    return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}

// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
    if err := d.archive.lock(); err != nil {
        return false, types.BlobInfo{}, err
    }
    defer d.archive.unlock()

    return d.archive.tryReusingBlobLocked(info)
}

// PutManifest writes manifest to the destination.
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
    if instanceDigest != nil {
        return errors.New(`Manifest lists are not supported for docker tar files`)
    }
    // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
    // so the caller trying a different manifest kind would be pointless.
    var man manifest.Schema2
    if err := json.Unmarshal(m, &man); err != nil {
        return errors.Wrap(err, "parsing manifest")
    }
    if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
        return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
    }

    if err := d.archive.lock(); err != nil {
        return err
    }
    defer d.archive.unlock()

    if err := d.archive.writeLegacyMetadataLocked(man.LayersDescriptors, d.config, d.repoTags); err != nil {
        return err
    }

    return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
}

// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
    if instanceDigest != nil {
        return errors.Errorf(`Manifest lists are not supported for docker tar files`)
    }
    if len(signatures) != 0 {
        return errors.Errorf("Storing signatures for docker tar files is not supported")
    }
    return nil
}
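Aside: outside callers do not use this internal Destination directly; it is driven by the public docker-archive transport. A sketch of the end-to-end flow, assuming hypothetical paths and accepting any signature policy:

package main

import (
    "context"
    "fmt"
    "os"

    "github.com/containers/image/v5/copy"
    "github.com/containers/image/v5/signature"
    "github.com/containers/image/v5/transports/alltransports"
)

func main() {
    // Pull an image and write it as a (docker save)-formatted tarball; the
    // docker-archive destination is built on the tarfile.Writer/Destination above.
    srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
    if err != nil {
        panic(err)
    }
    destRef, err := alltransports.ParseImageName("docker-archive:/tmp/busybox.tar:busybox:latest")
    if err != nil {
        panic(err)
    }
    policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
    pc, err := signature.NewPolicyContext(policy)
    if err != nil {
        panic(err)
    }
    defer pc.Destroy()
    if _, err := copy.Image(context.Background(), pc, destRef, srcRef, &copy.Options{ReportWriter: os.Stdout}); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}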
268
vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
generated
vendored
Normal file
@@ -0,0 +1,268 @@
package tarfile

import (
    "archive/tar"
    "encoding/json"
    "io"
    "os"
    "path"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/internal/iolimits"
    "github.com/containers/image/v5/internal/tmpdir"
    "github.com/containers/image/v5/pkg/compression"
    "github.com/containers/image/v5/types"
    "github.com/pkg/errors"
)

// Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
type Reader struct {
    // None of the fields below are modified after the archive is created, until .Close();
    // this allows concurrent readers of the same archive.
    path          string // "" if the archive has already been closed.
    removeOnClose bool   // Remove file on close if true
    Manifest      []ManifestItem // Guaranteed to exist after the archive is created.
}

// NewReaderFromFile returns a Reader for the specified path.
// The caller should call .Close() on the returned archive when done.
func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
    file, err := os.Open(path)
    if err != nil {
        return nil, errors.Wrapf(err, "opening file %q", path)
    }
    defer file.Close()

    // If the file is not compressed, we can just return the file itself
    // as a source. Otherwise we pass the stream to NewReaderFromStream.
    stream, isCompressed, err := compression.AutoDecompress(file)
    if err != nil {
        return nil, errors.Wrapf(err, "detecting compression for file %q", path)
    }
    defer stream.Close()
    if !isCompressed {
        return newReader(path, false)
    }
    return NewReaderFromStream(sys, stream)
}

// NewReaderFromStream returns a Reader for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewReaderFromStream returns.
// The caller should call .Close() on the returned archive when done.
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
    // Save inputStream to a temporary file
    tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
    if err != nil {
        return nil, errors.Wrap(err, "creating temporary file")
    }
    defer tarCopyFile.Close()

    succeeded := false
    defer func() {
        if !succeeded {
            os.Remove(tarCopyFile.Name())
        }
    }()

    // In order to be compatible with docker-load, we need to support
    // auto-decompression (it's also a nice quality-of-life thing to avoid
    // giving users really confusing "invalid tar header" errors).
    uncompressedStream, _, err := compression.AutoDecompress(inputStream)
    if err != nil {
        return nil, errors.Wrap(err, "auto-decompressing input")
    }
    defer uncompressedStream.Close()

    // Copy the plain archive to the temporary file.
    //
    // TODO: This can take quite some time, and should ideally be cancellable
    // using a context.Context.
    if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
        return nil, errors.Wrapf(err, "copying contents to temporary file %q", tarCopyFile.Name())
    }
    succeeded = true

    return newReader(tarCopyFile.Name(), true)
}

// newReader creates a Reader for the specified path and removeOnClose flag.
// The caller should call .Close() on the returned archive when done.
func newReader(path string, removeOnClose bool) (*Reader, error) {
    // This is a valid enough archive, except Manifest is not yet filled.
    r := Reader{
        path:          path,
        removeOnClose: removeOnClose,
    }
    succeeded := false
    defer func() {
        if !succeeded {
            r.Close()
        }
    }()

    // We initialize Manifest immediately when constructing the Reader instead
    // of later on-demand because every caller will need the data, and because doing it now
    // removes the need to synchronize the access/creation of the data if the archive is later
    // used from multiple goroutines to access different images.

    // FIXME? Do we need to deal with the legacy format?
    bytes, err := r.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
    if err != nil {
        return nil, err
    }
    if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
        return nil, errors.Wrap(err, "decoding tar manifest.json")
    }

    succeeded = true
    return &r, nil
}

// Close removes resources associated with an initialized Reader, if any.
func (r *Reader) Close() error {
    path := r.path
    r.path = "" // Mark the archive as closed
    if r.removeOnClose {
        return os.Remove(path)
    }
    return nil
}

// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or
// both of which should be (nil, -1).
// On success, it returns the manifest item and an index of the matching tag, if a tag was used
// for matching; the index is -1 if a tag was not used.
func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
    switch {
    case ref != nil && sourceIndex != -1:
        return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
            ref.String(), sourceIndex)

    case ref != nil:
        refString := ref.String()
        for i := range r.Manifest {
            for tagIndex, tag := range r.Manifest[i].RepoTags {
                parsedTag, err := reference.ParseNormalizedNamed(tag)
                if err != nil {
                    return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
                }
                if parsedTag.String() == refString {
                    return &r.Manifest[i], tagIndex, nil
                }
            }
        }
        return nil, -1, errors.Errorf("Tag %#v not found", refString)

    case sourceIndex != -1:
        if sourceIndex >= len(r.Manifest) {
            return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
                sourceIndex, len(r.Manifest))
        }
        return &r.Manifest[sourceIndex], -1, nil

    default:
        if len(r.Manifest) != 1 {
            return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
        }
        return &r.Manifest[0], -1, nil
    }
}

// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
    *tar.Reader
    backingFile *os.File
}

func (t *tarReadCloser) Close() error {
    return t.backingFile.Close()
}

// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
// It is safe to call this method from multiple goroutines simultaneously.
// The caller should call .Close() on the returned stream.
func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
    // This is only a sanity check; if anyone did concurrently close r, this access is technically
    // racy against the write in .Close().
if r.path == "" {
|
||||
return nil, errors.New("Internal error: trying to read an already closed tarfile.Reader")
|
||||
}
|
||||
|
||||
f, err := os.Open(r.path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
succeeded := false
|
||||
defer func() {
|
||||
if !succeeded {
|
||||
f.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
tarReader, header, err := findTarComponent(f, componentPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if header == nil {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
|
||||
// We follow only one symlink; so no loops are possible.
|
||||
if _, err := f.Seek(0, io.SeekStart); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
|
||||
// so we don't care.
|
||||
tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if header == nil {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
}
|
||||
|
||||
if !header.FileInfo().Mode().IsRegular() {
|
||||
return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
|
||||
}
|
||||
succeeded = true
|
||||
return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
|
||||
}
|
||||
|
||||
// findTarComponent returns a header and a reader matching componentPath within inputFile,
|
||||
// or (nil, nil, nil) if not found.
|
||||
func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) {
|
||||
t := tar.NewReader(inputFile)
|
||||
componentPath = path.Clean(componentPath)
|
||||
for {
|
||||
h, err := t.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if path.Clean(h.Name) == componentPath {
|
||||
return t, h, nil
|
||||
}
|
||||
}
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
// readTarComponent returns full contents of componentPath.
|
||||
// It is safe to call this method from multiple goroutines simultaneously.
|
||||
func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
|
||||
file, err := r.openTarComponent(path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "loading tar component %s", path)
|
||||
}
|
||||
defer file.Close()
|
||||
bytes, err := iolimits.ReadAtMost(file, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes, nil
|
||||
}
|
||||
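Aside: the linear-scan lookup above can be reproduced with nothing but the standard library. A minimal, self-contained sketch of the same technique (the archive path is hypothetical):

package main

import (
    "archive/tar"
    "fmt"
    "io"
    "os"
    "path"
)

// findTarComponent walks the tar entries until the cleaned header name
// matches the requested path, mirroring the Reader above.
func findTarComponent(r io.Reader, componentPath string) (*tar.Header, error) {
    t := tar.NewReader(r)
    componentPath = path.Clean(componentPath)
    for {
        h, err := t.Next()
        if err == io.EOF {
            return nil, nil // not found
        }
        if err != nil {
            return nil, err
        }
        if path.Clean(h.Name) == componentPath {
            return h, nil
        }
    }
}

func main() {
    f, err := os.Open("/tmp/image.tar") // hypothetical (docker save) tarball
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer f.Close()
    h, err := findTarComponent(f, "manifest.json")
    if err != nil || h == nil {
        fmt.Println("manifest.json not found")
        return
    }
    fmt.Printf("manifest.json is %d bytes\n", h.Size)
}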
330
vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
generated
vendored
Normal file
@@ -0,0 +1,330 @@
package tarfile

import (
    "archive/tar"
    "bytes"
    "context"
    "encoding/json"
    "io"
    "os"
    "path"
    "sync"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/internal/iolimits"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/pkg/compression"
    "github.com/containers/image/v5/types"
    digest "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
    archive      *Reader
    closeArchive bool // .Close() the archive when the source is closed.
    // If ref is nil and sourceIndex is -1, indicates the only image in the archive.
    ref         reference.NamedTagged // May be nil
    sourceIndex int                   // May be -1
    // The following data is only available after ensureCachedDataIsPresent() succeeds
    tarManifest       *ManifestItem // nil if not available yet.
    configBytes       []byte
    configDigest      digest.Digest
    orderedDiffIDList []digest.Digest
    knownLayers       map[digest.Digest]*layerInfo
    // Other state
    generatedManifest []byte    // Private cache for GetManifest(), nil if not set yet.
    cacheDataLock     sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe
    cacheDataResult   error     // Private state for ensureCachedDataIsPresent
}

type layerInfo struct {
    path string
    size int64
}

// NewSource returns a tarfile.Source for an image in the specified archive matching ref
// and sourceIndex (or the only image if they are (nil, -1)).
// The archive will be closed if closeArchive is true.
func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source {
    return &Source{
        archive:      archive,
        closeArchive: closeArchive,
        ref:          ref,
        sourceIndex:  sourceIndex,
    }
}

// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
// It is safe to call this from multi-threaded code.
func (s *Source) ensureCachedDataIsPresent() error {
    s.cacheDataLock.Do(func() {
        s.cacheDataResult = s.ensureCachedDataIsPresentPrivate()
    })
    return s.cacheDataResult
}

// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
// Call ensureCachedDataIsPresent instead.
func (s *Source) ensureCachedDataIsPresentPrivate() error {
    tarManifest, _, err := s.archive.ChooseManifestItem(s.ref, s.sourceIndex)
    if err != nil {
        return err
    }

    // Read and parse config.
    configBytes, err := s.archive.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize)
    if err != nil {
        return err
    }
    var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
    if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
        return errors.Wrapf(err, "decoding tar config %s", tarManifest.Config)
    }
    if parsedConfig.RootFS == nil {
        return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
    }

    knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
    if err != nil {
        return err
    }

    // Success; commit.
    s.tarManifest = tarManifest
    s.configBytes = configBytes
    s.configDigest = digest.FromBytes(configBytes)
    s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
    s.knownLayers = knownLayers
    return nil
}

// Close removes resources associated with an initialized Source, if any.
func (s *Source) Close() error {
    if s.closeArchive {
        return s.archive.Close()
    }
    return nil
}

// TarManifest returns contents of manifest.json
func (s *Source) TarManifest() []ManifestItem {
    return s.archive.Manifest
}

func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
    // Collect layer data available in manifest and config.
    if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
        return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
    }
    knownLayers := map[digest.Digest]*layerInfo{}
    unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
    for i, diffID := range parsedConfig.RootFS.DiffIDs {
        if _, ok := knownLayers[diffID]; ok {
            // Apparently it really can happen that a single image contains the same layer diff more than once.
            // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
            // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
            continue
        }
        layerPath := path.Clean(tarManifest.Layers[i])
        if _, ok := unknownLayerSizes[layerPath]; ok {
            return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
        }
        li := &layerInfo{ // A new element in each iteration
            path: layerPath,
            size: -1,
        }
        knownLayers[diffID] = li
        unknownLayerSizes[layerPath] = li
    }

    // Scan the tar file to collect layer sizes.
    file, err := os.Open(s.archive.path)
    if err != nil {
        return nil, err
    }
    defer file.Close()
    t := tar.NewReader(file)
    for {
        h, err := t.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            return nil, err
        }
        layerPath := path.Clean(h.Name)
        // FIXME: Cache this data across images in Reader.
        if li, ok := unknownLayerSizes[layerPath]; ok {
            // Since GetBlob will decompress layers that are compressed we need
            // to do the decompression here as well, otherwise we will
            // incorrectly report the size. Pretty critical, since tools like
            // umoci always compress layer blobs. Obviously we only bother with
            // the slower method of checking if it's compressed.
            uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
            if err != nil {
                return nil, errors.Wrapf(err, "auto-decompressing %s to determine its size", layerPath)
            }
            defer uncompressedStream.Close()

            uncompressedSize := h.Size
            if isCompressed {
                uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
                if err != nil {
                    return nil, errors.Wrapf(err, "reading %s to find its size", layerPath)
                }
            }
            li.size = uncompressedSize
            delete(unknownLayerSizes, layerPath)
        }
    }
    if len(unknownLayerSizes) != 0 {
        return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
    }

    return knownLayers, nil
}

// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as the primary manifest can not be a list, so there can be no secondary instances.
func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
    if instanceDigest != nil {
        // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
        return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`)
    }
    if s.generatedManifest == nil {
        if err := s.ensureCachedDataIsPresent(); err != nil {
            return nil, "", err
        }
        m := manifest.Schema2{
            SchemaVersion: 2,
            MediaType:     manifest.DockerV2Schema2MediaType,
            ConfigDescriptor: manifest.Schema2Descriptor{
                MediaType: manifest.DockerV2Schema2ConfigMediaType,
                Size:      int64(len(s.configBytes)),
                Digest:    s.configDigest,
            },
            LayersDescriptors: []manifest.Schema2Descriptor{},
        }
        for _, diffID := range s.orderedDiffIDList {
            li, ok := s.knownLayers[diffID]
            if !ok {
                return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
            }
            m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
                Digest:    diffID, // diffID is a digest of the uncompressed tarball
                MediaType: manifest.DockerV2Schema2LayerMediaType,
                Size:      li.size,
            })
        }
        manifestBytes, err := json.Marshal(&m)
        if err != nil {
            return nil, "", err
        }
        s.generatedManifest = manifestBytes
    }
    return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
}

// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input.
type uncompressedReadCloser struct {
    io.Reader
    underlyingCloser   func() error
    uncompressedCloser func() error
}

func (r uncompressedReadCloser) Close() error {
    var res error
    if err := r.uncompressedCloser(); err != nil {
        res = err
    }
    if err := r.underlyingCloser(); err != nil && res == nil {
        res = err
    }
    return res
}

// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *Source) HasThreadSafeGetBlob() bool {
    return true
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    if err := s.ensureCachedDataIsPresent(); err != nil {
        return nil, 0, err
    }

    if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
        return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
    }

    if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
        underlyingStream, err := s.archive.openTarComponent(li.path)
        if err != nil {
            return nil, 0, err
        }
        closeUnderlyingStream := true
        defer func() {
            if closeUnderlyingStream {
                underlyingStream.Close()
            }
        }()

        // In order to handle the fact that digests != diffIDs (and thus that a
        // caller which is trying to verify the blob will run into problems),
        // we need to decompress blobs. This is a bit ugly, but it's a
        // consequence of making everything addressable by their DiffID rather
        // than by their digest...
        //
        // In particular, because the v2s2 manifest being generated uses
        // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
        // layers not their _actual_ digest. The result is that copy/... will
        // be verifying a "digest" which is not the actual layer's digest (but
        // is instead the DiffID).

        uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
        if err != nil {
            return nil, 0, errors.Wrapf(err, "auto-decompressing blob %s", info.Digest)
        }

        newStream := uncompressedReadCloser{
            Reader:             uncompressedStream,
            underlyingCloser:   underlyingStream.Close,
            uncompressedCloser: uncompressedStream.Close,
        }
        closeUnderlyingStream = false

        return newStream, li.size, nil
    }

    return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
}

// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as there can be no secondary manifests.
func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
    if instanceDigest != nil {
        // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
        return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
    }
    return [][]byte{}, nil
}

// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
// to read the image's layers.
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as the primary manifest can not be a list, so there can be no secondary manifests.
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
    return nil, nil
}
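Aside: GetManifest above synthesizes a schema2 manifest from the image config and its DiffIDs. A minimal sketch of that assembly using the public manifest and go-digest packages (the config bytes and the layer digest are hypothetical placeholders):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/containers/image/v5/manifest"
    "github.com/opencontainers/go-digest"
)

func main() {
    config := []byte(`{"architecture":"amd64","os":"linux","rootfs":{"type":"layers"}}`)
    // Mirror of the synthesis above: the config descriptor digest is computed
    // over the raw config bytes, and each layer descriptor carries a DiffID
    // (digest of the uncompressed layer) rather than a compressed-blob digest.
    m := manifest.Schema2{
        SchemaVersion: 2,
        MediaType:     manifest.DockerV2Schema2MediaType,
        ConfigDescriptor: manifest.Schema2Descriptor{
            MediaType: manifest.DockerV2Schema2ConfigMediaType,
            Size:      int64(len(config)),
            Digest:    digest.FromBytes(config),
        },
        LayersDescriptors: []manifest.Schema2Descriptor{{
            MediaType: manifest.DockerV2Schema2LayerMediaType,
            Size:      -1, // hypothetical: unknown until the layer is scanned
            Digest:    digest.Digest("sha256:0000000000000000000000000000000000000000000000000000000000000000"),
        }},
    }
    b, err := json.MarshalIndent(&m, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
}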
28
vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
package tarfile

import (
    "github.com/containers/image/v5/manifest"
    "github.com/opencontainers/go-digest"
)

// Various data structures.

// Based on github.com/docker/docker/image/tarexport/tarexport.go
const (
    manifestFileName           = "manifest.json"
    legacyLayerFileName        = "layer.tar"
    legacyConfigFileName       = "json"
    legacyVersionFileName      = "VERSION"
    legacyRepositoriesFileName = "repositories"
)

// ManifestItem is an element of the array stored in the top-level manifest.json file.
type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API.
    Config       string
    RepoTags     []string
    Layers       []string
    Parent       imageID                                      `json:",omitempty"`
    LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"`
}

type imageID string
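Aside: the top-level manifest.json in a (docker save) tarball is a JSON array of these items. A self-contained sketch parsing such a document; the struct mirrors the exported ManifestItem fields (which are internal here), and the sample data is hypothetical:

package main

import (
    "encoding/json"
    "fmt"
)

// localManifestItem mirrors tarfile.ManifestItem for illustration only.
type localManifestItem struct {
    Config   string
    RepoTags []string
    Layers   []string
}

func main() {
    data := []byte(`[{"Config":"abc123.json","RepoTags":["docker.io/library/busybox:latest"],"Layers":["def456.tar"]}]`)
    var items []localManifestItem
    if err := json.Unmarshal(data, &items); err != nil {
        panic(err)
    }
    fmt.Printf("%d image(s), config %s, %d layer(s)\n", len(items), items[0].Config, len(items[0].Layers))
}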
381
vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
generated
vendored
Normal file
@@ -0,0 +1,381 @@
package tarfile

import (
    "archive/tar"
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "sync"
    "time"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/types"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
type Writer struct {
    mutex sync.Mutex
    // ALL of the following members can only be accessed with the mutex held.
    // Use Writer.lock() to obtain the mutex.
    writer io.Writer
    tar    *tar.Writer // nil if the Writer has already been closed.
    // Other state.
    blobs            map[digest.Digest]types.BlobInfo // list of already-sent blobs
    repositories     map[string]map[string]string
    legacyLayers     map[string]struct{} // A set of IDs of legacy layers that have been already sent.
    manifest         []ManifestItem
    manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
}

// NewWriter returns a Writer for the specified io.Writer.
// The caller must eventually call .Close() on the returned object to create a valid archive.
func NewWriter(dest io.Writer) *Writer {
    return &Writer{
        writer:           dest,
        tar:              tar.NewWriter(dest),
        blobs:            make(map[digest.Digest]types.BlobInfo),
        repositories:     map[string]map[string]string{},
        legacyLayers:     map[string]struct{}{},
        manifestByConfig: map[digest.Digest]int{},
    }
}

// lock does some sanity checks and locks the Writer.
// If this function succeeds, the caller must call w.unlock.
// Do not use Writer.mutex directly.
func (w *Writer) lock() error {
    w.mutex.Lock()
    if w.tar == nil {
        w.mutex.Unlock()
        return errors.New("Internal error: trying to use an already closed tarfile.Writer")
    }
    return nil
}

// unlock releases the lock obtained by Writer.lock
// Do not use Writer.mutex directly.
func (w *Writer) unlock() {
    w.mutex.Unlock()
}

// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// The caller must have locked the Writer.
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
    if info.Digest == "" {
        return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
    }
    if blob, ok := w.blobs[info.Digest]; ok {
        return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
    }
    return false, types.BlobInfo{}, nil
}

// recordBlobLocked records metadata of a recorded blob, which must contain at least a digest and size.
// The caller must have locked the Writer.
func (w *Writer) recordBlobLocked(info types.BlobInfo) {
    w.blobs[info.Digest] = info
}

// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer
// The caller must have locked the Writer.
func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
    if _, ok := w.legacyLayers[layerID]; !ok {
        // Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
        // See also the comment in physicalLayerPath.
        physicalLayerPath := w.physicalLayerPath(layerDigest)
        if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
            return errors.Wrap(err, "creating layer symbolic link")
        }

        b := []byte("1.0")
        if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
            return errors.Wrap(err, "writing VERSION file")
        }

        if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
            return errors.Wrap(err, "writing config json file")
        }

        w.legacyLayers[layerID] = struct{}{}
    }
    return nil
}

// writeLegacyMetadataLocked writes legacy layer metadata and records tags for a single image.
func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2Descriptor, configBytes []byte, repoTags []reference.NamedTagged) error {
    var chainID digest.Digest
    lastLayerID := ""
    for i, l := range layerDescriptors {
        // The legacy format requires a config file per layer
        layerConfig := make(map[string]interface{})

        // The root layer doesn't have any parent
        if lastLayerID != "" {
            layerConfig["parent"] = lastLayerID
        }
        // The top layer configuration file is generated by using subpart of the image configuration
        if i == len(layerDescriptors)-1 {
            var config map[string]*json.RawMessage
            err := json.Unmarshal(configBytes, &config)
            if err != nil {
                return errors.Wrap(err, "unmarshaling config")
            }
            for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
                layerConfig[attr] = config[attr]
            }
        }

        // This chainID value matches the computation in docker/docker/layer.CreateChainID …
        if chainID == "" {
            chainID = l.Digest
        } else {
            chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
        }
        // … but note that the image ID does not _exactly_ match docker/docker/image/v1.CreateID, primarily because
        // we create the image configs differently in details. At least recent versions allocate new IDs on load,
        // so this is fine as long as the IDs we use are unique / cannot loop.
        //
        // For intermediate images, we could just use the chainID as an image ID, but using a digest of ~the created
        // config makes sure that everything uses the same “namespace”; a bit less efficient but clearer.
        //
        // Temporarily add the chainID to the config, only for the purpose of generating the image ID.
        layerConfig["layer_id"] = chainID
        b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
        if err != nil {
            return errors.Wrap(err, "marshaling layer config")
        }
        delete(layerConfig, "layer_id")
        layerID := digest.Canonical.FromBytes(b).Hex()
        layerConfig["id"] = layerID

        configBytes, err := json.Marshal(layerConfig)
        if err != nil {
            return errors.Wrap(err, "marshaling layer config")
        }

        if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
            return err
        }

        lastLayerID = layerID
    }

    if lastLayerID != "" {
        for _, repoTag := range repoTags {
            if val, ok := w.repositories[repoTag.Name()]; ok {
                val[repoTag.Tag()] = lastLayerID
            } else {
                w.repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): lastLayerID}
            }
        }
    }
    return nil
}

// checkManifestItemsMatch checks that a and b describe the same image,
// and returns an error if that’s not the case (which should never happen).
func checkManifestItemsMatch(a, b *ManifestItem) error {
    if a.Config != b.Config {
        return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
    }
    if len(a.Layers) != len(b.Layers) {
        return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
    }
    for i := range a.Layers {
        if a.Layers[i] != b.Layers[i] {
            return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i])
        }
    }
    // Ignore RepoTags, that will be built later.
    // Ignore Parent and LayerSources, which we don’t set to anything meaningful.
    return nil
}

// ensureManifestItemLocked ensures that there is a manifest item pointing to (layerDescriptors, configDigest) with repoTags
// The caller must have locked the Writer.
func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
    layerPaths := []string{}
    for _, l := range layerDescriptors {
        layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
    }

    var item *ManifestItem
    newItem := ManifestItem{
        Config:       w.configPath(configDigest),
        RepoTags:     []string{},
        Layers:       layerPaths,
        Parent:       "", // We don’t have this information
        LayerSources: nil,
    }
    if i, ok := w.manifestByConfig[configDigest]; ok {
        item = &w.manifest[i]
        if err := checkManifestItemsMatch(item, &newItem); err != nil {
            return err
        }
    } else {
        i := len(w.manifest)
        w.manifestByConfig[configDigest] = i
        w.manifest = append(w.manifest, newItem)
        item = &w.manifest[i]
    }

    knownRepoTags := map[string]struct{}{}
    for _, repoTag := range item.RepoTags {
        knownRepoTags[repoTag] = struct{}{}
    }
    for _, tag := range repoTags {
        // For github.com/docker/docker consumers, this works just as well as
        //   refString := ref.String()
        // because when reading the RepoTags strings, github.com/docker/docker/reference
        // normalizes both of them to the same value.
        //
        // Doing it this way to include the normalized-out `docker.io[/library]` does make
        // a difference for github.com/projectatomic/docker consumers, with the
        // “Add --add-registry and --block-registry options to docker daemon” patch.
        // These consumers treat reference strings which include a hostname and reference
        // strings without a hostname differently.
        //
        // Using the host name here is more explicit about the intent, and it has the same
        // effect as (docker pull) in projectatomic/docker, which tags the result using
        // a hostname-qualified reference.
        // See https://github.com/containers/image/issues/72 for a more detailed
        // analysis and explanation.
        refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())

        if _, ok := knownRepoTags[refString]; !ok {
            item.RepoTags = append(item.RepoTags, refString)
            knownRepoTags[refString] = struct{}{}
        }
    }

    return nil
}

// Close writes all outstanding data about images to the archive, and finishes writing data
// to the underlying io.Writer.
// No more images can be added after this is called.
func (w *Writer) Close() error {
    if err := w.lock(); err != nil {
        return err
    }
    defer w.unlock()

    b, err := json.Marshal(&w.manifest)
    if err != nil {
        return err
    }
    if err := w.sendBytesLocked(manifestFileName, b); err != nil {
        return err
    }

    b, err = json.Marshal(w.repositories)
    if err != nil {
        return errors.Wrap(err, "marshaling repositories")
    }
    if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
        return errors.Wrap(err, "writing config json file")
    }

    if err := w.tar.Close(); err != nil {
        return err
    }
    w.tar = nil // Mark the Writer as closed.
    return nil
}

// configPath returns a path we choose for storing a config with the specified digest.
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) configPath(configDigest digest.Digest) string {
    return configDigest.Hex() + ".json"
}

// physicalLayerPath returns a path we choose for storing a layer with the specified digest
// (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
// NOTE: This is an internal implementation detail, not a format property, and can change
// any time.
func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
    // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
    // writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
    // inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
    // tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
    // in the root of the tarball.
    return layerDigest.Hex() + ".tar"
}

type tarFI struct {
    path      string
    size      int64
    isSymlink bool
}

func (t *tarFI) Name() string {
    return t.path
}
func (t *tarFI) Size() int64 {
    return t.size
}
func (t *tarFI) Mode() os.FileMode {
    if t.isSymlink {
        return os.ModeSymlink
    }
    return 0444
}
func (t *tarFI) ModTime() time.Time {
    return time.Unix(0, 0)
}
func (t *tarFI) IsDir() bool {
    return false
}
func (t *tarFI) Sys() interface{} {
    return nil
}

// sendSymlinkLocked sends a symlink into the tar stream.
// The caller must have locked the Writer.
func (w *Writer) sendSymlinkLocked(path string, target string) error {
    hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
    if err != nil {
        return err
    }
logrus.Debugf("Sending as tar link %s -> %s", path, target)
|
||||
return w.tar.WriteHeader(hdr)
|
||||
}
|
||||
|
||||
// sendBytesLocked sends a path into the tar stream.
|
||||
// The caller must have locked the Writer.
|
||||
func (w *Writer) sendBytesLocked(path string, b []byte) error {
|
||||
return w.sendFileLocked(path, int64(len(b)), bytes.NewReader(b))
|
||||
}
|
||||
|
||||
// sendFileLocked sends a file into the tar stream.
|
||||
// The caller must have locked the Writer.
|
||||
func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
|
||||
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
|
||||
    if err != nil {
        return err
    }
logrus.Debugf("Sending as tar file %s", path)
|
||||
if err := w.tar.WriteHeader(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
|
||||
size, err := io.Copy(w.tar, stream)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if size != expectedSize {
|
||||
return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
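Aside: the chainID folding in writeLegacyMetadataLocked is easy to check in isolation. A self-contained sketch of the same computation (the layer digests are hypothetical placeholders):

package main

import (
    "fmt"

    "github.com/opencontainers/go-digest"
)

// chainID folds layer digests left to right, mirroring the computation in
// writeLegacyMetadataLocked (and docker/docker/layer.CreateChainID).
func chainID(layers []digest.Digest) digest.Digest {
    var id digest.Digest
    for _, l := range layers {
        if id == "" {
            id = l
        } else {
            id = digest.Canonical.FromString(id.String() + " " + l.String())
        }
    }
    return id
}

func main() {
    layers := []digest.Digest{
        "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
        "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
    }
    fmt.Println(chainID(layers))
}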
235
vendor/github.com/containers/image/v5/docker/lookaside.go
generated
vendored
Normal file
@@ -0,0 +1,235 @@
package docker

import (
    "fmt"
    "net/url"
    "os"
    "path"
    "path/filepath"
    "strings"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/internal/rootless"
    "github.com/containers/image/v5/types"
    "github.com/containers/storage/pkg/homedir"
    "github.com/ghodss/yaml"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
// You can override this at build time with
// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path'
var systemRegistriesDirPath = builtinRegistriesDirPath

// builtinRegistriesDirPath is the path to registries.d.
// DO NOT change this, instead see systemRegistriesDirPath above.
const builtinRegistriesDirPath = "/etc/containers/registries.d"

// userRegistriesDirPath is the path to the per user registries.d.
var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")

// defaultUserDockerDir is the default sigstore directory for unprivileged user
var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore")

// defaultDockerDir is the default sigstore directory for root
var defaultDockerDir = "/var/lib/containers/sigstore"

// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
// NOTE: Keep this in sync with docs/registries.d.md!
type registryConfiguration struct {
    DefaultDocker *registryNamespace `json:"default-docker"`
    // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
    Docker map[string]registryNamespace `json:"docker"`
}

// registryNamespace defines lookaside locations for a single namespace.
type registryNamespace struct {
    SigStore        string `json:"sigstore"`         // For reading, and if SigStoreStaging is not present, for writing.
    SigStoreStaging string `json:"sigstore-staging"` // For writing only.
}

// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
// Users outside of this file should use SignatureStorageBaseURL and signatureStorageURL below.
type signatureStorageBase *url.URL

// SignatureStorageBaseURL reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
|
||||
// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md
|
||||
// Warning: This function only exposes configuration in registries.d;
|
||||
// just because this function returns an URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S).
|
||||
func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) {
|
||||
dr, ok := ref.(dockerReference)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("ref must be a dockerReference")
|
||||
}
|
||||
// FIXME? Loading and parsing the config could be cached across calls.
|
||||
dirPath := registriesDirPath(sys)
|
||||
logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath)
|
||||
config, err := loadAndMergeConfig(dirPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
topLevel := config.signatureTopLevel(dr, write)
|
||||
var url *url.URL
|
||||
if topLevel != "" {
|
||||
url, err = url.Parse(topLevel)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
|
||||
}
|
||||
} else {
|
||||
// returns default directory if no sigstore specified in configuration file
|
||||
url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID())
|
||||
logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted())
|
||||
}
|
||||
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||
// FIXME? Restrict to explicitly supported schemes?
|
||||
repo := reference.Path(dr.ref) // Note that this is without a tag or digest.
|
||||
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
|
||||
return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String())
|
||||
}
|
||||
url.Path = url.Path + "/" + repo
|
||||
return url, nil
|
||||
}
|
||||
|
||||
// registriesDirPath returns a path to registries.d
|
||||
func registriesDirPath(sys *types.SystemContext) string {
|
||||
return registriesDirPathWithHomeDir(sys, homedir.Get())
|
||||
}
|
||||
|
||||
// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath,
|
||||
// it exists only to allow testing it with an artificial home directory.
|
||||
func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
|
||||
if sys != nil && sys.RegistriesDirPath != "" {
|
||||
return sys.RegistriesDirPath
|
||||
}
|
||||
userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
|
||||
if _, err := os.Stat(userRegistriesDirPath); err == nil {
|
||||
return userRegistriesDirPath
|
||||
}
|
||||
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
|
||||
return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
|
||||
}
|
||||
|
||||
return systemRegistriesDirPath
|
||||
}
|
||||
|
||||
// builtinDefaultSignatureStorageDir returns default signature storage URL as per euid
|
||||
func builtinDefaultSignatureStorageDir(euid int) *url.URL {
|
||||
if euid != 0 {
|
||||
return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)}
|
||||
}
|
||||
return &url.URL{Scheme: "file", Path: defaultDockerDir}
|
||||
}
|
||||
|
||||
// loadAndMergeConfig loads configuration files in dirPath
|
||||
func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
|
||||
mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
|
||||
dockerDefaultMergedFrom := ""
|
||||
nsMergedFrom := map[string]string{}
|
||||
|
||||
dir, err := os.Open(dirPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &mergedConfig, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
configNames, err := dir.Readdirnames(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, configName := range configNames {
|
||||
if !strings.HasSuffix(configName, ".yaml") {
|
||||
continue
|
||||
}
|
||||
configPath := filepath.Join(dirPath, configName)
|
||||
configBytes, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var config registryConfiguration
|
||||
err = yaml.Unmarshal(configBytes, &config)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "parsing %s", configPath)
|
||||
}
|
||||
|
||||
if config.DefaultDocker != nil {
|
||||
if mergedConfig.DefaultDocker != nil {
|
||||
return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
|
||||
dockerDefaultMergedFrom, configPath)
|
||||
}
|
||||
mergedConfig.DefaultDocker = config.DefaultDocker
|
||||
dockerDefaultMergedFrom = configPath
|
||||
}
|
||||
|
||||
for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
|
||||
if _, ok := mergedConfig.Docker[nsName]; ok {
|
||||
return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
|
||||
nsName, nsMergedFrom[nsName], configPath)
|
||||
}
|
||||
mergedConfig.Docker[nsName] = nsConfig
|
||||
nsMergedFrom[nsName] = configPath
|
||||
}
|
||||
}
|
||||
|
||||
return &mergedConfig, nil
|
||||
}
|
||||
|
||||
// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”.
|
||||
// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured.
|
||||
func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
|
||||
if config.Docker != nil {
|
||||
// Look for a full match.
|
||||
identity := ref.PolicyConfigurationIdentity()
|
||||
if ns, ok := config.Docker[identity]; ok {
|
||||
logrus.Debugf(` Using "docker" namespace %s`, identity)
|
||||
if url := ns.signatureTopLevel(write); url != "" {
|
||||
return url
|
||||
}
|
||||
}
|
||||
|
||||
// Look for a match of the possible parent namespaces.
|
||||
for _, name := range ref.PolicyConfigurationNamespaces() {
|
||||
if ns, ok := config.Docker[name]; ok {
|
||||
logrus.Debugf(` Using "docker" namespace %s`, name)
|
||||
if url := ns.signatureTopLevel(write); url != "" {
|
||||
return url
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Look for a default location
|
||||
if config.DefaultDocker != nil {
|
||||
logrus.Debugf(` Using "default-docker" configuration`)
|
||||
if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
|
||||
return url
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”.
|
||||
// or "" if nothing has been configured.
|
||||
func (ns registryNamespace) signatureTopLevel(write bool) string {
|
||||
if write && ns.SigStoreStaging != "" {
|
||||
logrus.Debugf(` Using %s`, ns.SigStoreStaging)
|
||||
return ns.SigStoreStaging
|
||||
}
|
||||
if ns.SigStore != "" {
|
||||
logrus.Debugf(` Using %s`, ns.SigStore)
|
||||
return ns.SigStore
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
|
||||
// base is not nil from the caller
|
||||
// NOTE: Keep this in sync with docs/signature-protocols.md!
|
||||
func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
|
||||
url := *base
|
||||
url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
|
||||
return &url
|
||||
}
|
||||
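For orientation, the per-signature layout built by signatureStorageURL can be reproduced with nothing but net/url and go-digest. A minimal sketch; the base URL below stands in for a hypothetical registries.d sigstore entry already joined with the repo path:

package main

import (
    "fmt"
    "net/url"

    "github.com/opencontainers/go-digest"
)

func main() {
    // Hypothetical lookaside base (sigstore URL + repo path).
    base, err := url.Parse("https://sigs.example.com/store/library/busybox")
    if err != nil {
        panic(err)
    }

    manifestDigest := digest.FromString("example manifest bytes")

    // Mirror the layout of signatureStorageURL: the first signature lives at
    // .../<repo>@<algorithm>=<hex>/signature-1, the second at .../signature-2, etc.
    for index := 0; index < 2; index++ {
        u := *base
        u.Path = fmt.Sprintf("%s@%s=%s/signature-%d", u.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
        fmt.Println(u.String())
    }
}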
vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go (generated, vendored, new file, 77 lines)
@@ -0,0 +1,77 @@
package policyconfiguration

import (
    "strings"

    "github.com/containers/image/v5/docker/reference"
    "github.com/pkg/errors"
)

// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
// as a backend for ImageReference.PolicyConfigurationIdentity.
// The reference must satisfy !reference.IsNameOnly().
func DockerReferenceIdentity(ref reference.Named) (string, error) {
    res := ref.Name()
    tagged, isTagged := ref.(reference.NamedTagged)
    digested, isDigested := ref.(reference.Canonical)
    switch {
    case isTagged && isDigested: // Note that this CAN actually happen.
        return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
    case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
        return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
    case isTagged:
        res = res + ":" + tagged.Tag()
    case isDigested:
        res = res + "@" + digested.Digest().String()
    default: // Coverage: The above was supposed to be exhaustive.
        return "", errors.New("Internal inconsistency, unexpected default branch")
    }
    return res, nil
}

// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search,
// as a backend for ImageReference.PolicyConfigurationNamespaces.
// The reference must satisfy !reference.IsNameOnly().
func DockerReferenceNamespaces(ref reference.Named) []string {
    // Look for a match of the repository, and then of the possible parent
    // namespaces. Note that this only happens on the expanded host names
    // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox",
    // then in its parent "docker.io/library"; in none of "busybox",
    // un-namespaced "library" nor in "" supposedly implicitly representing "library/".
    //
    // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last
    // iteration matches the host name (for any namespace).
    res := []string{}
    name := ref.Name()
    for {
        res = append(res, name)

        lastSlash := strings.LastIndex(name, "/")
        if lastSlash == -1 {
            break
        }
        name = name[:lastSlash]
    }

    // Strip port number if any, before appending to res slice.
    // Currently, the most compatible behavior is to return
    // example.com:8443/ns, example.com:8443, *.com.
    // If a port number is not specified, the expected behavior would be
    // example.com/ns, example.com, *.com
    portNumColon := strings.Index(name, ":")
    if portNumColon != -1 {
        name = name[:portNumColon]
    }

    // Append wildcarded domains to res slice
    for {
        firstDot := strings.Index(name, ".")
        if firstDot == -1 {
            break
        }
        name = name[firstDot+1:]

        res = append(res, "*."+name)
    }
    return res
}
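A short sketch of the search order DockerReferenceNamespaces produces, assuming github.com/containers/image/v5 is importable as a module; the reference value is purely illustrative:

package main

import (
    "fmt"

    "github.com/containers/image/v5/docker/policyconfiguration"
    "github.com/containers/image/v5/docker/reference"
)

func main() {
    // The namespace list walks up path components, then keeps the bare host
    // (with port), and finally appends wildcarded domains without the port.
    ref, err := reference.ParseNormalizedNamed("example.com:8443/ns/repo")
    if err != nil {
        panic(err)
    }
    for _, ns := range policyconfiguration.DockerReferenceNamespaces(ref) {
        fmt.Println(ns)
    }
    // Expected order, per the loops above:
    //   example.com:8443/ns/repo
    //   example.com:8443/ns
    //   example.com:8443
    //   *.com
}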
vendor/github.com/containers/image/v5/docker/reference/README.md (generated, vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8,
except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset.
vendor/github.com/containers/image/v5/docker/reference/helpers.go (generated, vendored, new file, 42 lines)
@@ -0,0 +1,42 @@
package reference

import "path"

// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
    if _, ok := ref.(NamedTagged); ok {
        return false
    }
    if _, ok := ref.(Canonical); ok {
        return false
    }
    return true
}

// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
    if nn, ok := ref.(normalizedNamed); ok {
        return nn.Familiar().Name()
    }
    return ref.Name()
}

// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
    if nn, ok := ref.(normalizedNamed); ok {
        return nn.Familiar().String()
    }
    return ref.String()
}

// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
    matched, err := path.Match(pattern, FamiliarString(ref))
    if namedRef, isNamed := ref.(Named); isNamed && !matched {
        matched, _ = path.Match(pattern, FamiliarName(namedRef))
    }
    return matched, err
}
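A sketch of the normalized-to-familiar round trip these helpers provide, under the same module-path assumption as above; expected output is noted in comments:

package main

import (
    "fmt"

    "github.com/containers/image/v5/docker/reference"
)

func main() {
    // ParseNormalizedNamed expands "busybox:1.36" to the fully qualified
    // "docker.io/library/busybox:1.36"; FamiliarString shortens it back.
    ref, err := reference.ParseNormalizedNamed("busybox:1.36")
    if err != nil {
        panic(err)
    }
    fmt.Println(ref.String())                  // docker.io/library/busybox:1.36
    fmt.Println(reference.FamiliarString(ref)) // busybox:1.36

    // FamiliarMatch applies path.Match patterns against the familiar form.
    ok, err := reference.FamiliarMatch("busybox:*", ref)
    fmt.Println(ok, err) // true <nil>
}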
vendor/github.com/containers/image/v5/docker/reference/normalize.go (generated, vendored, new file, 181 lines)
@@ -0,0 +1,181 @@
package reference

import (
    "errors"
    "fmt"
    "strings"

    "github.com/opencontainers/go-digest"
)

var (
    legacyDefaultDomain = "index.docker.io"
    defaultDomain       = "docker.io"
    officialRepoName    = "library"
    defaultTag          = "latest"
)

// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu".
type normalizedNamed interface {
    Named
    Familiar() Named
}

// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
func ParseNormalizedNamed(s string) (Named, error) {
    if ok := anchoredIdentifierRegexp.MatchString(s); ok {
        return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
    }
    domain, remainder := splitDockerDomain(s)
    var remoteName string
    if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
        remoteName = remainder[:tagSep]
    } else {
        remoteName = remainder
    }
    if strings.ToLower(remoteName) != remoteName {
        return nil, errors.New("invalid reference format: repository name must be lowercase")
    }

    ref, err := Parse(domain + "/" + remainder)
    if err != nil {
        return nil, err
    }
    named, isNamed := ref.(Named)
    if !isNamed {
        return nil, fmt.Errorf("reference %s has no name", ref.String())
    }
    return named, nil
}

// ParseDockerRef normalizes the image reference following the docker convention. This is added
// mainly for backward compatibility.
// The reference returned can only be either tagged or digested. For reference contains both tag
// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
func ParseDockerRef(ref string) (Named, error) {
    named, err := ParseNormalizedNamed(ref)
    if err != nil {
        return nil, err
    }
    if _, ok := named.(NamedTagged); ok {
        if canonical, ok := named.(Canonical); ok {
            // The reference is both tagged and digested, only
            // return digested.
            newNamed, err := WithName(canonical.Name())
            if err != nil {
                return nil, err
            }
            newCanonical, err := WithDigest(newNamed, canonical.Digest())
            if err != nil {
                return nil, err
            }
            return newCanonical, nil
        }
    }
    return TagNameOnly(named), nil
}

// splitDockerDomain splits a repository name to domain and remotename string.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
func splitDockerDomain(name string) (domain, remainder string) {
    i := strings.IndexRune(name, '/')
    if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
        domain, remainder = defaultDomain, name
    } else {
        domain, remainder = name[:i], name[i+1:]
    }
    if domain == legacyDefaultDomain {
        domain = defaultDomain
    }
    if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
        remainder = officialRepoName + "/" + remainder
    }
    return
}

// familiarizeName returns a shortened version of the name familiar
// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized named only reference.
func familiarizeName(named namedRepository) repository {
    repo := repository{
        domain: named.Domain(),
        path:   named.Path(),
    }

    if repo.domain == defaultDomain {
        repo.domain = ""
        // Handle official repositories which have the pattern "library/<official repo name>"
        if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
            repo.path = split[1]
        }
    }
    return repo
}

func (r reference) Familiar() Named {
    return reference{
        namedRepository: familiarizeName(r.namedRepository),
        tag:             r.tag,
        digest:          r.digest,
    }
}

func (r repository) Familiar() Named {
    return familiarizeName(r)
}

func (t taggedReference) Familiar() Named {
    return taggedReference{
        namedRepository: familiarizeName(t.namedRepository),
        tag:             t.tag,
    }
}

func (c canonicalReference) Familiar() Named {
    return canonicalReference{
        namedRepository: familiarizeName(c.namedRepository),
        digest:          c.digest,
    }
}

// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
func TagNameOnly(ref Named) Named {
    if IsNameOnly(ref) {
        namedTagged, err := WithTag(ref, defaultTag)
        if err != nil {
            // Default tag must be valid, to create a NamedTagged
            // type with non-validated input the WithTag function
            // should be used instead
            panic(err)
        }
        return namedTagged
    }
    return ref
}

// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
    if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
        return digestReference("sha256:" + ref), nil
    }
    if dgst, err := digest.Parse(ref); err == nil {
        return digestReference(dgst), nil
    }

    return ParseNormalizedNamed(ref)
}
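A sketch of the tag-versus-digest behavior ParseDockerRef documents above, reusing the digest from its own comment; assumes the module is importable:

package main

import (
    "fmt"

    "github.com/containers/image/v5/docker/reference"
)

func main() {
    // A reference carrying both a tag and a digest: ParseDockerRef drops the
    // tag and keeps only the digest.
    in := "busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa"
    named, err := reference.ParseDockerRef(in)
    if err != nil {
        panic(err)
    }
    fmt.Println(named.String())
    // docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa

    // A bare name gets the default "latest" tag via TagNameOnly.
    named, _ = reference.ParseDockerRef("busybox")
    fmt.Println(named.String()) // docker.io/library/busybox:latest
}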
vendor/github.com/containers/image/v5/docker/reference/reference.go (generated, vendored, new file, 433 lines)
@@ -0,0 +1,433 @@
// Package reference provides a general type to represent any way of referencing images within the registry.
// Its main purpose is to abstract tags and digests (content-addressable hash).
//
// Grammar
//
//	reference                  := name [ ":" tag ] [ "@" digest ]
//	name                       := [domain '/'] path-component ['/' path-component]*
//	domain                     := domain-component ['.' domain-component]* [':' port-number]
//	domain-component           := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
//	port-number                := /[0-9]+/
//	path-component             := alpha-numeric [separator alpha-numeric]*
//	alpha-numeric              := /[a-z0-9]+/
//	separator                  := /[_.]|__|[-]*/
//
//	tag                        := /[\w][\w.-]{0,127}/
//
//	digest                     := digest-algorithm ":" digest-hex
//	digest-algorithm           := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
//	digest-algorithm-separator := /[+.-_]/
//	digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
//	digest-hex                 := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
//
//	identifier                 := /[a-f0-9]{64}/
//	short-identifier           := /[a-f0-9]{6,64}/
package reference

import (
    "errors"
    "fmt"
    "strings"

    "github.com/opencontainers/go-digest"
)

const (
    // NameTotalLengthMax is the maximum total number of characters in a repository name.
    NameTotalLengthMax = 255
)

var (
    // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
    ErrReferenceInvalidFormat = errors.New("invalid reference format")

    // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
    ErrTagInvalidFormat = errors.New("invalid tag format")

    // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
    ErrDigestInvalidFormat = errors.New("invalid digest format")

    // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
    ErrNameContainsUppercase = errors.New("repository name must be lowercase")

    // ErrNameEmpty is returned for empty, invalid repository names.
    ErrNameEmpty = errors.New("repository name must have at least one component")

    // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
    ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)

    // ErrNameNotCanonical is returned when a name is not canonical.
    ErrNameNotCanonical = errors.New("repository name must be canonical")
)

// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
    // String returns the full reference
    String() string
}

// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
    reference Reference
}

// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
    return Field{reference}
}

// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
    return f.reference
}

// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
    return []byte(f.reference.String()), nil
}

// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
    r, err := Parse(string(p))
    if err != nil {
        return err
    }

    f.reference = r
    return nil
}

// Named is an object with a full name
type Named interface {
    Reference
    Name() string
}

// Tagged is an object which has a tag
type Tagged interface {
    Reference
    Tag() string
}

// NamedTagged is an object including a name and tag.
type NamedTagged interface {
    Named
    Tag() string
}

// Digested is an object which has a digest
// in which it can be referenced by
type Digested interface {
    Reference
    Digest() digest.Digest
}

// Canonical reference is an object with a fully unique
// name including a name with domain and digest
type Canonical interface {
    Named
    Digest() digest.Digest
}

// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
    Named
    Domain() string
    Path() string
}

// Domain returns the domain part of the Named reference
func Domain(named Named) string {
    if r, ok := named.(namedRepository); ok {
        return r.Domain()
    }
    domain, _ := splitDomain(named.Name())
    return domain
}

// Path returns the name without the domain part of the Named reference
func Path(named Named) (name string) {
    if r, ok := named.(namedRepository); ok {
        return r.Path()
    }
    _, path := splitDomain(named.Name())
    return path
}

func splitDomain(name string) (string, string) {
    match := anchoredNameRegexp.FindStringSubmatch(name)
    if len(match) != 3 {
        return "", name
    }
    return match[1], match[2]
}

// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
func SplitHostname(named Named) (string, string) {
    if r, ok := named.(namedRepository); ok {
        return r.Domain(), r.Path()
    }
    return splitDomain(named.Name())
}

// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
    matches := ReferenceRegexp.FindStringSubmatch(s)
    if matches == nil {
        if s == "" {
            return nil, ErrNameEmpty
        }
        if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
            return nil, ErrNameContainsUppercase
        }
        return nil, ErrReferenceInvalidFormat
    }

    if len(matches[1]) > NameTotalLengthMax {
        return nil, ErrNameTooLong
    }

    var repo repository

    nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
    if len(nameMatch) == 3 {
        repo.domain = nameMatch[1]
        repo.path = nameMatch[2]
    } else {
        repo.domain = ""
        repo.path = matches[1]
    }

    ref := reference{
        namedRepository: repo,
        tag:             matches[2],
    }
    if matches[3] != "" {
        var err error
        ref.digest, err = digest.Parse(matches[3])
        if err != nil {
            return nil, err
        }
    }

    r := getBestReferenceType(ref)
    if r == nil {
        return nil, ErrNameEmpty
    }

    return r, nil
}

// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
    named, err := ParseNormalizedNamed(s)
    if err != nil {
        return nil, err
    }
    if named.String() != s {
        return nil, ErrNameNotCanonical
    }
    return named, nil
}

// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
    if len(name) > NameTotalLengthMax {
        return nil, ErrNameTooLong
    }

    match := anchoredNameRegexp.FindStringSubmatch(name)
    if match == nil || len(match) != 3 {
        return nil, ErrReferenceInvalidFormat
    }
    return repository{
        domain: match[1],
        path:   match[2],
    }, nil
}

// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
    if !anchoredTagRegexp.MatchString(tag) {
        return nil, ErrTagInvalidFormat
    }
    var repo repository
    if r, ok := name.(namedRepository); ok {
        repo.domain = r.Domain()
        repo.path = r.Path()
    } else {
        repo.path = name.Name()
    }
    if canonical, ok := name.(Canonical); ok {
        return reference{
            namedRepository: repo,
            tag:             tag,
            digest:          canonical.Digest(),
        }, nil
    }
    return taggedReference{
        namedRepository: repo,
        tag:             tag,
    }, nil
}

// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
    if !anchoredDigestRegexp.MatchString(digest.String()) {
        return nil, ErrDigestInvalidFormat
    }
    var repo repository
    if r, ok := name.(namedRepository); ok {
        repo.domain = r.Domain()
        repo.path = r.Path()
    } else {
        repo.path = name.Name()
    }
    if tagged, ok := name.(Tagged); ok {
        return reference{
            namedRepository: repo,
            tag:             tagged.Tag(),
            digest:          digest,
        }, nil
    }
    return canonicalReference{
        namedRepository: repo,
        digest:          digest,
    }, nil
}

// TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named {
    domain, path := SplitHostname(ref)
    return repository{
        domain: domain,
        path:   path,
    }
}

func getBestReferenceType(ref reference) Reference {
    if ref.Name() == "" {
        // Allow digest only references
        if ref.digest != "" {
            return digestReference(ref.digest)
        }
        return nil
    }
    if ref.tag == "" {
        if ref.digest != "" {
            return canonicalReference{
                namedRepository: ref.namedRepository,
                digest:          ref.digest,
            }
        }
        return ref.namedRepository
    }
    if ref.digest == "" {
        return taggedReference{
            namedRepository: ref.namedRepository,
            tag:             ref.tag,
        }
    }

    return ref
}

type reference struct {
    namedRepository
    tag    string
    digest digest.Digest
}

func (r reference) String() string {
    return r.Name() + ":" + r.tag + "@" + r.digest.String()
}

func (r reference) Tag() string {
    return r.tag
}

func (r reference) Digest() digest.Digest {
    return r.digest
}

type repository struct {
    domain string
    path   string
}

func (r repository) String() string {
    return r.Name()
}

func (r repository) Name() string {
    if r.domain == "" {
        return r.path
    }
    return r.domain + "/" + r.path
}

func (r repository) Domain() string {
    return r.domain
}

func (r repository) Path() string {
    return r.path
}

type digestReference digest.Digest

func (d digestReference) String() string {
    return digest.Digest(d).String()
}

func (d digestReference) Digest() digest.Digest {
    return digest.Digest(d)
}

type taggedReference struct {
    namedRepository
    tag string
}

func (t taggedReference) String() string {
    return t.Name() + ":" + t.tag
}

func (t taggedReference) Tag() string {
    return t.tag
}

type canonicalReference struct {
    namedRepository
    digest digest.Digest
}

func (c canonicalReference) String() string {
    return c.Name() + "@" + c.digest.String()
}

func (c canonicalReference) Digest() digest.Digest {
    return c.digest
}
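A sketch of Parse plus the interface assertions these types are built for, assuming the module is importable; note that Parse is purely syntactic, with no docker.io defaulting:

package main

import (
    "fmt"

    "github.com/containers/image/v5/docker/reference"
)

func main() {
    ref, err := reference.Parse("registry.example.com:5000/team/app:v1")
    if err != nil {
        panic(err)
    }

    // Parse returns the most specific type; here a tagged, named reference.
    if named, ok := ref.(reference.Named); ok {
        fmt.Println("name:  ", named.Name())            // registry.example.com:5000/team/app
        fmt.Println("domain:", reference.Domain(named)) // registry.example.com:5000
        fmt.Println("path:  ", reference.Path(named))   // team/app
    }
    if tagged, ok := ref.(reference.Tagged); ok {
        fmt.Println("tag:   ", tagged.Tag()) // v1
    }
}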
vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go (generated, vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
package reference

// Return true if the specified string fully matches `IdentifierRegexp`.
func IsFullIdentifier(s string) bool {
    return anchoredIdentifierRegexp.MatchString(s)
}
vendor/github.com/containers/image/v5/docker/reference/regexp.go (generated, vendored, new file, 143 lines)
@@ -0,0 +1,143 @@
package reference

import "regexp"

var (
    // alphaNumericRegexp defines the alpha numeric atom, typically a
    // component of names. This only allows lower case characters and digits.
    alphaNumericRegexp = match(`[a-z0-9]+`)

    // separatorRegexp defines the separators allowed to be embedded in name
    // components. This allows one period, one or two underscores and multiple
    // dashes.
    separatorRegexp = match(`(?:[._]|__|[-]*)`)

    // nameComponentRegexp restricts registry path component names to start
    // with at least one letter or number, with following parts able to be
    // separated by one period, one or two underscores and multiple dashes.
    nameComponentRegexp = expression(
        alphaNumericRegexp,
        optional(repeated(separatorRegexp, alphaNumericRegexp)))

    // domainComponentRegexp restricts the registry domain component of a
    // repository name to start with a component as defined by DomainRegexp
    // and followed by an optional port.
    domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

    // DomainRegexp defines the structure of potential domain components
    // that may be part of image names. This is purposely a subset of what is
    // allowed by DNS to ensure backwards compatibility with Docker image
    // names.
    DomainRegexp = expression(
        domainComponentRegexp,
        optional(repeated(literal(`.`), domainComponentRegexp)),
        optional(literal(`:`), match(`[0-9]+`)))

    // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
    TagRegexp = match(`[\w][\w.-]{0,127}`)

    // anchoredTagRegexp matches valid tag names, anchored at the start and
    // end of the matched string.
    anchoredTagRegexp = anchored(TagRegexp)

    // DigestRegexp matches valid digests.
    DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

    // anchoredDigestRegexp matches valid digests, anchored at the start and
    // end of the matched string.
    anchoredDigestRegexp = anchored(DigestRegexp)

    // NameRegexp is the format for the name component of references. The
    // regexp has capturing groups for the domain and name part omitting
    // the separating forward slash from either.
    NameRegexp = expression(
        optional(DomainRegexp, literal(`/`)),
        nameComponentRegexp,
        optional(repeated(literal(`/`), nameComponentRegexp)))

    // anchoredNameRegexp is used to parse a name value, capturing the
    // domain and trailing components.
    anchoredNameRegexp = anchored(
        optional(capture(DomainRegexp), literal(`/`)),
        capture(nameComponentRegexp,
            optional(repeated(literal(`/`), nameComponentRegexp))))

    // ReferenceRegexp is the full supported format of a reference. The regexp
    // is anchored and has capturing groups for name, tag, and digest
    // components.
    ReferenceRegexp = anchored(capture(NameRegexp),
        optional(literal(":"), capture(TagRegexp)),
        optional(literal("@"), capture(DigestRegexp)))

    // IdentifierRegexp is the format for string identifier used as a
    // content addressable identifier using sha256. These identifiers
    // are like digests without the algorithm, since sha256 is used.
    IdentifierRegexp = match(`([a-f0-9]{64})`)

    // ShortIdentifierRegexp is the format used to represent a prefix
    // of an identifier. A prefix may be used to match a sha256 identifier
    // within a list of trusted identifiers.
    ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

    // anchoredIdentifierRegexp is used to check or match an
    // identifier value, anchored at start and end of string.
    anchoredIdentifierRegexp = anchored(IdentifierRegexp)

    // anchoredShortIdentifierRegexp is used to check if a value
    // is a possible identifier prefix, anchored at start and end
    // of string.
    anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)

// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
    re := match(regexp.QuoteMeta(s))

    if _, complete := re.LiteralPrefix(); !complete {
        panic("must be a literal")
    }

    return re
}

// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
    var s string
    for _, re := range res {
        s += re.String()
    }

    return match(s)
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
    return match(group(expression(res...)).String() + `?`)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
    return match(group(expression(res...)).String() + `+`)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
    return match(`(?:` + expression(res...).String() + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
    return match(`(` + expression(res...).String() + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
    return match(`^` + expression(res...).String() + `$`)
}
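The three capture groups of the exported ReferenceRegexp (name, tag, digest) can be inspected directly; a small sketch with an illustrative reference string:

package main

import (
    "fmt"

    "github.com/containers/image/v5/docker/reference"
)

func main() {
    // ReferenceRegexp is anchored; group 1 is the name, 2 the tag, 3 the digest.
    m := reference.ReferenceRegexp.FindStringSubmatch(
        "quay.io/ns/app:v2@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
    if m == nil {
        panic("no match")
    }
    fmt.Println("name:  ", m[1]) // quay.io/ns/app
    fmt.Println("tag:   ", m[2]) // v2
    fmt.Println("digest:", m[3]) // sha256:7cc4b5ae...
}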
vendor/github.com/containers/image/v5/docker/wwwauthenticate.go (generated, vendored, new file, 159 lines)
@@ -0,0 +1,159 @@
package docker

// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.

import (
    "net/http"
    "strings"
)

// challenge carries information from a WWW-Authenticate response header.
// See RFC 7235.
type challenge struct {
    // Scheme is the auth-scheme according to RFC 7235
    Scheme string

    // Parameters are the auth-params according to RFC 7235
    Parameters map[string]string
}

// Octet types from RFC 7230.
type octetType byte

var octetTypes [256]octetType

const (
    isToken octetType = 1 << iota
    isSpace
)

func init() {
    // OCTET      = <any 8-bit sequence of data>
    // CHAR       = <any US-ASCII character (octets 0 - 127)>
    // CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
    // CR         = <US-ASCII CR, carriage return (13)>
    // LF         = <US-ASCII LF, linefeed (10)>
    // SP         = <US-ASCII SP, space (32)>
    // HT         = <US-ASCII HT, horizontal-tab (9)>
    // <">        = <US-ASCII double-quote mark (34)>
    // CRLF       = CR LF
    // LWS        = [CRLF] 1*( SP | HT )
    // TEXT       = <any OCTET except CTLs, but including LWS>
    // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
    //              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
    // token      = 1*<any CHAR except CTLs or separators>
    // qdtext     = <any TEXT except <">>

    for c := 0; c < 256; c++ {
        var t octetType
        isCtl := c <= 31 || c == 127
        isChar := 0 <= c && c <= 127
        isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
        if strings.ContainsRune(" \t\r\n", rune(c)) {
            t |= isSpace
        }
        if isChar && !isCtl && !isSeparator {
            t |= isToken
        }
        octetTypes[c] = t
    }
}

func parseAuthHeader(header http.Header) []challenge {
    challenges := []challenge{}
    for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
        v, p := parseValueAndParams(h)
        if v != "" {
            challenges = append(challenges, challenge{Scheme: v, Parameters: p})
        }
    }
    return challenges
}

// NOTE: This is not a fully compliant parser per RFC 7235:
// Most notably it does not support more than one challenge within a single header
// Some of the whitespace parsing also seems noncompliant.
// But it is clearly better than what we used to have…
func parseValueAndParams(header string) (value string, params map[string]string) {
    params = make(map[string]string)
    value, s := expectToken(header)
    if value == "" {
        return
    }
    value = strings.ToLower(value)
    s = "," + skipSpace(s)
    for strings.HasPrefix(s, ",") {
        var pkey string
        pkey, s = expectToken(skipSpace(s[1:]))
        if pkey == "" {
            return
        }
        if !strings.HasPrefix(s, "=") {
            return
        }
        var pvalue string
        pvalue, s = expectTokenOrQuoted(s[1:])
        if pvalue == "" {
            return
        }
        pkey = strings.ToLower(pkey)
        params[pkey] = pvalue
        s = skipSpace(s)
    }
    return
}

func skipSpace(s string) (rest string) {
    i := 0
    for ; i < len(s); i++ {
        if octetTypes[s[i]]&isSpace == 0 {
            break
        }
    }
    return s[i:]
}

func expectToken(s string) (token, rest string) {
    i := 0
    for ; i < len(s); i++ {
        if octetTypes[s[i]]&isToken == 0 {
            break
        }
    }
    return s[:i], s[i:]
}

func expectTokenOrQuoted(s string) (value string, rest string) {
    if !strings.HasPrefix(s, "\"") {
        return expectToken(s)
    }
    s = s[1:]
    for i := 0; i < len(s); i++ {
        switch s[i] {
        case '"':
            return s[:i], s[i+1:]
        case '\\':
            p := make([]byte, len(s)-1)
            j := copy(p, s[:i])
            escape := true
            for i = i + 1; i < len(s); i++ {
                b := s[i]
                switch {
                case escape:
                    escape = false
                    p[j] = b
                    j++
                case b == '\\':
                    escape = true
                case b == '"':
                    return string(p[:j]), s[i+1:]
                default:
                    p[j] = b
                    j++
                }
            }
            return "", ""
        }
    }
    return "", ""
}
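Since parseAuthHeader is unexported, a usage sketch would have to live inside this package, e.g. as a test; this is a hypothetical test, and the endpoint names in the header value are illustrative only:

package docker

import (
    "net/http"
    "testing"
)

func TestParseAuthHeaderSketch(t *testing.T) {
    h := http.Header{}
    // A typical registry bearer challenge on a 401 response.
    h.Add("WWW-Authenticate",
        `Bearer realm="https://auth.example.com/token",service="registry.example.com",scope="repository:ns/app:pull"`)

    cs := parseAuthHeader(h)
    if len(cs) != 1 {
        t.Fatalf("expected one challenge, got %d", len(cs))
    }
    // The scheme is lowercased by parseValueAndParams; the quoted auth-params
    // end up unescaped in the Parameters map.
    t.Logf("scheme=%s realm=%s service=%s",
        cs[0].Scheme, cs[0].Parameters["realm"], cs[0].Parameters["service"])
    // scheme=bearer realm=https://auth.example.com/token service=registry.example.com
}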
vendor/github.com/containers/image/v5/image/docker_list.go (generated, vendored, new file, 34 lines)
@@ -0,0 +1,34 @@
package image

import (
    "context"

    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/types"
    "github.com/pkg/errors"
)

func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
    list, err := manifest.Schema2ListFromManifest(manblob)
    if err != nil {
        return nil, errors.Wrapf(err, "parsing schema2 manifest list")
    }
    targetManifestDigest, err := list.ChooseInstance(sys)
    if err != nil {
        return nil, errors.Wrapf(err, "choosing image instance")
    }
    manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
    if err != nil {
        return nil, errors.Wrapf(err, "loading manifest for target platform")
    }

    matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
    if err != nil {
        return nil, errors.Wrap(err, "computing manifest digest")
    }
    if !matches {
        return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
    }

    return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
}
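manifestSchema2FromManifestList is driven by manifest.Schema2ListFromManifest and ChooseInstance; a standalone sketch of just the selection step, with placeholder digests and sizes and a pinned platform so the result does not depend on the host:

package main

import (
    "fmt"

    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/types"
)

func main() {
    // A hand-written two-entry manifest list; digests and sizes are placeholders.
    list := []byte(`{
      "schemaVersion": 2,
      "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
      "manifests": [
        {"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
         "size": 7143,
         "digest": "sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa",
         "platform": {"architecture": "amd64", "os": "linux"}},
        {"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
         "size": 7682,
         "digest": "sha256:1b26826f602946860c279fce658f31050cff2c596583af237d971f4629b57792",
         "platform": {"architecture": "arm64", "os": "linux"}}
      ]
    }`)

    l, err := manifest.Schema2ListFromManifest(list)
    if err != nil {
        panic(err)
    }
    // Pin the platform choice so the example is deterministic on any host.
    sys := &types.SystemContext{ArchitectureChoice: "amd64", OSChoice: "linux"}
    d, err := l.ChooseInstance(sys)
    if err != nil {
        panic(err)
    }
    fmt.Println("selected instance:", d) // the amd64/linux digest
}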
vendor/github.com/containers/image/v5/image/docker_schema1.go (generated, vendored, new file, 248 lines)
@@ -0,0 +1,248 @@
package image

import (
    "context"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/types"
    "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

type manifestSchema1 struct {
    m *manifest.Schema1
}

func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
    m, err := manifest.Schema1FromManifest(manifestBlob)
    if err != nil {
        return nil, err
    }
    return &manifestSchema1{m: m}, nil
}

// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) {
    m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture)
    if err != nil {
        return nil, err
    }
    return &manifestSchema1{m: m}, nil
}

func (m *manifestSchema1) serialize() ([]byte, error) {
    return m.m.Serialize()
}

func (m *manifestSchema1) manifestMIMEType() string {
    return manifest.DockerV2Schema1SignedMediaType
}

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
    return m.m.ConfigInfo()
}

// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
// The result is cached; it is OK to call this however often you need.
func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) {
    return nil, nil
}

// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
    v2s2, err := m.convertToManifestSchema2(ctx, &types.ManifestUpdateOptions{})
    if err != nil {
        return nil, err
    }
    return v2s2.OCIConfig(ctx)
}

// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
    return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
// It returns false if the manifest does not embed a Docker reference.
// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
    // This is a bit convoluted: We can’t just have a "get embedded docker reference" method
    // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually
    // embed a full docker/distribution reference, but only the repo name and tag (without the host name).
    // So we would have to provide a “return repo without host name, and tag” getter for the generic code,
    // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the
    // generic copy code needs to know about is reference.Named and that a manifest may need updating
    // for some destinations.
    name := reference.Path(ref)
    var tag string
    if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
        tag = tagged.Tag()
    } else {
        tag = ""
    }
    return m.m.Name != name || m.m.Tag != tag
}

// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) {
    return m.m.Inspect(nil)
}

// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
// (most importantly it forces us to download the full layers even if they are already present at the destination).
func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
    return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest)
}

// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
    copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}

    // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature); so,
    // handle conversions between them by doing nothing.
    if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType {
        converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
            imgspecv1.MediaTypeImageManifest:  copy.convertToManifestOCI1,
            manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic,
        })
        if err != nil {
            return nil, err
        }

        if converted != nil {
            return converted, nil
        }
    }

    // No conversion required, update manifest
    if options.LayerInfos != nil {
        if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
            return nil, err
        }
    }
    if options.EmbeddedDockerReference != nil {
        copy.m.Name = reference.Path(options.EmbeddedDockerReference)
        if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
            copy.m.Tag = tagged.Tag()
        } else {
            copy.m.Tag = ""
        }
    }

    return memoryImageFromManifest(&copy), nil
}

// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema1 object.
//
// We need this function just because a function returning an implementation of the genericManifest
// interface is not automatically assignable to a function type returning the genericManifest interface
func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
    return m.convertToManifestSchema2(ctx, options)
}

// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema1 object.
//
// Based on github.com/docker/docker/distribution/pull_v2.go
func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) {
    uploadedLayerInfos := options.InformationOnly.LayerInfos
    layerDiffIDs := options.InformationOnly.LayerDiffIDs

    if len(m.m.ExtractedV1Compatibility) == 0 {
        // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
        return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
    }
    if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) {
        return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
    }
    if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
        return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
    }
    if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
        return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
    }

    var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
    if options.LayerInfos != nil {
        if len(options.LayerInfos) != len(m.m.FSLayers) {
            return nil, errors.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
                len(options.LayerInfos), len(m.m.FSLayers))
        }
        convertedLayerUpdates = []types.BlobInfo{}
    }

    // Build a list of the diffIDs for the non-empty layers.
    diffIDs := []digest.Digest{}
    var layers []manifest.Schema2Descriptor
    for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- {
        v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index

        if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway {
            var size int64
            if uploadedLayerInfos != nil {
                size = uploadedLayerInfos[v2Index].Size
            }
            var d digest.Digest
            if layerDiffIDs != nil {
                d = layerDiffIDs[v2Index]
            }
            layers = append(layers, manifest.Schema2Descriptor{
                MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
                Size:      size,
                Digest:    m.m.FSLayers[v1Index].BlobSum,
            })
            if options.LayerInfos != nil {
                convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[v2Index])
            }
            diffIDs = append(diffIDs, d)
        }
    }
    configJSON, err := m.m.ToSchema2Config(diffIDs)
    if err != nil {
        return nil, err
    }
    configDescriptor := manifest.Schema2Descriptor{
        MediaType: "application/vnd.docker.container.image.v1+json",
        Size:      int64(len(configJSON)),
        Digest:    digest.FromBytes(configJSON),
    }

    if options.LayerInfos != nil {
        options.LayerInfos = convertedLayerUpdates
    }
    return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil
}

// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema1 object.
func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
    // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest
|
||||
m2, err := m.convertToManifestSchema2(ctx, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m2.convertToManifestOCI1(ctx, options)
|
||||
}
|
||||
|
||||
// SupportsEncryption returns if encryption is supported for the manifest type
|
||||
func (m *manifestSchema1) SupportsEncryption(context.Context) bool {
|
||||
return false
|
||||
}
|
||||
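The schema1 conversion path above is driven entirely through the public types.Image interface. A minimal sketch of a caller, assuming a hypothetical helper name toOCI; because UpdatedImageNeedsLayerDiffIDs returns true for this target MIME type on a schema1 source, the caller also has to populate InformationOnly:

package example

import (
	"context"

	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// toOCI (hypothetical name) requests an OCI-converted, in-memory variant of
// img; the original image object is not modified.
func toOCI(ctx context.Context, img types.Image, diffIDs []digest.Digest) (types.Image, error) {
	return img.UpdatedImage(ctx, types.ManifestUpdateOptions{
		ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
		InformationOnly: types.ManifestUpdateInformation{
			// Needed when the source is schema1, per UpdatedImageNeedsLayerDiffIDs above.
			LayerDiffIDs: diffIDs,
		},
	})
}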
400
vendor/github.com/containers/image/v5/image/docker_schema2.go
generated
vendored
Normal file

@ -0,0 +1,400 @@
package image

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/iolimits"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/blobinfocache/none"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let’s use the same values.
var GzippedEmptyLayer = []byte{
	31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
	0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}

// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")

type manifestSchema2 struct {
	src        types.ImageSource // May be nil if configBlob is not nil
	configBlob []byte            // If set, corresponds to contents of ConfigDescriptor.
	m          *manifest.Schema2
}

func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
	m, err := manifest.Schema2FromManifest(manifestBlob)
	if err != nil {
		return nil, err
	}
	return &manifestSchema2{
		src: src,
		m:   m,
	}, nil
}

// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
	return &manifestSchema2{
		src:        src,
		configBlob: configBlob,
		m:          manifest.Schema2FromComponents(config, layers),
	}
}

func (m *manifestSchema2) serialize() ([]byte, error) {
	return m.m.Serialize()
}

func (m *manifestSchema2) manifestMIMEType() string {
	return m.m.MediaType
}

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
	return m.m.ConfigInfo()
}

// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
	configBlob, err := m.ConfigBlob(ctx)
	if err != nil {
		return nil, err
	}
	// docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
	// than OCI v1. This unmarshal makes sure we drop docker v2s2
	// fields that aren't needed in OCI v1.
	configOCI := &imgspecv1.Image{}
	if err := json.Unmarshal(configBlob, configOCI); err != nil {
		return nil, err
	}
	return configOCI, nil
}

// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
// The result is cached; it is OK to call this however often you need.
func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
	if m.configBlob == nil {
		if m.src == nil {
			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
		}
		stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
		if err != nil {
			return nil, err
		}
		defer stream.Close()
		blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
		if err != nil {
			return nil, err
		}
		computedDigest := digest.FromBytes(blob)
		if computedDigest != m.m.ConfigDescriptor.Digest {
			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
		}
		m.configBlob = blob
	}
	return m.configBlob, nil
}

// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
// It returns false if the manifest does not embed a Docker reference.
// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
	return false
}

// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
	getter := func(info types.BlobInfo) ([]byte, error) {
		if info.Digest != m.ConfigInfo().Digest {
			// Shouldn't ever happen
			return nil, errors.New("asked for a different config blob")
		}
		config, err := m.ConfigBlob(ctx)
		if err != nil {
			return nil, err
		}
		return config, nil
	}
	return m.m.Inspect(getter)
}

// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
// (most importantly it forces us to download the full layers even if they are already present at the destination).
func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
	return false
}

// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
// if the CompressionOperation and CompressionAlgorithm specified in one or more
// options.LayerInfos items is anything other than gzip.
func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
	copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
		src:        m.src,
		configBlob: m.configBlob,
		m:          manifest.Schema2Clone(m.m),
	}

	converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
		manifest.DockerV2Schema1MediaType:       copy.convertToManifestSchema1,
		manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
		imgspecv1.MediaTypeImageManifest:        copy.convertToManifestOCI1,
	})
	if err != nil {
		return nil, err
	}

	if converted != nil {
		return converted, nil
	}

	// No conversion required, update manifest
	if options.LayerInfos != nil {
		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
			return nil, err
		}
	}
	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.

	return memoryImageFromManifest(&copy), nil
}

func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
	return imgspecv1.Descriptor{
		MediaType: d.MediaType,
		Size:      d.Size,
		Digest:    d.Digest,
		URLs:      d.URLs,
	}
}

// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema2 object.
func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
	configOCI, err := m.OCIConfig(ctx)
	if err != nil {
		return nil, err
	}
	configOCIBytes, err := json.Marshal(configOCI)
	if err != nil {
		return nil, err
	}

	config := imgspecv1.Descriptor{
		MediaType: imgspecv1.MediaTypeImageConfig,
		Size:      int64(len(configOCIBytes)),
		Digest:    digest.FromBytes(configOCIBytes),
	}

	layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
	for idx := range layers {
		layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
		switch m.m.LayersDescriptors[idx].MediaType {
		case manifest.DockerV2Schema2ForeignLayerMediaType:
			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
		case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
		case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
			layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
		case manifest.DockerV2Schema2LayerMediaType:
			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
		default:
			return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
		}
	}

	return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
}

// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema2 object.
//
// Based on docker/distribution/manifest/schema1/config_builder.go
func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
	dest := options.InformationOnly.Destination

	var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
	if options.LayerInfos != nil {
		if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
			return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
				len(options.LayerInfos), len(m.m.LayersDescriptors))
		}
		convertedLayerUpdates = []types.BlobInfo{}
	}

	configBytes, err := m.ConfigBlob(ctx)
	if err != nil {
		return nil, err
	}
	imageConfig := &manifest.Schema2Image{}
	if err := json.Unmarshal(configBytes, imageConfig); err != nil {
		return nil, err
	}

	// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
	fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
	history := make([]manifest.Schema1History, len(imageConfig.History))
	nonemptyLayerIndex := 0
	var parentV1ID string // Set in the loop
	v1ID := ""
	haveGzippedEmptyLayer := false
	if len(imageConfig.History) == 0 {
		// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
		return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
	}
	for v2Index, historyEntry := range imageConfig.History {
		parentV1ID = v1ID
		v1Index := len(imageConfig.History) - 1 - v2Index

		var blobDigest digest.Digest
		if historyEntry.EmptyLayer {
			emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}

			if !haveGzippedEmptyLayer {
				logrus.Debugf("Uploading empty layer during conversion to schema 1")
				// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
				// and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
				info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
				if err != nil {
					return nil, errors.Wrap(err, "uploading empty layer")
				}
				if info.Digest != emptyLayerBlobInfo.Digest {
					return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
				}
				haveGzippedEmptyLayer = true
			}
			if options.LayerInfos != nil {
				convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
			}
			blobDigest = emptyLayerBlobInfo.Digest
		} else {
			if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
				return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
			}
			if options.LayerInfos != nil {
				convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
			}
			blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
			nonemptyLayerIndex++
		}

		// AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
		v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
		if err != nil {
			return nil, err
		}
		v1ID = v

		fakeImage := manifest.Schema1V1Compatibility{
			ID:        v1ID,
			Parent:    parentV1ID,
			Comment:   historyEntry.Comment,
			Created:   historyEntry.Created,
			Author:    historyEntry.Author,
			ThrowAway: historyEntry.EmptyLayer,
		}
		fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
		v1CompatibilityBytes, err := json.Marshal(&fakeImage)
		if err != nil {
			return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
		}

		fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
		history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
		// Note that parentV1ID of the top layer is preserved when exiting this loop
	}

	// Now patch in real configuration for the top layer (v1Index == 0)
	v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
	if err != nil {
		return nil, err
	}
	v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
	if err != nil {
		return nil, err
	}
	history[0].V1Compatibility = string(v1Config)

	if options.LayerInfos != nil {
		options.LayerInfos = convertedLayerUpdates
	}
	m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
	if err != nil {
		return nil, err // This should never happen, we should have created all the components correctly.
	}
	return m1, nil
}

func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
	if err := blobDigest.Validate(); err != nil {
		return "", err
	}
	parts := append([]string{blobDigest.Hex()}, others...)
	v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
	return hex.EncodeToString(v1IDHash[:]), nil
}

func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
	// Preserve everything we don't specifically know about.
	// (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
	rawContents := map[string]*json.RawMessage{}
	if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
		return nil, err
	}
	delete(rawContents, "rootfs")
	delete(rawContents, "history")

	updates := map[string]interface{}{"id": v1ID}
	if parentV1ID != "" {
		updates["parent"] = parentV1ID
	}
	if throwaway {
		updates["throwaway"] = throwaway
	}
	for field, value := range updates {
		encoded, err := json.Marshal(value)
		if err != nil {
			return nil, err
		}
		rawContents[field] = (*json.RawMessage)(&encoded)
	}
	return json.Marshal(rawContents)
}

// SupportsEncryption returns if encryption is supported for the manifest type
func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
	return false
}
113
vendor/github.com/containers/image/v5/image/manifest.go
generated
vendored
Normal file

@ -0,0 +1,113 @@
package image

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// genericManifest is an interface for parsing, modifying image manifests and related data.
// Note that the public methods are intended to be a subset of types.Image
// so that embedding a genericManifest into structs works.
// will support v1 one day...
type genericManifest interface {
	serialize() ([]byte, error)
	manifestMIMEType() string
	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
	ConfigInfo() types.BlobInfo
	// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
	// The result is cached; it is OK to call this however often you need.
	ConfigBlob(context.Context) ([]byte, error)
	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
	// layers in the resulting configuration isn't guaranteed to be returned due to how
	// old image manifests work (docker v2s1 especially).
	OCIConfig(context.Context) (*imgspecv1.Image, error)
	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
	// The Digest field is guaranteed to be provided; Size may be -1.
	// WARNING: The list may contain duplicates, and they are semantically relevant.
	LayerInfos() []types.BlobInfo
	// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
	// It returns false if the manifest does not embed a Docker reference.
	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
	Inspect(context.Context) (*types.ImageInspectInfo, error)
	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
	// (most importantly it forces us to download the full layers even if they are already present at the destination).
	UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool
	// UpdatedImage returns a types.Image modified according to options.
	// This does not change the state of the original Image object.
	UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
	// SupportsEncryption returns if encryption is supported for the manifest type
	//
	// Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
	// the process of updating a manifest between different manifest types was to update then convert.
	// This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
	SupportsEncryption(ctx context.Context) bool
}

// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
// If manblob is a manifest list, it implicitly chooses an appropriate image from the list.
func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
	switch manifest.NormalizedMIMEType(mt) {
	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
		return manifestSchema1FromManifest(manblob)
	case imgspecv1.MediaTypeImageManifest:
		return manifestOCI1FromManifest(src, manblob)
	case manifest.DockerV2Schema2MediaType:
		return manifestSchema2FromManifest(src, manblob)
	case manifest.DockerV2ListMediaType:
		return manifestSchema2FromManifestList(ctx, sys, src, manblob)
	case imgspecv1.MediaTypeImageIndex:
		return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
	default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
	}
}

// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo.
func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo {
	blobs := make([]types.BlobInfo, len(layers))
	for i, layer := range layers {
		blobs[i] = layer.BlobInfo
	}
	return blobs
}

// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation
// converted to a specific manifest MIME type.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original genericManifest object.
type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error)

// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if
// required and re-apply the options to the converted type.
// It returns (nil, nil) if no conversion was requested.
func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) {
	if options.ManifestMIMEType == "" {
		return nil, nil
	}

	converter, ok := converters[options.ManifestMIMEType]
	if !ok {
		return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType)
	}

	optionsCopy := options
	convertedManifest, err := converter(ctx, &optionsCopy)
	if err != nil {
		return nil, err
	}
	convertedImage := memoryImageFromManifest(convertedManifest)

	optionsCopy.ManifestMIMEType = ""
	return convertedImage.UpdatedImage(ctx, optionsCopy)
}
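The convertToManifestSchema2Generic wrappers seen in the schema1 and OCI implementations exist only because of the Go typing rule spelled out in the manifestConvertFn comments. A self-contained toy illustration of that rule (all names invented for the example):

package main

import "fmt"

type widget interface{ name() string }

type gear struct{}

func (gear) name() string { return "gear" }

// makeGear returns the concrete type, just as convertToManifestSchema2
// returns *manifestSchema2 rather than the genericManifest interface.
func makeGear() gear { return gear{} }

func main() {
	type factory func() widget
	// var f factory = makeGear // does not compile: func() gear is not func() widget
	var f factory = func() widget { return makeGear() } // a wrapper adapts the signature
	fmt.Println(f().name())
}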
64
vendor/github.com/containers/image/v5/image/memory.go
generated
vendored
Normal file

@ -0,0 +1,64 @@
package image

import (
	"context"

	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
)

// memoryImage is a mostly-implementation of types.Image assembled from data
// created in memory, used primarily as a return value of types.Image.UpdatedImage
// as a way to carry various structured information in a type-safe and easy-to-use way.
// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone
// collection of all related information, e.g. there is no way to get layer blobs
// from a memoryImage.
type memoryImage struct {
	genericManifest
	serializedManifest []byte // A private cache for Manifest()
}

func memoryImageFromManifest(m genericManifest) types.Image {
	return &memoryImage{
		genericManifest:    m,
		serializedManifest: nil,
	}
}

// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (i *memoryImage) Reference() types.ImageReference {
	// It would really be inappropriate to return the ImageReference of the image this was based on.
	return nil
}

// Size returns the size of the image as stored, if known, or -1 if not.
func (i *memoryImage) Size() (int64, error) {
	return -1, nil
}

// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) {
	if i.serializedManifest == nil {
		m, err := i.genericManifest.serialize()
		if err != nil {
			return nil, "", err
		}
		i.serializedManifest = m
	}
	return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil
}

// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
	// Modifying an image invalidates signatures; a caller asking the updated image for signatures
	// is probably confused.
	return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory")
}

// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest.
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
	return nil, nil
}
248
vendor/github.com/containers/image/v5/image/oci.go
generated
vendored
Normal file

@ -0,0 +1,248 @@
package image

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/iolimits"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/blobinfocache/none"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

type manifestOCI1 struct {
	src        types.ImageSource // May be nil if configBlob is not nil
	configBlob []byte            // If set, corresponds to contents of m.Config.
	m          *manifest.OCI1
}

func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
	m, err := manifest.OCI1FromManifest(manifestBlob)
	if err != nil {
		return nil, err
	}
	return &manifestOCI1{
		src: src,
		m:   m,
	}, nil
}

// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
	return &manifestOCI1{
		src:        src,
		configBlob: configBlob,
		m:          manifest.OCI1FromComponents(config, layers),
	}
}

func (m *manifestOCI1) serialize() ([]byte, error) {
	return m.m.Serialize()
}

func (m *manifestOCI1) manifestMIMEType() string {
	return imgspecv1.MediaTypeImageManifest
}

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
	return m.m.ConfigInfo()
}

// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
// The result is cached; it is OK to call this however often you need.
func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
	if m.configBlob == nil {
		if m.src == nil {
			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
		}
		stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
		if err != nil {
			return nil, err
		}
		defer stream.Close()
		blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
		if err != nil {
			return nil, err
		}
		computedDigest := digest.FromBytes(blob)
		if computedDigest != m.m.Config.Digest {
			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
		}
		m.configBlob = blob
	}
	return m.configBlob, nil
}

// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
	cb, err := m.ConfigBlob(ctx)
	if err != nil {
		return nil, err
	}
	configOCI := &imgspecv1.Image{}
	if err := json.Unmarshal(cb, configOCI); err != nil {
		return nil, err
	}
	return configOCI, nil
}

// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
// It returns false if the manifest does not embed a Docker reference.
// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
	return false
}

// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
	getter := func(info types.BlobInfo) ([]byte, error) {
		if info.Digest != m.ConfigInfo().Digest {
			// Shouldn't ever happen
			return nil, errors.New("asked for a different config blob")
		}
		config, err := m.ConfigBlob(ctx)
		if err != nil {
			return nil, err
		}
		return config, nil
	}
	return m.m.Inspect(getter)
}

// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
// (most importantly it forces us to download the full layers even if they are already present at the destination).
func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
	return false
}

// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
// if the combination of CompressionOperation and CompressionAlgorithm specified
// in one or more options.LayerInfos items indicates that a layer is compressed using
// an algorithm that is not allowed in OCI.
func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
	copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
		src:        m.src,
		configBlob: m.configBlob,
		m:          manifest.OCI1Clone(m.m),
	}

	converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
		manifest.DockerV2Schema2MediaType:       copy.convertToManifestSchema2Generic,
		manifest.DockerV2Schema1MediaType:       copy.convertToManifestSchema1,
		manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
	})
	if err != nil {
		return nil, err
	}

	if converted != nil {
		return converted, nil
	}

	// No conversion required, update manifest
	if options.LayerInfos != nil {
		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
			return nil, err
		}
	}
	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.

	return memoryImageFromManifest(&copy), nil
}

func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
	return manifest.Schema2Descriptor{
		MediaType: d.MediaType,
		Size:      d.Size,
		Digest:    d.Digest,
		URLs:      d.URLs,
	}
}

// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestOCI1 object.
//
// We need this function just because a function returning an implementation of the genericManifest
// interface is not automatically assignable to a function type returning the genericManifest interface
func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
	return m.convertToManifestSchema2(ctx, options)
}

// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
	// Create a copy of the descriptor.
	config := schema2DescriptorFromOCI1Descriptor(m.m.Config)

	// The only difference between OCI and DockerSchema2 is the mediatypes. The
	// media type of the manifest is handled by manifestSchema2FromComponents.
	config.MediaType = manifest.DockerV2Schema2ConfigMediaType

	layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
	for idx := range layers {
		layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
		switch layers[idx].MediaType {
		case imgspecv1.MediaTypeImageLayerNonDistributable:
			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
		case imgspecv1.MediaTypeImageLayerNonDistributableGzip:
			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
		case imgspecv1.MediaTypeImageLayerNonDistributableZstd:
			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
		case imgspecv1.MediaTypeImageLayer:
			layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
		case imgspecv1.MediaTypeImageLayerGzip:
			layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
		case imgspecv1.MediaTypeImageLayerZstd:
			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
		default:
			return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
		}
	}

	// Rather than copying the ConfigBlob now, we just pass m.src to the
	// translated manifest, since the only difference is the mediatype of
	// descriptors; there is no change to any blob stored in m.src.
	return manifestSchema2FromComponents(config, m.src, nil, layers), nil
}

// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
	// We can't directly convert to V1, but we can transitively convert via a V2 image
	m2, err := m.convertToManifestSchema2(ctx, options)
	if err != nil {
		return nil, err
	}

	return m2.convertToManifestSchema1(ctx, options)
}

// SupportsEncryption returns if encryption is supported for the manifest type
func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
	return true
}
34
vendor/github.com/containers/image/v5/image/oci_index.go
generated
vendored
Normal file

@ -0,0 +1,34 @@
package image

import (
	"context"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
)

func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
	index, err := manifest.OCI1IndexFromManifest(manblob)
	if err != nil {
		return nil, errors.Wrapf(err, "parsing OCI1 index")
	}
	targetManifestDigest, err := index.ChooseInstance(sys)
	if err != nil {
		return nil, errors.Wrapf(err, "choosing image instance")
	}
	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
	if err != nil {
		return nil, errors.Wrapf(err, "loading manifest for target platform")
	}

	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
	if err != nil {
		return nil, errors.Wrap(err, "computing manifest digest")
	}
	if !matches {
		return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
	}

	return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
}
104
vendor/github.com/containers/image/v5/image/sourced.go
generated
vendored
Normal file

@ -0,0 +1,104 @@
// Package image consolidates knowledge about various container image formats
// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
// and exposes all of them using a unified interface.
package image

import (
	"context"

	"github.com/containers/image/v5/types"
)

// imageCloser implements types.ImageCloser, perhaps allowing simple users
// to use a single object without having to keep a reference to a types.ImageSource
// only to call types.ImageSource.Close().
type imageCloser struct {
	types.Image
	src types.ImageSource
}

// FromSource returns a types.ImageCloser implementation for the default instance of source.
// If source is a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
//
// The caller must call .Close() on the returned ImageCloser.
//
// FromSource “takes ownership” of the input ImageSource and will call src.Close()
// when the image is closed. (This does not prevent callers from using both the
// Image and ImageSource objects simultaneously, but it means that they only need to close
// the Image.)
//
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
	img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
	if err != nil {
		return nil, err
	}
	return &imageCloser{
		Image: img,
		src:   src,
	}, nil
}

func (ic *imageCloser) Close() error {
	return ic.src.Close()
}

// sourcedImage is a general set of utilities for working with container images,
// whatever their underlying location (i.e. dockerImageSource-independent).
// Note the existence of skopeo/docker.Image: some instances of a `types.Image`
// may not be a `sourcedImage` directly. However, most users of `types.Image`
// do not care, and those who care about `skopeo/docker.Image` know they do.
type sourcedImage struct {
	*UnparsedImage
	manifestBlob     []byte
	manifestMIMEType string
	// genericManifest contains data corresponding to manifestBlob.
	// NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
	// if you want to preserve the original manifest; use manifestBlob directly.
	genericManifest
}

// FromUnparsedImage returns a types.Image implementation for unparsed.
// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate single image.
//
// The Image must not be used after the underlying ImageSource is Close()d.
func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) {
	// Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
	// we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
	// this is the only UnparsedImage implementation around, anyway.

	// NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
	manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
	if err != nil {
		return nil, err
	}

	parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
	if err != nil {
		return nil, err
	}

	return &sourcedImage{
		UnparsedImage:    unparsed,
		manifestBlob:     manifestBlob,
		manifestMIMEType: manifestMIMEType,
		genericManifest:  parsedManifest,
	}, nil
}

// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
func (i *sourcedImage) Size() (int64, error) {
	return -1, nil
}

// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
	return i.manifestBlob, i.manifestMIMEType, nil
}

func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
	return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
}
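FromSource is the usual entry point for consumers of this package, such as the copy pipeline used by the new container upload target. A hedged usage sketch; the helper name inspectImage and the example transport reference are illustrative, not part of this commit:

package example

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

// inspectImage parses a reference such as "docker://quay.io/foo/bar:latest",
// opens an ImageSource for it, and hands the source to FromSource above.
func inspectImage(ctx context.Context, sys *types.SystemContext, name string) error {
	ref, err := alltransports.ParseImageName(name)
	if err != nil {
		return err
	}
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		return err
	}
	// FromSource takes ownership of src: closing img also closes src.
	img, err := image.FromSource(ctx, sys, src)
	if err != nil {
		src.Close()
		return err
	}
	defer img.Close()

	info, err := img.Inspect(ctx)
	if err != nil {
		return err
	}
	fmt.Println(info.Os, info.Architecture, info.Layers)
	return nil
}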
95
vendor/github.com/containers/image/v5/image/unparsed.go
generated
vendored
Normal file

@ -0,0 +1,95 @@
package image

import (
	"context"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// UnparsedImage implements types.UnparsedImage .
// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
type UnparsedImage struct {
	src            types.ImageSource
	instanceDigest *digest.Digest
	cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
	// A private cache for Manifest(), may be the empty string if guessing failed.
	// Valid iff cachedManifest is not nil.
	cachedManifestMIMEType string
	cachedSignatures       [][]byte // A private cache for Signatures(); nil if not yet known.
}

// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
//
// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
	return &UnparsedImage{
		src:            src,
		instanceDigest: instanceDigest,
	}
}

// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (i *UnparsedImage) Reference() types.ImageReference {
	// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
	return i.src.Reference()
}

// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
	if i.cachedManifest == nil {
		m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
		if err != nil {
			return nil, "", err
		}

		// ImageSource.GetManifest does not do digest verification, but we do;
		// this immediately protects also any user of types.Image.
		if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
			matches, err := manifest.MatchesDigest(m, digest)
			if err != nil {
				return nil, "", errors.Wrap(err, "computing manifest digest")
			}
			if !matches {
				return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
			}
		}

		i.cachedManifest = m
		i.cachedManifestMIMEType = mt
	}
	return i.cachedManifest, i.cachedManifestMIMEType, nil
}

// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
// The bool return value seems redundant with digest != ""; it is used explicitly
// to refuse (unexpected) situations when the digest exists but is "".
func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
	if i.instanceDigest != nil {
		return *i.instanceDigest, true
	}
	ref := i.Reference().DockerReference()
	if ref != nil {
		if canonical, ok := ref.(reference.Canonical); ok {
			return canonical.Digest(), true
		}
	}
	return "", false
}

// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
	if i.cachedSignatures == nil {
		sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
		if err != nil {
			return nil, err
		}
		i.cachedSignatures = sigs
	}
	return i.cachedSignatures, nil
}
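A short sketch of the digest-pinning behavior above: passing an instance digest to UnparsedInstance makes Manifest() reject a registry response whose bytes do not hash to that digest. The helper name imageAtDigest is invented for the example:

package example

import (
	"context"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

// imageAtDigest (hypothetical) pins one instance of a manifest list by digest
// and parses it; digest verification happens inside UnparsedImage.Manifest.
func imageAtDigest(ctx context.Context, sys *types.SystemContext, src types.ImageSource, d digest.Digest) (types.Image, error) {
	unparsed := image.UnparsedInstance(src, &d)
	return image.FromUnparsedImage(ctx, sys, unparsed)
}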
64
vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
generated
vendored
Normal file

@ -0,0 +1,64 @@
package blobinfocache

import (
	"github.com/containers/image/v5/pkg/compression"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original
// object if it implements BlobInfoCache2, or a wrapper which discards compression information
// if it only implements BlobInfoCache.
func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 {
	if bic2, ok := bic.(BlobInfoCache2); ok {
		return bic2
	}
	return &v1OnlyBlobInfoCache{
		BlobInfoCache: bic,
	}
}

type v1OnlyBlobInfoCache struct {
	types.BlobInfoCache
}

func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
}

func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 {
	return nil
}

// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of
// types.BICReplacementCandidate, dropping compression information.
func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate {
	candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates))
	for _, c := range v2candidates {
		candidates = append(candidates, types.BICReplacementCandidate{
			Digest:   c.Digest,
			Location: c.Location,
		})
	}
	return candidates
}

// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm
// values suitable for inclusion in a types.BlobInfo structure, based on the name of the
// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
// upon success.
func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
	switch compressorName {
	case Uncompressed:
		return types.Decompress, nil, nil
	case UnknownCompression:
		return types.PreserveOriginal, nil, nil
	default:
		algo, err := compression.AlgorithmByName(compressorName)
		if err == nil {
			return types.Compress, &algo, nil
		}
		return types.PreserveOriginal, nil, err
	}
}
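FromBlobInfoCache is a textbook example of Go's optional-interface-upgrade pattern: type-assert for the richer interface, and if the concrete value only implements the old one, wrap it in a struct that embeds it and stubs out the new methods. A minimal, generic sketch of the same pattern (all types here are illustrative, not part of the vendored API):

package main

import "fmt"

// V1 is the widely implemented, stable interface.
type V1 interface {
	Get(key string) string
}

// V2 extends V1 with an optional capability.
type V2 interface {
	V1
	GetWithMeta(key string) (string, string)
}

// fromV1 mirrors FromBlobInfoCache: use the richer interface when the
// concrete value supports it, otherwise wrap it and stub the new method.
func fromV1(v V1) V2 {
	if v2, ok := v.(V2); ok {
		return v2
	}
	return &v1Only{V1: v}
}

type v1Only struct{ V1 }

func (w *v1Only) GetWithMeta(key string) (string, string) {
	return w.Get(key), "" // no metadata available from a V1-only implementation
}

type mapStore map[string]string

func (m mapStore) Get(key string) string { return m[key] }

func main() {
	v2 := fromV1(mapStore{"a": "1"})
	val, meta := v2.GetWithMeta("a")
	fmt.Println(val, meta == "") // 1 true
}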
45 vendor/github.com/containers/image/v5/internal/blobinfocache/types.go generated vendored Normal file
@ -0,0 +1,45 @@
package blobinfocache

import (
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

const (
	// Uncompressed is the value we store in a blob info cache to indicate that we know that
	// the blob in the corresponding location is not compressed.
	Uncompressed = "uncompressed"
	// UnknownCompression is the value we store in a blob info cache to indicate that we don't
	// know if the blob in the corresponding location is compressed (and if so, how) or not.
	UnknownCompression = "unknown"
)

// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind
// of compression was applied to the blobs it keeps information about.
type BlobInfoCache2 interface {
	types.BlobInfoCache
	// RecordDigestCompressorName records a compressor for the blob with the specified digest,
	// or Uncompressed or UnknownCompression.
	// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
	// digest just because some remote author claims so (e.g. because a manifest says so);
	// otherwise the cache could be poisoned and cause us to make incorrect edits to type
	// information in a manifest.
	RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
	// CandidateLocations2 returns a prioritized, limited number of blobs and their locations
	// that could possibly be reused within the specified transport scope (if they still
	// exist, which is not guaranteed).
	//
	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
	// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
	// up variants of the blob which have the same uncompressed digest.
	//
	// The CompressorName fields in returned data must never be UnknownCompression.
	CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
}

// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
	Digest         digest.Digest
	CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
	Location       types.BICLocationReference
}
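The warning on RecordDigestCompressorName is about cache poisoning: compression information may only be recorded after the blob has been verified locally, never on the strength of a remote manifest's claim. A minimal illustrative recorder showing that discipline (local types only; this is not the vendored implementation):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

const unknownCompression = "unknown"

// compressorCache records which compressor produced a blob, keyed by digest.
type compressorCache struct {
	compressors map[digest.Digest]string
}

// recordVerified stores a compressor name for a digest. The caller must have
// verified the digest against the actual bytes first; recording a claim from a
// remote manifest would let an attacker poison later manifest edits.
func (c *compressorCache) recordVerified(d digest.Digest, compressorName string) {
	if c.compressors == nil {
		c.compressors = map[digest.Digest]string{}
	}
	c.compressors[d] = compressorName
}

// compressorOf returns the recorded compressor, or unknownCompression.
func (c *compressorCache) compressorOf(d digest.Digest) string {
	if name, ok := c.compressors[d]; ok {
		return name
	}
	return unknownCompression
}

func main() {
	var cache compressorCache
	blob := []byte("verified layer bytes")
	d := digest.FromBytes(blob) // verify locally, then record
	cache.recordVerified(d, "gzip")
	fmt.Println(cache.compressorOf(d))                      // gzip
	fmt.Println(cache.compressorOf(digest.FromString("x"))) // unknown
}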
69 vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go generated vendored Normal file
@ -0,0 +1,69 @@
package imagedestination

import (
	"context"
	"fmt"
	"io"

	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

// FromPublic(dest) returns an object that provides the private.ImageDestination API
//
// Eventually, we might want to expose this function, and methods of the returned object,
// as a public API (or rather, a variant that does not include the already-superseded
// methods of types.ImageDestination, and has added more future-proofing), and more strongly
// deprecate direct use of types.ImageDestination.
//
// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
// with public methods, or perhaps a private interface), so that we can add methods
// without breaking any external implementors of a public interface.
func FromPublic(dest types.ImageDestination) private.ImageDestination {
	if dest2, ok := dest.(private.ImageDestination); ok {
		return dest2
	}
	return &wrapped{ImageDestination: dest}
}

// wrapped provides the private.ImageDestination operations
// for a destination that only implements types.ImageDestination
type wrapped struct {
	types.ImageDestination
}

// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
func (w *wrapped) SupportsPutBlobPartial() bool {
	return false
}

// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
	return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
}

// PutBlobPartial attempts to create a blob using the data that is already present
// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
	return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", w.Reference().Transport().Name())
}

// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
	return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
}
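The wrapper above also illustrates a common API-evolution pattern: the new ...WithOptions methods take an options struct so parameters can be added without breaking callers, and the compatibility wrapper maps the options back onto the old fixed-argument methods. A minimal, generic sketch of the same idea (all names here are illustrative, not part of the vendored API):

package main

import "fmt"

// Putter is the old, stable API with fixed arguments.
type Putter interface {
	Put(data []byte, isConfig bool) error
}

// PutOptions can grow new fields without changing any method signatures.
type PutOptions struct {
	IsConfig   bool
	EmptyLayer bool // newer field; legacy implementations simply ignore it
}

// PutterV2 is the extended API.
type PutterV2 interface {
	Putter
	PutWithOptions(data []byte, options PutOptions) error
}

// wrappedPutter adapts a legacy Putter, dropping options it cannot honor.
type wrappedPutter struct{ Putter }

func (w *wrappedPutter) PutWithOptions(data []byte, options PutOptions) error {
	return w.Put(data, options.IsConfig)
}

type printPutter struct{}

func (printPutter) Put(data []byte, isConfig bool) error {
	fmt.Printf("put %d bytes (config=%v)\n", len(data), isConfig)
	return nil
}

func main() {
	var p PutterV2 = &wrappedPutter{Putter: printPutter{}}
	_ = p.PutWithOptions([]byte("layer"), PutOptions{IsConfig: false, EmptyLayer: true})
}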
47 vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go generated vendored Normal file
@ -0,0 +1,47 @@
package imagesource

import (
	"context"
	"fmt"
	"io"

	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
)

// FromPublic(src) returns an object that provides the private.ImageSource API
//
// Eventually, we might want to expose this function, and methods of the returned object,
// as a public API (or rather, a variant that does not include the already-superseded
// methods of types.ImageSource, and has added more future-proofing), and more strongly
// deprecate direct use of types.ImageSource.
//
// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
// with public methods, or perhaps a private interface), so that we can add methods
// without breaking any external implementors of a public interface.
func FromPublic(src types.ImageSource) private.ImageSource {
	if src2, ok := src.(private.ImageSource); ok {
		return src2
	}
	return &wrapped{ImageSource: src}
}

// wrapped provides the private.ImageSource operations
// for a source that only implements types.ImageSource
type wrapped struct {
	types.ImageSource
}

// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
func (w *wrapped) SupportsGetBlobAt() bool {
	return false
}

// GetBlobAt returns a sequential channel of readers that contain data for the requested
// blob chunks, and a channel that might get a single error value.
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
func (w *wrapped) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
	return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", w.Reference().Transport().Name())
}
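The GetBlobAt contract (a channel of readers plus a channel that may carry a single error) is easy to get wrong on the consumer side: every reader must be drained in order before the error channel is checked. A minimal consumer sketch under those assumptions; the producer here is simulated, and nothing below is part of the vendored API:

package main

import (
	"fmt"
	"io"
	"strings"
)

// consumeChunks drains each reader fully, in order, then checks the error
// channel, matching the contract documented on GetBlobAt.
func consumeChunks(readers chan io.ReadCloser, errs chan error) ([]byte, error) {
	var out []byte
	for r := range readers {
		data, err := io.ReadAll(r)
		r.Close()
		if err != nil {
			return nil, err
		}
		out = append(out, data...)
	}
	// The error channel carries at most one value; a closed, empty channel
	// yields nil, meaning success.
	if err := <-errs; err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	readers := make(chan io.ReadCloser, 2)
	errs := make(chan error, 1)
	readers <- io.NopCloser(strings.NewReader("chunk-1 "))
	readers <- io.NopCloser(strings.NewReader("chunk-2"))
	close(readers)
	close(errs)

	data, err := consumeChunks(readers, errs)
	fmt.Println(string(data), err) // chunk-1 chunk-2 <nil>
}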
59 vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go generated vendored Normal file
@ -0,0 +1,59 @@
package iolimits

import (
	"io"

	"github.com/pkg/errors"
)

// All constants below are intended to be used as limits for `ReadAtMost`. The
// immediate use-case for limiting the size of in-memory copied data is to
// protect against OOM DOS attacks as described in CVE-2020-1702. Instead of
// copying data until running out of memory, we error out after hitting the
// specified limit.
const (
	// megaByte denotes one megabyte and is intended to be used as a limit in
	// `ReadAtMost`.
	megaByte = 1 << 20
	// MaxManifestBodySize is the maximum allowed size of a manifest. The limit
	// of 4 MB aligns with the one of a Docker registry:
	// https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30
	MaxManifestBodySize = 4 * megaByte
	// MaxAuthTokenBodySize is the maximum allowed size of an auth token.
	// The limit of 1 MB is considered to be greatly sufficient.
	MaxAuthTokenBodySize = megaByte
	// MaxSignatureListBodySize is the maximum allowed size of a signature list.
	// The limit of 4 MB is considered to be greatly sufficient.
	MaxSignatureListBodySize = 4 * megaByte
	// MaxSignatureBodySize is the maximum allowed size of a signature.
	// The limit of 4 MB is considered to be greatly sufficient.
	MaxSignatureBodySize = 4 * megaByte
	// MaxErrorBodySize is the maximum allowed size of an error-response body.
	// The limit of 1 MB is considered to be greatly sufficient.
	MaxErrorBodySize = megaByte
	// MaxConfigBodySize is the maximum allowed size of a config blob.
	// The limit of 4 MB is considered to be greatly sufficient.
	MaxConfigBodySize = 4 * megaByte
	// MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body.
	// The limit of 4 MB is considered to be greatly sufficient.
	MaxOpenShiftStatusBody = 4 * megaByte
	// MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images)
	// The limit of 1 MB is considered to be greatly sufficient.
	MaxTarFileManifestSize = megaByte
)

// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded.
func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
	limitedReader := io.LimitReader(reader, int64(limit+1))

	res, err := io.ReadAll(limitedReader)
	if err != nil {
		return nil, err
	}

	if len(res) > limit {
		return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit)
	}

	return res, nil
}
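The limit+1 trick in ReadAtMost is what lets the function distinguish "exactly limit bytes" from "more than limit bytes" without buffering unbounded input: if the limited reader yields more than limit bytes, the input was oversized. A standalone sketch of the same pattern (local names, since the vendored package is internal and not importable):

package main

import (
	"fmt"
	"io"
	"strings"
)

// readAtMost mirrors iolimits.ReadAtMost: read one byte beyond the limit so
// that len(res) > limit reliably signals oversized input.
func readAtMost(reader io.Reader, limit int) ([]byte, error) {
	res, err := io.ReadAll(io.LimitReader(reader, int64(limit)+1))
	if err != nil {
		return nil, err
	}
	if len(res) > limit {
		return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit)
	}
	return res, nil
}

func main() {
	ok, err := readAtMost(strings.NewReader("small"), 16)
	fmt.Println(string(ok), err) // small <nil>

	_, err = readAtMost(strings.NewReader(strings.Repeat("x", 32)), 16)
	fmt.Println(err) // exceeded maximum allowed size of 16 bytes
}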
196 vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go generated vendored Normal file
@ -0,0 +1,196 @@
package platform

// Largely based on
// https://github.com/moby/moby/blob/bc846d2e8fe5538220e0c31e9d0e8446f6fbc022/distribution/cpuinfo_unix.go
// Copyright 2012-2017 Docker, Inc.
//
// https://github.com/containerd/containerd/blob/726dcaea50883e51b2ec6db13caff0e7936b711d/platforms/cpuinfo.go
// Copyright The containerd Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// https://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"bufio"
	"fmt"
	"os"
	"runtime"
	"strings"

	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// On Linux, the kernel has already detected the ABI, ISA and features,
// so we don't need to access the ARM registers to detect platform information
// ourselves; we can simply parse this information from /proc/cpuinfo.
func getCPUInfo(pattern string) (info string, err error) {
	if runtime.GOOS != "linux" {
		return "", fmt.Errorf("getCPUInfo for OS %s not implemented", runtime.GOOS)
	}

	cpuinfo, err := os.Open("/proc/cpuinfo")
	if err != nil {
		return "", err
	}
	defer cpuinfo.Close()

	// Parse /proc/cpuinfo line by line; on an SMP SoC, parsing the first
	// core is enough.
	scanner := bufio.NewScanner(cpuinfo)
	for scanner.Scan() {
		newline := scanner.Text()
		list := strings.Split(newline, ":")

		if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
			return strings.TrimSpace(list[1]), nil
		}
	}

	// Check whether the scanner encountered errors
	err = scanner.Err()
	if err != nil {
		return "", err
	}

	return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
}

func getCPUVariantWindows(arch string) string {
	// Windows only supports v7 for ARM32 and v8 for ARM64, so we can use
	// runtime.GOARCH to determine the variants
	var variant string
	switch arch {
	case "arm64":
		variant = "v8"
	case "arm":
		variant = "v7"
	default:
		variant = ""
	}

	return variant
}

func getCPUVariantArm() string {
	variant, err := getCPUInfo("Cpu architecture")
	if err != nil {
		return ""
	}
	// TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286)

	switch strings.ToLower(variant) {
	case "8", "aarch64":
		variant = "v8"
	case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
		variant = "v7"
	case "6", "6tej":
		variant = "v6"
	case "5", "5t", "5te", "5tej":
		variant = "v5"
	case "4", "4t":
		variant = "v4"
	case "3":
		variant = "v3"
	default:
		variant = ""
	}

	return variant
}

func getCPUVariant(os string, arch string) string {
	if os == "windows" {
		return getCPUVariantWindows(arch)
	}
	if arch == "arm" || arch == "arm64" {
		return getCPUVariantArm()
	}
	return ""
}

// compatibility contains, for a specified architecture, a list of known variants, in the
// order from most capable (most restrictive) to least capable (most compatible).
// Architectures that don’t have variants should not have an entry here.
var compatibility = map[string][]string{
	"arm":   {"v8", "v7", "v6", "v5"},
	"arm64": {"v8"},
}

// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user,
// the most compatible platform is first.
// If some option (arch, os, variant) is not present, a value from current platform is detected.
func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
	wantedArch := runtime.GOARCH
	wantedVariant := ""
	if ctx != nil && ctx.ArchitectureChoice != "" {
		wantedArch = ctx.ArchitectureChoice
	} else {
		// Only auto-detect the variant if we are using the default architecture.
		// If the user has specified the ArchitectureChoice, don't autodetect, even if
		// ctx.ArchitectureChoice == runtime.GOARCH, because we have no idea whether the runtime.GOARCH
		// value is relevant to the use case, and if we do autodetect a variant,
		// ctx.VariantChoice can't be used to override it back to "".
		wantedVariant = getCPUVariant(runtime.GOOS, runtime.GOARCH)
	}
	if ctx != nil && ctx.VariantChoice != "" {
		wantedVariant = ctx.VariantChoice
	}

	wantedOS := runtime.GOOS
	if ctx != nil && ctx.OSChoice != "" {
		wantedOS = ctx.OSChoice
	}

	var variants []string = nil
	if wantedVariant != "" {
		// If the user requested a specific variant, we'll walk down
		// the list from most to least compatible.
		if compatibility[wantedArch] != nil {
			variantOrder := compatibility[wantedArch]
			for i, v := range variantOrder {
				if wantedVariant == v {
					variants = variantOrder[i:]
					break
				}
			}
		}
		if variants == nil {
			// user wants a variant which we know nothing about - not even compatibility
			variants = []string{wantedVariant}
		}
		// Make sure to have a candidate with an empty variant as well.
		variants = append(variants, "")
	} else {
		// Make sure to have a candidate with an empty variant as well.
		variants = append(variants, "")
		// If available add the entire compatibility matrix for the specific architecture.
		if possibleVariants, ok := compatibility[wantedArch]; ok {
			variants = append(variants, possibleVariants...)
		}
	}

	res := make([]imgspecv1.Platform, 0, len(variants))
	for _, v := range variants {
		res = append(res, imgspecv1.Platform{
			OS:           wantedOS,
			Architecture: wantedArch,
			Variant:      v,
		})
	}
	return res, nil
}

// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches
// an item from the return value of WantedPlatforms.
func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool {
	return image.Architecture == wanted.Architecture &&
		image.OS == wanted.OS &&
		image.Variant == wanted.Variant
}
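WantedPlatforms returns an ordered preference list, and MatchesPlatform is then used to pick the first manifest-list entry that satisfies it. A self-contained sketch of that selection loop, using local types and made-up data since the vendored package is internal and cannot be imported directly:

package main

import "fmt"

// platform mirrors the OS/Architecture/Variant triple used by the matcher.
type platform struct {
	OS           string
	Architecture string
	Variant      string
}

// matchesPlatform mirrors MatchesPlatform: all three fields must be equal.
func matchesPlatform(image, wanted platform) bool {
	return image.Architecture == wanted.Architecture &&
		image.OS == wanted.OS &&
		image.Variant == wanted.Variant
}

// choose returns the first available platform that matches the wanted list,
// which is ordered from most to least preferred (as WantedPlatforms returns it).
func choose(wanted, available []platform) (platform, bool) {
	for _, w := range wanted {
		for _, a := range available {
			if matchesPlatform(a, w) {
				return a, true
			}
		}
	}
	return platform{}, false
}

func main() {
	// Roughly what WantedPlatforms might produce on an ARMv7 Linux host.
	wanted := []platform{
		{OS: "linux", Architecture: "arm", Variant: "v7"},
		{OS: "linux", Architecture: "arm", Variant: "v6"},
		{OS: "linux", Architecture: "arm", Variant: ""},
	}
	available := []platform{
		{OS: "linux", Architecture: "amd64"},
		{OS: "linux", Architecture: "arm", Variant: "v6"},
	}
	fmt.Println(choose(wanted, available)) // {linux arm v6} true
}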
106 vendor/github.com/containers/image/v5/internal/private/private.go generated vendored Normal file
@ -0,0 +1,106 @@
package private

import (
	"context"
	"io"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/types"
)

// ImageSource is an internal extension to the types.ImageSource interface.
type ImageSource interface {
	types.ImageSource

	// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
	SupportsGetBlobAt() bool
	// BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt().
	BlobChunkAccessor
}

// ImageDestination is an internal extension to the types.ImageDestination
// interface.
type ImageDestination interface {
	types.ImageDestination

	// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
	SupportsPutBlobPartial() bool

	// PutBlobWithOptions writes contents of stream and returns data representing the result.
	// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
	// inputInfo.Size is the expected length of stream, if known.
	// inputInfo.MediaType describes the blob format, if known.
	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
	// to any other readers for download using the supplied digest.
	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
	PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error)

	// PutBlobPartial attempts to create a blob using the data that is already present
	// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
	// It is available only if SupportsPutBlobPartial().
	// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
	// should fall back to PutBlobWithOptions.
	PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error)

	// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
	// info.Digest must not be empty.
	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
	// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
	// reflected in the manifest that will be written.
	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
	TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
}

// PutBlobOptions are used in PutBlobWithOptions.
type PutBlobOptions struct {
	Cache    types.BlobInfoCache // Cache to optionally update with the uploaded blob / look up blob infos.
	IsConfig bool                // True if the blob is a config

	// The following fields are new to internal/private. Users of internal/private MUST fill them in,
	// but they also must expect that they will be ignored by types.ImageDestination transports.

	EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
	LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
}

// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
type TryReusingBlobOptions struct {
	Cache types.BlobInfoCache // Cache to use and/or update.
	// If true, it is allowed to use an equivalent of the desired blob;
	// in that case the returned info may not match the input.
	CanSubstitute bool

	// The following fields are new to internal/private. Users of internal/private MUST fill them in,
	// but they also must expect that they will be ignored by types.ImageDestination transports.

	EmptyLayer bool            // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
	LayerIndex *int            // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
	SrcRef     reference.Named // A reference to the source image that contains the input blob.
}

// ImageSourceChunk is a portion of a blob.
// This API is experimental and can be changed without bumping the major version number.
type ImageSourceChunk struct {
	Offset uint64
	Length uint64
}

// BlobChunkAccessor allows fetching discontiguous chunks of a blob.
type BlobChunkAccessor interface {
	// GetBlobAt returns a sequential channel of readers that contain data for the requested
	// blob chunks, and a channel that might get a single error value.
	// The specified chunks must be not overlapping and sorted by their offset.
	// The readers must be fully consumed, in the order they are returned, before blocking
	// to read the next chunk.
	GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
}

// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request.
type BadPartialRequestError struct {
	Status string
}

func (e BadPartialRequestError) Error() string {
	return e.Status
}
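BadPartialRequestError is a plain value type implementing error, so callers can detect an invalid range request with errors.As and fall back to a full-blob fetch. A small illustrative sketch of that caller-side handling (local type mirroring the vendored one; the fetch function is simulated):

package main

import (
	"errors"
	"fmt"
)

// badPartialRequestError mirrors private.BadPartialRequestError: a value type
// whose message is the server-provided status.
type badPartialRequestError struct {
	Status string
}

func (e badPartialRequestError) Error() string { return e.Status }

// fetchRange simulates a chunked fetch that rejects invalid ranges.
func fetchRange(offset, length, blobSize uint64) error {
	if offset+length > blobSize {
		return badPartialRequestError{Status: "416 Requested Range Not Satisfiable"}
	}
	return nil
}

func main() {
	err := fetchRange(900, 200, 1000)
	var bad badPartialRequestError
	if errors.As(err, &bad) {
		// Invalid partial request: fall back to fetching the whole blob.
		fmt.Println("falling back to full fetch:", bad.Status)
	}
}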
Some files were not shown because too many files have changed in this diff.