container: add support for uploading to registries
Add a new generic container registry client via a new `container` package. Use this to create a command line utility as well as a new upload target for container registries. The code uses the github.com/containers/* projects to interact with container registries, the same stack used by skopeo, podman et al. (a usage sketch of the underlying API is shown below). One of the dependencies is `proglottis/gpgme`, which uses cgo to bind libgpgme, so we have to add the corresponding devel package to the BuildRequires as well as install it on CI. Checks will follow later via an integration test.
parent d136a075bc
commit 986f076276
955 changed files with 164203 additions and 2549 deletions
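For context, the following sketch shows roughly what an upload to a registry looks like when built on the vendored github.com/containers/image library. This is illustrative only and not the new `container` package's actual API; the source and destination reference strings and the permissive trust policy are placeholder assumptions.

	package main

	import (
		"context"
		"os"

		"github.com/containers/image/v5/copy"
		"github.com/containers/image/v5/signature"
		"github.com/containers/image/v5/transports/alltransports"
	)

	func main() {
		// Source: a local OCI layout; destination: a registry (both references are placeholders).
		srcRef, err := alltransports.ParseImageName("oci:./build/image:latest")
		if err != nil {
			panic(err)
		}
		destRef, err := alltransports.ParseImageName("docker://registry.example.com/project/image:latest")
		if err != nil {
			panic(err)
		}

		// Accept any image; a real caller would load the system trust policy instead.
		policyCtx, err := signature.NewPolicyContext(&signature.Policy{
			Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
		})
		if err != nil {
			panic(err)
		}
		defer policyCtx.Destroy()

		// copy.Image streams the manifest and layers from srcRef to destRef;
		// the docker:// transport handles registry authentication and blob upload.
		if _, err := copy.Image(context.Background(), policyCtx, destRef, srcRef, &copy.Options{
			ReportWriter: os.Stdout,
		}); err != nil {
			panic(err)
		}
	}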
393 vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go (generated, vendored, new file)
@@ -0,0 +1,393 @@
// Package boltdb implements a BlobInfoCache backed by BoltDB.
package boltdb

import (
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
	bolt "go.etcd.io/bbolt"
)

var (
	// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
	// we can simply start over with a different filename; update blobInfoCacheFilename.

	// FIXME: For CRI-O, does this need to hide information between different users?

	// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
	uncompressedDigestBucket = []byte("uncompressedDigest")
	// digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed.
	// It may not exist in caches created by older versions, even if uncompressedDigestBucket is present.
	digestCompressorBucket = []byte("digestCompressor")
	// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
	// (as a set of key=digest, value="" pairs)
	digestByUncompressedBucket = []byte("digestByUncompressed")
	// knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
	// a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
	knownLocationsBucket = []byte("knownLocations")
)

// Concurrency:
// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.

// pathLock contains a lock for a specific BoltDB database path.
type pathLock struct {
	refCount int64      // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
	mutex    sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
}

var (
	// pathLocks contains a lock for each currently open file.
	// This must be global so that independently created instances of boltDBCache exclude each other.
	// The map is protected by pathLocksMutex.
	// FIXME? Should this be based on device:inode numbers instead of paths?
	pathLocks      = map[string]*pathLock{}
	pathLocksMutex = sync.Mutex{}
)

// lockPath obtains the pathLock for path.
// The caller must call unlockPath eventually.
func lockPath(path string) {
	pl := func() *pathLock { // A scope for defer
		pathLocksMutex.Lock()
		defer pathLocksMutex.Unlock()
		pl, ok := pathLocks[path]
		if ok {
			pl.refCount++
		} else {
			pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
			pathLocks[path] = pl
		}
		return pl
	}()
	pl.mutex.Lock()
}

// unlockPath releases the pathLock for path.
func unlockPath(path string) {
	pathLocksMutex.Lock()
	defer pathLocksMutex.Unlock()
	pl, ok := pathLocks[path]
	if !ok {
		// Should this return an error instead? BlobInfoCache ultimately ignores errors…
		panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
	}
	pl.mutex.Unlock()
	pl.refCount--
	if pl.refCount == 0 {
		delete(pathLocks, path)
	}
}

// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
//
// Note that we don’t keep the database open across operations, because that would lock the file and block any other
// users; instead, we need to open/close it for every single write or lookup.
type cache struct {
	path string
}

// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
//
// Most users should call blobinfocache.DefaultCache instead.
func New(path string) types.BlobInfoCache {
	return new2(path)
}

func new2(path string) *cache {
	return &cache{path: path}
}

// view runs the specified fn within a read-only transaction on the database.
func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
	// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
	// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
	// a read lock, blocking any future writes.
	// Hence this preliminary check, which is RACY: Another process could remove the file
	// between the Lstat call and opening the database.
	if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
		return err
	}

	lockPath(bdc.path)
	defer unlockPath(bdc.path)
	db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		return err
	}
	defer func() {
		if err := db.Close(); retErr == nil && err != nil {
			retErr = err
		}
	}()

	return db.View(fn)
}

// update runs the specified fn within a read-write transaction on the database.
func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
	lockPath(bdc.path)
	defer unlockPath(bdc.path)
	db, err := bolt.Open(bdc.path, 0600, nil)
	if err != nil {
		return err
	}
	defer func() {
		if err := db.Close(); retErr == nil && err != nil {
			retErr = err
		}
	}()

	return db.Update(fn)
}

// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
	if b := tx.Bucket(uncompressedDigestBucket); b != nil {
		if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
			d, err := digest.Parse(string(uncompressedBytes))
			if err == nil {
				return d
			}
			// FIXME? Log err (but throttle the log volume on repeated accesses)?
		}
	}
	// Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
	// when we already record a (compressed, uncompressed) pair.
	if b := tx.Bucket(digestByUncompressedBucket); b != nil {
		if b = b.Bucket([]byte(anyDigest.String())); b != nil {
			c := b.Cursor()
			if k, _ := c.First(); k != nil { // The bucket is non-empty
				return anyDigest
			}
		}
	}
	return ""
}

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
	var res digest.Digest
	if err := bdc.view(func(tx *bolt.Tx) error {
		res = bdc.uncompressedDigest(tx, anyDigest)
		return nil
	}); err != nil { // Including os.IsNotExist(err)
		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
	}
	return res
}

// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It’s allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
	_ = bdc.update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
		if err != nil {
			return err
		}
		key := []byte(anyDigest.String())
		if previousBytes := b.Get(key); previousBytes != nil {
			previous, err := digest.Parse(string(previousBytes))
			if err != nil {
				return err
			}
			if previous != uncompressed {
				logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
			}
		}
		if err := b.Put(key, []byte(uncompressed.String())); err != nil {
			return err
		}

		b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
		if err != nil {
			return err
		}
		b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
		if err != nil {
			return err
		}
		if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
			return err
		}
		return nil
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified
// compressor, or is blobinfocache.Uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
	_ = bdc.update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(digestCompressorBucket)
		if err != nil {
			return err
		}
		key := []byte(anyDigest.String())
		if previousBytes := b.Get(key); previousBytes != nil {
			if string(previousBytes) != compressorName {
				logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName)
			}
		}
		if compressorName == blobinfocache.UnknownCompression {
			return b.Delete(key)
		}
		return b.Put(key, []byte(compressorName))
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
	_ = bdc.update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
		if err != nil {
			return err
		}
		b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
		if err != nil {
			return err
		}
		b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
		if err != nil {
			return err
		}
		b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
		if err != nil {
			return err
		}
		value, err := time.Now().MarshalBinary()
		if err != nil {
			return err
		}
		if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
			return err
		}
		return nil
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates.
func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
	digestKey := []byte(digest.String())
	b := scopeBucket.Bucket(digestKey)
	if b == nil {
		return candidates
	}
	compressorName := blobinfocache.UnknownCompression
	if compressionBucket != nil {
		// the bucket won't exist if the cache was created by a v1 implementation and
		// hasn't yet been updated by a v2 implementation
		if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 {
			compressorName = string(compressorNameValue)
		}
	}
	if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo {
		return candidates
	}
	_ = b.ForEach(func(k, v []byte) error {
		t := time.Time{}
		if err := t.UnmarshalBinary(v); err != nil {
			return err
		}
		candidates = append(candidates, prioritize.CandidateWithTime{
			Candidate: blobinfocache.BICReplacementCandidate2{
				Digest:         digest,
				CompressorName: compressorName,
				Location:       types.BICLocationReference{Opaque: string(k)},
			},
			LastSeen: t,
		})
		return nil
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
	return candidates
}

// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
	return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
}

func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
	res := []prioritize.CandidateWithTime{}
	var uncompressedDigestValue digest.Digest // = ""
	if err := bdc.view(func(tx *bolt.Tx) error {
		scopeBucket := tx.Bucket(knownLocationsBucket)
		if scopeBucket == nil {
			return nil
		}
		scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
		if scopeBucket == nil {
			return nil
		}
		scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
		if scopeBucket == nil {
			return nil
		}
		// compressionBucket won't have been created if previous writers never recorded info about compression,
		// and we don't want to fail just because of that
		compressionBucket := tx.Bucket(digestCompressorBucket)

		res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo)
		if canSubstitute {
			if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
				b := tx.Bucket(digestByUncompressedBucket)
				if b != nil {
					b = b.Bucket([]byte(uncompressedDigestValue.String()))
					if b != nil {
						if err := b.ForEach(func(k, _ []byte) error {
							d, err := digest.Parse(string(k))
							if err != nil {
								return err
							}
							if d != primaryDigest && d != uncompressedDigestValue {
								res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo)
							}
							return nil
						}); err != nil {
							return err
						}
					}
				}
				if uncompressedDigestValue != primaryDigest {
					res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo)
				}
			}
		}
		return nil
	}); err != nil { // Including os.IsNotExist(err)
		return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
	}

	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
}

// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
	return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
}
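A rough usage sketch of this cache's public surface follows; the cache file path is hypothetical, and real callers normally obtain a cache via blobinfocache.DefaultCache instead of calling boltdb.New directly.

	package main

	import (
		"fmt"

		"github.com/containers/image/v5/pkg/blobinfocache/boltdb"
		"github.com/opencontainers/go-digest"
	)

	func main() {
		// Placeholder path; DefaultCache picks a per-user location instead.
		cache := boltdb.New("/tmp/blob-info-cache-v1.boltdb")

		compressed := digest.FromString("gzip-compressed layer bytes")
		uncompressed := digest.FromString("uncompressed layer bytes")

		// Record a locally verified (compressed, uncompressed) digest pair…
		cache.RecordDigestUncompressedPair(compressed, uncompressed)

		// …and look it up again; each call round-trips through a short-lived
		// BoltDB transaction, serialized by the per-path lock above.
		fmt.Println(cache.UncompressedDigest(compressed))
	}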
66 vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go (generated, vendored, new file)
@@ -0,0 +1,66 @@
package blobinfocache

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/containers/image/v5/internal/rootless"
	"github.com/containers/image/v5/pkg/blobinfocache/boltdb"
	"github.com/containers/image/v5/pkg/blobinfocache/memory"
	"github.com/containers/image/v5/types"
	"github.com/sirupsen/logrus"
)

const (
	// blobInfoCacheFilename is the file name used for blob info caches.
	// If the format changes in an incompatible way, increase the version number.
	blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
	// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
	systemBlobInfoCacheDir = "/var/lib/containers/cache"
)

// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
	if sys != nil && sys.BlobInfoCacheDir != "" {
		return sys.BlobInfoCacheDir, nil
	}

	// FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
	// and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
	if euid == 0 {
		if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
			return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
		}
		return systemBlobInfoCacheDir, nil
	}

	// This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
	dataDir := os.Getenv("XDG_DATA_HOME")
	if dataDir == "" {
		home := os.Getenv("HOME")
		if home == "" {
			return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
		}
		dataDir = filepath.Join(home, ".local", "share")
	}
	return filepath.Join(dataDir, "containers", "cache"), nil
}

// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
	dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
	if err != nil {
		logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
		return memory.New()
	}
	path := filepath.Join(dir, blobInfoCacheFilename)
	if err := os.MkdirAll(dir, 0700); err != nil {
		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
		return memory.New()
	}

	logrus.Debugf("Using blob info cache at %s", path)
	return boltdb.New(path)
}
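A minimal sketch of the intended entry point; the helper name cacheFor is ours, not part of the package.

	import (
		"github.com/containers/image/v5/pkg/blobinfocache"
		"github.com/containers/image/v5/types"
	)

	// Resolve the cache once and reuse it for a whole copy operation.
	func cacheFor(sys *types.SystemContext) types.BlobInfoCache {
		// Root gets /var/lib/containers/cache, rootless users get
		// $XDG_DATA_HOME/containers/cache, and if neither location can be
		// determined the call silently degrades to a memory-only cache.
		return blobinfocache.DefaultCache(sys)
	}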
110 vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go (generated, vendored, new file)
@@ -0,0 +1,110 @@
// Package prioritize provides utilities for prioritizing locations in
// types.BlobInfoCache.CandidateLocations.
package prioritize

import (
	"sort"
	"time"

	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/opencontainers/go-digest"
)

// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
// This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5

// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type CandidateWithTime struct {
	Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
	LastSeen  time.Time                              // Time the candidate was last known to exist (either read or written)
}

// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
// along with the specially-treated digest values for the implementation of sort.Interface.Less
type candidateSortState struct {
	cs                 []CandidateWithTime // The entries to sort
	primaryDigest      digest.Digest       // The digest the user actually asked for
	uncompressedDigest digest.Digest       // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
}

func (css *candidateSortState) Len() int {
	return len(css.cs)
}

func (css *candidateSortState) Less(i, j int) bool {
	xi := css.cs[i]
	xj := css.cs[j]

	// primaryDigest entries come first, more recent first.
	// uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
	// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)

	// First, deal with the primaryDigest/uncompressedDigest cases:
	if xi.Candidate.Digest != xj.Candidate.Digest {
		// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
		if xi.Candidate.Digest == css.primaryDigest {
			return true
		}
		if xj.Candidate.Digest == css.primaryDigest {
			return false
		}
		if css.uncompressedDigest != "" {
			if xi.Candidate.Digest == css.uncompressedDigest {
				return false
			}
			if xj.Candidate.Digest == css.uncompressedDigest {
				return true
			}
		}
	} else { // xi.Candidate.Digest == xj.Candidate.Digest
		// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
		if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
			return xi.LastSeen.After(xj.LastSeen)
		}
	}

	// Neither of the digests are primaryDigest/uncompressedDigest:
	if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
		return xi.LastSeen.After(xj.LastSeen)
	}
	// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
	return xi.Candidate.Digest < xj.Candidate.Digest
}

func (css *candidateSortState) Swap(i, j int) {
	css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
}

// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
// number of entries to limit, only to make testing simpler.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
	// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
	// compare equal.
	sort.Sort(&candidateSortState{
		cs:                 cs,
		primaryDigest:      primaryDigest,
		uncompressedDigest: uncompressedDigest,
	})

	resLength := len(cs)
	if resLength > maxCandidates {
		resLength = maxCandidates
	}
	res := make([]blobinfocache.BICReplacementCandidate2, resLength)
	for i := range res {
		res[i] = cs[i].Candidate
	}
	return res
}

// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
	return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
}
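Since this package lives under internal/, the following sketch only compiles within the c/image module itself; the digest strings are invented, but it illustrates the ordering contract of the sort above.

	primary := digest.FromString("primary blob")
	other := digest.FromString("some other blob")
	now := time.Now()
	cs := []prioritize.CandidateWithTime{
		{Candidate: blobinfocache.BICReplacementCandidate2{Digest: other}, LastSeen: now},
		{Candidate: blobinfocache.BICReplacementCandidate2{Digest: primary}, LastSeen: now.Add(-time.Hour)},
	}
	res := prioritize.DestructivelyPrioritizeReplacementCandidates(cs, primary, "")
	fmt.Println(res[0].Digest == primary) // true: exact matches outrank newer substitutes,
	// and at most replacementAttempts (5) entries are ever returned.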
186 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go (generated, vendored, new file)
@@ -0,0 +1,186 @@
// Package memory implements an in-memory BlobInfoCache.
package memory

import (
	"sync"
	"time"

	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)

// locationKey only exists to make lookup in knownLocations easier.
type locationKey struct {
	transport  string
	scope      types.BICTransportScope
	blobDigest digest.Digest
}

// cache implements an in-memory-only BlobInfoCache.
type cache struct {
	mutex sync.Mutex
	// The following fields can only be accessed with mutex held.
	uncompressedDigests   map[digest.Digest]digest.Digest
	digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{}             // stores a set of digests for each uncompressed digest
	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
	compressors           map[digest.Digest]string                                 // stores a compressor name, or blobinfocache.UnknownCompression, for each digest
}

// New returns a BlobInfoCache implementation which is in-memory only.
//
// This is primarily intended for tests, but also used as a fallback
// if blobinfocache.DefaultCache can’t determine, or set up, the
// location for a persistent cache. Most users should use
// blobinfocache.DefaultCache instead of calling this directly.
// Manual users of types.{ImageSource,ImageDestination} might also use
// this instead of a persistent cache.
func New() types.BlobInfoCache {
	return new2()
}

func new2() *cache {
	return &cache{
		uncompressedDigests:   map[digest.Digest]digest.Digest{},
		digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
		knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
		compressors:           map[digest.Digest]string{},
	}
}

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	return mem.uncompressedDigestLocked(anyDigest)
}

// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
	if d, ok := mem.uncompressedDigests[anyDigest]; ok {
		return d
	}
	// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
	// when we already record a (compressed, uncompressed) pair.
	if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
		return anyDigest
	}
	return ""
}

// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It’s allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
		logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
	}
	mem.uncompressedDigests[anyDigest] = uncompressed

	anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
	if !ok {
		anyDigestSet = map[digest.Digest]struct{}{}
		mem.digestsByUncompressed[uncompressed] = anyDigestSet
	}
	anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
	locationScope, ok := mem.knownLocations[key]
	if !ok {
		locationScope = map[types.BICLocationReference]time.Time{}
		mem.knownLocations[key] = locationScope
	}
	locationScope[location] = time.Now() // Possibly overwriting an older entry.
}

// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified
// algorithm, or uncompressed, or that we no longer know.
func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	if compressorName == blobinfocache.UnknownCompression {
		delete(mem.compressors, blobDigest)
		return
	}
	mem.compressors[blobDigest] = compressorName
}

// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
	for l, t := range locations {
		compressorName, compressorKnown := mem.compressors[digest]
		if !compressorKnown {
			if requireCompressionInfo {
				continue
			}
			compressorName = blobinfocache.UnknownCompression
		}
		candidates = append(candidates, prioritize.CandidateWithTime{
			Candidate: blobinfocache.BICReplacementCandidate2{
				Digest:         digest,
				CompressorName: compressorName,
				Location:       l,
			},
			LastSeen: t,
		})
	}
	return candidates
}

// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
	return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
}

// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
	return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
}

func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	res := []prioritize.CandidateWithTime{}
	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo)
	var uncompressedDigest digest.Digest // = ""
	if canSubstitute {
		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
			for d := range otherDigests {
				if d != primaryDigest && d != uncompressedDigest {
					res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
				}
			}
			if uncompressedDigest != primaryDigest {
				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo)
			}
		}
	}
	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
}
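A hedged sketch of the test-oriented usage the package comment describes; the test name and digest values are invented.

	package memory_test

	import (
		"testing"

		"github.com/containers/image/v5/pkg/blobinfocache/memory"
		"github.com/opencontainers/go-digest"
	)

	func TestLayerDigestMapping(t *testing.T) {
		cache := memory.New() // purely in-memory; nothing touches the filesystem

		compressed := digest.FromString("compressed layer")
		uncompressed := digest.FromString("uncompressed layer")
		cache.RecordDigestUncompressedPair(compressed, uncompressed)

		if got := cache.UncompressedDigest(compressed); got != uncompressed {
			t.Fatalf("unexpected digest %s", got)
		}
	}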
50 vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go (generated, vendored, new file)
@@ -0,0 +1,50 @@
// Package none implements a dummy BlobInfoCache which records no data.
package none

import (
	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

// noCache implements a dummy BlobInfoCache which records no data.
type noCache struct {
}

// NoCache implements BlobInfoCache by not recording any data.
//
// This exists primarily for implementations of configGetter for
// Manifest.Inspect, because configs only have one representation.
// Any use of BlobInfoCache with blobs should usually use at least a
// short-lived cache, ideally blobinfocache.DefaultCache.
var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{})

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
	return ""
}

// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It’s allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
}

// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
	return nil
}
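Illustrative fragment only; src, ctx and configInfo are assumed to be an open types.ImageSource, a context.Context and a types.BlobInfo for the image config, respectively.

	// Fetch a one-shot config blob; none.NoCache satisfies the cache
	// parameter (BlobInfoCache2 embeds types.BlobInfoCache) without
	// persisting anything.
	rc, size, err := src.GetBlob(ctx, configInfo, none.NoCache)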
166 vendor/github.com/containers/image/v5/pkg/compression/compression.go (generated, vendored, new file)
@@ -0,0 +1,166 @@
package compression

import (
	"bytes"
	"compress/bzip2"
	"fmt"
	"io"

	"github.com/containers/image/v5/pkg/compression/internal"
	"github.com/containers/image/v5/pkg/compression/types"
	"github.com/containers/storage/pkg/chunked/compressor"
	"github.com/klauspost/pgzip"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/ulikunitz/xz"
)

// Algorithm is a compression algorithm that can be used for CompressStream.
type Algorithm = types.Algorithm

var (
	// Gzip compression.
	Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName,
		[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
	// Bzip2 compression.
	Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName,
		[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
	// Xz compression.
	Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName,
		[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
	// Zstd compression.
	Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
		[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
	// Zstd:chunked compression.
	ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
		nil, ZstdDecompressor, compressor.ZstdCompressor)

	compressionAlgorithms = map[string]Algorithm{
		Gzip.Name():        Gzip,
		Bzip2.Name():       Bzip2,
		Xz.Name():          Xz,
		Zstd.Name():        Zstd,
		ZstdChunked.Name(): ZstdChunked,
	}
)

// AlgorithmByName returns the Algorithm registered under the given name.
func AlgorithmByName(name string) (Algorithm, error) {
	algorithm, ok := compressionAlgorithms[name]
	if ok {
		return algorithm, nil
	}
	return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name)
}

// DecompressorFunc returns the decompressed stream, given a compressed stream.
// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
type DecompressorFunc = internal.DecompressorFunc

// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
	return pgzip.NewReader(r)
}

// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
	return io.NopCloser(bzip2.NewReader(r)), nil
}

// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
	r, err := xz.NewReader(r)
	if err != nil {
		return nil, err
	}
	return io.NopCloser(r), nil
}

// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
	if level != nil {
		return pgzip.NewWriterLevel(r, *level)
	}
	return pgzip.NewWriter(r), nil
}

// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
	return nil, fmt.Errorf("bzip2 compression not supported")
}

// xzCompressor is a CompressorFunc for the xz compression algorithm.
func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
	return xz.NewWriter(r)
}

// CompressStream returns a writer that compresses into dest using algo (at the given level, if non-nil).
func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
	m := map[string]string{}
	return internal.AlgorithmCompressor(algo)(dest, m, level)
}

// CompressStreamWithMetadata is CompressStream, additionally accepting a metadata map. If the compression
// generates any metadata, it is written to the provided metadata map.
func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) {
	return internal.AlgorithmCompressor(algo)(dest, metadata, level)
}

// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an invalid
// value and nil otherwise.
// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
	buffer := [8]byte{}

	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
		return Algorithm{}, nil, nil, err
	}

	var retAlgo Algorithm
	var decompressor DecompressorFunc
	for _, algo := range compressionAlgorithms {
		prefix := internal.AlgorithmPrefix(algo)
		if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) {
			logrus.Debugf("Detected compression format %s", algo.Name())
			retAlgo = algo
			decompressor = internal.AlgorithmDecompressor(algo)
			break
		}
	}
	if decompressor == nil {
		logrus.Debugf("No compression detected")
	}

	return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
}

// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
	_, d, r, e := DetectCompressionFormat(input)
	return d, r, e
}

// AutoDecompress takes a stream and returns an uncompressed version of the
// same stream.
// The caller must call Close() on the returned stream (even if the input does not need,
// or does not even support, closing!).
func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
	decompressor, stream, err := DetectCompression(stream)
	if err != nil {
		return nil, false, errors.Wrapf(err, "detecting compression")
	}
	var res io.ReadCloser
	if decompressor != nil {
		res, err = decompressor(stream)
		if err != nil {
			return nil, false, errors.Wrapf(err, "initializing decompression")
		}
	} else {
		res = io.NopCloser(stream)
	}
	return res, decompressor != nil, nil
}
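A minimal round-trip sketch through this package's public API; the payload string is of course a placeholder.

	package main

	import (
		"bytes"
		"fmt"
		"io"

		"github.com/containers/image/v5/pkg/compression"
	)

	func main() {
		var buf bytes.Buffer

		// Compress: look the algorithm up by name and stream into buf.
		algo, err := compression.AlgorithmByName("gzip")
		if err != nil {
			panic(err)
		}
		w, err := compression.CompressStream(&buf, algo, nil) // nil level = default
		if err != nil {
			panic(err)
		}
		if _, err := w.Write([]byte("hello registry")); err != nil {
			panic(err)
		}
		w.Close() // must be closed before reading back

		// Decompress: AutoDecompress sniffs the magic-byte prefix.
		r, wasCompressed, err := compression.AutoDecompress(&buf)
		if err != nil {
			panic(err)
		}
		defer r.Close()
		data, _ := io.ReadAll(r)
		fmt.Println(string(data), wasCompressed) // "hello registry true"
	}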
65 vendor/github.com/containers/image/v5/pkg/compression/internal/types.go (generated, vendored, new file)
@@ -0,0 +1,65 @@
package internal

import "io"

// CompressorFunc writes the compressed stream to the given writer using the specified compression level.
// The caller must call Close() on the stream (even if the input stream does not need closing!).
type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error)

// DecompressorFunc returns the decompressed stream, given a compressed stream.
// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
type DecompressorFunc func(io.Reader) (io.ReadCloser, error)

// Algorithm is a compression algorithm that can be used for CompressStream.
type Algorithm struct {
	name         string
	mime         string
	prefix       []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
	decompressor DecompressorFunc
	compressor   CompressorFunc
}

// NewAlgorithm creates an Algorithm instance.
// This function exists so that Algorithm instances can only be created by code that
// is allowed to import this internal subpackage.
func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
	return Algorithm{
		name:         name,
		mime:         mime,
		prefix:       prefix,
		decompressor: decompressor,
		compressor:   compressor,
	}
}

// Name returns the name for the compression algorithm.
func (c Algorithm) Name() string {
	return c.name
}

// InternalUnstableUndocumentedMIMEQuestionMark ???
// DO NOT USE THIS anywhere outside of c/image until it is properly documented.
func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
	return c.mime
}

// AlgorithmCompressor returns the compressor field of algo.
// This is a function instead of a public method so that it is only callable by code
// that is allowed to import this internal subpackage.
func AlgorithmCompressor(algo Algorithm) CompressorFunc {
	return algo.compressor
}

// AlgorithmDecompressor returns the decompressor field of algo.
// This is a function instead of a public method so that it is only callable by code
// that is allowed to import this internal subpackage.
func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
	return algo.decompressor
}

// AlgorithmPrefix returns the prefix field of algo.
// This is a function instead of a public method so that it is only callable by code
// that is allowed to import this internal subpackage.
func AlgorithmPrefix(algo Algorithm) []byte {
	return algo.prefix
}
41 vendor/github.com/containers/image/v5/pkg/compression/types/types.go (generated, vendored, new file)
@@ -0,0 +1,41 @@
package types

import (
	"github.com/containers/image/v5/pkg/compression/internal"
)

// DecompressorFunc returns the decompressed stream, given a compressed stream.
// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
type DecompressorFunc = internal.DecompressorFunc

// Algorithm is a compression algorithm provided and supported by pkg/compression.
// It can’t be supplied from the outside.
type Algorithm = internal.Algorithm

const (
	// GzipAlgorithmName is the name used by pkg/compression.Gzip.
	// NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
	// will actually be available. (In fact it is intended for this types package not to depend
	// on any of the implementations.)
	GzipAlgorithmName = "gzip"
	// Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
	// NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
	// will actually be available. (In fact it is intended for this types package not to depend
	// on any of the implementations.)
	Bzip2AlgorithmName = "bzip2"
	// XzAlgorithmName is the name used by pkg/compression.Xz.
	// NOTE: Importing only this /types package does not inherently guarantee an Xz algorithm
	// will actually be available. (In fact it is intended for this types package not to depend
	// on any of the implementations.)
	XzAlgorithmName = "Xz"
	// ZstdAlgorithmName is the name used by pkg/compression.Zstd.
	// NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
	// will actually be available. (In fact it is intended for this types package not to depend
	// on any of the implementations.)
	ZstdAlgorithmName = "zstd"
	// ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
	// NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
	// will actually be available. (In fact it is intended for this types package not to depend
	// on any of the implementations.)
	ZstdChunkedAlgorithmName = "zstd:chunked"
)
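A short fragment illustrating how these constants are meant to be used; note from the declarations above that the xz entry is registered as "Xz", not "xz", so lookups should use the constant rather than a hand-written lowercase name.

	import (
		"github.com/containers/image/v5/pkg/compression"
		"github.com/containers/image/v5/pkg/compression/types"
	)

	// The names round-trip through compression.AlgorithmByName.
	gzipAlgo, _ := compression.AlgorithmByName(types.GzipAlgorithmName) // "gzip"
	xzAlgo, _ := compression.AlgorithmByName(types.XzAlgorithmName)     // "Xz"
	_, _ = gzipAlgo, xzAlgo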
59 vendor/github.com/containers/image/v5/pkg/compression/zstd.go (generated, vendored, new file)
@@ -0,0 +1,59 @@
package compression

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

type wrapperZstdDecoder struct {
	decoder *zstd.Decoder
}

func (w *wrapperZstdDecoder) Close() error {
	w.decoder.Close()
	return nil
}

func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) {
	return w.decoder.DecodeAll(input, dst)
}

func (w *wrapperZstdDecoder) Read(p []byte) (int, error) {
	return w.decoder.Read(p)
}

func (w *wrapperZstdDecoder) Reset(r io.Reader) error {
	return w.decoder.Reset(r)
}

func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) {
	return w.decoder.WriteTo(wr)
}

func zstdReader(buf io.Reader) (io.ReadCloser, error) {
	decoder, err := zstd.NewReader(buf)
	return &wrapperZstdDecoder{decoder: decoder}, err
}

func zstdWriter(dest io.Writer) (io.WriteCloser, error) {
	return zstd.NewWriter(dest)
}

func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
	el := zstd.EncoderLevelFromZstd(level)
	return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
}

// zstdCompressor is a CompressorFunc for the zstd compression algorithm.
func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
	if level == nil {
		return zstdWriter(r)
	}
	return zstdWriterWithLevel(r, *level)
}

// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm.
func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) {
	return zstdReader(r)
}
|
||||
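A quick round-trip sketch of the decompression path above, pairing the exported ZstdDecompressor with the same klauspost encoder this file wraps; illustrative only:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/containers/image/v5/pkg/compression"
	"github.com/klauspost/compress/zstd"
)

func main() {
	// Compress some bytes with the underlying zstd encoder.
	var buf bytes.Buffer
	enc, _ := zstd.NewWriter(&buf)
	enc.Write([]byte("hello zstd"))
	enc.Close()

	// Decompress via the exported DecompressorFunc; the caller must Close() the stream.
	rc, err := compression.ZstdDecompressor(&buf)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	out, _ := io.ReadAll(rc)
	fmt.Println(string(out)) // hello zstd
}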
802
vendor/github.com/containers/image/v5/pkg/docker/config/config.go
generated
vendored
Normal file
@@ -0,0 +1,802 @@
package config

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage/pkg/homedir"
	"github.com/containers/storage/pkg/ioutils"
	helperclient "github.com/docker/docker-credential-helpers/client"
	"github.com/docker/docker-credential-helpers/credentials"
	"github.com/hashicorp/go-multierror"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

type dockerAuthConfig struct {
	Auth          string `json:"auth,omitempty"`
	IdentityToken string `json:"identitytoken,omitempty"`
}

type dockerConfigFile struct {
	AuthConfigs map[string]dockerAuthConfig `json:"auths"`
	CredHelpers map[string]string           `json:"credHelpers,omitempty"`
}

type authPath struct {
	path         string
	legacyFormat bool
}

var (
	defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
	xdgConfigHomePath       = filepath.FromSlash("containers/auth.json")
	xdgRuntimeDirPath       = filepath.FromSlash("containers/auth.json")
	dockerHomePath          = filepath.FromSlash(".docker/config.json")
	dockerLegacyHomePath    = ".dockercfg"
	nonLinuxAuthFilePath    = filepath.FromSlash(".config/containers/auth.json")

	// ErrNotLoggedIn is returned for users not logged into a registry
	// that they are trying to log out of
	ErrNotLoggedIn = errors.New("not logged in")
	// ErrNotSupported is returned for unsupported methods
	ErrNotSupported = errors.New("not supported")
)

// SetCredentials stores the username and password in a location
// appropriate for sys and the users’ configuration.
// A valid key is a repository, a namespace within a registry, or a registry hostname;
// using forms other than just a registry may fail depending on configuration.
// Returns a human-readable description of the location that was updated.
// NOTE: The return value is only intended to be read by humans; its form is not an API,
// it may change (or new forms can be added) any time.
func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
	isNamespaced, err := validateKey(key)
	if err != nil {
		return "", err
	}

	helpers, err := sysregistriesv2.CredentialHelpers(sys)
	if err != nil {
		return "", err
	}

	// Make sure to collect all errors.
	var multiErr error
	for _, helper := range helpers {
		var desc string
		var err error
		switch helper {
		// Special-case the built-in helpers for auth files.
		case sysregistriesv2.AuthenticationFileHelper:
			desc, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
				if ch, exists := auths.CredHelpers[key]; exists {
					if isNamespaced {
						return false, unsupportedNamespaceErr(ch)
					}
					return false, setAuthToCredHelper(ch, key, username, password)
				}
				creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
				newCreds := dockerAuthConfig{Auth: creds}
				auths.AuthConfigs[key] = newCreds
				return true, nil
			})
		// External helpers.
		default:
			if isNamespaced {
				err = unsupportedNamespaceErr(helper)
			} else {
				desc = fmt.Sprintf("credential helper: %s", helper)
				err = setAuthToCredHelper(helper, key, username, password)
			}
		}
		if err != nil {
			multiErr = multierror.Append(multiErr, err)
			logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
			continue
		}
		logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
		return desc, nil
	}
	return "", multiErr
}
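A minimal calling sketch for SetCredentials; the registry name and credentials are placeholders, and passing a nil SystemContext selects the default per-user auth file and configured helpers:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/docker/config"
)

func main() {
	// nil SystemContext: default auth file / credential helpers for this user.
	where, err := config.SetCredentials(nil, "registry.example.com", "alice", "s3cret")
	if err != nil {
		panic(err)
	}
	fmt.Println("credentials stored in:", where) // human-readable location, not an API
}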
func unsupportedNamespaceErr(helper string) error {
	return errors.Errorf("namespaced key is not supported for credential helper %s", helper)
}

// SetAuthentication stores the username and password in the credential helper or file
// See the documentation of SetCredentials for format of "key"
func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
	_, err := SetCredentials(sys, key, username, password)
	return err
}

// GetAllCredentials returns the registry credentials for all registries stored
// in any of the configured credential helpers.
func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) {
	// To keep things simple, let's first extract all registries from all
	// possible sources, and then call `GetCredentials` on them. That
	// prevents us from having to reverse engineer the logic in
	// `GetCredentials`.
	allKeys := make(map[string]bool)
	addKey := func(s string) {
		allKeys[s] = true
	}

	// To use GetCredentials, we must at least convert the URL forms into host names.
	// While we're at it, we’ll also canonicalize docker.io to the standard format.
	normalizedDockerIORegistry := normalizeRegistry("docker.io")

	helpers, err := sysregistriesv2.CredentialHelpers(sys)
	if err != nil {
		return nil, err
	}
	for _, helper := range helpers {
		switch helper {
		// Special-case the built-in helper for auth files.
		case sysregistriesv2.AuthenticationFileHelper:
			for _, path := range getAuthFilePaths(sys, homedir.Get()) {
				// readJSONFile returns an empty map in case the path doesn't exist.
				auths, err := readJSONFile(path.path, path.legacyFormat)
				if err != nil {
					return nil, errors.Wrapf(err, "reading JSON file %q", path.path)
				}
				// Credential helpers in the auth file have a
				// direct mapping to a registry, so we can just
				// walk the map.
				for registry := range auths.CredHelpers {
					addKey(registry)
				}
				for key := range auths.AuthConfigs {
					key := normalizeAuthFileKey(key, path.legacyFormat)
					if key == normalizedDockerIORegistry {
						key = "docker.io"
					}
					addKey(key)
				}
			}
		// External helpers.
		default:
			creds, err := listAuthsFromCredHelper(helper)
			if err != nil {
				logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
			}
			switch errors.Cause(err) {
			case nil:
				for registry := range creds {
					addKey(registry)
				}
			case exec.ErrNotFound:
				// It's okay if the helper doesn't exist.
			default:
				return nil, err
			}
		}
	}

	// Now use `GetCredentials` to get the specific auth configs for each
	// previously listed registry.
	authConfigs := make(map[string]types.DockerAuthConfig)
	for key := range allKeys {
		authConf, err := GetCredentials(sys, key)
		if err != nil {
			// Note: we rely on the logging in `GetCredentials`.
			return nil, err
		}
		if authConf != (types.DockerAuthConfig{}) {
			authConfigs[key] = authConf
		}
	}

	return authConfigs, nil
}
// getAuthFilePaths returns a slice of authPaths based on the system context
// in the order they should be searched. Note that some paths may not exist.
// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden
// by tests.
func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
	paths := []authPath{}
	pathToAuth, lf, err := getPathToAuth(sys)
	if err == nil {
		paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf})
	} else {
		// Error means that the path set for XDG_RUNTIME_DIR does not exist
		// but we don't want to completely fail in the case that the user is pulling a public image
		// Logging the error as a warning instead and moving on to pulling the image
		logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
	}
	xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
	if xdgCfgHome == "" {
		xdgCfgHome = filepath.Join(homeDir, ".config")
	}
	paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false})
	if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
		paths = append(paths,
			authPath{path: filepath.Join(dockerConfig, "config.json"), legacyFormat: false},
		)
	} else {
		paths = append(paths,
			authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false},
		)
	}
	paths = append(paths,
		authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
	)
	return paths
}

// GetCredentials returns the registry credentials matching key, appropriate for
// sys and the users’ configuration.
// If an entry is not found, an empty struct is returned.
// A valid key is a repository, a namespace within a registry, or a registry hostname.
//
// GetCredentialsForRef should almost always be used in favor of this API.
func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) {
	return getCredentialsWithHomeDir(sys, key, homedir.Get())
}

// GetCredentialsForRef returns the registry credentials necessary for
// accessing ref on the registry ref points to,
// appropriate for sys and the users’ configuration.
// If an entry is not found, an empty struct is returned.
func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) {
	return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get())
}
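A short usage sketch for the recommended lookup entry point; the image reference is a placeholder and a nil SystemContext uses the defaults:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/pkg/docker/config"
)

func main() {
	// Parse a (hypothetical) image reference, then look up matching credentials.
	ref, err := reference.ParseNormalizedNamed("quay.io/example/image")
	if err != nil {
		panic(err)
	}
	auth, err := config.GetCredentialsForRef(nil, ref)
	if err != nil {
		panic(err)
	}
	// An all-empty struct means "no credentials found" rather than an error.
	fmt.Println("username:", auth.Username)
}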
// getCredentialsWithHomeDir is an internal implementation detail of
// GetCredentialsForRef and GetCredentials. It exists only to allow testing it
// with an artificial home directory.
func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) {
	_, err := validateKey(key)
	if err != nil {
		return types.DockerAuthConfig{}, err
	}

	if sys != nil && sys.DockerAuthConfig != nil {
		logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key)
		return *sys.DockerAuthConfig, nil
	}

	var registry string // We compute this once because it is used in several places.
	if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 {
		registry = key[:firstSlash]
	} else {
		registry = key
	}

	// Anonymous function to query credentials from auth files.
	getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
		for _, path := range getAuthFilePaths(sys, homeDir) {
			authConfig, err := findCredentialsInFile(key, registry, path.path, path.legacyFormat)
			if err != nil {
				return types.DockerAuthConfig{}, "", err
			}

			if authConfig != (types.DockerAuthConfig{}) {
				return authConfig, path.path, nil
			}
		}
		return types.DockerAuthConfig{}, "", nil
	}

	helpers, err := sysregistriesv2.CredentialHelpers(sys)
	if err != nil {
		return types.DockerAuthConfig{}, err
	}

	var multiErr error
	for _, helper := range helpers {
		var (
			creds          types.DockerAuthConfig
			helperKey      string
			credHelperPath string
			err            error
		)
		switch helper {
		// Special-case the built-in helper for auth files.
		case sysregistriesv2.AuthenticationFileHelper:
			helperKey = key
			creds, credHelperPath, err = getCredentialsFromAuthFiles()
		// External helpers.
		default:
			// This intentionally uses "registry", not "key"; we don't support namespaced
			// credentials in helpers, but a "registry" is a valid parent of "key".
			helperKey = registry
			creds, err = getAuthFromCredHelper(helper, registry)
		}
		if err != nil {
			logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
			multiErr = multierror.Append(multiErr, err)
			continue
		}
		if creds != (types.DockerAuthConfig{}) {
			msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper)
			if credHelperPath != "" {
				msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
			}
			logrus.Debug(msg)
			return creds, nil
		}
	}
	if multiErr != nil {
		return types.DockerAuthConfig{}, multiErr
	}

	logrus.Debugf("No credentials for %s found", key)
	return types.DockerAuthConfig{}, nil
}
// GetAuthentication returns the registry credentials matching key, appropriate for
// sys and the users’ configuration.
// If an entry is not found, an empty struct is returned.
// A valid key is a repository, a namespace within a registry, or a registry hostname.
//
// Deprecated: This API only has support for username and password. To get the
// support for oauth2 in container registry authentication, we added the new
// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to
// maintain backward compatibility.
func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) {
	return getAuthenticationWithHomeDir(sys, key, homedir.Get())
}

// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
// it exists only to allow testing it with an artificial home directory.
func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) {
	auth, err := getCredentialsWithHomeDir(sys, key, homeDir)
	if err != nil {
		return "", "", err
	}
	if auth.IdentityToken != "" {
		return "", "", errors.Wrap(ErrNotSupported, "non-empty identity token found and this API doesn't support it")
	}
	return auth.Username, auth.Password, nil
}
// RemoveAuthentication removes credentials for `key` from all possible
// sources such as credential helpers and auth files.
// A valid key is a repository, a namespace within a registry, or a registry hostname;
// using forms other than just a registry may fail depending on configuration.
func RemoveAuthentication(sys *types.SystemContext, key string) error {
	isNamespaced, err := validateKey(key)
	if err != nil {
		return err
	}

	helpers, err := sysregistriesv2.CredentialHelpers(sys)
	if err != nil {
		return err
	}

	var multiErr error
	isLoggedIn := false

	removeFromCredHelper := func(helper string) {
		if isNamespaced {
			logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
			return
		}
		// Note: err must stay in scope for the multierror append below, so no else branch here.
		err := deleteAuthFromCredHelper(helper, key)
		if err == nil {
			logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
			isLoggedIn = true
			return
		}
		if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
			logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
			return
		}
		multiErr = multierror.Append(multiErr, errors.Wrapf(err, "removing credentials for %s from credential helper %s", key, helper))
	}

	for _, helper := range helpers {
		var err error
		switch helper {
		// Special-case the built-in helper for auth files.
		case sysregistriesv2.AuthenticationFileHelper:
			_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
				if innerHelper, exists := auths.CredHelpers[key]; exists {
					removeFromCredHelper(innerHelper)
				}
				if _, ok := auths.AuthConfigs[key]; ok {
					isLoggedIn = true
					delete(auths.AuthConfigs, key)
				}
				return true, multiErr
			})
			if err != nil {
				multiErr = multierror.Append(multiErr, err)
			}
		// External helpers.
		default:
			removeFromCredHelper(helper)
		}
	}

	if multiErr != nil {
		return multiErr
	}
	if !isLoggedIn {
		return ErrNotLoggedIn
	}

	return nil
}
// RemoveAllAuthentication deletes all the credentials stored in credential
// helpers and auth files.
func RemoveAllAuthentication(sys *types.SystemContext) error {
	helpers, err := sysregistriesv2.CredentialHelpers(sys)
	if err != nil {
		return err
	}

	var multiErr error
	for _, helper := range helpers {
		var err error
		switch helper {
		// Special-case the built-in helper for auth files.
		case sysregistriesv2.AuthenticationFileHelper:
			_, err = modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
				for registry, helper := range auths.CredHelpers {
					// Helpers in auth files are expected
					// to exist, so no special treatment
					// for them.
					if err := deleteAuthFromCredHelper(helper, registry); err != nil {
						return false, err
					}
				}
				auths.CredHelpers = make(map[string]string)
				auths.AuthConfigs = make(map[string]dockerAuthConfig)
				return true, nil
			})
		// External helpers.
		default:
			var creds map[string]string
			creds, err = listAuthsFromCredHelper(helper)
			switch errors.Cause(err) {
			case nil:
				for registry := range creds {
					err = deleteAuthFromCredHelper(helper, registry)
					if err != nil {
						break
					}
				}
			case exec.ErrNotFound:
				// It's okay if the helper doesn't exist.
				continue
			default:
				// fall through
			}
		}
		if err != nil {
			logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err)
			multiErr = multierror.Append(multiErr, err)
			continue
		}
		logrus.Debugf("All credentials removed from credential helper %s", helper)
	}

	return multiErr
}
func listAuthsFromCredHelper(credHelper string) (map[string]string, error) {
	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
	p := helperclient.NewShellProgramFunc(helperName)
	return helperclient.List(p)
}

// getPathToAuth gets the path of the auth.json file used for reading and writing credentials;
// it returns the path and a bool specifying whether the file is in legacy format.
func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
	return getPathToAuthWithOS(sys, runtime.GOOS)
}

// getPathToAuthWithOS is an internal implementation detail of getPathToAuth,
// it exists only to allow testing it with an artificial runtime.GOOS.
func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) {
	if sys != nil {
		if sys.AuthFilePath != "" {
			return sys.AuthFilePath, false, nil
		}
		if sys.LegacyFormatAuthFilePath != "" {
			return sys.LegacyFormatAuthFilePath, true, nil
		}
		if sys.RootForImplicitAbsolutePaths != "" {
			return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
		}
	}
	if goOS == "windows" || goOS == "darwin" {
		return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil
	}

	runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
	if runtimeDir != "" {
		// This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway.
		// We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case.
		_, err := os.Stat(runtimeDir)
		if os.IsNotExist(err) {
			// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
			// or made a typo while setting the environment variable,
			// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
			return "", false, errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
		} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
		return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil
	}
	return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), false, nil
}
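The lookup order above can be bypassed entirely by pointing a SystemContext at a specific auth file; a small sketch (the path is a placeholder):

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func main() {
	// AuthFilePath short-circuits the XDG / Docker fallback chain shown above.
	sys := &types.SystemContext{AuthFilePath: "/tmp/test-auth.json"}
	auth, err := config.GetCredentials(sys, "registry.example.com")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", auth)
}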
// readJSONFile unmarshals the authentications stored in the auth.json file and returns them,
// or returns an empty dockerConfigFile data structure if auth.json does not exist.
// If the file exists and is empty, readJSONFile returns an error.
func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
	var auths dockerConfigFile

	raw, err := os.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			auths.AuthConfigs = map[string]dockerAuthConfig{}
			return auths, nil
		}
		return dockerConfigFile{}, err
	}

	if legacyFormat {
		if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil {
			return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
		}
		return auths, nil
	}

	if err = json.Unmarshal(raw, &auths); err != nil {
		return dockerConfigFile{}, errors.Wrapf(err, "unmarshaling JSON at %q", path)
	}

	if auths.AuthConfigs == nil {
		auths.AuthConfigs = map[string]dockerAuthConfig{}
	}
	if auths.CredHelpers == nil {
		auths.CredHelpers = make(map[string]string)
	}

	return auths, nil
}
// modifyJSON finds an auth.json file, calls editor on the contents, and
// writes it back if editor returns true.
// Returns a human-readable description of the file, to be returned by SetCredentials.
func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) (string, error) {
	path, legacyFormat, err := getPathToAuth(sys)
	if err != nil {
		return "", err
	}
	if legacyFormat {
		return "", fmt.Errorf("writes to %s using legacy format are not supported", path)
	}

	dir := filepath.Dir(path)
	if err = os.MkdirAll(dir, 0700); err != nil {
		return "", err
	}

	auths, err := readJSONFile(path, false)
	if err != nil {
		return "", errors.Wrapf(err, "reading JSON file %q", path)
	}

	updated, err := editor(&auths)
	if err != nil {
		return "", errors.Wrapf(err, "updating %q", path)
	}
	if updated {
		newData, err := json.MarshalIndent(auths, "", "\t")
		if err != nil {
			return "", errors.Wrapf(err, "marshaling JSON %q", path)
		}

		if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil {
			return "", errors.Wrapf(err, "writing to file %q", path)
		}
	}

	return path, nil
}
func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
	p := helperclient.NewShellProgramFunc(helperName)
	creds, err := helperclient.Get(p, registry)
	if err != nil {
		if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
			logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper)
			err = nil
		}
		return types.DockerAuthConfig{}, err
	}

	switch creds.Username {
	case "<token>":
		return types.DockerAuthConfig{
			IdentityToken: creds.Secret,
		}, nil
	default:
		return types.DockerAuthConfig{
			Username: creds.Username,
			Password: creds.Secret,
		}, nil
	}
}

func setAuthToCredHelper(credHelper, registry, username, password string) error {
	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
	p := helperclient.NewShellProgramFunc(helperName)
	creds := &credentials.Credentials{
		ServerURL: registry,
		Username:  username,
		Secret:    password,
	}
	return helperclient.Store(p, creds)
}

func deleteAuthFromCredHelper(credHelper, registry string) error {
	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
	p := helperclient.NewShellProgramFunc(helperName)
	return helperclient.Erase(p, registry)
}
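These three wrappers all shell out to a docker-credential-* binary via the docker-credential-helpers client package. A standalone sketch of the same call pattern, assuming a helper named docker-credential-pass is on $PATH:

package main

import (
	"fmt"

	helperclient "github.com/docker/docker-credential-helpers/client"
)

func main() {
	// The helper binary speaks the get/store/erase protocol on stdin/stdout.
	p := helperclient.NewShellProgramFunc("docker-credential-pass")
	creds, err := helperclient.Get(p, "registry.example.com")
	if err != nil {
		panic(err) // includes the "credentials not found" case
	}
	fmt.Println("user:", creds.Username)
}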
// findCredentialsInFile looks for credentials matching "key"
// (which is "registry" or a namespace in "registry") in "path".
func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
	auths, err := readJSONFile(path, legacyFormat)
	if err != nil {
		return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path)
	}

	// First try cred helpers. They should always be normalized.
	// This intentionally uses "registry", not "key"; we don't support namespaced
	// credentials in helpers.
	if ch, exists := auths.CredHelpers[registry]; exists {
		logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path)
		return getAuthFromCredHelper(ch, registry)
	}

	// Support sub-registry namespaces in auth.
	// (This is not a feature of ~/.docker/config.json; we support it even for
	// those files as an extension.)
	var keys []string
	if !legacyFormat {
		keys = authKeysForKey(key)
	} else {
		keys = []string{registry}
	}

	// Repo or namespace keys are only supported as exact matches. For registry
	// keys we prefer exact matches as well.
	for _, key := range keys {
		if val, exists := auths.AuthConfigs[key]; exists {
			return decodeDockerAuth(val)
		}
	}

	// bad luck; let's normalize the entries first
	// This primarily happens for legacyFormat, which for a time used API URLs
	// (http[s:]//…/v1/) as keys.
	// Secondarily, (docker login) accepted URLs with no normalization for
	// several years, and matched registry hostnames against that, so support
	// those entries even in non-legacyFormat ~/.docker/config.json.
	// The docker.io registry still uses the /v1/ key with a special host name,
	// so account for that as well.
	registry = normalizeRegistry(registry)
	for k, v := range auths.AuthConfigs {
		if normalizeAuthFileKey(k, legacyFormat) == registry {
			return decodeDockerAuth(v)
		}
	}

	// Only log this if we found nothing; getCredentialsWithHomeDir logs the
	// source of found data.
	logrus.Debugf("No credentials matching %s found in %s", key, path)
	return types.DockerAuthConfig{}, nil
}
// authKeysForKey returns the keys matching a provided auth file key, in order
// from the best match to worst. For example,
// when given a repository key "quay.io/repo/ns/image", it returns
// - quay.io/repo/ns/image
// - quay.io/repo/ns
// - quay.io/repo
// - quay.io
func authKeysForKey(key string) (res []string) {
	for {
		res = append(res, key)

		lastSlash := strings.LastIndex(key, "/")
		if lastSlash == -1 {
			break
		}
		key = key[:lastSlash]
	}

	return res
}
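The same prefix-stripping loop, extracted as a self-contained sketch to make the expansion order concrete (authKeysForKey itself is unexported, so this reimplements it for illustration):

package main

import (
	"fmt"
	"strings"
)

func main() {
	key := "quay.io/repo/ns/image"
	for {
		fmt.Println(key) // best match first, registry host last
		i := strings.LastIndex(key, "/")
		if i == -1 {
			break
		}
		key = key[:i]
	}
	// Output:
	// quay.io/repo/ns/image
	// quay.io/repo/ns
	// quay.io/repo
	// quay.io
}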
// decodeDockerAuth decodes the username and password, which is
// encoded in base64.
func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
	decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
	if err != nil {
		return types.DockerAuthConfig{}, err
	}

	parts := strings.SplitN(string(decoded), ":", 2)
	if len(parts) != 2 {
		// if it's invalid just skip, as docker does
		return types.DockerAuthConfig{}, nil
	}

	user := parts[0]
	password := strings.Trim(parts[1], "\x00")
	return types.DockerAuthConfig{
		Username:      user,
		Password:      password,
		IdentityToken: conf.IdentityToken,
	}, nil
}
// normalizeAuthFileKey takes a key, converts it to a host name and normalizes
// the resulting registry.
func normalizeAuthFileKey(key string, legacyFormat bool) string {
	stripped := strings.TrimPrefix(key, "http://")
	stripped = strings.TrimPrefix(stripped, "https://")

	if legacyFormat || stripped != key {
		stripped = strings.SplitN(stripped, "/", 2)[0]
	}

	return normalizeRegistry(stripped)
}

// normalizeRegistry converts the provided registry if a known docker.io host
// is provided.
func normalizeRegistry(registry string) string {
	switch registry {
	case "registry-1.docker.io", "docker.io":
		return "index.docker.io"
	}
	return registry
}
// validateKey verifies that the input key does not have a prefix that is not
// allowed, and returns whether the key is namespaced.
func validateKey(key string) (bool, error) {
	if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") {
		return false, errors.Errorf("key %s contains http[s]:// prefix", key)
	}

	// Ideally this should only accept explicitly valid keys, compare
	// validateIdentityRemappingPrefix. For now, just reject values that look
	// like tagged or digested values.
	if strings.ContainsRune(key, '@') {
		return false, fmt.Errorf(`key %s contains a '@' character`, key)
	}

	firstSlash := strings.IndexRune(key, '/')
	isNamespaced := firstSlash != -1
	// Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo.
	if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') {
		return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key)
	}
	// A key is namespaced if it contains one or more subpaths.
	return isNamespaced, nil
}
1
vendor/github.com/containers/image/v5/pkg/strslice/README.md
generated
vendored
Normal file
@@ -0,0 +1 @@
This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
30
vendor/github.com/containers/image/v5/pkg/strslice/strslice.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
package strslice

import "encoding/json"

// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string

// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		// With no input, we preserve the existing value by returning nil and
		// leaving the target alone. This allows defining default values for
		// the type.
		return nil
	}

	p := make([]string, 0, 1)
	if err := json.Unmarshal(b, &p); err != nil {
		var s string
		if err := json.Unmarshal(b, &s); err != nil {
			return err
		}
		p = append(p, s)
	}

	*e = p
	return nil
}
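A small demonstration of the dual decoding behavior; both JSON shapes end up as the same slice type:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/image/v5/pkg/strslice"
)

func main() {
	var fromString, fromArray strslice.StrSlice
	// A bare JSON string becomes a one-element slice...
	json.Unmarshal([]byte(`"/bin/sh"`), &fromString)
	// ...and a JSON array decodes element by element.
	json.Unmarshal([]byte(`["/bin/sh", "-c", "true"]`), &fromArray)
	fmt.Println(fromString) // [/bin/sh]
	fmt.Println(fromArray)  // [/bin/sh -c true]
}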
351
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go
generated
vendored
Normal file
@@ -0,0 +1,351 @@
package sysregistriesv2

import (
	"os"
	"path/filepath"
	"reflect"
	"strings"

	"github.com/BurntSushi/toml"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/rootless"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage/pkg/homedir"
	"github.com/containers/storage/pkg/lockfile"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// defaultShortNameMode is the default mode of registries.conf files if the
// corresponding field is left empty.
const defaultShortNameMode = types.ShortNameModePermissive

// userShortNamesFile is the user-specific config file to store aliases.
var userShortNamesFile = filepath.FromSlash("containers/short-name-aliases.conf")

// shortNameAliasesConfPath returns the path to the machine-generated
// short-name-aliases.conf file.
func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) {
	if ctx != nil && len(ctx.UserShortNameAliasConfPath) > 0 {
		return ctx.UserShortNameAliasConfPath, nil
	}

	if rootless.GetRootlessEUID() == 0 {
		// Root user or in a non-conforming user NS
		return filepath.Join("/var/cache", userShortNamesFile), nil
	}

	// Rootless user
	cacheRoot, err := homedir.GetCacheHome()
	if err != nil {
		return "", err
	}

	return filepath.Join(cacheRoot, userShortNamesFile), nil
}
// shortNameAliasConf is a subset of the `V2RegistriesConf` format. It's used in the
// software-maintained `userShortNamesFile`.
type shortNameAliasConf struct {
	// A map for aliasing short names to their fully-qualified image
	// reference counterparts.
	// Note that Aliases is niled after being loaded from a file.
	Aliases map[string]string `toml:"aliases"`

	// If you add any field, make sure to update nonempty() below.
}

// nonempty returns true if config contains at least one configuration entry.
func (c *shortNameAliasConf) nonempty() bool {
	copy := *c // A shallow copy
	if copy.Aliases != nil && len(copy.Aliases) == 0 {
		copy.Aliases = nil
	}
	return !reflect.DeepEqual(copy, shortNameAliasConf{})
}

// alias combines the parsed value of an alias with the config file it has been
// specified in. The config file is crucial for an improved user experience
// such that users are able to resolve potential pull errors.
type alias struct {
	// The parsed value of an alias. May be nil if set to "" in a config.
	value reference.Named
	// The config file the alias originates from.
	configOrigin string
}

// shortNameAliasCache is the result of parsing shortNameAliasConf,
// pre-processed for faster usage.
type shortNameAliasCache struct {
	// Note that an alias value may be nil iff it's set as an empty string
	// in the config.
	namedAliases map[string]alias
}
// ResolveShortNameAlias performs an alias resolution of the specified name.
// The user-specific short-name-aliases.conf has precedence over aliases in the
// assembled registries.conf. It returns the possibly resolved alias or nil, a
// human-readable description of the config where the alias is specified, and
// an error. The origin of the config file is crucial for an improved user
// experience such that users are able to resolve potential pull errors.
// Almost all callers should use pkg/shortnames instead.
//
// Note that it’s the caller’s responsibility to pass only a repository
// (reference.IsNameOnly) as the short name.
func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Named, string, error) {
	if err := validateShortName(name); err != nil {
		return nil, "", err
	}
	confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
	if err != nil {
		return nil, "", err
	}

	// Acquire the lock as a reader to allow for multiple routines in the
	// same process space to read simultaneously.
	lock.RLock()
	defer lock.Unlock()

	_, aliasCache, err := loadShortNameAliasConf(confPath)
	if err != nil {
		return nil, "", err
	}

	// First look up the short-name-aliases.conf. Note that a value may be
	// nil iff it's set as an empty string in the config.
	alias, resolved := aliasCache.namedAliases[name]
	if resolved {
		return alias.value, alias.configOrigin, nil
	}

	config, err := getConfig(ctx)
	if err != nil {
		return nil, "", err
	}
	alias, resolved = config.aliasCache.namedAliases[name]
	if resolved {
		return alias.value, alias.configOrigin, nil
	}
	return nil, "", nil
}
// editShortNameAlias loads the aliases.conf file and changes it. If value is
// set, it adds the name-value pair as a new alias. Otherwise, it will remove
// name from the config.
func editShortNameAlias(ctx *types.SystemContext, name string, value *string) error {
	if err := validateShortName(name); err != nil {
		return err
	}
	if value != nil {
		if _, err := parseShortNameValue(*value); err != nil {
			return err
		}
	}

	confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
	if err != nil {
		return err
	}

	// Acquire the lock as a writer to prevent data corruption.
	lock.Lock()
	defer lock.Unlock()

	// Load the short-name-alias.conf, add the specified name-value pair,
	// and write it back to the file.
	conf, _, err := loadShortNameAliasConf(confPath)
	if err != nil {
		return err
	}

	if conf.Aliases == nil { // Ensure we have a map to update.
		conf.Aliases = make(map[string]string)
	}
	if value != nil {
		conf.Aliases[name] = *value
	} else {
		// If the name does not exist, return an error.
		if _, exists := conf.Aliases[name]; !exists {
			return errors.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
		}

		delete(conf.Aliases, name)
	}

	f, err := os.OpenFile(confPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer f.Close()

	encoder := toml.NewEncoder(f)
	return encoder.Encode(conf)
}
// AddShortNameAlias adds the specified name-value pair as a new alias to the
// user-specific aliases.conf. It may override an existing alias for `name`.
//
// Note that it’s the caller’s responsibility to pass only a repository
// (reference.IsNameOnly) as the short name.
func AddShortNameAlias(ctx *types.SystemContext, name string, value string) error {
	return editShortNameAlias(ctx, name, &value)
}

// RemoveShortNameAlias clears the alias for the specified name. It returns an
// error in case name does not exist in the machine-generated
// short-name-alias.conf. In such a case, the alias must be specified in one of
// the registries.conf files, which is the users' responsibility.
//
// Note that it’s the caller’s responsibility to pass only a repository
// (reference.IsNameOnly) as the short name.
func RemoveShortNameAlias(ctx *types.SystemContext, name string) error {
	return editShortNameAlias(ctx, name, nil)
}
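A short usage sketch for the pair above; the alias and its target are placeholders, and a nil SystemContext picks the default per-user aliases file:

package main

import (
	"github.com/containers/image/v5/pkg/sysregistriesv2"
)

func main() {
	// Map the short name "myalpine" to a fully-qualified reference...
	if err := sysregistriesv2.AddShortNameAlias(nil, "myalpine", "docker.io/library/alpine"); err != nil {
		panic(err)
	}
	// ...and remove it again; removing an unknown alias returns an error.
	if err := sysregistriesv2.RemoveShortNameAlias(nil, "myalpine"); err != nil {
		panic(err)
	}
}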
// parseShortNameValue parses the specified alias into a reference.Named. The alias is
// expected to not be tagged or carry a digest and *must* include a
// domain/registry.
//
// Note that the returned reference is always normalized.
func parseShortNameValue(alias string) (reference.Named, error) {
	ref, err := reference.Parse(alias)
	if err != nil {
		return nil, errors.Wrapf(err, "parsing alias %q", alias)
	}

	if _, ok := ref.(reference.Digested); ok {
		return nil, errors.Errorf("invalid alias %q: must not contain digest", alias)
	}

	if _, ok := ref.(reference.Tagged); ok {
		return nil, errors.Errorf("invalid alias %q: must not contain tag", alias)
	}

	named, ok := ref.(reference.Named)
	if !ok {
		return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
	}

	registry := reference.Domain(named)
	if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
		return nil, errors.Errorf("invalid alias %q: must contain registry and repository", alias)
	}

	// A final parse to make sure that docker.io references are correctly
	// normalized (e.g., docker.io/alpine to docker.io/library/alpine).
	named, err = reference.ParseNormalizedNamed(alias)
	return named, err
}
// validateShortName parses the specified `name` of an alias (i.e., the left-hand
// side) and checks if it's a short name and does not include a tag or digest.
func validateShortName(name string) error {
	repo, err := reference.Parse(name)
	if err != nil {
		return errors.Wrapf(err, "cannot parse short name: %q", name)
	}

	if _, ok := repo.(reference.Digested); ok {
		return errors.Errorf("invalid short name %q: must not contain digest", name)
	}

	if _, ok := repo.(reference.Tagged); ok {
		return errors.Errorf("invalid short name %q: must not contain tag", name)
	}

	named, ok := repo.(reference.Named)
	if !ok {
		return errors.Errorf("invalid short name %q: no name", name)
	}

	registry := reference.Domain(named)
	if strings.ContainsAny(registry, ".:") || registry == "localhost" {
		return errors.Errorf("invalid short name %q: must not contain registry", name)
	}
	return nil
}
// newShortNameAliasCache parses shortNameAliasConf and returns the corresponding internal
// representation.
func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAliasCache, error) {
	res := shortNameAliasCache{
		namedAliases: make(map[string]alias),
	}
	errs := []error{}
	for name, value := range conf.Aliases {
		if err := validateShortName(name); err != nil {
			errs = append(errs, err)
		}

		// Empty right-hand side values in config files allow resetting
		// an alias from a previously loaded config. This way, drop-in
		// config files from registries.conf.d can reset potentially
		// misconfigured aliases.
		if value == "" {
			res.namedAliases[name] = alias{nil, path}
			continue
		}

		named, err := parseShortNameValue(value)
		if err != nil {
			// We want to report *all* malformed entries to avoid a
			// whack-a-mole for the user.
			errs = append(errs, err)
		} else {
			res.namedAliases[name] = alias{named, path}
		}
	}
	if len(errs) > 0 {
		err := errs[0]
		for i := 1; i < len(errs); i++ {
			err = errors.Wrapf(err, "%v\n", errs[i])
		}
		return nil, err
	}
	return &res, nil
}
// updateWithConfigurationFrom updates c with configuration from updates.
// In case of conflict, updates is preferred.
func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
	for name, value := range updates.namedAliases {
		c.namedAliases[name] = value
	}
}
func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
	conf := shortNameAliasConf{}

	meta, err := toml.DecodeFile(confPath, &conf)
	if err != nil && !os.IsNotExist(err) {
		// It's okay if the config doesn't exist. Other errors are not.
		return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath)
	}
	if keys := meta.Undecoded(); len(keys) > 0 {
		logrus.Debugf("Failed to decode keys %q from %q", keys, confPath)
	}

	// Even if we don’t always need the cache, doing so validates the machine-generated config. The
	// file could still be corrupted by another process or user.
	cache, err := newShortNameAliasCache(confPath, &conf)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath)
	}

	return &conf, cache, nil
}
func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, lockfile.Locker, error) {
	shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx)
	if err != nil {
		return "", nil, err
	}
	// Make sure the path to the file exists.
	if err := os.MkdirAll(filepath.Dir(shortNameAliasesConfPath), 0700); err != nil {
		return "", nil, err
	}

	lockPath := shortNameAliasesConfPath + ".lock"
	locker, err := lockfile.GetLockfile(lockPath)
	return shortNameAliasesConfPath, locker, err
}
1050
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
generated
vendored
Normal file
File diff suppressed because it is too large
110
vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
package tlsclientconfig

import (
	"crypto/tls"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/docker/go-connections/sockets"
	"github.com/docker/go-connections/tlsconfig"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
func SetupCertificates(dir string, tlsc *tls.Config) error {
	logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
	fs, err := os.ReadDir(dir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		if os.IsPermission(err) {
			logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err)
			return nil
		}
		return err
	}

	for _, f := range fs {
		fullPath := filepath.Join(dir, f.Name())
		if strings.HasSuffix(f.Name(), ".crt") {
			logrus.Debugf(" crt: %s", fullPath)
			data, err := os.ReadFile(fullPath)
			if err != nil {
				if os.IsNotExist(err) {
					// Dangling symbolic link?
					// Race with someone who deleted the
					// file after we read the directory's
					// list of contents?
					logrus.Warnf("error reading certificate %q: %v", fullPath, err)
					continue
				}
				return err
			}
			if tlsc.RootCAs == nil {
				systemPool, err := tlsconfig.SystemCertPool()
				if err != nil {
					return errors.Wrap(err, "unable to get system cert pool")
				}
				tlsc.RootCAs = systemPool
			}
			tlsc.RootCAs.AppendCertsFromPEM(data)
		}
		if strings.HasSuffix(f.Name(), ".cert") {
			certName := f.Name()
			keyName := certName[:len(certName)-5] + ".key"
			logrus.Debugf(" cert: %s", fullPath)
			if !hasFile(fs, keyName) {
				return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
			}
			cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
			if err != nil {
				return err
			}
			tlsc.Certificates = append(tlsc.Certificates, cert)
		}
		if strings.HasSuffix(f.Name(), ".key") {
			keyName := f.Name()
			certName := keyName[:len(keyName)-4] + ".cert"
			logrus.Debugf(" key: %s", fullPath)
			if !hasFile(fs, certName) {
				return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
			}
		}
	}
	return nil
}
func hasFile(files []os.DirEntry, name string) bool {
	for _, f := range files {
		if f.Name() == name {
			return true
		}
	}
	return false
}
// NewTransport creates a default transport
func NewTransport() *http.Transport {
	direct := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
		DualStack: true,
	}
	tr := &http.Transport{
		Proxy:               http.ProxyFromEnvironment,
		DialContext:         direct.DialContext,
		TLSHandshakeTimeout: 10 * time.Second,
		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
		DisableKeepAlives: true,
	}
	if _, err := sockets.DialerFromEnvironment(direct); err != nil {
		logrus.Debugf("Can't execute DialerFromEnvironment: %v", err)
	}
	return tr
}
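Putting the two helpers together: a hedged sketch of building an HTTP client whose TLS config is populated from a (hypothetical) per-registry certificate directory, following the docker-style layout this package expects:

package main

import (
	"crypto/tls"
	"net/http"

	"github.com/containers/image/v5/pkg/tlsclientconfig"
)

func main() {
	tlsc := &tls.Config{}
	// Directory layout: ca.crt files plus client.cert/client.key pairs; path is a placeholder.
	if err := tlsclientconfig.SetupCertificates("/etc/docker/certs.d/registry.example.com", tlsc); err != nil {
		panic(err)
	}
	tr := tlsclientconfig.NewTransport()
	tr.TLSClientConfig = tlsc
	client := &http.Client{Transport: tr}
	_ = client // ready for registry API calls
}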