go.mod: update osbuild/images to v0.74.0

Gianluca Zuccarelli 2024-08-06 15:20:12 +01:00 committed by Sanne Raymaekers
parent 3789ff4ce8
commit c9972f7da8
327 changed files with 8341 additions and 12785 deletions

go.mod

@@ -1,6 +1,8 @@
module github.com/osbuild/osbuild-composer
go 1.21
go 1.21.0
toolchain go1.21.11
exclude github.com/mattn/go-sqlite3 v2.0.3+incompatible
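(A note on the directive pair above: under Go 1.21's toolchain selection rules, "go 1.21.0" declares the minimum language version, while the new "toolchain go1.21.11" line makes an older go command fetch and use go1.21.11 when building this module.)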
@@ -16,7 +18,7 @@ require (
github.com/Azure/go-autorest/autorest v0.11.29
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13
github.com/BurntSushi/toml v1.4.0
github.com/aws/aws-sdk-go v1.55.2
github.com/aws/aws-sdk-go v1.55.5
github.com/coreos/go-semver v0.3.1
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/deepmap/oapi-codegen v1.8.2
@@ -36,7 +38,7 @@ require (
github.com/labstack/gommon v0.4.2
github.com/openshift-online/ocm-sdk-go v0.1.432
github.com/oracle/oci-go-sdk/v54 v54.0.0
github.com/osbuild/images v0.72.0
github.com/osbuild/images v0.74.0
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1
github.com/osbuild/pulp-client v0.1.0
github.com/prometheus/client_golang v1.19.1
@@ -46,19 +48,19 @@ require (
github.com/stretchr/testify v1.9.0
github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453
github.com/vmware/govmomi v0.39.0
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
golang.org/x/oauth2 v0.21.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
google.golang.org/api v0.189.0
google.golang.org/api v0.190.0
)
require (
cloud.google.com/go v0.115.0 // indirect
cloud.google.com/go/auth v0.7.2 // indirect
cloud.google.com/go/auth v0.7.3 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
cloud.google.com/go/compute/metadata v0.5.0 // indirect
cloud.google.com/go/iam v1.1.10 // indirect
cloud.google.com/go/iam v1.1.12 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
@@ -72,7 +74,7 @@ require (
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.12.3 // indirect
github.com/Microsoft/hcsshim v0.12.5 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -83,25 +85,25 @@ require (
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/errdefs v0.1.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containers/common v0.59.2 // indirect
github.com/containers/image/v5 v5.31.1 // indirect
github.com/containers/common v0.60.0 // indirect
github.com/containers/image/v5 v5.32.0 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/containers/ocicrypt v1.1.10 // indirect
github.com/containers/storage v1.54.0 // indirect
github.com/containers/ocicrypt v1.2.0 // indirect
github.com/containers/storage v1.55.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
github.com/cyphar/filepath-securejoin v0.2.5 // indirect
github.com/cyphar/filepath-securejoin v0.3.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker v26.1.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.1 // indirect
github.com/docker/docker v27.1.1+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.23.0 // indirect
@@ -120,9 +122,9 @@ require (
github.com/golang/glog v1.2.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/go-containerregistry v0.19.1 // indirect
github.com/google/go-containerregistry v0.20.0 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/s2a-go v0.1.8 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/gorilla/css v1.0.0 // indirect
@@ -142,11 +144,11 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
@@ -158,8 +160,8 @@ require (
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/mountinfo v0.7.1 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/user v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
@@ -174,17 +176,17 @@ require (
github.com/proglottis/gpgme v0.1.3 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/common v0.51.1 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/sigstore/fulcio v1.4.5 // indirect
github.com/sigstore/rekor v1.3.6 // indirect
github.com/sigstore/sigstore v1.8.3 // indirect
github.com/sigstore/sigstore v1.8.4 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
github.com/sylabs/sif/v2 v2.16.0 // indirect
github.com/sylabs/sif/v2 v2.18.0 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
@@ -192,7 +194,7 @@ require (
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
github.com/vbatts/tar-split v0.11.5 // indirect
github.com/vbauerster/mpb/v8 v8.7.3 // indirect
github.com/vbauerster/mpb/v8 v8.7.4 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
@@ -208,12 +210,11 @@ require (
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.23.0 // indirect
google.golang.org/genproto v0.0.0-20240722135656-d784300faade // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240722135656-d784300faade // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade // indirect
google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect
google.golang.org/grpc v1.64.1 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum

@@ -1,18 +1,18 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE=
cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs=
cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY=
cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA=
cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
cloud.google.com/go/compute v1.27.4 h1:XM8ulx6crjdl09XBfji7viFgZOEQuIxBwKmjRH9Rtmc=
cloud.google.com/go/compute v1.27.4/go.mod h1:7JZS+h21ERAGHOy5qb7+EPyXlQwzshzrx1x6L9JhTqU=
cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
cloud.google.com/go/iam v1.1.10 h1:ZSAr64oEhQSClwBL670MsJAW5/RLiC6kfw3Bqmd5ZDI=
cloud.google.com/go/iam v1.1.10/go.mod h1:iEgMq62sg8zx446GCaijmA2Miwg5o3UbO+nI47WHJps=
cloud.google.com/go/longrunning v0.5.9 h1:haH9pAuXdPAMqHvzX0zlWQigXT7B0+CL4/2nXXdBo5k=
cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c=
cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw=
cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg=
cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk=
cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4=
cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
@@ -76,16 +76,16 @@ github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0=
github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ=
github.com/Microsoft/hcsshim v0.12.5 h1:bpTInLlDy/nDRWFVcefDZZ1+U8tS+rz3MxjKgu9boo0=
github.com/Microsoft/hcsshim v0.12.5/go.mod h1:tIUGego4G1EN5Hb6KC90aDYiUI2dqLSTTOCjVNpOgZ8=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.55.2 h1:/2OFM8uFfK9e+cqHTw9YPrvTzIXT2XkFGXRM7WbJb7E=
github.com/aws/aws-sdk-go v1.55.2/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -107,16 +107,16 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containers/common v0.59.2 h1:FcURZzlMYMVZXqjMEop6C0A3yWilrfmWUPUw09APHvI=
github.com/containers/common v0.59.2/go.mod h1:/PHpbapKSHQU29Jmjn3Ld3jekoHvX0zx7qQxxyPqSTM=
github.com/containers/image/v5 v5.31.1 h1:3x9soI6Biml/GiDLpkSmKrkRSwVGctxu/vONpoUdklA=
github.com/containers/image/v5 v5.31.1/go.mod h1:5QfOqSackPkSbF7Qxc1DnVNnPJKQ+KWLkfEfDpK590Q=
github.com/containers/common v0.60.0 h1:QMNygqiiit9LU/yqee9Dv0N0oQ+rQq41ElfdOBQtw7w=
github.com/containers/common v0.60.0/go.mod h1:dtKVe11xkV89tqzRX9s/B0ORjeB2dy5UB46aGjunMn8=
github.com/containers/image/v5 v5.32.0 h1:yjbweazPfr8xOzQ2hkkYm1A2V0jN96/kES6Gwyxj7hQ=
github.com/containers/image/v5 v5.32.0/go.mod h1:x5e0RDfGaY6bnQ13gJ2LqbfHvzssfB/y5a8HduGFxJc=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.1.10 h1:r7UR6o8+lyhkEywetubUUgcKFjOWOaWz8cEBrCPX0ic=
github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
github.com/containers/storage v1.54.0 h1:xwYAlf6n9OnIlURQLLg3FYHbO74fQ/2W2N6EtQEUM4I=
github.com/containers/storage v1.54.0/go.mod h1:PlMOoinRrBSnhYODLxt4EXl0nmJt+X0kjG0Xdt9fMTw=
github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
github.com/containers/storage v1.55.0 h1:wTWZ3YpcQf1F+dSP4KxG9iqDfpQY1otaUXjPpffuhgg=
github.com/containers/storage v1.55.0/go.mod h1:28cB81IDk+y7ok60Of6u52RbCeBRucbFOeLunhER1RQ=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@@ -129,8 +129,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/cyphar/filepath-securejoin v0.3.1 h1:1V7cHiaW+C+39wEfpH6XlLBQo3j/PciWFrgfCLS8XrE=
github.com/cyphar/filepath-securejoin v0.3.1/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -142,14 +142,14 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v26.1.3+incompatible h1:bUpXT/N0kDE3VUHI2r5VMsYQgi38kYuoC0oL9yt3lqc=
github.com/docker/cli v26.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE=
github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
@@ -178,8 +178,8 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -254,11 +254,10 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
github.com/google/go-containerregistry v0.20.0 h1:wRqHpOeVh3DnenOrPy9xDOLdnLatiGuuNRVelR2gSbg=
github.com/google/go-containerregistry v0.20.0/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -267,8 +266,8 @@ github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -370,8 +369,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg=
@@ -394,8 +393,8 @@ github.com/labstack/echo/v4 v4.12.0/go.mod h1:UP9Cr2DJXbOK3Kr9ONYzNowSh7HP0aG0Sh
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0=
github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU=
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e h1:RLTpX495BXToqxpM90Ws4hXEo4Wfh81jr9DX1n/4WOo=
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU=
github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 h1:aiPrFdHDCCvigNBCkOWj2lv9Bx5xDp210OANZEoiP0I=
github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0/go.mod h1:srVwm2N3DC/tWqQ+igZXDrmKlNRN8X/dmJ1wEZrv760=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@@ -438,10 +437,10 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/moby/sys/user v0.2.0 h1:OnpapJsRp25vkhw8TFG6OLJODNh/3rEwRWtJ3kakwRM=
github.com/moby/sys/user v0.2.0/go.mod h1:RYstrcWOJpVh+6qzUqp2bU3eaRpdiQeKGlKitaH0PM8=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -453,8 +452,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo/v2 v2.18.0 h1:W9Y7IWXxPUpAit9ieMOLI7PJZGaW22DTKgiVAuhDTLc=
github.com/onsi/ginkgo/v2 v2.18.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -469,8 +468,8 @@ github.com/openshift-online/ocm-sdk-go v0.1.432 h1:XIlCJKxXXznMP5Usu9lVGZa+UTYVl
github.com/openshift-online/ocm-sdk-go v0.1.432/go.mod h1:CiAu2jwl3ITKOxkeV0Qnhzv4gs35AmpIzVABQLtcI2Y=
github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
github.com/osbuild/images v0.72.0 h1:WKRXokWCpkS6zusc2yadXbW5jQD4cNvBMHZUV/8L4jM=
github.com/osbuild/images v0.72.0/go.mod h1:McWtOOsi/2gH2J4tQ8Vnvg+O3jVf44XIj3jUWJZt47E=
github.com/osbuild/images v0.74.0 h1:V3biAGZ+9z10DnCJAMXYalIlcVLmWNMiaZXSHwl+N5s=
github.com/osbuild/images v0.74.0/go.mod h1:DTCMxDrZSkrgQdz+Kda+rSvCXk1453lNMupBFIE+3d8=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1 h1:UFEJIcPa46W8gtWgOYzriRKYyy1t6SWL0BI7fPTuVvc=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1/go.mod h1:z+WA+dX6qMwc7fqY5jCzESDIlg4WR2sBQezxsoXv9Ik=
github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8=
@@ -496,8 +495,8 @@ github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZ
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw=
github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -524,8 +523,8 @@ github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc
github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8=
github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4=
github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs=
github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w=
github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
@@ -558,8 +557,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/sylabs/sif/v2 v2.16.0 h1:2eqaBaQQsn5DZTzm3QZm0HupZQEjNXfxRnCmtyCihEU=
github.com/sylabs/sif/v2 v2.16.0/go.mod h1:d5TxgD/mhMUU3kWLmZmWJQ99Wg0asaTP0bq3ezR1xpg=
github.com/sylabs/sif/v2 v2.18.0 h1:eXugsS1qx7St2Wu/AJ21KnsQiVCpouPlTigABh+6KYI=
github.com/sylabs/sif/v2 v2.18.0/go.mod h1:GOQj7LIBqp15fjqH5i8ZEbLp8SXJi9S+xbRO+QQAdRo=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
@@ -578,8 +577,8 @@ github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQ
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vbauerster/mpb/v8 v8.7.3 h1:n/mKPBav4FFWp5fH4U0lPpXfiOmCEgl5Yx/NM3tKJA0=
github.com/vbauerster/mpb/v8 v8.7.3/go.mod h1:9nFlNpDGVoTmQ4QvNjSLtwLmAFjwmq0XaAF26toHGNM=
github.com/vbauerster/mpb/v8 v8.7.4 h1:p4f16iMfUt3PkAC73SCzAtgtSf8TYDqEbJUT3odPrPo=
github.com/vbauerster/mpb/v8 v8.7.4/go.mod h1:r1B5k2Ljj5KJFCekfihbiqyV4VaaRTANYmvWA2btufI=
github.com/vmware/govmomi v0.39.0 h1:soLZ08Q2zvjRSinNup8xVlw0KDDCJPPA1rIDmBhi7As=
github.com/vmware/govmomi v0.39.0/go.mod h1:oHzAQ1r6152zYDGcUqeK+EO8LhKo5wjtvWZBGHws2Hc=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
@@ -648,8 +647,8 @@ golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZP
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -774,19 +773,19 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI=
google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8=
google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q=
google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240722135656-d784300faade h1:lKFsS7wpngDgSCeFn7MoLy+wBDQZ1UQIJD4UNM1Qvkg=
google.golang.org/genproto v0.0.0-20240722135656-d784300faade/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY=
google.golang.org/genproto/googleapis/api v0.0.0-20240722135656-d784300faade h1:WxZOF2yayUHpHSbUE6NMzumUzBxYc3YGwo0YHnbzsJY=
google.golang.org/genproto/googleapis/api v0.0.0-20240722135656-d784300faade/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade h1:oCRSWfwGXQsqlVdErcyTt4A93Y8fo0/9D4b1gnI++qo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY=
google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M=
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk=
google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
@@ -810,8 +809,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs=
gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=


@@ -1,5 +1,14 @@
# Changelog
## [0.7.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.2...auth/v0.7.3) (2024-08-01)
### Bug Fixes
* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
* **auth:** Disable automatic universe domain check for MDS ([#10620](https://github.com/googleapis/google-cloud-go/issues/10620)) ([7cea5ed](https://github.com/googleapis/google-cloud-go/commit/7cea5edd5a0c1e6bca558696f5607879141910e8))
* **auth:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
## [0.7.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.1...auth/v0.7.2) (2024-07-22)


@@ -101,6 +101,20 @@ func (t *Token) IsValid() bool {
return t.isValidWithEarlyExpiry(defaultExpiryDelta)
}
// MetadataString is a convenience method for accessing string values in the
// token's metadata. Returns an empty string if the metadata is nil or the value
// for the given key cannot be cast to a string.
func (t *Token) MetadataString(k string) string {
if t.Metadata == nil {
return ""
}
s, ok := t.Metadata[k].(string)
if !ok {
return ""
}
return s
}
func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool {
if t.isEmpty() {
return false

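Below is a minimal sketch of how the new accessor behaves, assuming the vendored cloud.google.com/go/auth package; the metadata key is the one used by the directpath check further down, and the token value is invented.

package main

import (
	"fmt"

	"cloud.google.com/go/auth"
)

func main() {
	tok := &auth.Token{
		Value: "ya29.example", // invented token value
		Metadata: map[string]interface{}{
			"auth.google.tokenSource": "compute-metadata", // string value, returned as-is
			"retries":                 3,                  // non-string value
		},
	}
	fmt.Println(tok.MetadataString("auth.google.tokenSource")) // "compute-metadata"
	fmt.Println(tok.MetadataString("retries") == "")           // true: value is not a string
	fmt.Println(tok.MetadataString("missing") == "")           // true: key absent
	var empty auth.Token
	fmt.Println(empty.MetadataString("x") == "") // true: nil metadata map
}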

@@ -66,10 +66,10 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool
if tok == nil {
return false
}
if source, _ := tok.Metadata["auth.google.tokenSource"].(string); source != "compute-metadata" {
if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" {
return false
}
if acct, _ := tok.Metadata["auth.google.serviceAccount"].(string); acct != "default" {
if tok.MetadataString("auth.google.serviceAccount") != "default" {
return false
}
return true


@@ -337,17 +337,19 @@ func (c *grpcCredentialsProvider) getClientUniverseDomain() string {
}
func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx)
if err != nil {
return nil, err
}
if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
return nil, err
}
token, err := c.creds.Token(ctx)
if err != nil {
return nil, err
}
if token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx)
if err != nil {
return nil, err
}
if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
return nil, err
}
}
if c.secure {
ri, _ := grpccreds.RequestInfoFromContext(ctx)
if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil {


@@ -193,17 +193,19 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
}
}()
}
credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
if err != nil {
return nil, err
}
if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
return nil, err
}
token, err := t.creds.Token(req.Context())
if err != nil {
return nil, err
}
if token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
if err != nil {
return nil, err
}
if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
return nil, err
}
}
req2 := req.Clone(req.Context())
SetAuthHeader(token, req2)
reqBodyClosed = true

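(In both hunks above, grpcCredentialsProvider.GetRequestMetadata and authTransport.RoundTrip, the universe-domain validation now runs only when the token's auth.google.tokenSource metadata is not "compute-metadata"; this is the auth 0.7.3 changelog fix "Disable automatic universe domain check for MDS" applied to both transports.)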

@@ -1,6 +1,20 @@
# Changes
## [1.1.12](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.11...iam/v1.1.12) (2024-07-24)
### Bug Fixes
* **iam:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
## [1.1.11](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.10...iam/v1.1.11) (2024-07-10)
### Bug Fixes
* **iam:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
## [1.1.10](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.9...iam/v1.1.10) (2024-07-01)

vendor/github.com/Microsoft/hcsshim/hnsaccelnet.go (generated, vendored, new file)

@@ -0,0 +1,46 @@
//go:build windows
package hcsshim
import (
"errors"
"github.com/Microsoft/hcsshim/internal/hns"
)
// HNSNnvManagementMacAddress represents management mac address
// which needs to be excluded from VF reassignment
type HNSNnvManagementMacAddress = hns.HNSNnvManagementMacAddress
// HNSNnvManagementMacList represents a list of management
// mac addresses for exclusion from VF reassignment
type HNSNnvManagementMacList = hns.HNSNnvManagementMacList
var (
ErrorEmptyMacAddressList = errors.New("management mac_address list is empty")
)
// SetNnvManagementMacAddresses sets a list of
// management mac addresses in hns for exclusion from VF reassignment.
func SetNnvManagementMacAddresses(managementMacAddresses []string) (*HNSNnvManagementMacList, error) {
if len(managementMacAddresses) == 0 {
return nil, ErrorEmptyMacAddressList
}
nnvManagementMacList := &HNSNnvManagementMacList{}
for _, mac := range managementMacAddresses {
nnvManagementMacList.MacAddressList = append(nnvManagementMacList.MacAddressList, HNSNnvManagementMacAddress{MacAddress: mac})
}
return nnvManagementMacList.Set()
}
// GetNnvManagementMacAddresses retrieves a list of
// management mac addresses in hns for exclusion from VF reassignment.
func GetNnvManagementMacAddresses() (*HNSNnvManagementMacList, error) {
return hns.GetNnvManagementMacAddressList()
}
// DeleteNnvManagementMacAddresses delete list of
// management mac addresses in hns which are excluded from VF reassignment.
func DeleteNnvManagementMacAddresses() (*HNSNnvManagementMacList, error) {
return hns.DeleteNnvManagementMacAddressList()
}

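A sketch of how the new accelnet helpers might be called (Windows-only; the MAC address and program shape are invented for illustration):

//go:build windows

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// Exclude a management NIC (hypothetical MAC) from VF reassignment.
	if _, err := hcsshim.SetNnvManagementMacAddresses([]string{"00-15-5D-00-00-01"}); err != nil {
		log.Fatal(err)
	}
	// Read the exclusion list back from HNS.
	list, err := hcsshim.GetNnvManagementMacAddresses()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range list.MacAddressList {
		fmt.Println(m.MacAddress)
	}
	// Clear the exclusion list again.
	if _, err := hcsshim.DeleteNnvManagementMacAddresses(); err != nil {
		log.Fatal(err)
	}
}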

@@ -0,0 +1,60 @@
//go:build windows
package hns
import (
"encoding/json"
"github.com/sirupsen/logrus"
)
// HNSNnvManagementMacAddress represents management mac address
// which needs to be excluded from VF reassignment
type HNSNnvManagementMacAddress struct {
MacAddress string `json:",omitempty"`
}
// HNSNnvManagementMacList represents a list of management
// mac addresses for exclusion from VF reassignment
type HNSNnvManagementMacList struct {
MacAddressList []HNSNnvManagementMacAddress `json:",omitempty"`
}
// HNSNnvManagementMacRequest makes a HNS call to modify/query NnvManagementMacList
func HNSNnvManagementMacRequest(method, path, request string) (*HNSNnvManagementMacList, error) {
nnvManagementMacList := &HNSNnvManagementMacList{}
err := hnsCall(method, "/accelnet/"+path, request, &nnvManagementMacList)
if err != nil {
return nil, err
}
return nnvManagementMacList, nil
}
// Set ManagementMacAddressList by sending "POST" NnvManagementMacRequest to HNS.
func (nnvManagementMacList *HNSNnvManagementMacList) Set() (*HNSNnvManagementMacList, error) {
operation := "Set"
title := "hcsshim::nnvManagementMacList::" + operation
logrus.Debugf(title+" id=%s", nnvManagementMacList.MacAddressList)
jsonString, err := json.Marshal(nnvManagementMacList)
if err != nil {
return nil, err
}
return HNSNnvManagementMacRequest("POST", "", string(jsonString))
}
// Get ManagementMacAddressList by sending "GET" NnvManagementMacRequest to HNS.
func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) {
operation := "Get"
title := "hcsshim::nnvManagementMacList::" + operation
logrus.Debugf(title)
return HNSNnvManagementMacRequest("GET", "", "")
}
// Delete ManagementMacAddressList by sending "DELETE" NnvManagementMacRequest to HNS.
func DeleteNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) {
operation := "Delete"
title := "hcsshim::nnvManagementMacList::" + operation
logrus.Debugf(title)
return HNSNnvManagementMacRequest("DELETE", "", "")
}


@@ -10,6 +10,28 @@ import (
"github.com/sirupsen/logrus"
)
// EndpointState represents the states of an HNS Endpoint lifecycle.
type EndpointState uint16
// EndpointState const
// The lifecycle of an Endpoint goes through created, attached, AttachedSharing - endpoint is being shared with other containers,
// detached, after being attached, degraded and finally destroyed.
// Note: This attribute is used by calico to define stale containers and is dependent on HNS v1 api; if we move to HNS v2 api we will need
// to update the current calico code and coordinate the change with calico. Reach out to Microsoft to facilitate the change via HNS.
const (
Uninitialized EndpointState = iota
Created EndpointState = 1
Attached EndpointState = 2
AttachedSharing EndpointState = 3
Detached EndpointState = 4
Degraded EndpointState = 5
Destroyed EndpointState = 6
)
func (es EndpointState) String() string {
return [...]string{"Uninitialized", "Attached", "AttachedSharing", "Detached", "Degraded", "Destroyed"}[es]
}
// HNSEndpoint represents a network endpoint in HNS
type HNSEndpoint struct {
Id string `json:"ID,omitempty"`
@@ -34,6 +56,7 @@ type HNSEndpoint struct {
Namespace *Namespace `json:",omitempty"`
EncapOverhead uint16 `json:",omitempty"`
SharedContainers []string `json:",omitempty"`
State EndpointState `json:",omitempty"`
}
// SystemType represents the type of the system on which actions are done


@@ -57,9 +57,10 @@ type PaPolicy struct {
type OutboundNatPolicy struct {
Policy
VIP string `json:"VIP,omitempty"`
Exceptions []string `json:"ExceptionList,omitempty"`
Destinations []string `json:",omitempty"`
VIP string `json:"VIP,omitempty"`
Exceptions []string `json:"ExceptionList,omitempty"`
Destinations []string `json:",omitempty"`
MaxPortPoolUsage uint16 `json:",omitempty"`
}
type ProxyPolicy struct {

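To illustrate the wire effect of the new field, here is a local mirror of the struct (the vendored hns package is internal and cannot be imported, and the embedded Policy header is omitted); only the JSON tags are taken from the hunk above:

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for hns.OutboundNatPolicy, for illustration only.
type outboundNatPolicy struct {
	VIP              string   `json:"VIP,omitempty"`
	Exceptions       []string `json:"ExceptionList,omitempty"`
	Destinations     []string `json:",omitempty"`
	MaxPortPoolUsage uint16   `json:",omitempty"`
}

func main() {
	a, _ := json.Marshal(outboundNatPolicy{VIP: "10.0.0.1", MaxPortPoolUsage: 80})
	b, _ := json.Marshal(outboundNatPolicy{VIP: "10.0.0.1"})
	fmt.Println(string(a)) // {"VIP":"10.0.0.1","MaxPortPoolUsage":80}
	fmt.Println(string(b)) // {"VIP":"10.0.0.1"}: the zero value is omitted
}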

@@ -9503,6 +9503,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -32566,6 +32572,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "ca-west-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},


@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.55.2"
const SDKVersion = "1.55.5"


@@ -124700,9 +124700,38 @@ type FleetLaunchTemplateOverrides struct {
// The Availability Zone in which to launch the instances.
AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
// The ID of the AMI. An AMI is required to launch an instance. This parameter
// is only available for fleets of type instant. For fleets of type maintain
// and request, you must specify the AMI ID in the launch template.
// The ID of the AMI in the format ami-17characters00000.
//
// Alternatively, you can specify a Systems Manager parameter, using one of
// the following formats. The Systems Manager parameter will resolve to an AMI
// ID on launch.
//
// To reference a public parameter:
//
// * resolve:ssm:public-parameter
//
// To reference a parameter stored in the same account:
//
// * resolve:ssm:parameter-name
//
// * resolve:ssm:parameter-name:version-number
//
// * resolve:ssm:parameter-name:label
//
// To reference a parameter shared from another Amazon Web Services account:
//
// * resolve:ssm:parameter-ARN
//
// * resolve:ssm:parameter-ARN:version-number
//
// * resolve:ssm:parameter-ARN:label
//
// For more information, see Use a Systems Manager parameter instead of an AMI
// ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id)
// in the Amazon EC2 User Guide.
//
// This parameter is only available for fleets of type instant. For fleets of
// type maintain and request, you must specify the AMI ID in the launch template.
ImageId *string `locationName:"imageId" type:"string"`
// The attributes for the instance types. When you specify instance attributes,
@@ -124845,9 +124874,38 @@ type FleetLaunchTemplateOverridesRequest struct {
// The Availability Zone in which to launch the instances.
AvailabilityZone *string `type:"string"`
// The ID of the AMI. An AMI is required to launch an instance. This parameter
// is only available for fleets of type instant. For fleets of type maintain
// and request, you must specify the AMI ID in the launch template.
// The ID of the AMI in the format ami-17characters00000.
//
// Alternatively, you can specify a Systems Manager parameter, using one of
// the following formats. The Systems Manager parameter will resolve to an AMI
// ID on launch.
//
// To reference a public parameter:
//
// * resolve:ssm:public-parameter
//
// To reference a parameter stored in the same account:
//
// * resolve:ssm:parameter-name
//
// * resolve:ssm:parameter-name:version-number
//
// * resolve:ssm:parameter-name:label
//
// To reference a parameter shared from another Amazon Web Services account:
//
// * resolve:ssm:parameter-ARN
//
// * resolve:ssm:parameter-ARN:version-number
//
// * resolve:ssm:parameter-ARN:label
//
// For more information, see Use a Systems Manager parameter instead of an AMI
// ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id)
// in the Amazon EC2 User Guide.
//
// This parameter is only available for fleets of type instant. For fleets of
// type maintain and request, you must specify the AMI ID in the launch template.
ImageId *string `type:"string"`
// The attributes for the instance types. When you specify instance attributes,
@@ -168935,12 +168993,17 @@ type RequestLaunchTemplateData struct {
// The name or Amazon Resource Name (ARN) of an IAM instance profile.
IamInstanceProfile *LaunchTemplateIamInstanceProfileSpecificationRequest `type:"structure"`
// The ID of the AMI. Alternatively, you can specify a Systems Manager parameter,
// which will resolve to an AMI ID on launch.
// The ID of the AMI in the format ami-17characters00000.
//
// Valid formats:
// Alternatively, you can specify a Systems Manager parameter, using one of
// the following formats. The Systems Manager parameter will resolve to an AMI
// ID on launch.
//
// * ami-17characters00000
// To reference a public parameter:
//
// * resolve:ssm:public-parameter
//
// To reference a parameter stored in the same account:
//
// * resolve:ssm:parameter-name
//
@@ -168948,15 +169011,26 @@ //
//
// * resolve:ssm:parameter-name:label
//
// * resolve:ssm:public-parameter
// To reference a parameter shared from another Amazon Web Services account:
//
// Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager
// parameter. If the launch template will be used by an EC2 Fleet or Spot Fleet,
// you must specify the AMI ID.
// * resolve:ssm:parameter-ARN
//
// * resolve:ssm:parameter-ARN:version-number
//
// * resolve:ssm:parameter-ARN:label
//
// For more information, see Use a Systems Manager parameter instead of an AMI
// ID (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id)
// in the Amazon EC2 User Guide.
//
// If the launch template will be used for an EC2 Fleet or Spot Fleet, note
// the following:
//
// * Only EC2 Fleets of type instant support specifying a Systems Manager
// parameter.
//
// * For EC2 Fleets of type maintain or request, or for Spot Fleets, you
// must specify the AMI ID.
ImageId *string `type:"string"`
// Indicates whether an instance stops or terminates when you initiate shutdown
@@ -201114,9 +201188,6 @@ const (
// ResourceTypeVpcBlockPublicAccessExclusion is a ResourceType enum value
ResourceTypeVpcBlockPublicAccessExclusion = "vpc-block-public-access-exclusion"
// ResourceTypeVpcEncryptionControl is a ResourceType enum value
ResourceTypeVpcEncryptionControl = "vpc-encryption-control"
// ResourceTypeIpamResourceDiscovery is a ResourceType enum value
ResourceTypeIpamResourceDiscovery = "ipam-resource-discovery"
@@ -201216,7 +201287,6 @@ func ResourceType_Values() []string {
ResourceTypeVerifiedAccessTrustProvider,
ResourceTypeVpnConnectionDeviceType,
ResourceTypeVpcBlockPublicAccessExclusion,
ResourceTypeVpcEncryptionControl,
ResourceTypeIpamResourceDiscovery,
ResourceTypeIpamResourceDiscoveryAssociation,
ResourceTypeInstanceConnectEndpoint,

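A sketch of the resolve:ssm: usage that the reworked ImageId documentation above describes, against aws-sdk-go v1; the template name and parameter path are invented:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))
	// The AMI is resolved from a Systems Manager parameter at launch
	// time instead of being hard-coded as an ami-... ID.
	_, err := svc.CreateLaunchTemplate(&ec2.CreateLaunchTemplateInput{
		LaunchTemplateName: aws.String("example-template"), // hypothetical name
		LaunchTemplateData: &ec2.RequestLaunchTemplateData{
			ImageId:      aws.String("resolve:ssm:/images/golden-ami"), // hypothetical parameter
			InstanceType: aws.String("t3.micro"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}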

@@ -73,7 +73,7 @@ type bpCompressionStepData struct {
operation bpcOperation // What we are actually doing
uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
srcCompressorName string // Compressor name to record in the blob info cache for the source blob.
uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob.
closers []io.Closer // Objects to close after the upload is done, if any.
@@ -323,7 +323,11 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
}
}
if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
if d.srcCompressorName == "" || d.uploadedCompressorName == "" {
return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)",
d.srcCompressorName, d.uploadedCompressorName)
}
if d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Don't record zstd:chunked algorithms.
// There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
@@ -337,7 +341,7 @@ }
}
}
if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
d.srcCompressorName != internalblobinfocache.UnknownCompression {
if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Don't record zstd:chunked algorithms, see above.
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)


@@ -409,7 +409,6 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) {
srcInfos := ic.src.LayerInfos()
numLayers := len(srcInfos)
updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
if err != nil {
return nil, err
@@ -440,7 +439,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
// copyGroup is used to determine if all layers are copied
copyGroup := sync.WaitGroup{}
data := make([]copyLayerData, numLayers)
data := make([]copyLayerData, len(srcInfos))
copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
defer copyGroup.Done()
@@ -463,9 +462,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
// Decide which layers to encrypt
layersToEncrypt := set.New[int]()
var encryptAll bool
if ic.c.options.OciEncryptLayers != nil {
encryptAll = len(*ic.c.options.OciEncryptLayers) == 0
totalLayers := len(srcInfos)
for _, l := range *ic.c.options.OciEncryptLayers {
switch {
@@ -478,7 +475,7 @@ }
}
}
if encryptAll {
if len(*ic.c.options.OciEncryptLayers) == 0 { // “encrypt all layers”
for i := 0; i < len(srcInfos); i++ {
layersToEncrypt.Add(i)
}
@@ -493,8 +490,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
defer copyGroup.Wait()
for i, srcLayer := range srcInfos {
err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1)
if err != nil {
if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
return fmt.Errorf("copying layer: %w", err)
}
@ -509,8 +505,8 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
}
compressionAlgos := set.New[string]()
destInfos := make([]types.BlobInfo, numLayers)
diffIDs := make([]digest.Digest, numLayers)
destInfos := make([]types.BlobInfo, len(srcInfos))
diffIDs := make([]digest.Digest, len(srcInfos))
for i, cld := range data {
if cld.err != nil {
return nil, cld.err
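The copyLayers changes above drop the cached numLayers and inline the semaphore acquisition. The underlying pattern, a weighted semaphore bounding concurrency plus a WaitGroup joining the workers, looks roughly like this sketch (the layer names and the limit of two are illustrative):

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(2) // at most two concurrent layer copies
	var copyGroup sync.WaitGroup
	srcInfos := []string{"layer-a", "layer-b", "layer-c", "layer-d"}
	data := make([]string, len(srcInfos)) // sized from the source slice, as in the patch
	for i, layer := range srcInfos {
		if err := sem.Acquire(ctx, 1); err != nil {
			// Acquire can only fail with ctx.Err(), so no need to blame the semaphore.
			fmt.Println("copying layer:", err)
			return
		}
		copyGroup.Add(1)
		go func(i int, layer string) {
			defer sem.Release(1)
			defer copyGroup.Done()
			data[i] = "copied " + layer // the real worker calls copyLayer here
		}(i, layer)
	}
	copyGroup.Wait()
	fmt.Println(data)
}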


@ -86,11 +86,9 @@ type extensionSignatureList struct {
Signatures []extensionSignature `json:"signatures"`
}
// bearerToken records a cached token we can use to authenticate.
type bearerToken struct {
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
token string
expirationTime time.Time
}
@ -147,25 +145,6 @@ const (
noAuth
)
func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
token := new(bearerToken)
if err := json.Unmarshal(blob, &token); err != nil {
return nil, err
}
if token.Token == "" {
token.Token = token.AccessToken
}
if token.ExpiresIn < minimumTokenLifetimeSeconds {
token.ExpiresIn = minimumTokenLifetimeSeconds
logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
}
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
return token, nil
}
// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
if sys != nil && sys.DockerCertPath != "" {
@ -774,7 +753,7 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
token = *t
c.tokenCache.Store(cacheKey, token)
}
registryToken = token.Token
registryToken = token.token
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken))
return nil
@ -827,12 +806,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
return nil, err
}
tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
if err != nil {
return nil, err
}
return newBearerTokenFromJSONBlob(tokenBlob)
return newBearerTokenFromHTTPResponseBody(res)
}
func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
@ -878,12 +852,50 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
if err := httpResponseToError(res, "Requesting bearer token"); err != nil {
return nil, err
}
tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
return newBearerTokenFromHTTPResponseBody(res)
}
// newBearerTokenFromHTTPResponseBody parses a http.Response to obtain a bearerToken.
// The caller is still responsible for ensuring res.Body is closed.
func newBearerTokenFromHTTPResponseBody(res *http.Response) (*bearerToken, error) {
blob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
if err != nil {
return nil, err
}
return newBearerTokenFromJSONBlob(tokenBlob)
var token struct {
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
expirationTime time.Time
}
if err := json.Unmarshal(blob, &token); err != nil {
const bodySampleLength = 50
bodySample := blob
if len(bodySample) > bodySampleLength {
bodySample = bodySample[:bodySampleLength]
}
return nil, fmt.Errorf("decoding bearer token (last URL %q, body start %q): %w", res.Request.URL.Redacted(), string(bodySample), err)
}
bt := &bearerToken{
token: token.Token,
}
if bt.token == "" {
bt.token = token.AccessToken
}
if token.ExpiresIn < minimumTokenLifetimeSeconds {
token.ExpiresIn = minimumTokenLifetimeSeconds
logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
}
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
bt.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
return bt, nil
}
// detectPropertiesHelper performs the work of detectProperties which executes
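To make the new helper's behavior concrete, here is a self-contained sketch of the same parsing rules on a sample body: "token" wins, "access_token" is the fallback, and short lifetimes are clamped (60 seconds is assumed here for minimumTokenLifetimeSeconds):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

const minimumLifetime = 60 // stand-in for minimumTokenLifetimeSeconds

func main() {
	blob := []byte(`{"access_token":"abc","expires_in":10,"issued_at":"2024-01-01T00:00:00Z"}`)
	var token struct {
		Token       string    `json:"token"`
		AccessToken string    `json:"access_token"`
		ExpiresIn   int       `json:"expires_in"`
		IssuedAt    time.Time `json:"issued_at"`
	}
	if err := json.Unmarshal(blob, &token); err != nil {
		panic(err)
	}
	bearer := token.Token
	if bearer == "" {
		bearer = token.AccessToken // fall back to the OAuth2 field name
	}
	if token.ExpiresIn < minimumLifetime {
		token.ExpiresIn = minimumLifetime // clamp unreasonably short lifetimes
	}
	expires := token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
	fmt.Println(bearer, expires) // abc 2024-01-01 00:01:00 +0000 UTC
}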


@ -361,8 +361,6 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
}
if !candidate.UnknownLocation {
if candidate.CompressionAlgorithm != nil {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name())
} else {


@ -1,7 +1,9 @@
package docker
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
@ -11,6 +13,7 @@ import (
"net/http"
"net/url"
"os"
"os/exec"
"strings"
"sync"
@ -162,6 +165,34 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica
client.Close()
return nil, err
}
if h, err := sysregistriesv2.AdditionalLayerStoreAuthHelper(endpointSys); err == nil && h != "" {
acf := map[string]struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
IdentityToken string `json:"identityToken,omitempty"`
}{
physicalRef.ref.String(): {
Username: client.auth.Username,
Password: client.auth.Password,
IdentityToken: client.auth.IdentityToken,
},
}
acfD, err := json.Marshal(acf)
if err != nil {
logrus.Warnf("failed to marshal auth config: %v", err)
} else {
cmd := exec.Command(h)
cmd.Stdin = bytes.NewReader(acfD)
if err := cmd.Run(); err != nil {
var stderr string
if ee, ok := err.(*exec.ExitError); ok {
stderr = string(ee.Stderr)
}
logrus.Warnf("Failed to call additional-layer-store-auth-helper (stderr:%s): %v", stderr, err)
}
}
}
return s, nil
}
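For context, the helper binary invoked above receives the marshalled credential map on its stdin. A hypothetical helper honoring that contract could be as small as this (the hand-off to the actual layer store is elided):

package main

import (
	"encoding/json"
	"log"
	"os"
)

type creds struct {
	Username      string `json:"username,omitempty"`
	Password      string `json:"password,omitempty"`
	IdentityToken string `json:"identityToken,omitempty"`
}

func main() {
	acf := map[string]creds{} // keyed by image reference, as marshalled above
	if err := json.NewDecoder(os.Stdin).Decode(&acf); err != nil {
		log.Fatalf("decoding auth config: %v", err)
	}
	for ref, c := range acf {
		log.Printf("received credentials for %s (user %q)", ref, c.Username)
		// ... forward c to the Additional Layer Store here ...
	}
}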


@ -1,6 +1,7 @@
package manifest
import (
"bytes"
"encoding/json"
"fmt"
"maps"
@ -296,29 +297,51 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation
},
}
for i, component := range components {
var platform *imgspecv1.Platform
if component.Platform != nil {
platformCopy := ociPlatformClone(*component.Platform)
platform = &platformCopy
}
m := imgspecv1.Descriptor{
MediaType: component.MediaType,
ArtifactType: component.ArtifactType,
Size: component.Size,
Digest: component.Digest,
URLs: slices.Clone(component.URLs),
Annotations: maps.Clone(component.Annotations),
Platform: platform,
}
index.Manifests[i] = m
index.Manifests[i] = oci1DescriptorClone(component)
}
return &index
}
func oci1DescriptorClone(d imgspecv1.Descriptor) imgspecv1.Descriptor {
var platform *imgspecv1.Platform
if d.Platform != nil {
platformCopy := ociPlatformClone(*d.Platform)
platform = &platformCopy
}
return imgspecv1.Descriptor{
MediaType: d.MediaType,
Digest: d.Digest,
Size: d.Size,
URLs: slices.Clone(d.URLs),
Annotations: maps.Clone(d.Annotations),
Data: bytes.Clone(d.Data),
Platform: platform,
ArtifactType: d.ArtifactType,
}
}
// OCI1IndexPublicClone creates a deep copy of the passed-in index.
// This is publicly visible as c/image/manifest.OCI1IndexClone.
func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic {
return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations)
var subject *imgspecv1.Descriptor
if index.Subject != nil {
s := oci1DescriptorClone(*index.Subject)
subject = &s
}
manifests := make([]imgspecv1.Descriptor, len(index.Manifests))
for i, m := range index.Manifests {
manifests[i] = oci1DescriptorClone(m)
}
return &OCI1IndexPublic{
Index: imgspecv1.Index{
Versioned: index.Versioned,
MediaType: index.MediaType,
ArtifactType: index.ArtifactType,
Manifests: manifests,
Subject: subject,
Annotations: maps.Clone(index.Annotations),
},
}
}
// ToOCI1Index returns the index encoded as an OCI1 index.
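The point of oci1DescriptorClone is that the result shares no mutable state with its input. A condensed sketch of the same idea against the real imgspecv1 types, showing the property it buys (the full version also deep-copies the platform sub-fields):

package main

import (
	"bytes"
	"fmt"
	"maps"
	"slices"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func clone(d imgspecv1.Descriptor) imgspecv1.Descriptor {
	// d is already a shallow copy; fix up the reference-typed fields.
	d.URLs = slices.Clone(d.URLs)
	d.Annotations = maps.Clone(d.Annotations)
	d.Data = bytes.Clone(d.Data)
	if d.Platform != nil {
		p := *d.Platform
		d.Platform = &p
	}
	return d
}

func main() {
	orig := imgspecv1.Descriptor{Annotations: map[string]string{"k": "v"}}
	c := clone(orig)
	c.Annotations["k"] = "changed"
	fmt.Println(orig.Annotations["k"]) // still "v": the clone is independent
}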


@ -74,3 +74,15 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
logrus.Debugf("Using SQLite blob info cache at %s", path)
return cache
}
// CleanupDefaultCache removes the blob info cache directory.
// It deletes the cache directory, but does not affect any file or memory buffer currently
// in use.
func CleanupDefaultCache(sys *types.SystemContext) error {
dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
if err != nil {
// Mirror the DefaultCache behavior that does not fail in this case
return nil
}
return os.RemoveAll(dir)
}
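A hedged usage sketch for the new function, assuming the public pkg/blobinfocache import path; passing a nil SystemContext selects the defaults:

package main

import (
	"log"

	"github.com/containers/image/v5/pkg/blobinfocache"
)

func main() {
	cache := blobinfocache.DefaultCache(nil) // opens (or creates) the default cache
	_ = cache                                // ... use it during a copy ...
	if err := blobinfocache.CleanupDefaultCache(nil); err != nil {
		log.Printf("cleaning up blob info cache: %v", err)
	}
}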


@ -27,7 +27,7 @@ type cache struct {
uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown (not blobinfocache.UnknownCompression), for each digest
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
}
// New returns a BlobInfoCache implementation which is in-memory only.


@ -248,6 +248,11 @@ type V2RegistriesConf struct {
// potentially use all unqualified-search registries
ShortNameMode string `toml:"short-name-mode"`
// AdditionalLayerStoreAuthHelper is a helper binary that receives
// registry credentials and passes them to the Additional Layer Store for
// registry authentication. These credentials are only collected when pulling (not pushing).
AdditionalLayerStoreAuthHelper string `toml:"additional-layer-store-auth-helper"`
shortNameAliasConf
// If you add any field, make sure to update Nonempty() below.
@ -825,6 +830,16 @@ func CredentialHelpers(sys *types.SystemContext) ([]string, error) {
return config.partialV2.CredentialHelpers, nil
}
// AdditionalLayerStoreAuthHelper returns the helper for passing registry
// credentials to the Additional Layer Store.
func AdditionalLayerStoreAuthHelper(sys *types.SystemContext) (string, error) {
config, err := getConfig(sys)
if err != nil {
return "", err
}
return config.partialV2.AdditionalLayerStoreAuthHelper, nil
}
// refMatchingSubdomainPrefix returns the length of ref
// iff ref, which is a registry, repository namespace, repository or image reference (as formatted by
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
@ -1051,6 +1066,11 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
c.shortNameMode = updates.shortNameMode
}
// == Merge AdditionalLayerStoreAuthHelper:
if updates.partialV2.AdditionalLayerStoreAuthHelper != "" {
c.partialV2.AdditionalLayerStoreAuthHelper = updates.partialV2.AdditionalLayerStoreAuthHelper
}
// == Merge aliasCache:
// We don't maintain (in fact we actively clear) c.partialV2.shortNameAliasConf.
c.aliasCache.updateWithConfigurationFrom(updates.aliasCache)


@ -15,6 +15,7 @@ import (
"github.com/containers/image/v5/signature/internal"
"github.com/containers/storage/pkg/homedir"
// This is a fallback code; the primary recommendation is to use the gpgme mechanism
// implementation, which is out-of-process and more appropriate for handling long-term private key material
// than any Go implementation.
@ -150,7 +151,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
}
if md.SignedBy == nil {
return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", md.Signature))
return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Key not found for key ID %x in signature", md.SignedByKeyId))
}
if md.Signature != nil {
if md.Signature.SigLifetimeSecs != nil {


@ -325,7 +325,13 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
if out.UncompressedDigest != "" {
// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
// responsible for ensuring blobDigest has been validated.
if out.CompressedDigest != blobDigest {
return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q",
out.CompressedDigest, blobDigest)
}
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
// We trust ApplyDiffWithDiffer to validate or create both values correctly.
options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
} else {
// Don't identify layers by TOC if UncompressedDigest is available.
// - Using UncompressedDigest allows image reuse with non-partially-pulled layers


@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 31
VersionMinor = 32
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""


@ -7,7 +7,7 @@ linters:
- goimports
- revive
- ineffassign
- vet
- govet
- unused
- misspell


@ -96,9 +96,8 @@ func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) {
return v, ok
} else if v, ok := lbco.Private.CipherOptions[key]; ok {
return v, ok
} else {
return nil, false
}
return nil, false
}
func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer {


@ -79,9 +79,8 @@ func GuessGPGVersion() GPGVersion {
return GPGv2
} else if err := exec.Command("gpg", "--version").Run(); err == nil {
return GPGv1
} else {
return GPGVersionUndetermined
}
return GPGVersionUndetermined
}
// NewGPGClient creates a new GPGClient object representing the given version


@ -24,7 +24,7 @@ import (
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
"github.com/go-jose/go-jose/v3"
"github.com/go-jose/go-jose/v4"
)
type jweKeyWrapper struct {
@ -65,7 +65,11 @@ func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]
}
func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) {
jwe, err := jose.ParseEncrypted(string(jweString))
// cf. list of algorithms in func addPubKeys() below
keyEncryptionAlgorithms := []jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256, jose.ECDH_ES_A128KW, jose.ECDH_ES_A192KW, jose.ECDH_ES_A256KW}
// accept all algorithms defined in RFC 7518, section 5.1
contentEncryption := []jose.ContentEncryption{jose.A128CBC_HS256, jose.A192CBC_HS384, jose.A256CBC_HS512, jose.A128GCM, jose.A192GCM, jose.A256GCM}
jwe, err := jose.ParseEncrypted(string(jweString), keyEncryptionAlgorithms, contentEncryption)
if err != nil {
return nil, errors.New("jose.ParseEncrypted failed")
}
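This hunk adapts to the go-jose v4 API, where ParseEncrypted requires explicit allow-lists instead of trusting whatever algorithms the token header names. A minimal sketch of the new call shape (the algorithm choices here are illustrative):

package main

import (
	"fmt"

	"github.com/go-jose/go-jose/v4"
)

func parseJWE(jweString string) (*jose.JSONWebEncryption, error) {
	keyAlgs := []jose.KeyAlgorithm{jose.RSA_OAEP, jose.RSA_OAEP_256}
	contentEncs := []jose.ContentEncryption{jose.A128GCM, jose.A256GCM}
	// v4 rejects tokens using any algorithm outside these lists up front.
	jwe, err := jose.ParseEncrypted(jweString, keyAlgs, contentEncs)
	if err != nil {
		return nil, fmt.Errorf("jose.ParseEncrypted failed: %w", err)
	}
	return jwe, nil
}

func main() {
	_, err := parseJWE("not-a-jwe")
	fmt.Println(err)
}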


@ -124,9 +124,8 @@ func (kw *keyProviderKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []b
}
return protocolOuput.KeyWrapResults.Annotation, nil
} else {
return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
return nil, nil
@ -162,9 +161,8 @@ func (kw *keyProviderKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jsonString
}
return protocolOuput.KeyUnwrapResults.OptsData, nil
} else {
return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
return nil, errors.New("Unsupported keyprovider invocation. Supported invocation methods are grpc and cmd")
}
func getProviderGRPCOutput(input []byte, connString string, operation KeyProviderKeyWrapProtocolOperation) (*KeyProviderKeyWrapProtocolOutput, error) {


@ -26,7 +26,7 @@ import (
"strings"
"github.com/containers/ocicrypt/crypto/pkcs11"
"github.com/go-jose/go-jose/v3"
"github.com/go-jose/go-jose/v4"
"golang.org/x/crypto/openpgp"
)


@ -23,7 +23,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20240513t140131z-f40f39d13"
IMAGE_SUFFIX: "c20240529t141726z-f40f39d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"


@ -1,11 +1,7 @@
---
run:
concurrency: 6
deadline: 5m
skip-dirs-use-default: true
timeout: 5m
linters:
enable:
- gofumpt
disable:
- errcheck
- staticcheck


@ -53,6 +53,8 @@ local-cross cross: ## cross build the binaries for arm, darwin, and freebsd
os=`echo $${target} | cut -f1 -d/` ; \
arch=`echo $${target} | cut -f2 -d/` ; \
suffix=$${os}.$${arch} ; \
echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) ./... ; \
env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) ./... || exit 1 ; \
echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \
env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \
done


@ -1 +1 @@
1.54.0
1.55.0


@ -304,7 +304,14 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) {
archiveErr = err
}
// consume any trailer after the EOF marker
io.Copy(io.Discard, diffReader)
if _, err := io.Copy(io.Discard, diffReader); err != nil {
err = fmt.Errorf("layer %s: consume any trailer after the EOF marker: %w", layerID, err)
if isReadWrite {
report.Layers[layerID] = append(report.Layers[layerID], err)
} else {
report.ROLayers[layerID] = append(report.ROLayers[layerID], err)
}
}
wg.Done()
}(id, reader)
wg.Wait()
@ -366,7 +373,7 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) {
if options.LayerMountable {
func() {
// Mount the layer.
mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel})
mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel, Options: []string{"ro"}})
if err != nil {
err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
if isReadWrite {
@ -955,6 +962,9 @@ func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int6
mtime: mtime,
}
}
case tar.TypeXGlobalHeader:
// ignore, since even though it looks like a valid pathname, it doesn't end
// up on the filesystem
default:
// treat these as TypeReg items
delete(c.directory, components[0])
@ -966,9 +976,6 @@ func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int6
mode: mode,
mtime: mtime,
}
case tar.TypeXGlobalHeader:
// ignore, since even though it looks like a valid pathname, it doesn't end
// up on the filesystem
}
return
}
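Hoisting tar.TypeXGlobalHeader above the default case matters because such entries (for example the pax_global_header that git archive emits) carry archive metadata, not filesystem content. A standalone sketch of the skip:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

func main() {
	tr := tar.NewReader(os.Stdin) // e.g. feed it the output of git archive
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return
		}
		if err != nil {
			panic(err)
		}
		if hdr.Typeflag == tar.TypeXGlobalHeader {
			continue // never lands on the filesystem, so nothing to check
		}
		fmt.Println(hdr.Name)
	}
}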


@ -30,7 +30,6 @@ import (
"io"
"io/fs"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
@ -75,8 +74,6 @@ func init() {
type Driver struct {
sync.Mutex
root string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
pathCacheLock sync.Mutex
pathCache map[string]string
@ -129,22 +126,16 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
a := &Driver{
root: home,
uidMaps: options.UIDMaps,
gidMaps: options.GIDMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
locker: locker.New(),
mountOptions: mountOptions,
}
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
// Create the root aufs driver dir and return
// if it already exists
// If not populate the dir structure
if err := idtools.MkdirAllAs(home, 0o700, rootUID, rootGID); err != nil {
if err := os.MkdirAll(home, 0o700); err != nil {
if os.IsExist(err) {
return a, nil
}
@ -157,7 +148,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Populate the dir structure
for _, p := range paths {
if err := idtools.MkdirAllAs(path.Join(home, p), 0o700, rootUID, rootGID); err != nil {
if err := os.MkdirAll(path.Join(home, p), 0o700); err != nil {
return nil, err
}
}
@ -191,13 +182,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
// Return a nil error if the kernel supports aufs
// We cannot modprobe because inside dind modprobe fails
// to run
func supportsAufs() error {
// We can try to modprobe aufs first before looking at
// proc/filesystems for when aufs is supported
exec.Command("modprobe", "aufs").Run()
if unshare.IsRootless() {
return ErrAufsNested
}
@ -334,7 +319,7 @@ func (a *Driver) createDirsFor(id, parent string) error {
// The path of directories are <aufs_root_path>/mnt/<image_id>
// and <aufs_root_path>/diff/<image_id>
for _, p := range paths {
rootPair := idtools.NewIDMappingsFromMaps(a.uidMaps, a.gidMaps).RootPair()
rootPair := idtools.IDPair{UID: 0, GID: 0}
rootPerms := defaultPerms
if parent != "" {
st, err := system.Stat(path.Join(a.rootPath(), p, parent))
@ -355,7 +340,9 @@ func (a *Driver) createDirsFor(id, parent string) error {
// Remove will unmount and remove the given id.
func (a *Driver) Remove(id string) error {
a.locker.Lock(id)
defer a.locker.Unlock(id)
defer func() {
_ = a.locker.Unlock(id)
}()
a.pathCacheLock.Lock()
mountpoint, exists := a.pathCache[id]
a.pathCacheLock.Unlock()
@ -446,7 +433,10 @@ func atomicRemove(source string) error {
// This will mount the dir at its given path
func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
a.locker.Lock(id)
defer a.locker.Unlock(id)
defer func() {
_ = a.locker.Unlock(id)
}()
parents, err := a.getParentLayerPaths(id)
if err != nil && !os.IsNotExist(err) {
return "", err
@ -483,7 +473,10 @@ func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
// Put unmounts and updates list of active mounts.
func (a *Driver) Put(id string) error {
a.locker.Lock(id)
defer a.locker.Unlock(id)
defer func() {
_ = a.locker.Unlock(id)
}()
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
if !exists {
@ -506,7 +499,9 @@ func (a *Driver) Put(id string) error {
// For AUFS, it queries the mountpoint for this ID.
func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
a.locker.Lock(id)
defer a.locker.Unlock(id)
defer func() {
_ = a.locker.Unlock(id)
}()
a.pathCacheLock.Lock()
m, exists := a.pathCache[id]
if !exists {
@ -689,7 +684,9 @@ func (a *Driver) Cleanup() error {
func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
defer func() {
if err != nil {
Unmount(target)
if err1 := Unmount(target); err1 != nil {
logrus.Warnf("Unmount %q: %v", target, err1)
}
}
}()


@ -66,11 +66,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
return nil, fmt.Errorf("%q is not on a btrfs filesystem: %w", home, graphdriver.ErrPrerequisites)
}
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
if err := idtools.MkdirAllAs(filepath.Join(home, "subvolumes"), 0o700, rootUID, rootGID); err != nil {
if err := os.MkdirAll(filepath.Join(home, "subvolumes"), 0o700); err != nil {
return nil, err
}
@ -85,8 +81,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
driver := &Driver{
home: home,
uidMaps: options.UIDMaps,
gidMaps: options.GIDMaps,
options: opt,
}
@ -129,8 +123,6 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
type Driver struct {
// root of the file system
home string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
options btrfsOptions
quotaEnabled bool
once sync.Once
@ -481,11 +473,7 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
quotas := d.quotasDir()
subvolumes := d.subvolumesDir()
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return err
}
if err := idtools.MkdirAllAs(subvolumes, 0o700, rootUID, rootGID); err != nil {
if err := os.MkdirAll(subvolumes, 0o700); err != nil {
return err
}
if parent == "" {
@ -523,7 +511,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
return err
}
if err := idtools.MkdirAllAs(quotas, 0o700, rootUID, rootGID); err != nil {
if err := os.MkdirAll(quotas, 0o700); err != nil {
return err
}
if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0o644); err != nil {
@ -531,14 +519,6 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
}
}
// if we have a remapped root (user namespaces enabled), change the created snapshot
// dir ownership to match
if rootUID != 0 || rootGID != 0 {
if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil {
return err
}
}
mountLabel := ""
if opts != nil {
mountLabel = opts.MountLabel


@ -4,11 +4,12 @@ import (
"bytes"
"errors"
"fmt"
"io/fs"
"os"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
"github.com/opencontainers/selinux/pkg/pwalk"
"github.com/opencontainers/selinux/pkg/pwalkdir"
)
const (
@ -54,13 +55,14 @@ func chownByMapsMain() {
chowner := newLChowner()
chown := func(path string, info os.FileInfo, _ error) error {
if path == "." {
var chown fs.WalkDirFunc = func(path string, d fs.DirEntry, _ error) error {
info, err := d.Info()
if path == "." || err != nil {
return nil
}
return chowner.LChown(path, info, toHost, toContainer)
}
if err := pwalk.Walk(".", chown); err != nil {
if err := pwalkdir.Walk(".", chown); err != nil {
fmt.Fprintf(os.Stderr, "error during chown: %v", err)
os.Exit(1)
}


@ -1,5 +1,5 @@
//go:build linux || darwin || freebsd || solaris
// +build linux darwin freebsd solaris
//go:build !windows
// +build !windows
package graphdriver


@ -50,13 +50,13 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
defer srcFile.Close()
if *copyWithFileClone {
_, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
if err == nil {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
if errno == 0 {
return nil
}
*copyWithFileClone = false
if err == unix.EXDEV {
if errno == unix.EXDEV {
*copyWithFileRange = false
}
}
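The FICLONE fix above addresses a classic errno pitfall: unix.Syscall reports failure through a syscall.Errno whose zero value means success, and boxing that into an error interface makes nil checks lie. A small Linux-only demonstration:

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	// getpid(2) cannot fail, so errno is the zero Errno.
	_, _, errno := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
	fmt.Println(errno == 0) // true: compare the Errno directly

	// The trap the patch removes: a zero Errno boxed in an error
	// interface is still a non-nil error value.
	var err error = syscall.Errno(0)
	fmt.Println(err == nil) // false
}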


@ -193,6 +193,7 @@ type DriverWithDifferOutput struct {
UIDs []uint32
GIDs []uint32
UncompressedDigest digest.Digest
CompressedDigest digest.Digest
Metadata string
BigData map[string][]byte
TarSplit []byte
@ -215,20 +216,25 @@ const (
DifferOutputFormatFlat
)
// DifferFsVerity is a part of the experimental Differ interface and should not be used from outside of c/storage.
// It configures the fsverity requirement.
type DifferFsVerity int
const (
// DifferFsVerityDisabled means no fs-verity is used
DifferFsVerityDisabled = iota
// DifferFsVerityEnabled means fs-verity is used when supported
DifferFsVerityEnabled
// DifferFsVerityIfAvailable means fs-verity is used when supported by
// the underlying kernel and filesystem.
DifferFsVerityIfAvailable
// DifferFsVerityRequired means fs-verity is required
// DifferFsVerityRequired means fs-verity is required. Note this is not
// currently set or exposed by the overlay driver.
DifferFsVerityRequired
)
// DifferOptions overrides how the differ work
// DifferOptions is a part of the experimental Differ interface and should not be used from outside of c/storage.
// It overrides how the differ works.
type DifferOptions struct {
// Format defines the destination directory layout format
Format DifferOutputFormat
@ -377,8 +383,6 @@ type Options struct {
ImageStore string
DriverPriority []string
DriverOptions []string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
ExperimentalEnabled bool
}


@ -263,7 +263,11 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
return false, fmt.Errorf("create mapped mount: %w", err)
}
defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH)
defer func() {
if err := unix.Unmount(lowerMappedDir, unix.MNT_DETACH); err != nil {
logrus.Warnf("Unmount %q: %v", lowerMappedDir, err)
}
}()
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir)
flags := uintptr(0)


@ -8,6 +8,7 @@ import (
"encoding/binary"
"errors"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
@ -56,7 +57,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com
fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644)
if err != nil {
return fmt.Errorf("failed to open output file %q: %w", destFile, err)
return &fs.PathError{Op: "openat", Path: destFile, Err: err}
}
outFd := os.NewFile(uintptr(fd), "outFd")
@ -117,7 +118,7 @@ func hasACL(path string) (bool, error) {
fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return false, err
return false, &fs.PathError{Op: "openat", Path: path, Err: err}
}
defer unix.Close(fd)
// do not worry about checking the magic number, if the file is invalid
@ -125,7 +126,7 @@ func hasACL(path string) (bool, error) {
flags := make([]byte, 4)
nread, err := unix.Pread(fd, flags, 8)
if err != nil {
return false, err
return false, fmt.Errorf("pread %q: %w", path, err)
}
if nread != 4 {
return false, fmt.Errorf("failed to read flags from %q", path)
@ -150,5 +151,8 @@ func mountComposefsBlob(dataDir, mountPoint string) error {
mountOpts += ",noacl"
}
return unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts)
if err := unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts); err != nil {
return fmt.Errorf("failed to mount erofs image at %q: %w", mountPoint, err)
}
return nil
}


@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"os/exec"
"path"
@ -119,8 +120,6 @@ type Driver struct {
home string
runhome string
imageStore string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
quotaCtl *quota.Control
options overlayOptions
@ -332,13 +331,9 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
backingFs = fsName
runhome := filepath.Join(options.RunRoot, filepath.Base(home))
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return nil, err
}
// Create the driver home dir
if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0o755, 0, 0); err != nil {
if err := os.MkdirAll(path.Join(home, linkDir), 0o755); err != nil {
return nil, err
}
@ -348,7 +343,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
}
if err := idtools.MkdirAllAs(runhome, 0o700, rootUID, rootGID); err != nil {
if err := os.MkdirAll(runhome, 0o700); err != nil {
return nil, err
}
@ -373,9 +368,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
return nil, err
}
} else {
if opts.forceMask != nil {
return nil, errors.New("'force_mask' is supported only with 'mount_program'")
}
// check if they are running over btrfs, aufs, overlay, or ecryptfs
switch fsMagic {
case graphdriver.FsMagicAufs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
@ -457,8 +449,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
home: home,
imageStore: options.ImageStore,
runhome: runhome,
uidMaps: options.UIDMaps,
gidMaps: options.GIDMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)),
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
@ -698,12 +688,8 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
}
func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) {
// We can try to modprobe overlay first
selinuxLabelTest := selinux.PrivContainerMountLabel()
exec.Command("modprobe", "overlay").Run()
logLevel := logrus.ErrorLevel
if unshare.IsRootless() {
logLevel = logrus.DebugLevel
@ -831,7 +817,9 @@ func (d *Driver) useNaiveDiff() bool {
logrus.Info(nativeDiffCacheText)
useNaiveDiffOnly = true
}
cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText)
if err := cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText); err != nil {
logrus.Warnf("Recording overlay native-diff support status: %v", err)
}
})
return useNaiveDiffOnly
}
@ -860,14 +848,14 @@ func (d *Driver) Status() [][2]string {
// Metadata returns meta data about the overlay driver such as
// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
func (d *Driver) Metadata(id string) (map[string]string, error) {
dir := d.dir(id)
dir, _, inAdditionalStore := d.dir2(id, false)
if err := fileutils.Exists(dir); err != nil {
return nil, err
}
metadata := map[string]string{
"WorkDir": path.Join(dir, "work"),
"MergedDir": path.Join(dir, "merged"),
"MergedDir": d.getMergedDir(id, dir, inAdditionalStore),
"UpperDir": path.Join(dir, "diff"),
}
@ -983,6 +971,10 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
}
}
if d.options.forceMask != nil && d.options.mountProgram == "" {
return fmt.Errorf("overlay: force_mask option for writeable layers is only supported with a mount_program")
}
if _, ok := opts.StorageOpt["size"]; !ok {
if opts.StorageOpt == nil {
opts.StorageOpt = map[string]string{}
@ -1021,8 +1013,8 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
disableQuota := readOnly
uidMaps := d.uidMaps
gidMaps := d.gidMaps
var uidMaps []idtools.IDMap
var gidMaps []idtools.IDMap
if opts != nil && opts.IDMappings != nil {
uidMaps = opts.IDMappings.UIDs()
@ -1047,14 +1039,23 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
return err
}
st := idtools.Stat{IDs: idPair, Mode: defaultPerms}
if parent != "" {
parentBase := d.dir(parent)
st, err := system.Stat(filepath.Join(parentBase, "diff"))
if err != nil {
return err
parentDiff := filepath.Join(parentBase, "diff")
if xSt, err := idtools.GetContainersOverrideXattr(parentDiff); err == nil {
st = xSt
} else {
systemSt, err := system.Stat(parentDiff)
if err != nil {
return err
}
st.IDs.UID = int(systemSt.UID())
st.IDs.GID = int(systemSt.GID())
st.Mode = os.FileMode(systemSt.Mode())
}
rootUID = int(st.UID())
rootGID = int(st.GID())
}
if err := fileutils.Lexists(dir); err == nil {
@ -1100,22 +1101,21 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
}
}
perms := defaultPerms
forcedSt := st
if d.options.forceMask != nil {
perms = *d.options.forceMask
forcedSt.IDs = idPair
forcedSt.Mode = *d.options.forceMask
}
if parent != "" {
parentBase := d.dir(parent)
st, err := system.Stat(filepath.Join(parentBase, "diff"))
if err != nil {
diff := path.Join(dir, "diff")
if err := idtools.MkdirAs(diff, forcedSt.Mode, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
return err
}
if d.options.forceMask != nil {
if err := idtools.SetContainersOverrideXattr(diff, st); err != nil {
return err
}
perms = os.FileMode(st.Mode())
}
if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
return err
}
lid := generateID(idLength)
@ -1130,16 +1130,16 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl
return err
}
if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
return err
}
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil {
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID); err != nil {
return err
}
// if no parent directory, create a dummy lower directory and skip writing a "lowers" file
if parent == "" {
return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, rootUID, rootGID)
return idtools.MkdirAs(path.Join(dir, "empty"), 0o700, forcedSt.IDs.UID, forcedSt.IDs.GID)
}
lower, err := d.getLower(parent)
@ -1283,12 +1283,6 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
}
func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string {
if uidMaps == nil {
uidMaps = d.uidMaps
}
if gidMaps == nil {
gidMaps = d.gidMaps
}
if uidMaps != nil {
var uids, gids bytes.Buffer
if len(uidMaps) == 1 && uidMaps[0].Size == 1 {
@ -1539,11 +1533,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
for err == nil {
absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN)))
diffN++
st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
if err == nil && !permsKnown {
perms = os.FileMode(st.Mode())
permsKnown = true
}
err = fileutils.Exists(filepath.Join(dir, nameWithSuffix("diff", diffN)))
}
idmappedMountProcessPid := -1
@ -1561,7 +1551,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
composefsMounts := []string{}
defer func() {
for _, m := range composefsMounts {
defer unix.Unmount(m, unix.MNT_DETACH)
defer func(m string) {
if err := unix.Unmount(m, unix.MNT_DETACH); err != nil {
logrus.Warnf("Unmount %q: %v", m, err)
}
}(m)
}
}()
@ -1665,7 +1659,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
skipIDMappingLayers[composefsMount] = composefsMount
// overlay takes a reference on the mount, so it is safe to unmount
// the mapped idmounts as soon as the final overlay file system is mounted.
defer unix.Unmount(composefsMount, unix.MNT_DETACH)
defer func() {
if err := unix.Unmount(composefsMount, unix.MNT_DETACH); err != nil {
logrus.Warnf("Unmount %q: %v", composefsMount, err)
}
}()
}
absLowers = append(absLowers, composefsMount)
continue
@ -1705,10 +1703,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
mergedDir := path.Join(dir, "merged")
mergedDir := d.getMergedDir(id, dir, inAdditionalStore)
// Attempt to create the merged dir only if it doesn't exist.
if err := fileutils.Exists(mergedDir); err != nil && os.IsNotExist(err) {
if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
if err := idtools.MkdirAllAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return "", err
}
}
@ -1772,7 +1770,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// overlay takes a reference on the mount, so it is safe to unmount
// the mapped idmounts as soon as the final overlay file system is mounted.
defer unix.Unmount(root, unix.MNT_DETACH)
defer func() {
if err := unix.Unmount(root, unix.MNT_DETACH); err != nil {
logrus.Warnf("Unmount %q: %v", root, err)
}
}()
}
// relative path to the layer through the id mapped mount
@ -1854,7 +1856,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
return mountOverlayFrom(d.home, source, target, mType, flags, label)
}
mountTarget = path.Join(id, "merged")
if !inAdditionalStore {
mountTarget = path.Join(id, "merged")
}
}
// overlay has a check in place to prevent mounting the same file system twice
@ -1873,13 +1877,26 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
return mergedDir, nil
}
// getMergedDir returns the directory path that should be used as the mount point for the overlayfs.
func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string {
// If the layer is in an additional store, the lock we hold might be only a reading lock. To prevent
// races with other processes, use a private directory under the main store rundir. At this point, the
// current process is holding an exclusive lock on the store, and since the rundir cannot be shared for
// different stores, it is safe to assume the current process has exclusive access to it.
if inAdditionalStore {
return path.Join(d.runhome, id, "merged")
}
return path.Join(dir, "merged")
}
// Put unmounts the mount path created for the give id.
func (d *Driver) Put(id string) error {
dir, _, inAdditionalStore := d.dir2(id, false)
if err := fileutils.Exists(dir); err != nil {
return err
}
mountpoint := path.Join(dir, "merged")
mountpoint := d.getMergedDir(id, dir, inAdditionalStore)
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
@ -1936,7 +1953,15 @@ func (d *Driver) Put(id string) error {
}
}
if !inAdditionalStore {
if inAdditionalStore {
// check the base name for extra safety
if strings.HasPrefix(mountpoint, d.runhome) && filepath.Base(mountpoint) == "merged" {
err := os.RemoveAll(filepath.Dir(mountpoint))
if err != nil {
logrus.Warningf("Failed to remove mountpoint %s overlay: %s: %v", id, mountpoint, err)
}
}
} else {
uid, gid := int(0), int(0)
fi, err := os.Stat(mountpoint)
if err != nil {
@ -1953,7 +1978,7 @@ func (d *Driver) Put(id string) error {
// rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains
// its atomic semantic. In this way the "merged" directory is never removed.
if err := unix.Rename(tmpMountpoint, mountpoint); err != nil {
logrus.Debugf("Failed to replace mountpoint %s overlay: %s - %v", id, mountpoint, err)
logrus.Debugf("Failed to replace mountpoint %s overlay: %s: %v", id, mountpoint, err)
return fmt.Errorf("replacing mount point %q: %w", mountpoint, err)
}
}
@ -2024,11 +2049,27 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
}
type overlayFileGetter struct {
diffDirs []string
diffDirs []string
composefsMounts map[string]*os.File // map from diff dir to the directory with the composefs blob mounted
}
func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
buf := make([]byte, unix.PathMax)
for _, d := range g.diffDirs {
if f, found := g.composefsMounts[d]; found {
// there is no *at equivalent for getxattr, but it can be emulated by opening the file under /proc/self/fd/$FD/$PATH
len, err := unix.Getxattr(fmt.Sprintf("/proc/self/fd/%d/%s", int(f.Fd()), path), "trusted.overlay.redirect", buf)
if err != nil {
if errors.Is(err, unix.ENODATA) {
continue
}
return nil, &fs.PathError{Op: "getxattr", Path: path, Err: err}
}
// the xattr value is the path to the file in the composefs layer diff directory
return os.Open(filepath.Join(d, string(buf[:len])))
}
f, err := os.Open(filepath.Join(d, path))
if err == nil {
return f, nil
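The /proc/self/fd trick in the hunk above generalizes: given a directory file descriptor and a relative path, you can synthesize a path that getxattr accepts, emulating the missing getxattrat(2). A sketch with illustrative file and attribute names:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func getxattrAt(dir *os.File, rel, attr string) ([]byte, error) {
	p := fmt.Sprintf("/proc/self/fd/%d/%s", int(dir.Fd()), rel)
	buf := make([]byte, unix.PathMax)
	n, err := unix.Getxattr(p, attr, buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}

func main() {
	d, err := os.Open(".")
	if err != nil {
		panic(err)
	}
	defer d.Close()
	v, err := getxattrAt(d, "somefile", "user.example")
	fmt.Println(v, err)
}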
@ -2041,7 +2082,16 @@ func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
}
func (g *overlayFileGetter) Close() error {
return nil
var errs *multierror.Error
for _, f := range g.composefsMounts {
if err := f.Close(); err != nil {
errs = multierror.Append(errs, err)
}
if err := unix.Rmdir(f.Name()); err != nil {
errs = multierror.Append(errs, err)
}
}
return errs.ErrorOrNil()
}
func (d *Driver) getStagingDir(id string) string {
@ -2052,10 +2102,7 @@ func (d *Driver) getStagingDir(id string) string {
// DiffGetter returns a FileGetCloser that can read files from the directory that
// contains files for the layer differences, either for this layer, or one of our
// lowers if we're just a template directory. Used for direct access for tar-split.
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
if d.usingComposefs {
return nil, nil
}
func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error) {
p, err := d.getDiffPath(id)
if err != nil {
return nil, err
@ -2064,7 +2111,43 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
if err != nil {
return nil, err
}
return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil
// map from diff dir to the directory with the composefs blob mounted
composefsMounts := make(map[string]*os.File)
defer func() {
if Err != nil {
for _, f := range composefsMounts {
f.Close()
if err := unix.Rmdir(f.Name()); err != nil && !os.IsNotExist(err) {
logrus.Warnf("Failed to remove %s: %v", f.Name(), err)
}
}
}
}()
diffDirs := append([]string{p}, paths...)
for _, diffDir := range diffDirs {
// diffDir has the form $GRAPH_ROOT/overlay/$ID/diff, so grab the $ID from the parent directory
id := path.Base(path.Dir(diffDir))
composefsBlob := d.getComposefsData(id)
if fileutils.Exists(composefsBlob) != nil {
// not a composefs layer, ignore it
continue
}
dir, err := os.MkdirTemp(d.runhome, "composefs-mnt")
if err != nil {
return nil, err
}
if err := mountComposefsBlob(composefsBlob, dir); err != nil {
return nil, err
}
fd, err := os.Open(dir)
if err != nil {
return nil, err
}
composefsMounts[diffDir] = fd
_ = unix.Unmount(dir, unix.MNT_DETACH)
}
return &overlayFileGetter{diffDirs: diffDirs, composefsMounts: composefsMounts}, nil
}
// CleanupStagingDirectory cleanups the staging directory.
@ -2100,9 +2183,16 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
// ApplyDiffWithDiffer applies the changes in the new layer using the specified function
func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
var idMappings *idtools.IDMappings
var forceMask *os.FileMode
if options != nil {
idMappings = options.Mappings
forceMask = options.ForceMask
}
if d.options.forceMask != nil {
forceMask = d.options.forceMask
}
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
@ -2120,8 +2210,8 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
return graphdriver.DriverWithDifferOutput{}, err
}
perms := defaultPerms
if d.options.forceMask != nil {
perms = *d.options.forceMask
if forceMask != nil {
perms = *forceMask
}
applyDir = filepath.Join(layerDir, "dir")
if err := os.Mkdir(applyDir, perms); err != nil {
@ -2155,7 +2245,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
}
if d.usingComposefs {
differOptions.Format = graphdriver.DifferOutputFormatFlat
differOptions.UseFsVerity = graphdriver.DifferFsVerityEnabled
differOptions.UseFsVerity = graphdriver.DifferFsVerityIfAvailable
}
out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
@ -2163,6 +2253,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
IgnoreChownErrors: d.options.ignoreChownErrors,
WhiteoutFormat: d.getWhiteoutFormat(),
InUserNS: unshare.IsRootless(),
ForceMask: forceMask,
}, &differOptions)
out.Target = applyDir
@ -2342,14 +2433,18 @@ func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent strin
// layers.
diffPath, err := d.getDiffPath(id)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get diff path: %w", err)
}
layers, err := d.getLowerDiffPaths(id)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get lower diff path: %w", err)
}
return archive.OverlayChanges(layers, diffPath)
c, err := archive.OverlayChanges(layers, diffPath)
if err != nil {
return nil, fmt.Errorf("computing changes: %w", err)
}
return c, nil
}
// AdditionalImageStores returns additional image stores supported by the driver
@ -2476,6 +2571,19 @@ func nameWithSuffix(name string, number int) string {
return fmt.Sprintf("%s%d", name, number)
}
func validateOneAdditionalLayerPath(target string) error {
for _, p := range []string{
filepath.Join(target, "diff"),
filepath.Join(target, "info"),
filepath.Join(target, "blob"),
} {
if err := fileutils.Exists(p); err != nil {
return err
}
}
return nil
}
func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (string, error) {
refElem := base64.StdEncoding.EncodeToString([]byte(ref))
for _, ls := range d.options.layerStores {
@ -2484,18 +2592,11 @@ func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (st
ref = refElem
}
target := path.Join(ls.path, ref, tocDigest.String())
// Check if all necessary files exist
for _, p := range []string{
filepath.Join(target, "diff"),
filepath.Join(target, "info"),
filepath.Join(target, "blob"),
} {
if err := fileutils.Exists(p); err != nil {
wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err)
return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown)
}
err := validateOneAdditionalLayerPath(target)
if err == nil {
return target, nil
}
return target, nil
logrus.Debugf("additional Layer Store %v failed to stat additional layer: %v", ls, err)
}
return "", fmt.Errorf("additional layer (%q, %q) not found: %w", tocDigest, ref, graphdriver.ErrLayerUnknown)


@ -1,5 +1,5 @@
//go:build linux && cgo
// +build linux,cgo
//go:build linux && cgo && !exclude_disk_quota
// +build linux,cgo,!exclude_disk_quota
package overlay


@ -0,0 +1,18 @@
//go:build linux && (!cgo || exclude_disk_quota)
// +build linux
// +build !cgo exclude_disk_quota
package overlay
import (
"path"
"github.com/containers/storage/pkg/directory"
)
// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
// For Overlay, it attempts to check the XFS quota for size, and falls back to
// finding the size of the "diff" directory.
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
return directory.Usage(path.Join(d.dir(id), "diff"))
}


@ -5,18 +5,8 @@ package overlay
import (
"fmt"
"path"
"github.com/containers/storage/pkg/directory"
)
// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
// For Overlay, it attempts to check the XFS quota for size, and falls back to
// finding the size of the "diff" directory.
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
return directory.Usage(path.Join(d.dir(id), "diff"))
}
func getComposeFsHelper() (string, error) {
return "", fmt.Errorf("composefs not supported on this build")
}


@ -19,16 +19,6 @@ package quota
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#ifndef FS_XFLAG_PROJINHERIT
struct fsxattr {
__u32 fsx_xflags;
__u32 fsx_extsize;
__u32 fsx_nextents;
__u32 fsx_projid;
unsigned char fsx_pad[12];
};
#define FS_XFLAG_PROJINHERIT 0x00000200
#endif
#ifndef FS_IOC_FSGETXATTR
#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
#endif
@ -357,7 +347,6 @@ func setProjectID(targetPath string, projectID uint32) error {
return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
fsx.fsx_projid = C.__u32(projectID)
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {


@ -33,12 +33,10 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
d := &Driver{
name: "vfs",
home: home,
idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
imageStore: options.ImageStore,
}
rootIDs := d.idMappings.RootPair()
if err := idtools.MkdirAllAndChown(filepath.Join(home, "dir"), 0o700, rootIDs); err != nil {
if err := os.MkdirAll(filepath.Join(home, "dir"), 0o700); err != nil {
return nil, err
}
for _, option := range options.DriverOptions {
@ -79,7 +77,6 @@ type Driver struct {
name string
home string
additionalHomes []string
idMappings *idtools.IDMappings
ignoreChownErrors bool
naiveDiff graphdriver.DiffDriver
updater graphdriver.LayerIDMapUpdater
@ -152,14 +149,21 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
return fmt.Errorf("--storage-opt is not supported for vfs")
}
idMappings := d.idMappings
var uidMaps []idtools.IDMap
var gidMaps []idtools.IDMap
if opts != nil && opts.IDMappings != nil {
idMappings = opts.IDMappings
uidMaps = opts.IDMappings.UIDs()
gidMaps = opts.IDMappings.GIDs()
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return err
}
dir := d.dir2(id, ro)
rootIDs := idMappings.RootPair()
if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil {
if err := os.MkdirAll(filepath.Dir(dir), 0o700); err != nil {
return err
}
@ -174,21 +178,24 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
rootPerms = os.FileMode(0o700)
}
idPair := idtools.IDPair{UID: rootUID, GID: rootGID}
if parent != "" {
st, err := system.Stat(d.dir(parent))
if err != nil {
return err
}
rootPerms = os.FileMode(st.Mode())
rootIDs.UID = int(st.UID())
rootIDs.GID = int(st.GID())
idPair.UID = int(st.UID())
idPair.GID = int(st.GID())
}
if err := idtools.MkdirAndChown(dir, rootPerms, rootIDs); err != nil {
if err := idtools.MkdirAllAndChownNew(dir, rootPerms, idPair); err != nil {
return err
}
labelOpts := []string{"level:s0"}
if _, mountLabel, err := label.InitLabels(labelOpts); err == nil {
label.SetFileLabel(dir, mountLabel)
if err := label.SetFileLabel(dir, mountLabel); err != nil {
logrus.Debugf("Set %s label to %q file ended with error: %v", mountLabel, dir, err)
}
}
if parent != "" {
parentDir, err := d.Get(parent, graphdriver.MountOpts{})


@ -106,11 +106,7 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
return nil, fmt.Errorf("zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
}
rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps)
if err != nil {
return nil, fmt.Errorf("failed to get root uid/gid: %w", err)
}
if err := idtools.MkdirAllAs(base, 0o700, rootUID, rootGID); err != nil {
if err := os.MkdirAll(base, 0o700); err != nil {
return nil, fmt.Errorf("failed to create '%s': %w", base, err)
}
@ -118,8 +114,6 @@ func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) {
dataset: rootDataset,
options: options,
filesystemsCache: filesystemsCache,
uidMaps: opt.UIDMaps,
gidMaps: opt.GIDMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
}
return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
@ -177,8 +171,6 @@ type Driver struct {
options zfsOptions
sync.Mutex // protects filesystem cache against concurrent access
filesystemsCache map[string]bool
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
ctr *graphdriver.RefCounter
}
@ -248,7 +240,9 @@ func (d *Driver) cloneFilesystem(name, parentName string) error {
}
if err != nil {
snapshot.Destroy(zfs.DestroyDeferDeletion)
if err1 := snapshot.Destroy(zfs.DestroyDeferDeletion); err1 != nil {
logrus.Warnf("Destroy zfs.DestroyDeferDeletion: %v", err1)
}
return err
}
return snapshot.Destroy(zfs.DestroyDeferDeletion)
@ -448,12 +442,8 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr
opts := label.FormatMountLabel(mountOptions, options.MountLabel)
logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
return "", err
}
// Create the target directories if they don't exist
if err := idtools.MkdirAllAs(mountpoint, 0o755, rootUID, rootGID); err != nil {
if err := os.MkdirAll(mountpoint, 0o755); err != nil {
return "", err
}


@ -2529,7 +2529,9 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
layer.GIDs = diffOutput.GIDs
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, diffOutput.UncompressedDigest, layer.ID)
layer.UncompressedDigest = diffOutput.UncompressedDigest
updateDigestMap(&r.bytocsum, diffOutput.TOCDigest, diffOutput.TOCDigest, layer.ID)
updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, diffOutput.CompressedDigest, layer.ID)
layer.CompressedDigest = diffOutput.CompressedDigest
updateDigestMap(&r.bytocsum, layer.TOCDigest, diffOutput.TOCDigest, layer.ID)
layer.TOCDigest = diffOutput.TOCDigest
layer.UncompressedSize = diffOutput.Size
layer.Metadata = diffOutput.Metadata


@ -5,7 +5,7 @@ import (
)
// Deprecated: Use lockfile.*LockFile.
type Locker = lockfile.Locker //lint:ignore SA1019 // lockfile.Locker is deprecated
type Locker = lockfile.Locker //nolint:staticcheck // SA1019 lockfile.Locker is deprecated
// Deprecated: Use lockfile.GetLockFile.
func GetLockfile(path string) (lockfile.Locker, error) {


@ -70,6 +70,8 @@ type (
}
)
const PaxSchilyXattr = "SCHILY.xattr."
const (
tarExt = "tar"
solaris = "solaris"
@ -169,10 +171,17 @@ func DetectCompression(source []byte) Compression {
}
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
func DecompressStream(archive io.Reader) (_ io.ReadCloser, Err error) {
p := pools.BufioReader32KPool
buf := p.Get(archive)
bs, err := buf.Peek(10)
defer func() {
if Err != nil {
p.Put(buf)
}
}()
if err != nil && err != io.EOF {
// Note: we'll ignore any io.EOF error because there are some odd
// cases where the layer.tar file will be empty (zero bytes) and
@ -189,6 +198,12 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
readBufWrapper := p.NewReadCloserWrapper(buf, buf)
return readBufWrapper, nil
case Gzip:
cleanup := func() {
p.Put(buf)
}
if rc, canUse := tryProcFilter([]string{"pigz", "-d"}, buf, cleanup); canUse {
return rc, nil
}
gzReader, err := gzip.NewReader(buf)
if err != nil {
return nil, err
@ -207,6 +222,12 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
return readBufWrapper, nil
case Zstd:
cleanup := func() {
p.Put(buf)
}
if rc, canUse := tryProcFilter([]string{"zstd", "-d"}, buf, cleanup); canUse {
return rc, nil
}
return zstdReader(buf)
default:
return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
@ -214,9 +235,16 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
}
// CompressStream compresses dest with the specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
func CompressStream(dest io.Writer, compression Compression) (_ io.WriteCloser, Err error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
defer func() {
if Err != nil {
p.Put(buf)
}
}()
switch compression {
case Uncompressed:
writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
@ -391,11 +419,11 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
return hdr, nil
}
// ReadSecurityXattrToTarHeader reads security.capability, security.ima
// readSecurityXattrToTarHeader reads security.capability, security.ima
// xattrs from filesystem to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
func readSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
if hdr.PAXRecords == nil {
hdr.PAXRecords = make(map[string]string)
}
for _, xattr := range []string{"security.capability", "security.ima"} {
capability, err := system.Lgetxattr(path, xattr)
@ -403,14 +431,14 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
return fmt.Errorf("failed to read %q attribute from %q: %w", xattr, path, err)
}
if capability != nil {
hdr.Xattrs[xattr] = string(capability)
hdr.PAXRecords[PaxSchilyXattr+xattr] = string(capability)
}
}
return nil
}
// ReadUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
// readUserXattrToTarHeader reads user.* xattr from filesystem to a tar header
func readUserXattrToTarHeader(path string, hdr *tar.Header) error {
xattrs, err := system.Llistxattr(path)
if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform {
return err
@ -425,10 +453,10 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
}
return err
}
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
if hdr.PAXRecords == nil {
hdr.PAXRecords = make(map[string]string)
}
hdr.Xattrs[key] = string(value)
hdr.PAXRecords[PaxSchilyXattr+key] = string(value)
}
}
return nil
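For illustration, a minimal standalone sketch of the convention used above: xattrs now travel in hdr.PAXRecords under the SCHILY.xattr. prefix instead of the deprecated hdr.Xattrs (the value below is a made-up placeholder, not a real capability blob):
package main
import (
	"archive/tar"
	"fmt"
)
const paxSchilyXattr = "SCHILY.xattr." // mirrors archive.PaxSchilyXattr above
func main() {
	hdr := &tar.Header{Name: "bin/ping", PAXRecords: map[string]string{}}
	// A real value would be the raw capability blob read via lgetxattr(2).
	hdr.PAXRecords[paxSchilyXattr+"security.capability"] = "placeholder-capability-blob"
	for key, val := range hdr.PAXRecords {
		fmt.Printf("%s => %d bytes\n", key, len(val))
	}
}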
@ -516,10 +544,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
if err != nil {
return err
}
if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
if err := readSecurityXattrToTarHeader(path, hdr); err != nil {
return err
}
if err := ReadUserXattrToTarHeader(path, hdr); err != nil {
if err := readUserXattrToTarHeader(path, hdr); err != nil {
return err
}
if err := ReadFileFlagsToTarHeader(path, hdr); err != nil {
@ -642,7 +670,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
}
case tar.TypeReg, tar.TypeRegA:
case tar.TypeReg:
// Source is regular file. We use system.OpenFileSequential to use sequential
// file access to avoid depleting the standby list on Windows.
// On Linux, this equates to a regular os.OpenFile
@ -701,8 +729,11 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") {
value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&0o7777)
if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
value := idtools.Stat{
IDs: idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid},
Mode: hdrInfo.Mode() & 0o7777,
}
if err := idtools.SetContainersOverrideXattr(path, value); err != nil {
return err
}
}
@ -753,11 +784,15 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
var errs []string
for key, value := range hdr.Xattrs {
if _, found := xattrsToIgnore[key]; found {
for key, value := range hdr.PAXRecords {
xattrKey, ok := strings.CutPrefix(key, PaxSchilyXattr)
if !ok {
continue
}
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
if _, found := xattrsToIgnore[xattrKey]; found {
continue
}
if err := system.Lsetxattr(path, xattrKey, []byte(value), 0); err != nil {
if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
// We ignore errors here because not all graphdrivers support
// xattrs *cough* old versions of AUFS *cough*. However only
@ -1113,9 +1148,14 @@ loop:
}
}
if options.ForceMask != nil && rootHdr != nil {
value := fmt.Sprintf("%d:%d:0%o", rootHdr.Uid, rootHdr.Gid, rootHdr.Mode)
if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
if options.ForceMask != nil {
value := idtools.Stat{Mode: 0o755}
if rootHdr != nil {
value.IDs.UID = rootHdr.Uid
value.IDs.GID = rootHdr.Gid
value.Mode = os.FileMode(rootHdr.Mode)
}
if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
return err
}
}
@ -1337,7 +1377,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
}
} else if runtime.GOOS == darwin {
uid, gid = hdr.Uid, hdr.Gid
if xstat, ok := hdr.Xattrs[idtools.ContainersOverrideXattr]; ok {
if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
attrs := strings.Split(string(xstat), ":")
if len(attrs) == 3 {
val, err := strconv.ParseUint(attrs[0], 10, 32)

View file

@ -1,5 +1,5 @@
//go:build freebsd || darwin
// +build freebsd darwin
//go:build netbsd || freebsd || darwin
// +build netbsd freebsd darwin
package archive

View file

@ -48,8 +48,8 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
return nil, err
}
if len(opaque) == 1 && opaque[0] == 'y' {
if hdr.Xattrs != nil {
delete(hdr.Xattrs, getOverlayOpaqueXattrName())
if hdr.PAXRecords != nil {
delete(hdr.PAXRecords, PaxSchilyXattr+getOverlayOpaqueXattrName())
}
// If there are no lower layers, then it can't have been deleted in this layer.
if len(o.rolayers) == 0 {

View file

@ -316,7 +316,11 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno)
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
dc := func(root, path string, fi os.FileInfo) (string, error) {
return overlayDeletedFile(layers, root, path, fi)
r, err := overlayDeletedFile(layers, root, path, fi)
if err != nil {
return "", fmt.Errorf("overlay deleted file query: %w", err)
}
return r, nil
}
return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout)
}
@ -351,7 +355,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str
// If the directory isn't marked as opaque, then it's just a normal directory.
opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName())
if err != nil {
return "", err
return "", fmt.Errorf("failed querying overlay opaque xattr: %w", err)
}
if len(opaque) != 1 || opaque[0] != 'y' {
return "", err

View file

@ -31,9 +31,9 @@ func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.Sta
ownerChanged ||
oldStat.Rdev() != newStat.Rdev() ||
oldStat.Flags() != newStat.Flags() ||
!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) ||
// Don't look at size for dirs, it's not a good measure of change
(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
((oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR) && (oldStat.Size() != newStat.Size())) {
return true
}
return false

View file

@ -0,0 +1,73 @@
package archive
import (
"bytes"
"fmt"
"io"
"os/exec"
"strings"
"sync"
)
var filterPath sync.Map
func getFilterPath(name string) string {
path, ok := filterPath.Load(name)
if ok {
return path.(string)
}
path, err := exec.LookPath(name)
if err != nil {
path = ""
}
filterPath.Store(name, path)
return path.(string)
}
type errorRecordingReader struct {
r io.Reader
err error
}
func (r *errorRecordingReader) Read(p []byte) (int, error) {
n, err := r.r.Read(p)
if r.err == nil && err != io.EOF {
r.err = err
}
return n, err
}
// tryProcFilter tries to run the command specified in args, passing input to its stdin and returning its stdout.
// cleanup() is a caller-provided function that will be called when the command finishes running, regardless of
// whether it succeeds or fails.
// If the command is not found, it returns (nil, false) and the cleanup function is not called.
func tryProcFilter(args []string, input io.Reader, cleanup func()) (io.ReadCloser, bool) {
path := getFilterPath(args[0])
if path == "" {
return nil, false
}
var stderrBuf bytes.Buffer
inputWithError := &errorRecordingReader{r: input}
r, w := io.Pipe()
cmd := exec.Command(path, args[1:]...)
cmd.Stdin = inputWithError
cmd.Stdout = w
cmd.Stderr = &stderrBuf
go func() {
err := cmd.Run()
// if there is an error reading from input, prefer to return that error
if inputWithError.err != nil {
err = inputWithError.err
} else if err != nil && stderrBuf.Len() > 0 {
err = fmt.Errorf("%s: %w", strings.TrimRight(stderrBuf.String(), "\n"), err)
}
w.CloseWithError(err) // CloseWithError(nil) == Close()
cleanup()
}()
return r, true
}
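A rough standalone sketch of the pipe wiring tryProcFilter relies on, using `cat` as a stand-in filter so the example runs anywhere (`pigz -d` and `zstd -d` are what the callers above actually probe for):
package main
import (
	"io"
	"os"
	"os/exec"
	"strings"
)
func main() {
	r, w := io.Pipe()
	cmd := exec.Command("cat") // stand-in for an external filter such as "pigz -d"
	cmd.Stdin = strings.NewReader("hello from the filter\n")
	cmd.Stdout = w
	go func() {
		// Propagate the command's exit status to the reader side;
		// CloseWithError(nil) behaves like Close().
		w.CloseWithError(cmd.Run())
	}()
	io.Copy(os.Stdout, r)
}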

View file

@ -10,9 +10,11 @@ func invokeUnpack(decompressedArchive io.Reader,
dest string,
options *archive.TarOptions, root string,
) error {
_ = root // Restricting the operation to this root is not implemented on macOS
return archive.Unpack(decompressedArchive, dest, options)
}
func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
_ = root // Restricting the operation to this root is not implemented on macOS
return archive.TarWithOptions(srcPath, options)
}

View file

@ -107,12 +107,15 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
w.Close()
if err := cmd.Wait(); err != nil {
errorOut := fmt.Errorf("unpacking failed (error: %w; output: %s)", err, output)
// when `xz -d -c -q | storage-untar ...` fails on the storage-untar side,
// we need to exhaust `xz`'s output, otherwise the `xz` side will be
// blocked on the write pipe forever
io.Copy(io.Discard, decompressedArchive)
if _, err := io.Copy(io.Discard, decompressedArchive); err != nil {
return fmt.Errorf("%w\nexhausting input failed (error: %w)", errorOut, err)
}
return fmt.Errorf("processing tar file(%s): %w", output, err)
return errorOut
}
return nil
}

View file

@ -19,10 +19,13 @@ import (
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs
func chroot(path string) (err error) {
caps, err := capability.NewPid(0)
caps, err := capability.NewPid2(0)
if err != nil {
return err
}
if err := caps.Load(); err != nil {
return err
}
// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
// environment not in the chroot from untrusted files.

View file

@ -40,11 +40,13 @@ func applyLayer() {
}
// We need to be able to set any perms
oldmask, err := system.Umask(0)
defer system.Umask(oldmask)
oldMask, err := system.Umask(0)
if err != nil {
fatal(err)
}
defer func() {
_, _ = system.Umask(oldMask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above.
}()
if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
fatal(err)

View file

@ -2,10 +2,15 @@ package chunked
import (
"encoding/binary"
"fmt"
"hash/crc32"
"io"
"github.com/docker/go-units"
)
const bloomFilterMaxLength = 100 * units.MB // max size for bloom filter
type bloomFilter struct {
bitArray []uint64
k uint32
@ -79,6 +84,10 @@ func readBloomFilter(reader io.Reader) (*bloomFilter, error) {
if err := binary.Read(reader, binary.LittleEndian, &k); err != nil {
return nil, err
}
// sanity check
if bloomFilterLen > bloomFilterMaxLength {
return nil, fmt.Errorf("bloom filter length %d exceeds max length %d", bloomFilterLen, bloomFilterMaxLength)
}
bloomFilterArray := make([]uint64, bloomFilterLen)
if err := binary.Read(reader, binary.LittleEndian, &bloomFilterArray); err != nil {
return nil, err
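The check above caps an attacker-controlled length field before it drives an allocation; the same defensive pattern in a self-contained sketch (maxEntries is an arbitrary cap chosen for the example):
package main
import (
	"bytes"
	"encoding/binary"
	"fmt"
)
const maxEntries = 1 << 20 // arbitrary cap for the sketch
func readEntries(r *bytes.Reader) ([]uint64, error) {
	var n uint64
	if err := binary.Read(r, binary.LittleEndian, &n); err != nil {
		return nil, err
	}
	// Reject absurd lengths before allocating, as readBloomFilter now does.
	if n > maxEntries {
		return nil, fmt.Errorf("length %d exceeds max %d", n, maxEntries)
	}
	out := make([]uint64, n)
	return out, binary.Read(r, binary.LittleEndian, out)
}
func main() {
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.LittleEndian, uint64(2))
	binary.Write(buf, binary.LittleEndian, []uint64{7, 9})
	fmt.Println(readEntries(bytes.NewReader(buf.Bytes()))) // [7 9] <nil>
}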

View file

@ -18,6 +18,7 @@ import (
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/ioutils"
"github.com/docker/go-units"
jsoniter "github.com/json-iterator/go"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
@ -34,6 +35,8 @@ const (
// https://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html
bloomFilterScale = 10 // how much bigger is the bloom filter than the number of entries
bloomFilterHashes = 3 // number of hash functions for the bloom filter
maxTagsLen = 100 * units.MB // max size for tags len
)
type cacheFile struct {
@ -77,7 +80,9 @@ var (
func (c *layer) release() {
runtime.SetFinalizer(c, nil)
if c.mmapBuffer != nil {
unix.Munmap(c.mmapBuffer)
if err := unix.Munmap(c.mmapBuffer); err != nil {
logrus.Warnf("Error Munmap: layer %q: %v", c.id, err)
}
}
}
@ -189,7 +194,9 @@ func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
}
defer func() {
if errRet != nil && mmapBuffer != nil {
unix.Munmap(mmapBuffer)
if err := unix.Munmap(mmapBuffer); err != nil {
logrus.Warnf("Error Munmap: layer %q: %v", layerID, err)
}
}
}()
cacheFile, err := readCacheFileFromMemory(buffer)
@ -280,6 +287,13 @@ func (c *layersCache) load() error {
newLayers = append(newLayers, l)
continue
}
if r.ReadOnly {
// if the layer is coming from a read-only store, do not attempt
// to write to it.
continue
}
// the cache file is either not present or broken. Try to generate it from the TOC.
l, err = c.createCacheFileFromTOC(r.ID)
if err != nil {
@ -635,6 +649,14 @@ func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
if err := binary.Read(bigData, binary.LittleEndian, &fnamesLen); err != nil {
return nil, err
}
if tagsLen > maxTagsLen {
return nil, fmt.Errorf("tags len %d exceeds the maximum allowed size %d", tagsLen, maxTagsLen)
}
if digestLen > tagLen {
return nil, fmt.Errorf("digest len %d exceeds the tag len %d", digestLen, tagLen)
}
tags := make([]byte, tagsLen)
if _, err := bigData.Read(tags); err != nil {
return nil, err
@ -643,6 +665,10 @@ func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
// retrieve the unread part of the buffer.
remaining := bigDataBuffer[len(bigDataBuffer)-bigData.Len():]
if vdataLen >= uint64(len(remaining)) {
return nil, fmt.Errorf("vdata len %d exceeds the remaining buffer size %d", vdataLen, len(remaining))
}
vdata := remaining[:vdataLen]
fnames := remaining[vdataLen:]
@ -901,7 +927,7 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
s := iter.ReadString()
d, err := digest.Parse(s)
if err != nil {
return nil, fmt.Errorf("Invalid tarSplitDigest %q: %w", s, err)
return nil, fmt.Errorf("invalid tarSplitDigest %q: %w", s, err)
}
toc.TarSplitDigest = d

View file

@ -5,13 +5,16 @@ import (
"errors"
"fmt"
"io"
"maps"
"strconv"
"time"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/vbatts/tar-split/archive/tar"
expMaps "golang.org/x/exp/maps"
)
var typesToTar = map[string]byte{
@ -209,20 +212,162 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
}
decodedTarSplit := []byte{}
if tarSplitChunk.Offset > 0 {
if toc.TarSplitDigest != "" {
if tarSplitChunk.Offset <= 0 {
return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey)
}
tarSplit, err := readBlob(tarSplitChunk.Length)
if err != nil {
return nil, nil, nil, 0, err
}
decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String())
if err != nil {
return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err)
}
// We use the TOC for creating on-disk files, but the tar-split for creating metadata
// when exporting the layer contents. Ensure the two match, otherwise local inspection of a container
// might be misleading about the exported contents.
if err := ensureTOCMatchesTarSplit(toc, decodedTarSplit); err != nil {
return nil, nil, nil, 0, fmt.Errorf("tar-split and TOC data is inconsistent: %w", err)
}
} else if tarSplitChunk.Offset > 0 {
// We must ignore the tar-split when the digest is not present in the TOC, because we can't authenticate it.
//
// But if we asked for the chunk, now we must consume the data to not block the producer.
// Ideally the GetBlobAt API should be changed so that this is not necessary.
_, err := readBlob(tarSplitChunk.Length)
if err != nil {
return nil, nil, nil, 0, err
}
}
return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err
}
// ensureTOCMatchesTarSplit validates that toc and tarSplit contain _exactly_ the same entries.
func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
pendingFiles := map[string]*internal.FileMetadata{} // Name -> an entry in toc.Entries
for i := range toc.Entries {
e := &toc.Entries[i]
if e.Type != internal.TypeChunk {
if _, ok := pendingFiles[e.Name]; ok {
return fmt.Errorf("TOC contains duplicate entries for path %q", e.Name)
}
pendingFiles[e.Name] = e
}
}
if err := iterateTarSplit(tarSplit, func(hdr *tar.Header) error {
e, ok := pendingFiles[hdr.Name]
if !ok {
return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name)
}
delete(pendingFiles, hdr.Name)
expected, err := internal.NewFileMetadata(hdr)
if err != nil {
return fmt.Errorf("determining expected metadata for %q: %w", hdr.Name, err)
}
if err := ensureFileMetadataAttributesMatch(e, &expected); err != nil {
return fmt.Errorf("TOC and tar-split metadata doesn't match: %w", err)
}
return nil
}); err != nil {
return err
}
if len(pendingFiles) != 0 {
remaining := expMaps.Keys(pendingFiles)
if len(remaining) > 5 {
remaining = remaining[:5] // Just to limit the size of the output.
}
return fmt.Errorf("TOC contains entries not present in tar-split, incl. %q", remaining)
}
return nil
}
// ensureTimePointersMatch ensures that a and b are equal
func ensureTimePointersMatch(a, b *time.Time) error {
// We didn't always use “timeIfNotZero” when creating the TOC, so treat time.IsZero the same as nil.
// The archive/tar code turns time.IsZero() timestamps into a Unix timestamp of 0 when writing, but turns a Unix timestamp of 0
// when reading into a (local-timezone) Jan 1 1970, which is not IsZero(). So, treat that the same as IsZero as well.
unixZero := time.Unix(0, 0)
if a != nil && (a.IsZero() || a.Equal(unixZero)) {
a = nil
}
if b != nil && (b.IsZero() || b.Equal(unixZero)) {
b = nil
}
switch {
case a == nil && b == nil:
return nil
case a == nil:
return fmt.Errorf("nil != %v", *b)
case b == nil:
return fmt.Errorf("%v != nil", *a)
default:
if a.Equal(*b) {
return nil
}
return fmt.Errorf("%v != %v", *a, *b)
}
}
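A quick demonstration of why both representations above must be normalized before comparing (the Go zero time and Unix time 0 are distinct values):
package main
import (
	"fmt"
	"time"
)
func main() {
	var zero time.Time          // Go's zero value: IsZero() == true
	unixZero := time.Unix(0, 0) // what archive/tar round-trips a zero timestamp into
	fmt.Println(zero.IsZero(), unixZero.IsZero()) // true false
	fmt.Println(zero.Equal(unixZero))             // false, hence both are mapped to nil above
}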
// ensureFileMetadataAttributesMatch ensures that a and b match in file attributes (it ignores entries relevant to locating data
// in the tar stream or matching contents)
func ensureFileMetadataAttributesMatch(a, b *internal.FileMetadata) error {
// Keep this in sync with internal.FileMetadata!
if a.Type != b.Type {
return fmt.Errorf("mismatch of Type: %q != %q", a.Type, b.Type)
}
if a.Name != b.Name {
return fmt.Errorf("mismatch of Name: %q != %q", a.Name, b.Name)
}
if a.Linkname != b.Linkname {
return fmt.Errorf("mismatch of Linkname: %q != %q", a.Linkname, b.Linkname)
}
if a.Mode != b.Mode {
return fmt.Errorf("mismatch of Mode: %q != %q", a.Mode, b.Mode)
}
if a.Size != b.Size {
return fmt.Errorf("mismatch of Size: %q != %q", a.Size, b.Size)
}
if a.UID != b.UID {
return fmt.Errorf("mismatch of UID: %q != %q", a.UID, b.UID)
}
if a.GID != b.GID {
return fmt.Errorf("mismatch of GID: %q != %q", a.GID, b.GID)
}
if err := ensureTimePointersMatch(a.ModTime, b.ModTime); err != nil {
return fmt.Errorf("mismatch of ModTime: %w", err)
}
if err := ensureTimePointersMatch(a.AccessTime, b.AccessTime); err != nil {
return fmt.Errorf("mismatch of AccessTime: %w", err)
}
if err := ensureTimePointersMatch(a.ChangeTime, b.ChangeTime); err != nil {
return fmt.Errorf("mismatch of ChangeTime: %w", err)
}
if a.Devmajor != b.Devmajor {
return fmt.Errorf("mismatch of Devmajor: %q != %q", a.Devmajor, b.Devmajor)
}
if a.Devminor != b.Devminor {
return fmt.Errorf("mismatch of Devminor: %q != %q", a.Devminor, b.Devminor)
}
if !maps.Equal(a.Xattrs, b.Xattrs) {
return fmt.Errorf("mismatch of Xattrs: %q != %q", a.Xattrs, b.Xattrs)
}
// Digest is not compared
// Offset is not compared
// EndOffset is not compared
// ChunkSize is not compared
// ChunkOffset is not compared
// ChunkDigest is not compared
// ChunkType is not compared
return nil
}
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
d, err := digest.Parse(expectedCompressedChecksum)
if err != nil {

View file

@ -7,7 +7,6 @@ package compressor
import (
"bufio"
"bytes"
"encoding/base64"
"io"
"github.com/containers/storage/pkg/chunked/internal"
@ -369,34 +368,14 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
}
}
typ, err := internal.GetType(hdr.Typeflag)
mainEntry, err := internal.NewFileMetadata(hdr)
if err != nil {
return err
}
xattrs := make(map[string]string)
for k, v := range hdr.Xattrs {
xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
}
entries := []internal.FileMetadata{
{
Type: typ,
Name: hdr.Name,
Linkname: hdr.Linkname,
Mode: hdr.Mode,
Size: hdr.Size,
UID: hdr.Uid,
GID: hdr.Gid,
ModTime: &hdr.ModTime,
AccessTime: &hdr.AccessTime,
ChangeTime: &hdr.ChangeTime,
Devmajor: hdr.Devmajor,
Devminor: hdr.Devminor,
Xattrs: xattrs,
Digest: checksum,
Offset: startOffset,
EndOffset: lastOffset,
},
}
mainEntry.Digest = checksum
mainEntry.Offset = startOffset
mainEntry.EndOffset = lastOffset
entries := []internal.FileMetadata{mainEntry}
for i := 1; i < len(chunks); i++ {
entries = append(entries, internal.FileMetadata{
Type: internal.TypeChunk,

View file

@ -1,13 +1,16 @@
//go:build unix
package dump
import (
"bufio"
"encoding/base64"
"fmt"
"io"
"path/filepath"
"reflect"
"strings"
"time"
"unicode"
"github.com/containers/storage/pkg/chunked/internal"
"golang.org/x/sys/unix"
@ -20,20 +23,26 @@ const (
ESCAPE_LONE_DASH
)
func escaped(val string, escape int) string {
func escaped(val []byte, escape int) string {
noescapeSpace := escape&NOESCAPE_SPACE != 0
escapeEqual := escape&ESCAPE_EQUAL != 0
escapeLoneDash := escape&ESCAPE_LONE_DASH != 0
length := len(val)
if escapeLoneDash && val == "-" {
if escapeLoneDash && len(val) == 1 && val[0] == '-' {
return fmt.Sprintf("\\x%.2x", val[0])
}
// This is intended to match the C isprint API with LC_CTYPE=C
isprint := func(c byte) bool {
return c >= 32 && c < 127
}
// This is intended to match the C isgraph API with LC_CTYPE=C
isgraph := func(c byte) bool {
return c > 32 && c < 127
}
var result string
for i := 0; i < length; i++ {
c := val[i]
for _, c := range []byte(val) {
hexEscape := false
var special string
@ -50,9 +59,9 @@ func escaped(val string, escape int) string {
hexEscape = escapeEqual
default:
if noescapeSpace {
hexEscape = !unicode.IsPrint(rune(c))
hexEscape = !isprint(c)
} else {
hexEscape = !unicode.IsPrint(rune(c)) || unicode.IsSpace(rune(c))
hexEscape = !isgraph(c)
}
}
@ -67,8 +76,8 @@ func escaped(val string, escape int) string {
return result
}
func escapedOptional(val string, escape int) string {
if val == "" {
func escapedOptional(val []byte, escape int) string {
if len(val) == 0 {
return "-"
}
return escaped(val, escape)
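A standalone sketch of the isgraph-based hex escaping used above (this reimplements only the default branch, without the NOESCAPE_SPACE/ESCAPE_EQUAL flag handling):
package main
import "fmt"
func escapeGraph(val []byte) string {
	out := ""
	for _, c := range val {
		if c > 32 && c < 127 { // isgraph with LC_CTYPE=C: printable and not space
			out += string(c)
		} else {
			out += fmt.Sprintf("\\x%.2x", c)
		}
	}
	return out
}
func main() {
	fmt.Println(escapeGraph([]byte("a b\n"))) // prints a\x20b\x0a
}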
@ -104,10 +113,31 @@ func sanitizeName(name string) string {
return path
}
func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
func dumpNode(out io.Writer, added map[string]*internal.FileMetadata, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
path := sanitizeName(entry.Name)
if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil {
parent := filepath.Dir(path)
if _, found := added[parent]; !found && path != "/" {
parentEntry := &internal.FileMetadata{
Name: parent,
Type: internal.TypeDir,
Mode: 0o755,
}
if err := dumpNode(out, added, links, verityDigests, parentEntry); err != nil {
return err
}
}
if e, found := added[path]; found {
// if the entry was already added, make sure it has the same data
if !reflect.DeepEqual(*e, *entry) {
return fmt.Errorf("entry %q already added with different data", path)
}
return nil
}
added[path] = entry
if _, err := fmt.Fprint(out, escaped([]byte(path), ESCAPE_STANDARD)); err != nil {
return err
}
@ -151,7 +181,7 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri
}
}
if _, err := fmt.Fprintf(out, escapedOptional(payload, ESCAPE_LONE_DASH)); err != nil {
if _, err := fmt.Fprint(out, escapedOptional([]byte(payload), ESCAPE_LONE_DASH)); err != nil {
return err
}
@ -165,14 +195,18 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri
return err
}
digest := verityDigests[payload]
if _, err := fmt.Fprintf(out, escapedOptional(digest, ESCAPE_LONE_DASH)); err != nil {
if _, err := fmt.Fprint(out, escapedOptional([]byte(digest), ESCAPE_LONE_DASH)); err != nil {
return err
}
for k, v := range entry.Xattrs {
name := escaped(k, ESCAPE_EQUAL)
value := escaped(v, ESCAPE_EQUAL)
for k, vEncoded := range entry.Xattrs {
v, err := base64.StdEncoding.DecodeString(vEncoded)
if err != nil {
return fmt.Errorf("decode xattr %q: %w", k, err)
}
name := escaped([]byte(k), ESCAPE_EQUAL)
value := escaped(v, ESCAPE_EQUAL)
if _, err := fmt.Fprintf(out, " %s=%s", name, value); err != nil {
return err
}
@ -201,6 +235,7 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
}()
links := make(map[string]int)
added := make(map[string]*internal.FileMetadata)
for _, e := range toc.Entries {
if e.Linkname == "" {
continue
@ -211,14 +246,14 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
links[e.Linkname] = links[e.Linkname] + 1
}
if len(toc.Entries) == 0 || (sanitizeName(toc.Entries[0].Name) != "/") {
if len(toc.Entries) == 0 {
root := &internal.FileMetadata{
Name: "/",
Type: internal.TypeDir,
Mode: 0o755,
}
if err := dumpNode(w, links, verityDigests, root); err != nil {
if err := dumpNode(w, added, links, verityDigests, root); err != nil {
pipeW.CloseWithError(err)
closed = true
return
@ -229,7 +264,7 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
if e.Type == internal.TypeChunk {
continue
}
if err := dumpNode(w, links, verityDigests, &e); err != nil {
if err := dumpNode(w, added, links, verityDigests, &e); err != nil {
pipeW.CloseWithError(err)
closed = true
return

View file

@ -0,0 +1,605 @@
package chunked
import (
"encoding/base64"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"sync/atomic"
"syscall"
"time"
driversCopy "github.com/containers/storage/drivers/copy"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chunked/internal"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/vbatts/tar-split/archive/tar"
"golang.org/x/sys/unix"
)
// procPathForFile returns an absolute path in /proc which
// refers to the file; see procPathForFd.
func procPathForFile(f *os.File) string {
return procPathForFd(int(f.Fd()))
}
// procPathForFd returns an absolute path in /proc which
// refers to the file; this allows passing a file descriptor
// in places that don't accept a file descriptor.
func procPathForFd(fd int) string {
return fmt.Sprintf("/proc/self/fd/%d", fd)
}
// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that
// are not part of the TOC document.
// Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk.
type fileMetadata struct {
internal.FileMetadata
// chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
chunks []*internal.FileMetadata
// skipSetAttrs is set when the file attributes must not be
// modified, e.g. it is a hard link from a different source,
// or a composefs file.
skipSetAttrs bool
}
func doHardLink(dirfd, srcFd int, destFile string) error {
destDir, destBase := filepath.Split(destFile)
destDirFd := dirfd
if destDir != "" && destDir != "." {
f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
if err != nil {
return err
}
defer f.Close()
destDirFd = int(f.Fd())
}
doLink := func() error {
// Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses
// /proc/self/fd doesn't and can be used with rootless.
srcPath := procPathForFd(srcFd)
err := unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)
if err != nil {
return &fs.PathError{Op: "linkat", Path: destFile, Err: err}
}
return nil
}
err := doLink()
// if the destination exists, unlink it first and try again
if err != nil && os.IsExist(err) {
if err := unix.Unlinkat(destDirFd, destBase, 0); err != nil {
return err
}
return doLink()
}
return err
}
func copyFileContent(srcFd int, fileMetadata *fileMetadata, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
destFile := fileMetadata.Name
src := procPathForFd(srcFd)
st, err := os.Stat(src)
if err != nil {
return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err)
}
copyWithFileRange, copyWithFileClone := true, true
if useHardLinks {
err := doHardLink(dirfd, srcFd, destFile)
if err == nil {
// if the file was deduplicated with a hard link, skip overriding file metadata.
fileMetadata.skipSetAttrs = true
return nil, st.Size(), nil
}
}
// If the destination file already exists, we shouldn't blow it away
dstFile, err := openFileUnderRoot(dirfd, destFile, newFileFlags, mode)
if err != nil {
return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err)
}
err = driversCopy.CopyRegularToFile(src, dstFile, st, &copyWithFileRange, &copyWithFileClone)
if err != nil {
dstFile.Close()
return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err)
}
return dstFile, st.Size(), nil
}
func timeToTimespec(time *time.Time) (ts unix.Timespec) {
if time == nil || time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
}
return unix.NsecToTimespec(time.UnixNano())
}
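The magic (1 << 30) - 2 above is the kernel's UTIME_OMIT sentinel, which tells utimensat(2) to leave a timestamp untouched; golang.org/x/sys/unix exposes the same constant on Linux:
//go:build linux
package main
import (
	"fmt"
	"golang.org/x/sys/unix"
)
func main() {
	// UTIME_OMIT tells utimensat(2) to leave that timestamp untouched.
	fmt.Println(unix.UTIME_OMIT == (1<<30)-2) // true
}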
// chown changes the owner and group of the file at the specified path under the directory
// pointed by dirfd.
// If nofollow is true, the function will not follow symlinks.
// If path is empty, the function will change the owner and group of the file descriptor.
// absolutePath is the absolute path of the file, used only for error messages.
func chown(dirfd int, path string, uid, gid int, nofollow bool, absolutePath string) error {
var err error
flags := 0
if nofollow {
flags |= unix.AT_SYMLINK_NOFOLLOW
} else if path == "" {
flags |= unix.AT_EMPTY_PATH
}
err = unix.Fchownat(dirfd, path, uid, gid, flags)
if err == nil {
return nil
}
if errors.Is(err, syscall.EINVAL) {
return fmt.Errorf(`potentially insufficient UIDs or GIDs available in the user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, path, err)
}
return &fs.PathError{Op: "fchownat", Path: absolutePath, Err: err}
}
// setFileAttrs sets the file attributes for file given metadata
func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions, usePath bool) error {
if metadata.skipSetAttrs {
return nil
}
if file == nil {
return errors.New("invalid file")
}
fd := int(file.Fd())
t, err := typeToTarType(metadata.Type)
if err != nil {
return err
}
// If it is a symlink, force to use the path
if t == tar.TypeSymlink {
usePath = true
}
baseName := ""
if usePath {
dirName := filepath.Dir(metadata.Name)
if dirName != "" {
parentFd, err := openFileUnderRoot(dirfd, dirName, unix.O_PATH|unix.O_DIRECTORY, 0)
if err != nil {
return err
}
defer parentFd.Close()
dirfd = int(parentFd.Fd())
}
baseName = filepath.Base(metadata.Name)
}
doChown := func() error {
var err error
if usePath {
err = chown(dirfd, baseName, metadata.UID, metadata.GID, true, metadata.Name)
} else {
err = chown(fd, "", metadata.UID, metadata.GID, false, metadata.Name)
}
if options.IgnoreChownErrors {
return nil
}
return err
}
doSetXattr := func(k string, v []byte) error {
err := unix.Fsetxattr(fd, k, v, 0)
if err != nil {
return &fs.PathError{Op: "fsetxattr", Path: metadata.Name, Err: err}
}
return nil
}
doUtimes := func() error {
ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)}
var err error
if usePath {
err = unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW)
} else {
err = unix.UtimesNanoAt(unix.AT_FDCWD, procPathForFd(fd), ts, 0)
}
if err != nil {
return &fs.PathError{Op: "utimensat", Path: metadata.Name, Err: err}
}
return nil
}
doChmod := func() error {
var err error
op := ""
if usePath {
err = unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
op = "fchmodat"
} else {
err = unix.Fchmod(fd, uint32(mode))
op = "fchmod"
}
if err != nil {
return &fs.PathError{Op: op, Path: metadata.Name, Err: err}
}
return nil
}
if err := doChown(); err != nil {
return err
}
canIgnore := func(err error) bool {
return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP)
}
for k, v := range metadata.Xattrs {
if _, found := xattrsToIgnore[k]; found {
continue
}
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return fmt.Errorf("decode xattr %q: %w", v, err)
}
if err := doSetXattr(k, data); !canIgnore(err) {
return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err)
}
}
if err := doUtimes(); !canIgnore(err) {
return err
}
if err := doChmod(); !canIgnore(err) {
return err
}
return nil
}
func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
root := procPathForFd(dirfd)
targetRoot, err := os.Readlink(root)
if err != nil {
return -1, err
}
hasNoFollow := (flags & unix.O_NOFOLLOW) != 0
var fd int
// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
// last component as the path to openat().
if hasNoFollow {
dirName, baseName := filepath.Split(name)
if dirName != "" && dirName != "." {
newRoot, err := securejoin.SecureJoin(root, dirName)
if err != nil {
return -1, err
}
root = newRoot
}
parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
return -1, &fs.PathError{Op: "open", Path: root, Err: err}
}
defer unix.Close(parentDirfd)
fd, err = unix.Openat(parentDirfd, baseName, int(flags), uint32(mode))
if err != nil {
return -1, &fs.PathError{Op: "openat", Path: name, Err: err}
}
} else {
newPath, err := securejoin.SecureJoin(root, name)
if err != nil {
return -1, err
}
fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
if err != nil {
return -1, &fs.PathError{Op: "openat", Path: newPath, Err: err}
}
}
target, err := os.Readlink(procPathForFd(fd))
if err != nil {
unix.Close(fd)
return -1, err
}
// Add an additional check to make sure the opened fd is inside the rootfs
if !strings.HasPrefix(target, targetRoot) {
unix.Close(fd)
return -1, fmt.Errorf("while resolving %q. It resolves outside the root directory", name)
}
return fd, err
}
func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
how := unix.OpenHow{
Flags: flags,
Mode: uint64(mode & 0o7777),
Resolve: unix.RESOLVE_IN_ROOT,
}
fd, err := unix.Openat2(dirfd, name, &how)
if err != nil {
return -1, &fs.PathError{Op: "openat2", Path: name, Err: err}
}
return fd, nil
}
// skipOpenat2 is set when openat2 is not supported by the underlying kernel, to avoid
// using it again.
var skipOpenat2 int32
// openFileUnderRootRaw tries to open a file using openat2 and, if it is not supported, falls back to a
// userspace lookup.
func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
var fd int
var err error
if name == "" {
fd, err := unix.Dup(dirfd)
if err != nil {
return -1, fmt.Errorf("failed to duplicate file descriptor %d: %w", dirfd, err)
}
return fd, nil
}
if atomic.LoadInt32(&skipOpenat2) > 0 {
fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
} else {
fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode)
// If the function failed with ENOSYS, switch off the support for openat2
// and fall back to using securejoin.
if err != nil && errors.Is(err, unix.ENOSYS) {
atomic.StoreInt32(&skipOpenat2, 1)
fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
}
}
return fd, err
}
// openFileUnderRoot safely opens a file under the specified root directory using openat2
// dirfd is an open file descriptor to the target checkout directory.
// name is the path to open relative to dirfd.
// flags are the flags to pass to the open syscall.
// mode specifies the mode to use for newly created files.
func openFileUnderRoot(dirfd int, name string, flags uint64, mode os.FileMode) (*os.File, error) {
fd, err := openFileUnderRootRaw(dirfd, name, flags, mode)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
hasCreate := (flags & unix.O_CREAT) != 0
if errors.Is(err, unix.ENOENT) && hasCreate {
parent := filepath.Dir(name)
if parent != "" {
newDirfd, err2 := openOrCreateDirUnderRoot(dirfd, parent, 0)
if err2 == nil {
defer newDirfd.Close()
fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
}
}
}
return nil, fmt.Errorf("open %q under the rootfs: %w", name, err)
}
// openOrCreateDirUnderRoot safely opens a directory or creates it if it is missing.
// dirfd is an open file descriptor to the target checkout directory.
// name is the path to open relative to dirfd.
// mode specifies the mode to use for newly created files.
func openOrCreateDirUnderRoot(dirfd int, name string, mode os.FileMode) (*os.File, error) {
fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, 0)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
if errors.Is(err, unix.ENOENT) {
parent := filepath.Dir(name)
if parent != "" {
pDir, err2 := openOrCreateDirUnderRoot(dirfd, parent, mode)
if err2 != nil {
return nil, err
}
defer pDir.Close()
baseName := filepath.Base(name)
if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, uint32(mode)); err2 != nil {
return nil, &fs.PathError{Op: "mkdirat", Path: name, Err: err2}
}
fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, 0)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
}
}
return nil, err
}
// appendHole creates a hole with the specified size at the open fd.
// fd is the open file descriptor.
// name is the path to use for error messages.
// size is the size of the hole to create.
func appendHole(fd int, name string, size int64) error {
off, err := unix.Seek(fd, size, unix.SEEK_CUR)
if err != nil {
return &fs.PathError{Op: "seek", Path: name, Err: err}
}
// Make sure the file size is changed. It might be the last hole and no other data written afterwards.
if err := unix.Ftruncate(fd, off); err != nil {
return &fs.PathError{Op: "ftruncate", Path: name, Err: err}
}
return nil
}
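The seek-then-truncate trick above is what makes a trailing hole count toward the file size; a standalone sketch on a throwaway temp file:
package main
import (
	"fmt"
	"io"
	"os"
)
func main() {
	f, err := os.CreateTemp("", "sparse-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	// Seek past EOF, then truncate to the new offset so the size sticks
	// even though no data follows the hole.
	off, _ := f.Seek(1<<20, io.SeekCurrent)
	f.Truncate(off)
	st, _ := f.Stat()
	fmt.Println(st.Size()) // 1048576
}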
func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error {
parent, base := filepath.Split(name)
parentFd := dirfd
if parent != "" && parent != "." {
parentFile, err := openOrCreateDirUnderRoot(dirfd, parent, 0)
if err != nil {
return err
}
defer parentFile.Close()
parentFd = int(parentFile.Fd())
}
if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil {
if !os.IsExist(err) {
return &fs.PathError{Op: "mkdirat", Path: name, Err: err}
}
}
file, err := openFileUnderRoot(parentFd, base, unix.O_DIRECTORY|unix.O_RDONLY, 0)
if err != nil {
return err
}
defer file.Close()
return setFileAttrs(dirfd, file, mode, metadata, options, false)
}
func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
sourceFile, err := openFileUnderRoot(dirfd, metadata.Linkname, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0)
if err != nil {
return err
}
defer sourceFile.Close()
err = doHardLink(dirfd, int(sourceFile.Fd()), metadata.Name)
if err != nil {
return err
}
newFile, err := openFileUnderRoot(dirfd, metadata.Name, unix.O_WRONLY|unix.O_NOFOLLOW, 0)
if err != nil {
// If the target is a symlink, open the file with O_PATH.
if errors.Is(err, unix.ELOOP) {
newFile, err := openFileUnderRoot(dirfd, metadata.Name, unix.O_PATH|unix.O_NOFOLLOW, 0)
if err != nil {
return err
}
defer newFile.Close()
return setFileAttrs(dirfd, newFile, mode, metadata, options, true)
}
return err
}
defer newFile.Close()
return setFileAttrs(dirfd, newFile, mode, metadata, options, false)
}
func safeSymlink(dirfd int, metadata *fileMetadata) error {
destDir, destBase := filepath.Split(metadata.Name)
destDirFd := dirfd
if destDir != "" && destDir != "." {
f, err := openOrCreateDirUnderRoot(dirfd, destDir, 0)
if err != nil {
return err
}
defer f.Close()
destDirFd = int(f.Fd())
}
if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil {
return &fs.PathError{Op: "symlinkat", Path: metadata.Name, Err: err}
}
return nil
}
type whiteoutHandler struct {
Dirfd int
Root string
}
func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
file, err := openOrCreateDirUnderRoot(d.Dirfd, path, 0)
if err != nil {
return err
}
defer file.Close()
if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil {
return &fs.PathError{Op: "fsetxattr", Path: path, Err: err}
}
return nil
}
func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
dir, base := filepath.Split(path)
dirfd := d.Dirfd
if dir != "" && dir != "." {
dir, err := openOrCreateDirUnderRoot(d.Dirfd, dir, 0)
if err != nil {
return err
}
defer dir.Close()
dirfd = int(dir.Fd())
}
if err := unix.Mknodat(dirfd, base, mode, dev); err != nil {
return &fs.PathError{Op: "mknodat", Path: path, Err: err}
}
return nil
}
func (d whiteoutHandler) Chown(path string, uid, gid int) error {
file, err := openFileUnderRoot(d.Dirfd, path, unix.O_PATH, 0)
if err != nil {
return err
}
defer file.Close()
return chown(int(file.Fd()), "", uid, gid, false, path)
}
type readerAtCloser interface {
io.ReaderAt
io.Closer
}
// seekableFile is a struct that wraps an *os.File to provide an ImageSourceSeekable.
type seekableFile struct {
reader readerAtCloser
}
func (f *seekableFile) Close() error {
return f.reader.Close()
}
func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
streams := make(chan io.ReadCloser)
errs := make(chan error)
go func() {
for _, chunk := range chunks {
streams <- io.NopCloser(io.NewSectionReader(f.reader, int64(chunk.Offset), int64(chunk.Length)))
}
close(streams)
close(errs)
}()
return streams, errs, nil
}
func newSeekableFile(reader readerAtCloser) *seekableFile {
return &seekableFile{reader: reader}
}
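A minimal standalone mirror of the GetBlobAt pattern above: byte ranges of one ReaderAt served as a stream of io.ReadClosers over a channel (the offsets here are arbitrary):
package main
import (
	"fmt"
	"io"
	"strings"
)
func main() {
	// Serve byte ranges of a single ReaderAt as a stream of ReadClosers,
	// mirroring seekableFile.GetBlobAt above.
	src := strings.NewReader("0123456789")
	type chunk struct{ off, length int64 }
	streams := make(chan io.ReadCloser)
	go func() {
		for _, c := range []chunk{{2, 3}, {7, 2}} {
			streams <- io.NopCloser(io.NewSectionReader(src, c.off, c.length))
		}
		close(streams)
	}()
	for rc := range streams {
		b, _ := io.ReadAll(rc)
		fmt.Printf("%s ", b) // prints: 234 78
	}
}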

View file

@ -5,25 +5,57 @@ package internal
// larger software like the graph drivers.
import (
"archive/tar"
"bytes"
"encoding/base64"
"encoding/binary"
"fmt"
"io"
"strings"
"time"
"github.com/containers/storage/pkg/archive"
jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zstd"
"github.com/opencontainers/go-digest"
"github.com/vbatts/tar-split/archive/tar"
)
// TOC is short for Table of Contents and is used by the zstd:chunked
// file format to effectively add an overall index into the contents
// of a tarball; it also includes file metadata.
type TOC struct {
Version int `json:"version"`
Entries []FileMetadata `json:"entries"`
TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
// Version is currently expected to be 1
Version int `json:"version"`
// Entries is the list of file metadata in this TOC.
// The ordering in this array currently defaults to being the same
// as that of the tar stream; however, this should not be relied on.
Entries []FileMetadata `json:"entries"`
// TarSplitDigest is the checksum of the "tar-split" data which
// is included as a distinct skippable zstd frame before the TOC.
TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
}
// FileMetadata is an entry in the TOC that includes both generic file metadata
// that duplicates what can be found in the tar header (and should match), but
// also special/custom content (see below).
//
// Regular files may optionally be represented as a sequence of “chunks”,
// which may be ChunkTypeData or ChunkTypeZeros (and ChunkTypeData boundaries
// are heuristically determined to increase chance of chunk matching / reuse
// similar to rsync). In that case, the regular file is represented
// as an initial TypeReg entry (with all metadata for the file as a whole)
// immediately followed by zero or more TypeChunk entries (containing only Type,
// Name and Chunk* fields); if there is at least one TypeChunk entry, the Chunk*
// fields are relevant in all of these entries, including the initial
// TypeReg one.
//
// Note that the metadata here, when fetched by a zstd:chunked aware client,
// is used instead of that in the tar stream. The contents of the tar stream
// are not used in this scenario.
type FileMetadata struct {
// If you add any fields, update ensureFileMetadataAttributesMatch as well!
// The metadata below largely duplicates that in the tar headers.
Type string `json:"type"`
Name string `json:"name"`
Linkname string `json:"linkName,omitempty"`
@ -37,9 +69,11 @@ type FileMetadata struct {
Devmajor int64 `json:"devMajor,omitempty"`
Devminor int64 `json:"devMinor,omitempty"`
Xattrs map[string]string `json:"xattrs,omitempty"`
Digest string `json:"digest,omitempty"`
Offset int64 `json:"offset,omitempty"`
EndOffset int64 `json:"endOffset,omitempty"`
// Digest is a hexadecimal sha256 checksum of the file contents; it
// is empty for empty files
Digest string `json:"digest,omitempty"`
Offset int64 `json:"offset,omitempty"`
EndOffset int64 `json:"endOffset,omitempty"`
ChunkSize int64 `json:"chunkSize,omitempty"`
ChunkOffset int64 `json:"chunkOffset,omitempty"`
@ -53,19 +87,23 @@ const (
)
const (
// The following types correspond to regular types of entries that can
// appear in a tar archive.
TypeReg = "reg"
TypeChunk = "chunk"
TypeLink = "hardlink"
TypeChar = "char"
TypeBlock = "block"
TypeDir = "dir"
TypeFifo = "fifo"
TypeSymlink = "symlink"
// TypeChunk is special; in zstd:chunked, not only are files individually
// compressed and indexable, but a "rolling checksum" is used to compute
// "chunks" of individual file contents, which are also added to the TOC
TypeChunk = "chunk"
)
var TarTypes = map[byte]string{
tar.TypeReg: TypeReg,
tar.TypeRegA: TypeReg,
tar.TypeLink: TypeLink,
tar.TypeChar: TypeChar,
tar.TypeBlock: TypeBlock,
@ -83,11 +121,23 @@ func GetType(t byte) (string, error) {
}
const (
// ManifestChecksumKey is a hexadecimal sha256 digest of the compressed manifest.
ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
// ManifestInfoKey is an annotation that signals the start of the TOC (manifest)
// contents which are embedded as a skippable zstd frame. It has a format of
// four decimal integers separated by `:` as follows:
// <offset>:<length>:<uncompressed length>:<type>
// The <type> is ManifestTypeCRFS which should have the value `1`.
ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
// TarSplitInfoKey is an annotation that signals the start of the "tar-split" metadata
// contents which are embedded as a skippable zstd frame. It has a format of
// three decimal integers separated by `:` as follows:
// <offset>:<length>:<uncompressed length>
TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" // Deprecated: Use the TOC.TarSplitDigest field instead, this annotation is no longer read nor written.
// TarSplitChecksumKey is no longer used and is replaced by the TOC.TarSplitDigest field instead.
// The value is retained here as a historical reference for older zstd:chunked images.
// TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
ManifestTypeCRFS = 1
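A hedged sketch of decoding the colon-separated manifest-position annotation documented above (the sample value is invented):
package main
import (
	"fmt"
	"strconv"
	"strings"
)
func main() {
	// <offset>:<length>:<uncompressed length>:<type>
	sample := "1024:512:4096:1" // invented value
	parts := strings.Split(sample, ":")
	vals := make([]uint64, len(parts))
	for i, p := range parts {
		v, err := strconv.ParseUint(p, 10, 64)
		if err != nil {
			panic(err)
		}
		vals[i] = v
	}
	fmt.Println("offset:", vals[0], "length:", vals[1], "uncompressed:", vals[2], "type:", vals[3])
}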
@ -232,3 +282,43 @@ func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
return manifestDataLE
}
// timeIfNotZero returns a pointer to the time.Time if it is not zero, otherwise it returns nil.
func timeIfNotZero(t *time.Time) *time.Time {
if t == nil || t.IsZero() {
return nil
}
return t
}
// NewFileMetadata creates a basic FileMetadata entry for hdr.
// The caller must set Digest, Offset and EndOffset, and the Chunk* values, separately.
func NewFileMetadata(hdr *tar.Header) (FileMetadata, error) {
typ, err := GetType(hdr.Typeflag)
if err != nil {
return FileMetadata{}, err
}
xattrs := make(map[string]string)
for k, v := range hdr.PAXRecords {
xattrKey, ok := strings.CutPrefix(k, archive.PaxSchilyXattr)
if !ok {
continue
}
xattrs[xattrKey] = base64.StdEncoding.EncodeToString([]byte(v))
}
return FileMetadata{
Type: typ,
Name: hdr.Name,
Linkname: hdr.Linkname,
Mode: hdr.Mode,
Size: hdr.Size,
UID: hdr.Uid,
GID: hdr.Gid,
ModTime: timeIfNotZero(&hdr.ModTime),
AccessTime: timeIfNotZero(&hdr.AccessTime),
ChangeTime: timeIfNotZero(&hdr.ChangeTime),
Devmajor: hdr.Devmajor,
Devminor: hdr.Devminor,
Xattrs: xattrs,
}, nil
}
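A test-style sketch of NewFileMetadata as it might be exercised from within pkg/chunked (the internal package is not importable from outside containers/storage, and this test is not part of the patch):
package chunked
import (
	"testing"
	"time"
	"github.com/containers/storage/pkg/chunked/internal"
	"github.com/vbatts/tar-split/archive/tar"
)
func TestNewFileMetadataSketch(t *testing.T) {
	hdr := &tar.Header{
		Typeflag: tar.TypeReg,
		Name:     "etc/hosts",
		Mode:     0o644,
		Size:     12,
		ModTime:  time.Now(),
	}
	md, err := internal.NewFileMetadata(hdr)
	if err != nil {
		t.Fatal(err)
	}
	if md.Type != internal.TypeReg || md.Size != 12 {
		t.Fatalf("unexpected metadata: %+v", md)
	}
}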

View file

@ -8,20 +8,18 @@ import (
"fmt"
"hash"
"io"
"io/fs"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/containerd/stargz-snapshotter/estargz"
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
driversCopy "github.com/containers/storage/drivers/copy"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chunked/compressor"
"github.com/containers/storage/pkg/chunked/internal"
@ -29,8 +27,6 @@ import (
"github.com/containers/storage/pkg/fsverity"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
securejoin "github.com/cyphar/filepath-securejoin"
jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
@ -42,9 +38,8 @@ import (
const (
maxNumberMissingChunks = 1024
autoMergePartsThreshold = 128 // if the gap between two ranges is below this threshold, automatically merge them.
autoMergePartsThreshold = 1024 // if the gap between two ranges is below this threshold, automatically merge them.
newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
containersOverrideXattr = "user.containers.override_stat"
bigDataKey = "zstd-chunked-manifest"
chunkedData = "zstd-chunked-data"
chunkedLayerDataKey = "zstd-chunked-layer-data"
@ -59,21 +54,6 @@ const (
copyGoRoutines = 32
)
// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that
// are not part of the TOC document.
// Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk.
type fileMetadata struct {
internal.FileMetadata
// chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg.
chunks []*internal.FileMetadata
// skipSetAttrs is set when the file attributes must not be
// modified, e.g. it is a hard link from a different source,
// or a composefs file.
skipSetAttrs bool
}
type compressedFileType int
type chunkedDiffer struct {
@ -111,7 +91,7 @@ type chunkedDiffer struct {
blobSize int64
storeOpts *types.StoreOptions
pullOptions map[string]string
useFsVerity graphdriver.DifferFsVerity
fsVerityDigests map[string]string
@ -127,98 +107,7 @@ type chunkedLayerData struct {
Format graphdriver.DifferOutputFormat `json:"format"`
}
func timeToTimespec(time *time.Time) (ts unix.Timespec) {
if time == nil || time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
}
return unix.NsecToTimespec(time.UnixNano())
}
func doHardLink(srcFd int, destDirFd int, destBase string) error {
doLink := func() error {
// Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses
// /proc/self/fd doesn't and can be used with rootless.
srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd)
return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)
}
err := doLink()
// if the destination exists, unlink it first and try again
if err != nil && os.IsExist(err) {
unix.Unlinkat(destDirFd, destBase, 0)
return doLink()
}
return err
}
func copyFileContent(srcFd int, fileMetadata *fileMetadata, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) {
destFile := fileMetadata.Name
src := fmt.Sprintf("/proc/self/fd/%d", srcFd)
st, err := os.Stat(src)
if err != nil {
return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err)
}
copyWithFileRange, copyWithFileClone := true, true
if useHardLinks {
destDirPath := filepath.Dir(destFile)
destBase := filepath.Base(destFile)
destDir, err := openFileUnderRoot(destDirPath, dirfd, 0, mode)
if err == nil {
defer destDir.Close()
err := doHardLink(srcFd, int(destDir.Fd()), destBase)
if err == nil {
// if the file was deduplicated with a hard link, skip overriding file metadata.
fileMetadata.skipSetAttrs = true
return nil, st.Size(), nil
}
}
}
// If the destination file already exists, we shouldn't blow it away
dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode)
if err != nil {
return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err)
}
err = driversCopy.CopyRegularToFile(src, dstFile, st, &copyWithFileRange, &copyWithFileClone)
if err != nil {
dstFile.Close()
return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err)
}
return dstFile, st.Size(), nil
}
type seekableFile struct {
file *os.File
}
func (f *seekableFile) Close() error {
return f.file.Close()
}
func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
streams := make(chan io.ReadCloser)
errs := make(chan error)
go func() {
for _, chunk := range chunks {
streams <- io.NopCloser(io.NewSectionReader(f.file, int64(chunk.Offset), int64(chunk.Length)))
}
close(streams)
close(errs)
}()
return streams, errs, nil
}
func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) {
diff, err := archive.DecompressStream(payload)
if err != nil {
return 0, nil, "", nil, err
@ -226,7 +115,7 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *se
fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
if err != nil {
return 0, nil, "", nil, err
return 0, nil, "", nil, &fs.PathError{Op: "open", Path: destDirectory, Err: err}
}
f := os.NewFile(uintptr(fd), destDirectory)
@ -240,7 +129,7 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *se
}
convertedOutputDigester := digest.Canonical.Digester()
copied, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff)
copied, err := io.CopyBuffer(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff, c.copyBuffer)
if err != nil {
f.Close()
return 0, nil, "", nil, err
@ -249,21 +138,15 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *se
f.Close()
return 0, nil, "", nil, err
}
is := seekableFile{
file: f,
}
return copied, &is, convertedOutputDigester.Digest(), newAnnotations, nil
return copied, newSeekableFile(f), convertedOutputDigester.Digest(), newAnnotations, nil
}
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
storeOpts, err := types.DefaultStoreOptions()
if err != nil {
return nil, err
}
pullOptions := store.PullOptions()
if !parseBooleanPullOption(&storeOpts, "enable_partial_images", true) {
if !parseBooleanPullOption(pullOptions, "enable_partial_images", true) {
return nil, errors.New("enable_partial_images not configured")
}
@ -279,21 +162,21 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
if err != nil {
return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err)
}
return makeZstdChunkedDiffer(ctx, store, blobSize, zstdChunkedTOCDigest, annotations, iss, &storeOpts)
return makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
}
if hasEstargzTOC {
estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
if err != nil {
return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err)
}
return makeEstargzChunkedDiffer(ctx, store, blobSize, estargzTOCDigest, iss, &storeOpts)
return makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
}
return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts)
return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
}
func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
if !parseBooleanPullOption(storeOpts, "convert_images", false) {
func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
if !parseBooleanPullOption(pullOptions, "convert_images", false) {
return nil, errors.New("convert_images not configured")
}
@ -309,12 +192,12 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige
convertToZstdChunked: true,
copyBuffer: makeCopyBuffer(),
layersCache: layersCache,
storeOpts: storeOpts,
pullOptions: pullOptions,
stream: iss,
}, nil
}
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
@ -333,14 +216,14 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
layersCache: layersCache,
manifest: manifest,
toc: toc,
storeOpts: storeOpts,
pullOptions: pullOptions,
stream: iss,
tarSplit: tarSplit,
tocOffset: tocOffset,
}, nil
}
func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest)
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
@ -358,7 +241,7 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
fileType: fileTypeEstargz,
layersCache: layersCache,
manifest: manifest,
storeOpts: storeOpts,
pullOptions: pullOptions,
stream: iss,
tocOffset: tocOffset,
}, nil
@ -375,15 +258,15 @@ func makeCopyBuffer() []byte {
// dirfd is an open file descriptor to the destination root directory.
// useHardLinks defines whether the deduplication can be performed using hard links.
func copyFileFromOtherLayer(file *fileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
srcDirfd, err := unix.Open(source, unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return false, nil, 0, fmt.Errorf("open source file: %w", err)
return false, nil, 0, &fs.PathError{Op: "open", Path: source, Err: err}
}
defer unix.Close(srcDirfd)
srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0)
srcFile, err := openFileUnderRoot(srcDirfd, name, unix.O_RDONLY|syscall.O_CLOEXEC, 0)
if err != nil {
return false, nil, 0, fmt.Errorf("open source file under target rootfs (%s): %w", name, err)
return false, nil, 0, err
}
defer srcFile.Close()
@ -420,7 +303,7 @@ func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool {
return false
}
path := fmt.Sprintf("/proc/self/fd/%d", fd)
path := procPathForFd(fd)
listXattrs, err := system.Llistxattr(path)
if err != nil {
@ -476,7 +359,7 @@ func findFileInOSTreeRepos(file *fileMetadata, ostreeRepos []string, dirfd int,
if st.Size() != file.Size {
continue
}
fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0)
fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK|unix.O_CLOEXEC, 0)
if err != nil {
logrus.Debugf("could not open sourceFile %s: %v", sourceFile, err)
return false, nil, 0, nil
@ -585,15 +468,15 @@ type missingPart struct {
}
func (o *originFile) OpenFile() (io.ReadCloser, error) {
srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0)
srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, fmt.Errorf("open source file: %w", err)
return nil, &fs.PathError{Op: "open", Path: o.Root, Err: err}
}
defer unix.Close(srcDirfd)
srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0)
srcFile, err := openFileUnderRoot(srcDirfd, o.Path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, fmt.Errorf("open source file under target rootfs: %w", err)
return nil, err
}
if _, err := srcFile.Seek(o.Offset, 0); err != nil {
@ -603,253 +486,6 @@ func (o *originFile) OpenFile() (io.ReadCloser, error) {
return srcFile, nil
}
// setFileAttrs sets the file attributes for file given metadata
func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions, usePath bool) error {
if metadata.skipSetAttrs {
return nil
}
if file == nil || file.Fd() < 0 {
return errors.New("invalid file")
}
fd := int(file.Fd())
t, err := typeToTarType(metadata.Type)
if err != nil {
return err
}
// If it is a symlink, force to use the path
if t == tar.TypeSymlink {
usePath = true
}
baseName := ""
if usePath {
dirName := filepath.Dir(metadata.Name)
if dirName != "" {
parentFd, err := openFileUnderRoot(dirName, dirfd, unix.O_PATH|unix.O_DIRECTORY, 0)
if err != nil {
return err
}
defer parentFd.Close()
dirfd = int(parentFd.Fd())
}
baseName = filepath.Base(metadata.Name)
}
doChown := func() error {
if usePath {
return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW)
}
return unix.Fchown(fd, metadata.UID, metadata.GID)
}
doSetXattr := func(k string, v []byte) error {
return unix.Fsetxattr(fd, k, v, 0)
}
doUtimes := func() error {
ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)}
if usePath {
return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW)
}
return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0)
}
doChmod := func() error {
if usePath {
return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
}
return unix.Fchmod(fd, uint32(mode))
}
if err := doChown(); err != nil {
if !options.IgnoreChownErrors {
return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err)
}
}
canIgnore := func(err error) bool {
return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP)
}
for k, v := range metadata.Xattrs {
if _, found := xattrsToIgnore[k]; found {
continue
}
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return fmt.Errorf("decode xattr %q: %w", v, err)
}
if err := doSetXattr(k, data); !canIgnore(err) {
return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err)
}
}
if err := doUtimes(); !canIgnore(err) {
return fmt.Errorf("set utimes for %q: %w", metadata.Name, err)
}
if err := doChmod(); !canIgnore(err) {
return fmt.Errorf("chmod %q: %w", metadata.Name, err)
}
return nil
}
func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
root := fmt.Sprintf("/proc/self/fd/%d", dirfd)
targetRoot, err := os.Readlink(root)
if err != nil {
return -1, err
}
hasNoFollow := (flags & unix.O_NOFOLLOW) != 0
var fd int
// If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the
// last component as the path to openat().
if hasNoFollow {
dirName := filepath.Dir(name)
if dirName != "" {
newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name))
if err != nil {
return -1, err
}
root = newRoot
}
parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
if err != nil {
return -1, err
}
defer unix.Close(parentDirfd)
fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode))
if err != nil {
return -1, err
}
} else {
newPath, err := securejoin.SecureJoin(root, name)
if err != nil {
return -1, err
}
fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
if err != nil {
return -1, err
}
}
target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd))
if err != nil {
unix.Close(fd)
return -1, err
}
// Add an additional check to make sure the opened fd is inside the rootfs
if !strings.HasPrefix(target, targetRoot) {
unix.Close(fd)
return -1, fmt.Errorf("while resolving %q. It resolves outside the root directory", name)
}
return fd, err
}
func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
how := unix.OpenHow{
Flags: flags,
Mode: uint64(mode & 0o7777),
Resolve: unix.RESOLVE_IN_ROOT,
}
return unix.Openat2(dirfd, name, &how)
}
// skipOpenat2 is set when openat2 is not supported by the underlying kernel, to avoid
// trying it again.
var skipOpenat2 int32
// openFileUnderRootRaw tries to open a file using openat2 and, if it is not supported,
// falls back to a userspace lookup.
func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
var fd int
var err error
if atomic.LoadInt32(&skipOpenat2) > 0 {
fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
} else {
fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode)
// If the function failed with ENOSYS, switch off the support for openat2
// and fall back to using SecureJoin.
if err != nil && errors.Is(err, unix.ENOSYS) {
atomic.StoreInt32(&skipOpenat2, 1)
fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
}
}
return fd, err
}
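The probe-and-cache pattern above in isolation (a sketch with hypothetical names, not the vendored code): try openat2(2) with RESOLVE_IN_ROOT once, and on ENOSYS remember that the kernel (pre-5.6) lacks it so later calls skip straight to the fallback.
package example
import (
	"errors"
	"sync/atomic"

	"golang.org/x/sys/unix"
)
var openat2Missing atomic.Bool // sticky: set once ENOSYS has been observed
func openInRoot(dirfd int, name string, flags uint64) (int, error) {
	if !openat2Missing.Load() {
		how := unix.OpenHow{Flags: flags, Resolve: unix.RESOLVE_IN_ROOT}
		fd, err := unix.Openat2(dirfd, name, &how)
		if !errors.Is(err, unix.ENOSYS) {
			return fd, err // success, or a real error other than "not implemented"
		}
		openat2Missing.Store(true)
	}
	// A real implementation would fall back to a userspace walk here,
	// as openFileUnderRootFallback does above.
	return -1, unix.ENOSYS
}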
// openFileUnderRoot safely opens a file under the specified root directory using openat2
// name is the path to open relative to dirfd.
// dirfd is an open file descriptor to the target checkout directory.
// flags are the flags to pass to the open syscall.
// mode specifies the mode to use for newly created files.
func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) {
fd, err := openFileUnderRootRaw(dirfd, name, flags, mode)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
hasCreate := (flags & unix.O_CREAT) != 0
if errors.Is(err, unix.ENOENT) && hasCreate {
parent := filepath.Dir(name)
if parent != "" {
newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0)
if err2 == nil {
defer newDirfd.Close()
fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
}
}
}
return nil, fmt.Errorf("open %q under the rootfs: %w", name, err)
}
// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing.
// name is the path to open relative to dirfd.
// dirfd is an open file descriptor to the target checkout directory.
// mode specifies the mode to use for newly created files.
func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) {
fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
if errors.Is(err, unix.ENOENT) {
parent := filepath.Dir(name)
if parent != "" {
pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode)
if err2 != nil {
return nil, err
}
defer pDir.Close()
baseName := filepath.Base(name)
if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0o755); err2 != nil {
return nil, err
}
fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
}
}
return nil, err
}
func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) {
switch {
case partCompression == fileTypeHole:
@ -918,23 +554,14 @@ func hashHole(h hash.Hash, size int64, copyBuffer []byte) error {
return nil
}
// appendHole creates a hole with the specified size at the open fd.
func appendHole(fd int, size int64) error {
off, err := unix.Seek(fd, size, unix.SEEK_CUR)
if err != nil {
return err
}
// Make sure the file size is changed. It might be the last hole and no other data written afterwards.
if err := unix.Ftruncate(fd, off); err != nil {
return err
}
return nil
}
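The same hole-appending idea end to end (hypothetical helper): seeking past the current offset allocates no blocks, and the final ftruncate pins the size so a hole at the tail of the file is not silently dropped.
package example
import "golang.org/x/sys/unix"
func makeTrailingHole(path string, data []byte, holeLen int64) error {
	fd, err := unix.Open(path, unix.O_CREAT|unix.O_WRONLY|unix.O_TRUNC|unix.O_CLOEXEC, 0o600)
	if err != nil {
		return err
	}
	defer unix.Close(fd)
	if _, err := unix.Write(fd, data); err != nil {
		return err
	}
	// Same two steps as appendHole: seek forward (no allocation), then
	// ftruncate so the file size reflects the trailing hole.
	off, err := unix.Seek(fd, holeLen, unix.SEEK_CUR)
	if err != nil {
		return err
	}
	return unix.Ftruncate(fd, off)
}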
func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error {
switch compression {
case fileTypeZstdChunked:
defer c.zstdReader.Reset(nil)
defer func() {
if err := c.zstdReader.Reset(nil); err != nil {
logrus.Warnf("release of references to the previous zstd reader failed: %v", err)
}
}()
if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil {
return err
}
@ -948,7 +575,7 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT
return err
}
case fileTypeHole:
if err := appendHole(int(destFile.file.Fd()), size); err != nil {
if err := appendHole(int(destFile.file.Fd()), destFile.metadata.Name, size); err != nil {
return err
}
if destFile.hash != nil {
@ -977,7 +604,7 @@ type destinationFile struct {
}
func openDestinationFile(dirfd int, metadata *fileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) {
file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
file, err := openFileUnderRoot(dirfd, metadata.Name, newFileFlags, 0)
if err != nil {
return nil, err
}
@ -1080,7 +707,7 @@ func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error {
return nil
}
func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
var destFile *destinationFile
filesToClose := make(chan *destinationFile, 3)
@ -1294,7 +921,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
return newMissingParts
}
func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
var chunksToRequest []ImageSourceChunk
calculateChunksToRequest := func() {
@ -1333,167 +960,12 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
return err
}
if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil {
if err := c.storeMissingFiles(streams, errs, dirfd, missingParts, options); err != nil {
return err
}
return nil
}
func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error {
parent := filepath.Dir(name)
base := filepath.Base(name)
parentFd := dirfd
if parent != "." {
parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0)
if err != nil {
return err
}
defer parentFile.Close()
parentFd = int(parentFile.Fd())
}
if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil {
if !os.IsExist(err) {
return fmt.Errorf("mkdir %q: %w", name, err)
}
}
file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0)
if err != nil {
return err
}
defer file.Close()
return setFileAttrs(dirfd, file, mode, metadata, options, false)
}
func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0)
if err != nil {
return err
}
defer sourceFile.Close()
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
destDirFd := dirfd
if destDir != "." {
f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0)
if err != nil {
return err
}
defer f.Close()
destDirFd = int(f.Fd())
}
err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase)
if err != nil {
return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err)
}
newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0)
if err != nil {
// If the target is a symlink, open the file with O_PATH.
if errors.Is(err, unix.ELOOP) {
newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0)
if err != nil {
return err
}
defer newFile.Close()
return setFileAttrs(dirfd, newFile, mode, metadata, options, true)
}
return err
}
defer newFile.Close()
return setFileAttrs(dirfd, newFile, mode, metadata, options, false)
}
func safeSymlink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name)
destDirFd := dirfd
if destDir != "." {
f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0)
if err != nil {
return err
}
defer f.Close()
destDirFd = int(f.Fd())
}
if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil {
return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err)
}
return nil
}
type whiteoutHandler struct {
Dirfd int
Root string
}
func (d whiteoutHandler) Setxattr(path, name string, value []byte) error {
file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0)
if err != nil {
return err
}
defer file.Close()
if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil {
return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err)
}
return nil
}
func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error {
dir := filepath.Dir(path)
base := filepath.Base(path)
dirfd := d.Dirfd
if dir != "" {
dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0)
if err != nil {
return err
}
defer dir.Close()
dirfd = int(dir.Fd())
}
if err := unix.Mknodat(dirfd, base, mode, dev); err != nil {
return fmt.Errorf("mknod %q: %w", path, err)
}
return nil
}
func checkChownErr(err error, name string, uid, gid int) error {
if errors.Is(err, syscall.EINVAL) {
return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err)
}
return err
}
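A hypothetical call site for checkChownErr (sketch, same package): only EINVAL is rewritten, since inside a user namespace it usually means the requested IDs fall outside the ranges mapped via /etc/subuid and /etc/subgid.
func chownWithHint(path string, uid, gid int) error {
	if err := unix.Fchownat(unix.AT_FDCWD, path, uid, gid, unix.AT_SYMLINK_NOFOLLOW); err != nil {
		return checkChownErr(err, path, uid, gid)
	}
	return nil
}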
func (d whiteoutHandler) Chown(path string, uid, gid int) error {
file, err := openFileUnderRoot(path, d.Dirfd, unix.O_PATH, 0)
if err != nil {
return err
}
defer file.Close()
if err := unix.Fchownat(int(file.Fd()), "", uid, gid, unix.AT_EMPTY_PATH); err != nil {
var stat unix.Stat_t
if unix.Fstat(int(file.Fd()), &stat) == nil {
if stat.Uid == uint32(uid) && stat.Gid == uint32(gid) {
return nil
}
}
return checkChownErr(err, path, uid, gid)
}
return nil
}
type hardLinkToCreate struct {
dest string
dirfd int
@ -1501,8 +973,8 @@ type hardLinkToCreate struct {
metadata *fileMetadata
}
func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool {
if value, ok := storeOpts.PullOptions[name]; ok {
func parseBooleanPullOption(pullOptions map[string]string, name string, def bool) bool {
if value, ok := pullOptions[name]; ok {
return strings.ToLower(value) == "true"
}
return def
@ -1515,10 +987,10 @@ type findAndCopyFileOptions struct {
}
func reopenFileReadOnly(f *os.File) (*os.File, error) {
path := fmt.Sprintf("/proc/self/fd/%d", f.Fd())
path := procPathForFile(f)
fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, err
return nil, &fs.PathError{Op: "open", Path: path, Err: err}
}
return os.NewFile(uintptr(fd), f.Name()), nil
}
@ -1636,7 +1108,7 @@ func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest,
r := io.TeeReader(payload, originalRawDigester.Hash())
// copy the entire tarball and compute its digest
_, err = io.Copy(destination, r)
_, err = io.CopyBuffer(destination, r, c.copyBuffer)
return originalRawDigester.Digest(), err
}
@ -1654,13 +1126,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
// stream to use for reading the zstd:chunked or Estargz file.
stream := c.stream
var compressedDigest digest.Digest
var uncompressedDigest digest.Digest
var convertedBlobSize int64
if c.convertToZstdChunked {
fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
return graphdriver.DriverWithDifferOutput{}, &fs.PathError{Op: "open", Path: dest, Err: err}
}
blobFile := os.NewFile(uintptr(fd), "blob-file")
defer func() {
@ -1670,7 +1143,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}()
// calculate the checksum before accessing the file.
compressedDigest, err := c.copyAllBlobToFile(blobFile)
compressedDigest, err = c.copyAllBlobToFile(blobFile)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
@ -1683,7 +1156,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
return graphdriver.DriverWithDifferOutput{}, err
}
tarSize, fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile)
tarSize, fileSource, diffID, annotations, err := c.convertTarToZstdChunked(dest, blobFile)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
@ -1756,14 +1229,15 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
},
TOCDigest: c.tocDigest,
UncompressedDigest: uncompressedDigest,
CompressedDigest: compressedDigest,
}
// When hard-link deduplication is used, file attributes are ignored because setting them
// modifies the source file as well.
useHardLinks := parseBooleanPullOption(c.storeOpts, "use_hard_links", false)
useHardLinks := parseBooleanPullOption(c.pullOptions, "use_hard_links", false)
// List of OSTree repositories to use for deduplication
ostreeRepos := strings.Split(c.storeOpts.PullOptions["ostree_repos"], ":")
ostreeRepos := strings.Split(c.pullOptions["ostree_repos"], ":")
whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
@ -1790,16 +1264,19 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
if options.ForceMask != nil {
uid, gid, mode, err := archive.GetFileOwner(dest)
if err == nil {
value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
if err := unix.Setxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil {
value := idtools.Stat{
IDs: idtools.IDPair{UID: int(uid), GID: int(gid)},
Mode: os.FileMode(mode),
}
if err := idtools.SetContainersOverrideXattr(dest, value); err != nil {
return output, err
}
}
}
dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0)
dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
return output, fmt.Errorf("cannot open %q: %w", dest, err)
return output, &fs.PathError{Op: "open", Path: dest, Err: err}
}
defer unix.Close(dirfd)
@ -1812,7 +1289,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
for _, e := range mergedEntries {
d := e.Name[0:2]
if _, found := createdDirs[d]; !found {
unix.Mkdirat(dirfd, d, 0o755)
if err := unix.Mkdirat(dirfd, d, 0o755); err != nil {
return output, &fs.PathError{Op: "mkdirat", Path: d, Err: err}
}
createdDirs[d] = struct{}{}
}
}
@ -1868,11 +1347,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
filesToWaitFor := 0
for i, r := range mergedEntries {
for i := range mergedEntries {
r := &mergedEntries[i]
if options.ForceMask != nil {
value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&0o7777)
r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
r.Mode = int64(*options.ForceMask)
value := idtools.FormatContainersOverrideXattr(r.UID, r.GID, int(r.Mode))
if r.Xattrs == nil {
r.Xattrs = make(map[string]string)
}
r.Xattrs[idtools.ContainersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value))
}
mode := os.FileMode(r.Mode)
@ -1916,12 +1398,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
if r.Size == 0 {
// Used to have a scope for cleanup.
createEmptyFile := func() error {
file, err := openFileUnderRoot(r.Name, dirfd, newFileFlags, 0)
file, err := openFileUnderRoot(dirfd, r.Name, newFileFlags, 0)
if err != nil {
return err
}
defer file.Close()
if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil {
if err := setFileAttrs(dirfd, file, mode, r, options, false); err != nil {
return err
}
return nil
@ -1936,7 +1418,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
if r.Name == "" || r.Name == "." {
output.RootDirMode = &mode
}
if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil {
if err := safeMkdir(dirfd, mode, r.Name, r, options); err != nil {
return output, err
}
continue
@ -1950,12 +1432,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
dest: dest,
dirfd: dirfd,
mode: mode,
metadata: &r,
metadata: r,
})
continue
case tar.TypeSymlink:
if err := safeSymlink(dirfd, mode, &r, options); err != nil {
if err := safeSymlink(dirfd, r); err != nil {
return output, err
}
continue
@ -2057,7 +1539,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
// There are some missing files. Prepare a multirange request for the missing chunks.
if len(missingParts) > 0 {
if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil {
if err := c.retrieveMissingFiles(stream, dirfd, missingParts, options); err != nil {
return output, err
}
}
@ -2167,13 +1649,13 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
// same digest as chunk.ChunkDigest
func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
return false
}
defer unix.Close(parentDirfd)
fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0)
fd, err := openFileUnderRoot(parentDirfd, path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return false
}

View file

@ -0,0 +1,68 @@
package chunked
import (
"bytes"
"fmt"
"io"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/storage"
)
// iterateTarSplit calls handler for each tar header in tarSplit
func iterateTarSplit(tarSplit []byte, handler func(hdr *tar.Header) error) error {
// This, strictly speaking, hard-codes undocumented assumptions about how github.com/vbatts/tar-split/tar/asm.NewInputTarStream
// forms the tar-split contents. Pragmatically, NewInputTarStream should always produce storage.FileType entries at least
// for every non-empty file, which basically constrains it to the output we expect.
//
// Specifically, we assume:
// - There is a separate SegmentType entry for every tar header, but only one SegmentType entry for the full header incl. any extensions
// - (There is a FileType entry for every tar header, we ignore it)
// - Trailing padding of a file, if any, is included in the next SegmentType entry
// - At the end, there may be SegmentType entries just for the terminating zero blocks.
unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
for {
tsEntry, err := unpacker.Next()
if err != nil {
if err == io.EOF {
return nil
}
return fmt.Errorf("reading tar-split entries: %w", err)
}
switch tsEntry.Type {
case storage.SegmentType:
payload := tsEntry.Payload
// This is horrible, but we don't know how much padding to skip. (It can be computed from the previous hdr.Size for non-sparse
// files, but for sparse files that is set to the logical size.)
//
// First, assume that all padding is zero bytes.
// A tar header starts with a file name, which might in principle be empty, but
// at least https://github.com/opencontainers/image-spec/blob/main/layer.md#populate-initial-filesystem suggests that
// the tar name should never be empty (it should be ".", or maybe "./").
//
// This will cause us to skip all zero bytes in the trailing blocks, but that's fine.
i := 0
for i < len(payload) && payload[i] == 0 {
i++
}
payload = payload[i:]
tr := tar.NewReader(bytes.NewReader(payload))
hdr, err := tr.Next()
if err != nil {
if err == io.EOF { // Probably the last entry, but let's let the unpacker drive that.
break
}
return fmt.Errorf("decoding a tar header from a tar-split entry: %w", err)
}
if err := handler(hdr); err != nil {
return err
}
case storage.FileType:
// Nothing
default:
return fmt.Errorf("unexpected tar-split entry type %q", tsEntry.Type)
}
}
}
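A usage sketch for iterateTarSplit (hypothetical helper, same package): collect the header names recorded in a tar-split blob.
func tarSplitEntryNames(tarSplit []byte) ([]string, error) {
	var names []string
	if err := iterateTarSplit(tarSplit, func(hdr *tar.Header) error {
		names = append(names, hdr.Name)
		return nil
	}); err != nil {
		return nil, err
	}
	return names, nil
}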

View file

@ -75,10 +75,6 @@ type OptionsConfig struct {
// Size
Size string `toml:"size,omitempty"`
// RemapUIDs is a list of default UID mappings to use for layers.
RemapUIDs string `toml:"remap-uids,omitempty"`
// RemapGIDs is a list of default GID mappings to use for layers.
RemapGIDs string `toml:"remap-gids,omitempty"`
// IgnoreChownErrors is a flag for whether chown errors should be
// ignored when building an image.
IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
@ -90,13 +86,6 @@ type OptionsConfig struct {
// files and directories.
ForceMask os.FileMode `toml:"force_mask,omitempty"`
// RemapUser is the name of one or more entries in /etc/subuid which
// should be used to set up default UID mappings.
RemapUser string `toml:"remap-user,omitempty"`
// RemapGroup is the name of one or more entries in /etc/subgid which
// should be used to set up default GID mappings.
RemapGroup string `toml:"remap-group,omitempty"`
// RootAutoUsernsUser is the name of one or more entries in /etc/subuid and
// /etc/subgid which should be used to set up automatically a userns.
RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"`

View file

@ -1,5 +1,5 @@
//go:build linux || darwin || freebsd || solaris
// +build linux darwin freebsd solaris
//go:build !windows
// +build !windows
package directory

View file

@ -1,38 +0,0 @@
//go:build !linux && !darwin && !freebsd && !windows
// +build !linux,!darwin,!freebsd,!windows
package homedir
// Copyright 2013-2018 Docker, Inc.
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
"errors"
"os"
"path/filepath"
)
// GetRuntimeDir is unsupported on non-linux system.
func GetRuntimeDir() (string, error) {
return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
}
// StickRuntimeDirContents is unsupported on non-linux system.
func StickRuntimeDirContents(files []string) ([]string, error) {
return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
}
// GetConfigHome returns XDG_CONFIG_HOME.
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetConfigHome() (string, error) {
if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
return xdgConfigHome, nil
}
home := Get()
if home == "" {
return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
}
return filepath.Join(home, ".config"), nil
}

View file

@ -10,6 +10,7 @@ import (
"syscall"
"github.com/containers/storage/pkg/idtools"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -61,12 +62,20 @@ func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int,
_ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0)
// just wait for the SIGKILL
for {
syscall.Pause()
_ = syscall.Pause()
}
}
cleanupFunc := func() {
unix.Kill(int(pid), unix.SIGKILL)
_, _ = unix.Wait4(int(pid), nil, 0, nil)
err1 := unix.Kill(int(pid), unix.SIGKILL)
if err1 != nil && err1 != syscall.ESRCH {
logrus.Warnf("kill process pid: %d with SIGKILL ended with error: %v", int(pid), err1)
}
if err1 != nil {
return
}
if _, err := unix.Wait4(int(pid), nil, 0, nil); err != nil {
logrus.Warnf("wait4 pid: %d ended with error: %v", int(pid), err)
}
}
writeMappings := func(fname string, idmap []idtools.IDMap) error {
mappings := ""

View file

@ -367,21 +367,77 @@ func checkChownErr(err error, name string, uid, gid int) error {
return err
}
// Stat contains file state that can be overridden with ContainersOverrideXattr.
type Stat struct {
IDs IDPair
Mode os.FileMode
}
// FormatContainersOverrideXattr will format the given uid, gid, and mode into a string
// that can be used as the value for the ContainersOverrideXattr xattr.
func FormatContainersOverrideXattr(uid, gid, mode int) string {
return fmt.Sprintf("%d:%d:0%o", uid, gid, mode&0o7777)
}
// GetContainersOverrideXattr will get and decode ContainersOverrideXattr.
func GetContainersOverrideXattr(path string) (Stat, error) {
var stat Stat
xstat, err := system.Lgetxattr(path, ContainersOverrideXattr)
if err != nil {
return stat, err
}
attrs := strings.Split(string(xstat), ":")
if len(attrs) != 3 {
return stat, fmt.Errorf("The number of clons in %s does not equal to 3",
ContainersOverrideXattr)
}
value, err := strconv.ParseUint(attrs[0], 10, 32)
if err != nil {
return stat, fmt.Errorf("Failed to parse UID: %w", err)
}
stat.IDs.UID = int(value)
value, err = strconv.ParseUint(attrs[1], 10, 32)
if err != nil {
return stat, fmt.Errorf("Failed to parse GID: %w", err)
}
stat.IDs.GID = int(value)
value, err = strconv.ParseUint(attrs[2], 8, 32)
if err != nil {
return stat, fmt.Errorf("Failed to parse mode: %w", err)
}
stat.Mode = os.FileMode(value)
return stat, nil
}
// SetContainersOverrideXattr will encode and set ContainersOverrideXattr.
func SetContainersOverrideXattr(path string, stat Stat) error {
value := FormatContainersOverrideXattr(stat.IDs.UID, stat.IDs.GID, int(stat.Mode))
return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0)
}
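A round-trip sketch for the new helpers (hypothetical function, same package; assumes the GID parsing fix above): Set writes the xattr in the uid:gid:0mode format, Get parses it back.
func overrideOwnership(path string, uid, gid int, mode os.FileMode) error {
	want := Stat{IDs: IDPair{UID: uid, GID: gid}, Mode: mode & 0o7777}
	if err := SetContainersOverrideXattr(path, want); err != nil {
		return err
	}
	got, err := GetContainersOverrideXattr(path)
	if err != nil {
		return err
	}
	if got.IDs != want.IDs || got.Mode != want.Mode {
		return fmt.Errorf("override xattr round-trip mismatch: got %+v, want %+v", got, want)
	}
	return nil
}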
func SafeChown(name string, uid, gid int) error {
if runtime.GOOS == "darwin" {
var mode uint64 = 0o0700
var mode os.FileMode = 0o0700
xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
if err == nil {
attrs := strings.Split(string(xstat), ":")
if len(attrs) == 3 {
val, err := strconv.ParseUint(attrs[2], 8, 32)
if err == nil {
mode = val
mode = os.FileMode(val)
}
}
}
value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil {
value := Stat{IDPair{uid, gid}, mode}
if err = SetContainersOverrideXattr(name, value); err != nil {
return err
}
uid = os.Getuid()
@ -397,19 +453,19 @@ func SafeChown(name string, uid, gid int) error {
func SafeLchown(name string, uid, gid int) error {
if runtime.GOOS == "darwin" {
var mode uint64 = 0o0700
var mode os.FileMode = 0o0700
xstat, err := system.Lgetxattr(name, ContainersOverrideXattr)
if err == nil {
attrs := strings.Split(string(xstat), ":")
if len(attrs) == 3 {
val, err := strconv.ParseUint(attrs[2], 8, 32)
if err == nil {
mode = val
mode = os.FileMode(val)
}
}
}
value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil {
value := Stat{IDPair{uid, gid}, mode}
if err = SetContainersOverrideXattr(name, value); err != nil {
return err
}
uid = os.Getuid()

View file

@ -150,10 +150,13 @@ func (w *atomicFileWriter) complete(commit bool) (retErr error) {
}
defer func() {
w.closeTempFile()
err := w.closeTempFile()
if retErr != nil || w.writeErr != nil {
os.Remove(w.f.Name())
}
if retErr == nil {
retErr = err
}
}()
if commit {

View file

@ -415,7 +415,9 @@ func (l *LockFile) lock(lType lockType) {
// Optimization: only use the (expensive) syscall when
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
lockHandle(l.fd, lType, false)
if err := lockHandle(l.fd, lType, false); err != nil {
panic(err)
}
}
l.lockType = lType
l.locked = true
@ -426,10 +428,13 @@ func (l *LockFile) lock(lType lockType) {
// command.
func (l *LockFile) tryLock(lType lockType) error {
var success bool
var rwMutexUnlocker func()
if lType == readLock {
success = l.rwMutex.TryRLock()
rwMutexUnlocker = l.rwMutex.RUnlock
} else {
success = l.rwMutex.TryLock()
rwMutexUnlocker = l.rwMutex.Unlock
}
if !success {
return fmt.Errorf("resource temporarily unavailable")
@ -440,7 +445,7 @@ func (l *LockFile) tryLock(lType lockType) error {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
l.rwMutex.Unlock()
rwMutexUnlocker()
return err
}
l.fd = fd
@ -450,7 +455,7 @@ func (l *LockFile) tryLock(lType lockType) error {
// reader lock or a writer lock.
if err = lockHandle(l.fd, lType, true); err != nil {
closeHandle(fd)
l.rwMutex.Unlock()
rwMutexUnlocker()
return err
}
}

View file

@ -1,5 +1,5 @@
//go:build linux || solaris || darwin || freebsd
// +build linux solaris darwin freebsd
//go:build !windows
// +build !windows
package lockfile

View file

@ -6,10 +6,12 @@ package loopback
import (
"errors"
"fmt"
"io/fs"
"os"
"syscall"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// Loopback related errors
@ -39,7 +41,7 @@ func getNextFreeLoopbackIndex() (int, error) {
return index, err
}
func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) {
func openNextAvailableLoopback(sparseName string, sparseFile *os.File) (loopFile *os.File, err error) {
// Read information about the loopback file.
var st syscall.Stat_t
err = syscall.Fstat(int(sparseFile.Fd()), &st)
@ -48,31 +50,51 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
return nil, ErrAttachLoopbackDevice
}
// upper bound to avoid infinite loop
remaining := 1000
// Start looking for a free /dev/loop
for {
target := fmt.Sprintf("/dev/loop%d", index)
index++
fi, err := os.Stat(target)
if err != nil {
if os.IsNotExist(err) {
logrus.Error("There are no more loopback devices available.")
}
if remaining == 0 {
logrus.Errorf("No free loopback devices available")
return nil, ErrAttachLoopbackDevice
}
remaining--
if fi.Mode()&os.ModeDevice != os.ModeDevice {
logrus.Errorf("Loopback device %s is not a block device.", target)
continue
index, err := getNextFreeLoopbackIndex()
if err != nil {
logrus.Debugf("Error retrieving the next available loopback: %s", err)
return nil, err
}
target := fmt.Sprintf("/dev/loop%d", index)
// OpenFile adds O_CLOEXEC
loopFile, err = os.OpenFile(target, os.O_RDWR, 0o644)
if err != nil {
// The kernel returns ENXIO when opening a device that is in the "deleting" or "rundown" state, so
// just treat ENXIO as if the device does not exist.
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, unix.ENXIO) {
// Another process could have taken the loopback device in the meantime. So repeat
// the process with the next loopback device.
continue
}
logrus.Errorf("Opening loopback device: %s", err)
return nil, ErrAttachLoopbackDevice
}
fi, err := loopFile.Stat()
if err != nil {
loopFile.Close()
logrus.Errorf("Stat loopback device: %s", err)
return nil, ErrAttachLoopbackDevice
}
if fi.Mode()&os.ModeDevice != os.ModeDevice {
loopFile.Close()
logrus.Errorf("Loopback device %s is not a block device.", target)
continue
}
// Try to attach to the loop file
if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
loopFile.Close()
@ -124,14 +146,6 @@ func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) {
}
func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) {
// Try to retrieve the next available loopback device via syscall.
// If it fails, we discard error and start looping for a
// loopback from index 0.
startIndex, err := getNextFreeLoopbackIndex()
if err != nil {
logrus.Debugf("Error retrieving the next available loopback: %s", err)
}
var sparseFile *os.File
// OpenFile adds O_CLOEXEC
@ -146,7 +160,7 @@ func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err erro
}
defer sparseFile.Close()
loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile)
loopFile, err := openNextAvailableLoopback(sparseName, sparseFile)
if err != nil {
return nil, err
}

View file

@ -1,5 +1,18 @@
package mount
import "github.com/moby/sys/mountinfo"
import (
"fmt"
"os"
var PidMountInfo = mountinfo.PidMountInfo
"github.com/moby/sys/mountinfo"
)
func PidMountInfo(pid int) ([]*Info, error) {
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
if err != nil {
return nil, err
}
defer f.Close()
return mountinfo.GetMountsFromReader(f, nil)
}
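A usage sketch (hypothetical helper, same package): list the mount points visible to a process through the reimplemented PidMountInfo.
func selfMountPoints() ([]string, error) {
	infos, err := PidMountInfo(os.Getpid())
	if err != nil {
		return nil, err
	}
	points := make([]string, 0, len(infos))
	for _, info := range infos {
		points = append(points, info.Mountpoint)
	}
	return points, nil
}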

View file

@ -1,5 +1,5 @@
//go:build linux || freebsd || darwin
// +build linux freebsd darwin
//go:build !windows
// +build !windows
package system

View file

@ -526,8 +526,11 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
} else {
// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
// to use unshare(), so don't bother creating a new user namespace at this point.
capabilities, err := capability.NewPid(0)
capabilities, err := capability.NewPid2(0)
bailOnError(err, "Initializing a new Capabilities object of pid 0")
err = capabilities.Load()
bailOnError(err, "Reading the current capabilities sets")
if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
return
}
@ -587,7 +590,12 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
cmd.Hook = func(int) error {
go func() {
for receivedSignal := range interrupted {
cmd.Cmd.Process.Signal(receivedSignal)
if err := cmd.Cmd.Process.Signal(receivedSignal); err != nil {
logrus.Warnf(
"Failed to send a signal '%d' to the Process (PID: %d): %v",
receivedSignal, cmd.Cmd.Process.Pid, err,
)
}
}
}()
return nil

View file

@ -19,6 +19,10 @@ driver = "overlay"
# Temporary storage location
runroot = "/run/containers/storage"
# Priority list for the storage drivers that will be tested one
# after the other to pick the storage driver if it is not defined.
# driver_priority = ["overlay", "btrfs"]
# Primary Read/Write location of container storage
# When changing the graphroot location on an SELINUX system, you must
# ensure the labeling matches the default locations labels with the
@ -77,28 +81,6 @@ additionalimagestores = [
# operation so it is not enabled by default.
pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""}
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
# listed and will be heeded by libraries, but there are limits to the number of
# mappings which the kernel will allow when you later attempt to run a
# container.
#
# remap-uids = "0:1668442479:65536"
# remap-gids = "0:1668442479:65536"
# Remap-User/Group is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
# with an in-container ID of 0 and then a host-level ID taken from the lowest
# range that matches the specified name, and using the length of that range.
# Additional ranges are then assigned, using the ranges which specify the
# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
# until all of the entries have been used for maps. This setting overrides the
# Remap-UIDs/GIDs setting.
#
# remap-user = "containers"
# remap-group = "containers"
# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
# to containers configured to create automatically a user namespace. Containers

View file

@ -39,27 +39,6 @@ graphroot = "/var/db/containers/storage"
additionalimagestores = [
]
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
# listed and will be heeded by libraries, but there are limits to the number of
# mappings which the kernel will allow when you later attempt to run a
# container.
#
# remap-uids = 0:1668442479:65536
# remap-gids = 0:1668442479:65536
# Remap-User/Group is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
# with an in-container ID of 0 and then a host-level ID taken from the lowest
# range that matches the specified name, and using the length of that range.
# Additional ranges are then assigned, using the ranges which specify the
# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
# until all of the entries have been used for maps.
#
# remap-user = "containers"
# remap-group = "containers"
# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
# to containers configured to create automatically a user namespace. Containers

View file

@ -1088,8 +1088,6 @@ func (s *store) createGraphDriverLocked() (drivers.Driver, error) {
RunRoot: s.runRoot,
DriverPriority: s.graphDriverPriority,
DriverOptions: s.graphOptions,
UIDMaps: s.uidMap,
GIDMaps: s.gidMap,
}
return drivers.New(s.graphDriverName, config)
}
@ -1437,7 +1435,9 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
return true
}
// putLayer requires the rlstore, rlstores, as well as s.containerStore (even if not an argument to this function) to be locked for write.
// On entry:
// - rlstore must be locked for writing
// - rlstores MUST NOT be locked
func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) {
var parentLayer *Layer
var options LayerOptions
@ -1474,6 +1474,11 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
return nil, -1, ErrLayerUnknown
}
parentLayer = ilayer
if err := s.containerStore.startWriting(); err != nil {
return nil, -1, err
}
defer s.containerStore.stopWriting()
containers, err := s.containerStore.Containers()
if err != nil {
return nil, -1, err
@ -1490,6 +1495,13 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
gidMap = ilayer.GIDMap
}
} else {
// FIXME? It's unclear why we are holding containerStore locked here at all
// (and because we are not modifying it, why it is a write lock, not a read lock).
if err := s.containerStore.startWriting(); err != nil {
return nil, -1, err
}
defer s.containerStore.stopWriting()
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = s.uidMap
}
@ -1497,23 +1509,17 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
gidMap = s.gidMap
}
}
layerOptions := LayerOptions{
OriginalDigest: options.OriginalDigest,
OriginalSize: options.OriginalSize,
UncompressedDigest: options.UncompressedDigest,
Flags: options.Flags,
}
if s.canUseShifting(uidMap, gidMap) {
layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}
options.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}
} else {
layerOptions.IDMappingOptions = types.IDMappingOptions{
options.IDMappingOptions = types.IDMappingOptions{
HostUIDMapping: options.HostUIDMapping,
HostGIDMapping: options.HostGIDMapping,
UIDMap: copyIDMap(uidMap),
GIDMap: copyIDMap(gidMap),
}
}
return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo)
return rlstore.create(id, parentLayer, names, mountLabel, nil, &options, writeable, diff, slo)
}
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
@ -1525,10 +1531,6 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
return nil, -1, err
}
defer rlstore.stopWriting()
if err := s.containerStore.startWriting(); err != nil {
return nil, -1, err
}
defer s.containerStore.stopWriting()
return s.putLayer(rlstore, rlstores, id, parent, names, mountLabel, writeable, lOptions, diff, nil)
}
@ -2844,7 +2846,7 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
exists := store.Exists(id)
store.stopReading()
if exists {
return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrLayerUnknown)
return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly)
}
}
@ -2928,14 +2930,40 @@ func (s *store) Unmount(id string, force bool) (bool, error) {
}
func (s *store) Changes(from, to string) ([]archive.Change, error) {
if res, done, err := readAllLayerStores(s, func(store roLayerStore) ([]archive.Change, bool, error) {
// NaiveDiff could cause mounts to happen without a lock, so be safe
// and treat the .Diff operation as a Mount.
// We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver
// in startUsingGraphDriver().
if err := s.startUsingGraphDriver(); err != nil {
return nil, err
}
defer s.stopUsingGraphDriver()
rlstore, lstores, err := s.bothLayerStoreKindsLocked()
if err != nil {
return nil, err
}
if err := rlstore.startWriting(); err != nil {
return nil, err
}
if rlstore.Exists(to) {
res, err := rlstore.Changes(from, to)
rlstore.stopWriting()
return res, err
}
rlstore.stopWriting()
for _, s := range lstores {
store := s
if err := store.startReading(); err != nil {
return nil, err
}
if store.Exists(to) {
res, err := store.Changes(from, to)
return res, true, err
store.stopReading()
return res, err
}
return nil, false, nil
}); done {
return res, err
store.stopReading()
}
return nil, ErrLayerUnknown
}
@ -2966,12 +2994,30 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
}
defer s.stopUsingGraphDriver()
layerStores, err := s.allLayerStoresLocked()
rlstore, lstores, err := s.bothLayerStoreKindsLocked()
if err != nil {
return nil, err
}
for _, s := range layerStores {
if err := rlstore.startWriting(); err != nil {
return nil, err
}
if rlstore.Exists(to) {
rc, err := rlstore.Diff(from, to, options)
if rc != nil && err == nil {
wrapped := ioutils.NewReadCloserWrapper(rc, func() error {
err := rc.Close()
rlstore.stopWriting()
return err
})
return wrapped, nil
}
rlstore.stopWriting()
return rc, err
}
rlstore.stopWriting()
for _, s := range lstores {
store := s
if err := store.startReading(); err != nil {
return nil, err
@ -3009,16 +3055,14 @@ func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
return layer, err
}
if err == nil {
// This code path exists only for cmd/containers/storage.applyDiffUsingStagingDirectory; we have tests that
// assume layer creation and applying a staged layer are separate steps. Production pull code always uses the
// other path, where layer creation is atomic.
return layer, rlstore.applyDiffFromStagingDirectory(args.ID, args.DiffOutput, args.DiffOptions)
}
// if the layer doesn't exist yet, try to create it.
if err := s.containerStore.startWriting(); err != nil {
return nil, err
}
defer s.containerStore.stopWriting()
slo := stagedLayerOptions{
DiffOutput: args.DiffOutput,
DiffOptions: args.DiffOptions,

View file

@ -352,7 +352,7 @@ func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) {
}
if opts.GraphDriverName == "" {
if canUseRootlessOverlay(opts.GraphRoot, opts.RunRoot) {
if canUseRootlessOverlay() {
opts.GraphDriverName = overlayDriver
} else {
opts.GraphDriverName = "vfs"
@ -481,33 +481,6 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
if config.Storage.Options.MountOpt != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt))
}
uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
if err != nil {
return err
}
gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
if err != nil {
return err
}
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" {
config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser
}
if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" {
config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup
}
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" {
mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup)
if err != nil {
logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
return err
}
uidmap = mappings.UIDs()
gidmap = mappings.GIDs()
}
storeOptions.UIDMap = uidmap
storeOptions.GIDMap = gidmap
storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser
if config.Storage.Options.AutoUsernsMinSize > 0 {
storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize

View file

@ -1,3 +1,5 @@
//go:build freebsd || netbsd
package types
const (
@ -14,6 +16,6 @@ var (
)
// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
func canUseRootlessOverlay(home, runhome string) bool {
func canUseRootlessOverlay() bool {
return false
}

View file

@ -11,6 +11,6 @@ const (
var defaultOverrideConfigFile = "/etc/containers/storage.conf"
// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
func canUseRootlessOverlay(home, runhome string) bool {
func canUseRootlessOverlay() bool {
return false
}

View file

@ -22,7 +22,7 @@ var (
)
// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
func canUseRootlessOverlay(home, runhome string) bool {
func canUseRootlessOverlay() bool {
// we check first for fuse-overlayfs since it is cheaper.
if path, _ := exec.LookPath("fuse-overlayfs"); path != "" {
return true

View file

@ -14,6 +14,6 @@ var (
)
// canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
func canUseRootlessOverlay(home, runhome string) bool {
func canUseRootlessOverlay() bool {
return false
}

View file

@ -25,16 +25,6 @@ rootless_storage_path = "$HOME/$UID/containers/storage"
additionalimagestores = [
]
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
# listed and will be heeded by libraries, but there are limits to the number of
# mappings which the kernel will allow when you later attempt to run a
# container.
#
remap-uids = "0:1000000000:30000"
remap-gids = "0:1500000000:60000"
[storage.options.overlay]
# mountopt specifies comma separated list of extra mount options

View file

@ -66,7 +66,10 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
return
}
ReloadConfigurationFile(configFile, storeOptions)
if err := ReloadConfigurationFile(configFile, storeOptions); err != nil {
logrus.Warningf("Failed to reload %q %v\n", configFile, err)
return
}
prevReloadConfig.storeOptions = storeOptions
prevReloadConfig.mod = mtime

View file

@ -0,0 +1,138 @@
# Changelog #
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased] ##
## [0.3.1] - 2024-07-23 ##
### Changed ###
- By allowing `Open(at)InRoot` to opt out of the extra work done by `MkdirAll`
to do the necessary "partial lookups", `Open(at)InRoot` now does less work
for both implementations (resulting in a many-fold decrease in the number of
operations for `openat2`, and a modest improvement for non-`openat2`) and is
much more reliably guaranteed to match the correct `openat2(RESOLVE_IN_ROOT)`
behaviour.
- We now use `readlinkat(fd, "")` where possible. For `Open(at)InRoot` this
effectively just means that we no longer risk getting spurious errors during
rename races. However, for our hardened procfs handler, this in theory should
prevent mount attacks from tricking us when doing magic-link readlinks (even
when using the unsafe host `/proc` handle). Unfortunately `Reopen` is still
potentially vulnerable to those kinds of somewhat-esoteric attacks.
Technically this [will only work on post-2.6.39 kernels][linux-readlinkat-emptypath]
but it seems incredibly unlikely anyone is using `filepath-securejoin` on a
pre-2011 kernel.
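For illustration only (this sketch is not code from this project, and the
symlink path is a placeholder), the `readlinkat(fd, "")` technique reads a
link's target through a handle to the link itself, so a concurrent rename
cannot redirect the lookup:
```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// O_PATH|O_NOFOLLOW yields a handle to the symlink itself, not its target.
	fd, err := unix.Open("/tmp/some-symlink", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// An empty path makes readlinkat operate on fd itself (Linux 2.6.39 or later).
	buf := make([]byte, unix.PathMax)
	n, err := unix.Readlinkat(fd, "", buf)
	if err != nil {
		panic(err)
	}
	fmt.Println("target:", string(buf[:n]))
}
```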
### Fixed ###
- Several improvements were made to the errors returned by `Open(at)InRoot` and
`MkdirAll` when dealing with invalid paths under the emulated (i.e.
non-`openat2`) implementation. Previously, some paths would return the wrong
error (`ENOENT` when the last component was a non-directory), and other paths
would be accepted as though they were valid (trailing-slash components
after a non-directory would be ignored by `Open(at)InRoot`).
These changes were made to match `openat2`'s behaviour and are purely a
consistency fix (most users are going to be using `openat2` anyway).
[linux-readlinkat-emptypath]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=65cfc6722361570bfe255698d9cd4dccaf47570d
## [0.3.0] - 2024-07-11 ##
### Added ###
- A new set of `*os.File`-based APIs have been added. These are adapted from
[libpathrs][] and we strongly suggest using them if possible (as they provide
far more protection against attacks than `SecureJoin`):
- `Open(at)InRoot` resolves a path inside a rootfs and returns an `*os.File`
handle to the path. Note that the handle returned is an `O_PATH` handle,
which cannot be used for reading or writing (as well as some other
operations -- [see open(2) for more details][open.2])
- `Reopen` takes an `O_PATH` file handle and safely re-opens it to upgrade
it to a regular handle. This can also be used with non-`O_PATH` handles,
but `O_PATH` is the most obvious application.
- `MkdirAll` is an implementation of `os.MkdirAll` that is safe to use to
create a directory tree within a rootfs.
As these are new APIs, they may change in the future. However, they should be
safe to start migrating to as we have extensive tests ensuring they behave
correctly and are safe against various races and other attacks.
[libpathrs]: https://github.com/openSUSE/libpathrs
[open.2]: https://www.man7.org/linux/man-pages/man2/open.2.html
## [0.2.5] - 2024-05-03 ##
### Changed ###
- Some minor changes were made to how lexical components (like `..` and `.`)
are handled during path generation in `SecureJoin`. There is no behaviour
change as a result of this fix (the resulting paths are the same).
### Fixed ###
- The error returned when we hit a symlink loop now references the correct
path. (#10)
## [0.2.4] - 2023-09-06 ##
### Security ###
- This release fixes a potential security issue in filepath-securejoin when
used on Windows ([GHSA-6xv5-86q9-7xr8][], which could be used to generate
paths outside of the provided rootfs in certain cases), as well as improving
the overall behaviour of filepath-securejoin when dealing with Windows paths
that contain volume names. Thanks to Paulo Gomes for discovering and fixing
these issues.
### Fixed ###
- Switch to GitHub Actions for CI so we can test on Windows as well as Linux
and macOS.
[GHSA-6xv5-86q9-7xr8]: https://github.com/advisories/GHSA-6xv5-86q9-7xr8
## [0.2.3] - 2021-06-04 ##
### Changed ###
- Switch to Go 1.13-style `%w` error wrapping, letting us drop the dependency
on `github.com/pkg/errors`.
## [0.2.2] - 2018-09-05 ##
### Changed ###
- Use `syscall.ELOOP` as the base error for symlink loops, rather than our own
(internal) error. This allows callers to more easily use `errors.Is` to check
for this case.
## [0.2.1] - 2018-09-05 ##
### Fixed ###
- Use our own `IsNotExist` implementation, which lets us handle `ENOTDIR`
properly within `SecureJoin`.
## [0.2.0] - 2017-07-19 ##
We now have 100% test coverage!
### Added ###
- Add a `SecureJoinVFS` API that can be used for mocking (as we do in our new
tests) or for implementing custom handling of lookup operations (such as for
rootless containers, where work is necessary to access directories with weird
modes because we don't have `CAP_DAC_READ_SEARCH` or `CAP_DAC_OVERRIDE`).
## 0.1.0 - 2017-07-19
This is our first release of `github.com/cyphar/filepath-securejoin`,
containing a full implementation with a coverage of 93.5% (the only missing
cases are the error cases, which are hard to mock in tests at the moment).
[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.1...HEAD
[0.3.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.0...v0.3.1
[0.3.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.5...v0.3.0
[0.2.5]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.4...v0.2.5
[0.2.4]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.3...v0.2.4
[0.2.3]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.2...v0.2.3
[0.2.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.1...v0.2.2
[0.2.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.1.0...v0.2.0

View file

@ -1,5 +1,5 @@
Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
Copyright (C) 2017 SUSE LLC. All rights reserved.
Copyright (C) 2017-2024 SUSE LLC. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

View file

@ -2,31 +2,24 @@
[![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml)
An implementation of `SecureJoin`, a [candidate for inclusion in the Go
standard library][go#20126]. The purpose of this function is to be a "secure"
alternative to `filepath.Join`, and in particular it provides certain
guarantees that are not provided by `filepath.Join`.
### Old API ###
> **NOTE**: This code is *only* safe if you are not at risk of other processes
> modifying path components after you've used `SecureJoin`. If it is possible
> for a malicious process to modify path components of the resolved path, then
> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There
> are some Linux kernel patches I'm working on which might allow for a better
> solution.][lwn-obeneath]
>
> In addition, with a slightly modified API it might be possible to use
> `O_PATH` and verify that the opened path is actually the resolved one -- but
> I have not done that yet. I might add it in the future as a helper function
> to help users verify the path (we can't just return `/proc/self/fd/<foo>`
> because that doesn't always work transparently for all users).
This library was originally just an implementation of `SecureJoin` which was
[intended to be included in the Go standard library][go#20126] as a safer
`filepath.Join` that would restrict the path lookup to be inside a root
directory.
This is the function prototype:
The implementation was based on code that existed in several container
runtimes. Unfortunately, this API is **fundamentally unsafe** against attackers
that can modify path components after `SecureJoin` returns and before the
caller uses the path, allowing for some fairly trivial TOCTOU attacks.
```go
func SecureJoin(root, unsafePath string) (string, error)
```
`SecureJoin` (and `SecureJoinVFS`) are still provided by this library to
support legacy users, but new users are strongly suggested to avoid using
`SecureJoin` and instead use the [new API](#new-api) or switch to
[libpathrs][libpathrs].
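For legacy callers, usage looks like the following sketch (the root path is a
placeholder chosen for illustration); keep in mind that the returned string
can be raced by other processes before it is used:
```go
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// The ".." components are resolved safely inside the root, so the
	// result is always a child of /path/to/rootfs (a placeholder path).
	path, err := securejoin.SecureJoin("/path/to/rootfs", "../../etc/passwd")
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // e.g. /path/to/rootfs/etc/passwd
}
```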
This library **guarantees** the following:
With the above limitations in mind, this library guarantees the following:
* If no error is set, the resulting string **must** be a child path of
`root` and will not contain any symlink path components (they will all be
@ -47,7 +40,7 @@ This library **guarantees** the following:
A (trivial) implementation of this function on GNU/Linux systems could be done
with the following (note that this requires root privileges and is far more
opaque than the implementation in this library, and also requires that
`readlink` is inside the `root` path):
`readlink` is inside the `root` path and is trustworthy):
```go
package securejoin
@ -70,9 +63,105 @@ func SecureJoin(root, unsafePath string) (string, error) {
}
```
[lwn-obeneath]: https://lwn.net/Articles/767547/
[libpathrs]: https://github.com/openSUSE/libpathrs
[go#20126]: https://github.com/golang/go/issues/20126
### New API ###
While we recommend users switch to [libpathrs][libpathrs] as soon as it has a
stable release, some methods implemented by libpathrs have been ported to this
library to ease the transition. These APIs are only supported on Linux.
These APIs are implemented such that `filepath-securejoin` will
opportunistically use certain newer kernel APIs that make these operations far
more secure. In particular:
* All of the lookup operations will use [`openat2`][openat2.2] on new enough
kernels (Linux 5.6 or later) to restrict lookups through magic-links and
bind-mounts (for certain operations) and to make use of `RESOLVE_IN_ROOT` to
efficiently resolve symlinks within a rootfs (a short sketch of this call
follows the list).
* The APIs are hardened against a malicious `/proc` mount, either detecting
or avoiding being tricked by a `/proc` that is not legitimate. This is done
using [`openat2`][openat2.2] for all users, and privileged users are also
further protected by using [`fsopen`][fsopen.2] and [`open_tree`][open_tree.2]
(Linux 4.18 or later).
[openat2.2]: https://www.man7.org/linux/man-pages/man2/openat2.2.html
[fsopen.2]: https://github.com/brauner/man-pages-md/blob/main/fsopen.md
[open_tree.2]: https://github.com/brauner/man-pages-md/blob/main/open_tree.md
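To make the first bullet concrete, here is a minimal sketch of an
`openat2`-based lookup via `golang.org/x/sys/unix` (the rootfs path and file
name are placeholders; this illustrates the kernel API, not this library's
internals):
```go
package main

import "golang.org/x/sys/unix"

func main() {
	// Placeholder rootfs; an O_PATH handle is enough to anchor lookups.
	rootFd, err := unix.Open("/path/to/rootfs", unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(rootFd)

	how := unix.OpenHow{
		Flags: unix.O_PATH | unix.O_CLOEXEC,
		// Scope "..", absolute paths, and symlinks to rootFd, and refuse
		// magic-links, so the lookup cannot escape the rootfs.
		Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS,
	}
	fd, err := unix.Openat2(rootFd, "etc/passwd", &how)
	if err != nil {
		// ENOSYS on kernels older than 5.6; an emulated lookup is the fallback.
		panic(err)
	}
	defer unix.Close(fd)
}
```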
#### `OpenInRoot` ####
```go
func OpenInRoot(root, unsafePath string) (*os.File, error)
func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error)
func Reopen(handle *os.File, flags int) (*os.File, error)
```
`OpenInRoot` is a much safer version of
```go
path, err := securejoin.SecureJoin(root, unsafePath)
file, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0)
```
that protects against various race attacks that could lead to serious security
issues, depending on the application. Note that the returned `*os.File` is an
`O_PATH` file descriptor, which is quite restricted. Callers will probably need
to use `Reopen` to get a more usable handle (this split is done to provide
useful features like PTY spawning and to avoid users accidentally opening bad
inodes that could cause a DoS).
Callers need to be careful in how they use the returned `*os.File`. Usually it
is only safe to operate on the handle directly, and it is very easy to create a
security issue. [libpathrs][libpathrs] provides far more helpers to make using
these handles safer -- there is currently no plan to port them to
`filepath-securejoin`.
`OpenatInRoot` is like `OpenInRoot` except that the root is provided using an
`*os.File`. This allows you to ensure that multiple `OpenatInRoot` (or
`MkdirAllHandle`) calls are operating on the same rootfs.
> **NOTE**: Unlike `SecureJoin`, `OpenInRoot` will error out as soon as it hits
> a dangling symlink or non-existent path. This is in contrast to `SecureJoin`
> which treated non-existent components as though they were real directories,
> and would allow for partial resolution of dangling symlinks. These behaviours
> are at odds with how Linux treats non-existent paths and dangling symlinks,
> and so these are no longer allowed.
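As a usage illustration (the rootfs and file paths below are placeholders,
not anything prescribed by the API):
```go
package main

import (
	"fmt"
	"io"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	// Resolve the path inside the rootfs; the result is an O_PATH handle.
	handle, err := securejoin.OpenInRoot("/path/to/rootfs", "/etc/os-release")
	if err != nil {
		panic(err)
	}
	defer handle.Close()

	// Upgrade the O_PATH handle into one we can actually read from.
	file, err := securejoin.Reopen(handle, unix.O_RDONLY|unix.O_CLOEXEC)
	if err != nil {
		panic(err)
	}
	defer file.Close()

	data, err := io.ReadAll(file)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data))
}
```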
#### `MkdirAll` ####
```go
func MkdirAll(root, unsafePath string, mode int) error
func MkdirAllHandle(root *os.File, unsafePath string, mode int) (*os.File, error)
```
`MkdirAll` is a much safer version of
```go
path, err := securejoin.SecureJoin(root, unsafePath)
err = os.MkdirAll(path, os.FileMode(mode))
```
that protects against the same kinds of races that `OpenInRoot` protects
against.
`MkdirAllHandle` is like `MkdirAll` except that the root is provided using an
`*os.File` (the reason for this is the same as with `OpenatInRoot`) and an
`*os.File` of the final created directory is returned (the handle is
guaranteed to refer to the very directory that `MkdirAllHandle` just created,
which cannot be ensured by calling `OpenatInRoot` after
`MkdirAll`).
> **NOTE**: Unlike `SecureJoin`, `MkdirAll` will error out as soon as it hits
> a dangling symlink or non-existent path. This is in contrast to `SecureJoin`
> which treated non-existent components as though they were real directories,
> and would allow for partial resolution of dangling symlinks. These behaviours
> are at odds with how Linux treats non-existent paths and dangling symlinks,
> and so these are no longer allowed. This means that `MkdirAll` will not
> create non-existent directories referenced by a dangling symlink.
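A short usage sketch under assumed placeholder paths:
```go
package main

import (
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	root, err := os.Open("/path/to/rootfs")
	if err != nil {
		panic(err)
	}
	defer root.Close()

	// MkdirAllHandle returns a handle to the created leaf, so later
	// operations are guaranteed to act on the directory we just made.
	dir, err := securejoin.MkdirAllHandle(root, "var/lib/myapp/cache", 0o755)
	if err != nil {
		panic(err)
	}
	defer dir.Close()
}
```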
### License ###
The license of this project is the same as Go, which is a BSD 3-clause license

View file

@ -1 +1 @@
0.2.5
0.3.1

Some files were not shown because too many files have changed in this diff.