go.mod: update osbuild/images (v0.172.0) and osbuild/blueprint (v1.12.0)

The internal blueprint implementation has been removed from
osbuild/images.  Conversion from osbuild/blueprint blueprints to
osbuild/images blueprints is no longer necessary.
This commit is contained in:
Achilleas Koutsou 2025-08-07 17:52:12 +02:00
parent 19dd832876
commit 8d9f52cf1f
35 changed files with 44 additions and 2465 deletions

View file

@ -10,8 +10,8 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/arch" "github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/distro" "github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distrofactory" "github.com/osbuild/images/pkg/distrofactory"
"github.com/osbuild/images/pkg/dnfjson" "github.com/osbuild/images/pkg/dnfjson"

View file

@ -23,8 +23,7 @@ import (
) )
func getManifest(bp blueprint.Blueprint, t distro.ImageType, a distro.Arch, d distro.Distro, cacheDir string, repos []rpmmd.RepoConfig) (manifest.OSBuildManifest, []rpmmd.PackageSpec) { func getManifest(bp blueprint.Blueprint, t distro.ImageType, a distro.Arch, d distro.Distro, cacheDir string, repos []rpmmd.RepoConfig) (manifest.OSBuildManifest, []rpmmd.PackageSpec) {
ibp := blueprint.Convert(bp) manifest, _, err := t.Manifest(&bp, distro.ImageOptions{}, repos, nil)
manifest, _, err := t.Manifest(&ibp, distro.ImageOptions{}, repos, nil)
if err != nil { if err != nil {
panic(err) panic(err)
} }

4
go.mod
View file

@ -37,8 +37,8 @@ require (
github.com/oapi-codegen/runtime v1.1.2 github.com/oapi-codegen/runtime v1.1.2
github.com/openshift-online/ocm-sdk-go v0.1.473 github.com/openshift-online/ocm-sdk-go v0.1.473
github.com/oracle/oci-go-sdk/v54 v54.0.0 github.com/oracle/oci-go-sdk/v54 v54.0.0
github.com/osbuild/blueprint v1.11.0 github.com/osbuild/blueprint v1.12.0
github.com/osbuild/images v0.171.0 github.com/osbuild/images v0.172.0
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d
github.com/osbuild/pulp-client v0.1.0 github.com/osbuild/pulp-client v0.1.0
github.com/prometheus/client_golang v1.23.0 github.com/prometheus/client_golang v1.23.0

8
go.sum
View file

@ -520,10 +520,10 @@ github.com/openshift-online/ocm-sdk-go v0.1.473 h1:m/NWIBCzhC/8PototMQ7x8MQXCeSL
github.com/openshift-online/ocm-sdk-go v0.1.473/go.mod h1:5Gw/YZE+c5FAPaBtO1w/asd9qbs2ljQwg7fpVq51UW4= github.com/openshift-online/ocm-sdk-go v0.1.473/go.mod h1:5Gw/YZE+c5FAPaBtO1w/asd9qbs2ljQwg7fpVq51UW4=
github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4= github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc= github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
github.com/osbuild/blueprint v1.11.0 h1:Crqt+RRSE84JOoajzTIGrQaXXxnAgGUCDYe3nump54g= github.com/osbuild/blueprint v1.12.0 h1:Q2VXPyOnRs9uqgH1lNsvob6PS+73oPF0K9FmsyC98RI=
github.com/osbuild/blueprint v1.11.0/go.mod h1:uknOfX/bAoi+dbeNJj+uAir1T++/LVEtoY8HO3U7MiQ= github.com/osbuild/blueprint v1.12.0/go.mod h1:HPlJzkEl7q5g8hzaGksUk7ifFAy9QFw9LmzhuFOAVm4=
github.com/osbuild/images v0.171.0 h1:7lfYqIJUYh6QM6ioLW3cYLAzIu8lqPX5aGreyzEwRV8= github.com/osbuild/images v0.172.0 h1:UccT9dK7P5325HavVEYg4kG96GDqxQT4MTxC0mC1rdU=
github.com/osbuild/images v0.171.0/go.mod h1:uZAQRhxUB5G9aAczIcgU2d9VxLude4OXEdvtnKNgAEs= github.com/osbuild/images v0.172.0/go.mod h1:Iz2dCTJOrKBjiwp6mt24m3hqLWkfVmg8ZvrhAQbgV9g=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d h1:r9BFPDv0uuA9k1947Jybcxs36c/pTywWS1gjeizvtcQ= github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d h1:r9BFPDv0uuA9k1947Jybcxs36c/pTywWS1gjeizvtcQ=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d/go.mod h1:zR1iu/hOuf+OQNJlk70tju9IqzzM4ycq0ectkFBm94U= github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d/go.mod h1:zR1iu/hOuf+OQNJlk70tju9IqzzM4ycq0ectkFBm94U=
github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8= github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8=

View file

@ -572,6 +572,6 @@ func TestComposeUnsupportedMountPointV0(t *testing.T) {
require.NoError(t, err, "failed with a client error") require.NoError(t, err, "failed with a client error")
require.NotNil(t, resp) require.NotNil(t, resp)
require.Equal(t, "ManifestCreationFailed", resp.Errors[0].ID) require.Equal(t, "ManifestCreationFailed", resp.Errors[0].ID)
require.Contains(t, resp.Errors[0].Msg, "path \"/etc\" is not allowed") require.Contains(t, resp.Errors[0].Msg, "The following custom mountpoints are not supported [\"/etc\"]")
require.Equal(t, 0, len(body)) require.Equal(t, 0, len(body))
} }

View file

@ -10,7 +10,6 @@ import (
"math/big" "math/big"
"time" "time"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/distrofactory" "github.com/osbuild/images/pkg/distrofactory"
"github.com/osbuild/images/pkg/reporegistry" "github.com/osbuild/images/pkg/reporegistry"
"github.com/osbuild/images/pkg/rpmmd" "github.com/osbuild/images/pkg/rpmmd"
@ -82,8 +81,7 @@ func (request *DepsolveRequest) Depsolve(df *distrofactory.Factory, rr *reporegi
manifestSeed: manifestSeed, manifestSeed: manifestSeed,
} }
ibp := blueprint.Convert(bp) manifestSource, _, err := ir.imageType.Manifest(&bp, ir.imageOptions, ir.repositories, &ir.manifestSeed)
manifestSource, _, err := ir.imageType.Manifest(&ibp, ir.imageOptions, ir.repositories, &ir.manifestSeed)
if err != nil { if err != nil {
return nil, HTTPErrorWithInternal(ErrorFailedToDepsolve, err) return nil, HTTPErrorWithInternal(ErrorFailedToDepsolve, err)
} }

View file

@ -22,7 +22,6 @@ import (
"github.com/osbuild/osbuild-composer/pkg/jobqueue" "github.com/osbuild/osbuild-composer/pkg/jobqueue"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/container" "github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/distrofactory" "github.com/osbuild/images/pkg/distrofactory"
"github.com/osbuild/images/pkg/dnfjson" "github.com/osbuild/images/pkg/dnfjson"
@ -160,12 +159,11 @@ func (s *Server) enqueueCompose(irs []imageRequest, channel string) (uuid.UUID,
} }
ir := irs[0] ir := irs[0]
ibp := blueprint.Convert(ir.blueprint)
// shortcuts // shortcuts
arch := ir.imageType.Arch() arch := ir.imageType.Arch()
distribution := arch.Distro() distribution := arch.Distro()
manifestSource, _, err := ir.imageType.Manifest(&ibp, ir.imageOptions, ir.repositories, &ir.manifestSeed) manifestSource, _, err := ir.imageType.Manifest(&ir.blueprint, ir.imageOptions, ir.repositories, &ir.manifestSeed)
if err != nil { if err != nil {
logrus.Warningf("ErrorEnqueueingJob, failed generating manifest: %v", err) logrus.Warningf("ErrorEnqueueingJob, failed generating manifest: %v", err)
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err) return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
@ -292,13 +290,12 @@ func (s *Server) enqueueKojiCompose(taskID uint64, server, name, version, releas
var kojiFilenames []string var kojiFilenames []string
var buildIDs []uuid.UUID var buildIDs []uuid.UUID
for idx, ir := range irs { for idx, ir := range irs {
ibp := blueprint.Convert(ir.blueprint)
// shortcuts // shortcuts
arch := ir.imageType.Arch() arch := ir.imageType.Arch()
distribution := arch.Distro() distribution := arch.Distro()
manifestSource, _, err := ir.imageType.Manifest(&ibp, ir.imageOptions, ir.repositories, &irs[idx].manifestSeed) manifestSource, _, err := ir.imageType.Manifest(&ir.blueprint, ir.imageOptions, ir.repositories, &irs[idx].manifestSeed)
if err != nil { if err != nil {
logrus.Errorf("ErrorEnqueueingJob, failed generating manifest: %v", err) logrus.Errorf("ErrorEnqueueingJob, failed generating manifest: %v", err)
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err) return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)

View file

@ -57,8 +57,7 @@ func (suite *storeTest) SetupSuite() {
suite.NoError(err) suite.NoError(err)
suite.myImageType, err = suite.myArch.GetImageType(test_distro.TestImageTypeName) suite.myImageType, err = suite.myArch.GetImageType(test_distro.TestImageTypeName)
suite.NoError(err) suite.NoError(err)
ibp := blueprint.Convert(suite.myBP) manifest, _, _ := suite.myImageType.Manifest(&suite.myBP, suite.myImageOptions, suite.myRepoConfig, nil)
manifest, _, _ := suite.myImageType.Manifest(&ibp, suite.myImageOptions, suite.myRepoConfig, nil)
suite.myManifest, _ = manifest.Serialize(nil, nil, nil, nil) suite.myManifest, _ = manifest.Serialize(nil, nil, nil, nil)
suite.mySourceConfig = SourceConfig{ suite.mySourceConfig = SourceConfig{
Name: "testSourceConfig", Name: "testSourceConfig",

View file

@ -2563,8 +2563,7 @@ func (api *API) composeHandler(writer http.ResponseWriter, request *http.Request
return return
} }
ibp := blueprint.Convert(*bp) manifest, warnings, err := imageType.Manifest(bp, options, imageRepos, &seed)
manifest, warnings, err := imageType.Manifest(&ibp, options, imageRepos, &seed)
if err != nil { if err != nil {
errors := responseError{ errors := responseError{
ID: "ManifestCreationFailed", ID: "ManifestCreationFailed",

View file

@ -5,11 +5,9 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/osbuild/blueprint/internal/common"
"github.com/osbuild/images/pkg/crypt" "github.com/osbuild/images/pkg/crypt"
"github.com/coreos/go-semver/semver" "github.com/coreos/go-semver/semver"
iblueprint "github.com/osbuild/images/pkg/blueprint"
) )
// A Blueprint is a high-level description of an image. // A Blueprint is a high-level description of an image.
@ -229,285 +227,3 @@ func (b *Blueprint) CryptPasswords() error {
return nil return nil
} }
func Convert(bp Blueprint) iblueprint.Blueprint {
var pkgs []iblueprint.Package
if len(bp.Packages) > 0 {
pkgs = make([]iblueprint.Package, len(bp.Packages))
for idx := range bp.Packages {
pkgs[idx] = iblueprint.Package(bp.Packages[idx])
}
}
var modules []iblueprint.Package
if len(bp.Modules) > 0 {
modules = make([]iblueprint.Package, len(bp.Modules))
for idx := range bp.Modules {
modules[idx] = iblueprint.Package(bp.Modules[idx])
}
}
var enabledModules []iblueprint.EnabledModule
if len(bp.EnabledModules) > 0 {
enabledModules = make([]iblueprint.EnabledModule, len(bp.EnabledModules))
for idx := range bp.EnabledModules {
enabledModules[idx] = iblueprint.EnabledModule(bp.EnabledModules[idx])
}
}
var groups []iblueprint.Group
if len(bp.Groups) > 0 {
groups = make([]iblueprint.Group, len(bp.Groups))
for idx := range bp.Groups {
groups[idx] = iblueprint.Group(bp.Groups[idx])
}
}
var containers []iblueprint.Container
if len(bp.Containers) > 0 {
containers = make([]iblueprint.Container, len(bp.Containers))
for idx := range bp.Containers {
containers[idx] = iblueprint.Container(bp.Containers[idx])
}
}
var customizations *iblueprint.Customizations
if c := bp.Customizations; c != nil {
customizations = &iblueprint.Customizations{
Hostname: c.Hostname,
InstallationDevice: c.InstallationDevice,
}
if fdo := c.FDO; fdo != nil {
ifdo := iblueprint.FDOCustomization(*fdo)
customizations.FDO = &ifdo
}
if oscap := c.OpenSCAP; oscap != nil {
ioscap := iblueprint.OpenSCAPCustomization{
DataStream: oscap.DataStream,
ProfileID: oscap.ProfileID,
}
if tailoring := oscap.Tailoring; tailoring != nil {
itailoring := iblueprint.OpenSCAPTailoringCustomizations(*tailoring)
ioscap.Tailoring = &itailoring
}
customizations.OpenSCAP = &ioscap
}
if ign := c.Ignition; ign != nil {
iign := iblueprint.IgnitionCustomization{}
if embed := ign.Embedded; embed != nil {
iembed := iblueprint.EmbeddedIgnitionCustomization(*embed)
iign.Embedded = &iembed
}
if fb := ign.FirstBoot; fb != nil {
ifb := iblueprint.FirstBootIgnitionCustomization(*fb)
iign.FirstBoot = &ifb
}
customizations.Ignition = &iign
}
if dirs := c.Directories; dirs != nil {
idirs := make([]iblueprint.DirectoryCustomization, len(dirs))
for idx := range dirs {
idirs[idx] = iblueprint.DirectoryCustomization(dirs[idx])
}
customizations.Directories = idirs
}
if files := c.Files; files != nil {
ifiles := make([]iblueprint.FileCustomization, len(files))
for idx := range files {
ifiles[idx] = iblueprint.FileCustomization(files[idx])
}
customizations.Files = ifiles
}
if repos := c.Repositories; repos != nil {
irepos := make([]iblueprint.RepositoryCustomization, len(repos))
for idx := range repos {
irepos[idx] = iblueprint.RepositoryCustomization(repos[idx])
}
customizations.Repositories = irepos
}
if kernel := c.Kernel; kernel != nil {
ikernel := iblueprint.KernelCustomization(*kernel)
customizations.Kernel = &ikernel
}
if users := c.GetUsers(); users != nil { // contains both user customizations and converted sshkey customizations
iusers := make([]iblueprint.UserCustomization, len(users))
for idx := range users {
iusers[idx] = iblueprint.UserCustomization(users[idx])
}
customizations.User = iusers
}
if groups := c.Group; groups != nil {
igroups := make([]iblueprint.GroupCustomization, len(groups))
for idx := range groups {
igroups[idx] = iblueprint.GroupCustomization(groups[idx])
}
customizations.Group = igroups
}
if fs := c.Filesystem; fs != nil {
ifs := make([]iblueprint.FilesystemCustomization, len(fs))
for idx := range fs {
ifs[idx] = iblueprint.FilesystemCustomization(fs[idx])
}
customizations.Filesystem = ifs
}
if disk := c.Disk; disk != nil {
idisk := &iblueprint.DiskCustomization{
Type: disk.Type,
MinSize: disk.MinSize,
Partitions: make([]iblueprint.PartitionCustomization, len(disk.Partitions)),
}
for idx, part := range disk.Partitions {
ipart := iblueprint.PartitionCustomization{
Type: part.Type,
MinSize: part.MinSize,
PartType: part.PartType,
PartLabel: part.PartLabel,
PartUUID: part.PartUUID,
BtrfsVolumeCustomization: iblueprint.BtrfsVolumeCustomization{},
VGCustomization: iblueprint.VGCustomization{
Name: part.VGCustomization.Name,
},
FilesystemTypedCustomization: iblueprint.FilesystemTypedCustomization(part.FilesystemTypedCustomization),
}
if len(part.LogicalVolumes) > 0 {
ipart.LogicalVolumes = make([]iblueprint.LVCustomization, len(part.LogicalVolumes))
for lvidx, lv := range part.LogicalVolumes {
ipart.LogicalVolumes[lvidx] = iblueprint.LVCustomization{
Name: lv.Name,
MinSize: lv.MinSize,
FilesystemTypedCustomization: iblueprint.FilesystemTypedCustomization(lv.FilesystemTypedCustomization),
}
}
}
if len(part.Subvolumes) > 0 {
ipart.Subvolumes = make([]iblueprint.BtrfsSubvolumeCustomization, len(part.Subvolumes))
for svidx, sv := range part.Subvolumes {
ipart.Subvolumes[svidx] = iblueprint.BtrfsSubvolumeCustomization(sv)
}
}
idisk.Partitions[idx] = ipart
}
customizations.Disk = idisk
}
if tz := c.Timezone; tz != nil {
itz := iblueprint.TimezoneCustomization(*tz)
customizations.Timezone = &itz
}
if locale := c.Locale; locale != nil {
ilocale := iblueprint.LocaleCustomization(*locale)
customizations.Locale = &ilocale
}
if fw := c.Firewall; fw != nil {
ifw := iblueprint.FirewallCustomization{
Ports: fw.Ports,
}
if services := fw.Services; services != nil {
iservices := iblueprint.FirewallServicesCustomization(*services)
ifw.Services = &iservices
}
if zones := fw.Zones; zones != nil {
izones := make([]iblueprint.FirewallZoneCustomization, len(zones))
for idx := range zones {
izones[idx] = iblueprint.FirewallZoneCustomization(zones[idx])
}
ifw.Zones = izones
}
customizations.Firewall = &ifw
}
if services := c.Services; services != nil {
iservices := iblueprint.ServicesCustomization(*services)
customizations.Services = &iservices
}
if fips := c.FIPS; fips != nil {
customizations.FIPS = fips
}
if installer := c.Installer; installer != nil {
iinst := iblueprint.InstallerCustomization{
Unattended: installer.Unattended,
SudoNopasswd: installer.SudoNopasswd,
}
if installer.Kickstart != nil {
iinst.Kickstart = &iblueprint.Kickstart{
Contents: installer.Kickstart.Contents,
}
}
if installer.Modules != nil {
iinst.Modules = &iblueprint.AnacondaModules{
Enable: installer.Modules.Enable,
Disable: installer.Modules.Disable,
}
}
customizations.Installer = &iinst
}
if rpm := c.RPM; rpm != nil && rpm.ImportKeys != nil {
irpm := iblueprint.RPMCustomization{
ImportKeys: &iblueprint.RPMImportKeys{
Files: rpm.ImportKeys.Files,
},
}
customizations.RPM = &irpm
}
if rhsm := c.RHSM; rhsm != nil && rhsm.Config != nil {
irhsm := iblueprint.RHSMCustomization{
Config: &iblueprint.RHSMConfig{},
}
if plugins := rhsm.Config.DNFPlugins; plugins != nil {
irhsm.Config.DNFPlugins = &iblueprint.SubManDNFPluginsConfig{}
if plugins.ProductID != nil && plugins.ProductID.Enabled != nil {
irhsm.Config.DNFPlugins.ProductID = &iblueprint.DNFPluginConfig{
Enabled: common.ToPtr(*plugins.ProductID.Enabled),
}
}
if plugins.SubscriptionManager != nil && plugins.SubscriptionManager.Enabled != nil {
irhsm.Config.DNFPlugins.SubscriptionManager = &iblueprint.DNFPluginConfig{
Enabled: common.ToPtr(*plugins.SubscriptionManager.Enabled),
}
}
}
if subManConf := rhsm.Config.SubscriptionManager; subManConf != nil {
irhsm.Config.SubscriptionManager = &iblueprint.SubManConfig{}
if subManConf.RHSMConfig != nil && subManConf.RHSMConfig.ManageRepos != nil {
irhsm.Config.SubscriptionManager.RHSMConfig = &iblueprint.SubManRHSMConfig{
ManageRepos: common.ToPtr(*subManConf.RHSMConfig.ManageRepos),
}
}
if subManConf.RHSMCertdConfig != nil && subManConf.RHSMCertdConfig.AutoRegistration != nil {
irhsm.Config.SubscriptionManager.RHSMCertdConfig = &iblueprint.SubManRHSMCertdConfig{
AutoRegistration: common.ToPtr(*subManConf.RHSMCertdConfig.AutoRegistration),
}
}
}
customizations.RHSM = &irhsm
}
if ca := c.CACerts; ca != nil {
ica := iblueprint.CACustomization{
PEMCerts: ca.PEMCerts,
}
customizations.CACerts = &ica
}
}
ibp := iblueprint.Blueprint{
Name: bp.Name,
Description: bp.Description,
Version: bp.Version,
Packages: pkgs,
Modules: modules,
EnabledModules: enabledModules,
Groups: groups,
Containers: containers,
Customizations: customizations,
Distro: bp.Distro,
}
return ibp
}

View file

@ -8,7 +8,7 @@ import (
"github.com/osbuild/images/pkg/cert" "github.com/osbuild/images/pkg/cert"
"github.com/osbuild/images/pkg/customizations/anaconda" "github.com/osbuild/images/pkg/customizations/anaconda"
"github.com/osbuild/images/pkg/disk" "github.com/osbuild/images/pkg/disk/partition"
) )
type Customizations struct { type Customizations struct {
@ -346,22 +346,22 @@ func (c *Customizations) GetPartitioning() (*DiskCustomization, error) {
} }
// GetPartitioningMode converts the string to a disk.PartitioningMode type // GetPartitioningMode converts the string to a disk.PartitioningMode type
func (c *Customizations) GetPartitioningMode() (disk.PartitioningMode, error) { func (c *Customizations) GetPartitioningMode() (partition.PartitioningMode, error) {
if c == nil { if c == nil {
return disk.DefaultPartitioningMode, nil return partition.DefaultPartitioningMode, nil
} }
switch c.PartitioningMode { switch c.PartitioningMode {
case "raw": case "raw":
return disk.RawPartitioningMode, nil return partition.RawPartitioningMode, nil
case "lvm": case "lvm":
return disk.LVMPartitioningMode, nil return partition.LVMPartitioningMode, nil
case "auto-lvm": case "auto-lvm":
return disk.AutoLVMPartitioningMode, nil return partition.AutoLVMPartitioningMode, nil
case "": case "":
return disk.DefaultPartitioningMode, nil return partition.DefaultPartitioningMode, nil
default: default:
return disk.DefaultPartitioningMode, fmt.Errorf("invalid partitioning mode '%s'", c.PartitioningMode) return partition.DefaultPartitioningMode, fmt.Errorf("invalid partitioning mode '%s'", c.PartitioningMode)
} }
} }

View file

@ -607,6 +607,14 @@ func (p *PartitionCustomization) ValidatePartitionLabel(ptType string) error {
} }
func (p *PartitionCustomization) validatePlain(mountpoints map[string]bool) error { func (p *PartitionCustomization) validatePlain(mountpoints map[string]bool) error {
if p.FSType == "none" {
// make sure the mountpoint is empty and return
if p.Mountpoint != "" {
return fmt.Errorf("mountpoint for none partition must be empty (got %q)", p.Mountpoint)
}
return nil
}
if p.FSType == "swap" { if p.FSType == "swap" {
// make sure the mountpoint is empty and return // make sure the mountpoint is empty and return
if p.Mountpoint != "" { if p.Mountpoint != "" {

View file

@ -1,98 +0,0 @@
// Package blueprint contains primitives for representing weldr blueprints
package blueprint
// A Blueprint is a high-level description of an image.
type Blueprint struct {
Name string `json:"name" toml:"name"`
Description string `json:"description" toml:"description"`
Version string `json:"version,omitempty" toml:"version,omitempty"`
Packages []Package `json:"packages" toml:"packages"`
Modules []Package `json:"modules" toml:"modules"`
// Note, this is called "enabled modules" because we already have "modules" except
// the "modules" refers to packages and "enabled modules" refers to modularity modules.
EnabledModules []EnabledModule `json:"enabled_modules" toml:"enabled_modules"`
Groups []Group `json:"groups" toml:"groups"`
Containers []Container `json:"containers,omitempty" toml:"containers,omitempty"`
Customizations *Customizations `json:"customizations,omitempty" toml:"customizations,omitempty"`
Distro string `json:"distro" toml:"distro"`
// EXPERIMENTAL
Minimal bool `json:"minimal" toml:"minimal"`
}
// A Package specifies an RPM package.
type Package struct {
Name string `json:"name" toml:"name"`
Version string `json:"version,omitempty" toml:"version,omitempty"`
}
// A module specifies a modularity stream.
type EnabledModule struct {
Name string `json:"name" toml:"name"`
Stream string `json:"stream,omitempty" toml:"stream,omitempty"`
}
// A group specifies an package group.
type Group struct {
Name string `json:"name" toml:"name"`
}
type Container struct {
Source string `json:"source" toml:"source"`
Name string `json:"name,omitempty" toml:"name,omitempty"`
TLSVerify *bool `json:"tls-verify,omitempty" toml:"tls-verify,omitempty"`
LocalStorage bool `json:"local-storage,omitempty" toml:"local-storage,omitempty"`
}
// packages, modules, and groups all resolve to rpm packages right now. This
// function returns a combined list of "name-version" strings.
func (b *Blueprint) GetPackages() []string {
return b.GetPackagesEx(true)
}
func (b *Blueprint) GetPackagesEx(bootable bool) []string {
packages := []string{}
for _, pkg := range b.Packages {
packages = append(packages, pkg.ToNameVersion())
}
for _, pkg := range b.Modules {
packages = append(packages, pkg.ToNameVersion())
}
for _, group := range b.Groups {
packages = append(packages, "@"+group.Name)
}
if bootable {
kc := b.Customizations.GetKernel()
kpkg := Package{Name: kc.Name}
packages = append(packages, kpkg.ToNameVersion())
}
return packages
}
func (b *Blueprint) GetEnabledModules() []string {
modules := []string{}
for _, mod := range b.EnabledModules {
modules = append(modules, mod.ToNameStream())
}
return modules
}
func (p Package) ToNameVersion() string {
// Omit version to prevent all packages with prefix of name to be installed
if p.Version == "*" || p.Version == "" {
return p.Name
}
return p.Name + "-" + p.Version
}
func (p EnabledModule) ToNameStream() string {
return p.Name + ":" + p.Stream
}

View file

@ -1,471 +0,0 @@
package blueprint
import (
"fmt"
"reflect"
"slices"
"strings"
"github.com/osbuild/images/pkg/cert"
"github.com/osbuild/images/pkg/customizations/anaconda"
)
type Customizations struct {
Hostname *string `json:"hostname,omitempty" toml:"hostname,omitempty"`
Kernel *KernelCustomization `json:"kernel,omitempty" toml:"kernel,omitempty"`
User []UserCustomization `json:"user,omitempty" toml:"user,omitempty"`
Group []GroupCustomization `json:"group,omitempty" toml:"group,omitempty"`
Timezone *TimezoneCustomization `json:"timezone,omitempty" toml:"timezone,omitempty"`
Locale *LocaleCustomization `json:"locale,omitempty" toml:"locale,omitempty"`
Firewall *FirewallCustomization `json:"firewall,omitempty" toml:"firewall,omitempty"`
Services *ServicesCustomization `json:"services,omitempty" toml:"services,omitempty"`
Filesystem []FilesystemCustomization `json:"filesystem,omitempty" toml:"filesystem,omitempty"`
Disk *DiskCustomization `json:"disk,omitempty" toml:"disk,omitempty"`
InstallationDevice string `json:"installation_device,omitempty" toml:"installation_device,omitempty"`
FDO *FDOCustomization `json:"fdo,omitempty" toml:"fdo,omitempty"`
OpenSCAP *OpenSCAPCustomization `json:"openscap,omitempty" toml:"openscap,omitempty"`
Ignition *IgnitionCustomization `json:"ignition,omitempty" toml:"ignition,omitempty"`
Directories []DirectoryCustomization `json:"directories,omitempty" toml:"directories,omitempty"`
Files []FileCustomization `json:"files,omitempty" toml:"files,omitempty"`
Repositories []RepositoryCustomization `json:"repositories,omitempty" toml:"repositories,omitempty"`
FIPS *bool `json:"fips,omitempty" toml:"fips,omitempty"`
ContainersStorage *ContainerStorageCustomization `json:"containers-storage,omitempty" toml:"containers-storage,omitempty"`
Installer *InstallerCustomization `json:"installer,omitempty" toml:"installer,omitempty"`
RPM *RPMCustomization `json:"rpm,omitempty" toml:"rpm,omitempty"`
RHSM *RHSMCustomization `json:"rhsm,omitempty" toml:"rhsm,omitempty"`
CACerts *CACustomization `json:"cacerts,omitempty" toml:"cacerts,omitempty"`
}
type IgnitionCustomization struct {
Embedded *EmbeddedIgnitionCustomization `json:"embedded,omitempty" toml:"embedded,omitempty"`
FirstBoot *FirstBootIgnitionCustomization `json:"firstboot,omitempty" toml:"firstboot,omitempty"`
}
type EmbeddedIgnitionCustomization struct {
Config string `json:"config,omitempty" toml:"config,omitempty"`
}
type FirstBootIgnitionCustomization struct {
ProvisioningURL string `json:"url,omitempty" toml:"url,omitempty"`
}
type FDOCustomization struct {
ManufacturingServerURL string `json:"manufacturing_server_url,omitempty" toml:"manufacturing_server_url,omitempty"`
DiunPubKeyInsecure string `json:"diun_pub_key_insecure,omitempty" toml:"diun_pub_key_insecure,omitempty"`
// This is the output of:
// echo "sha256:$(openssl x509 -fingerprint -sha256 -noout -in diun_cert.pem | cut -d"=" -f2 | sed 's/://g')"
DiunPubKeyHash string `json:"diun_pub_key_hash,omitempty" toml:"diun_pub_key_hash,omitempty"`
DiunPubKeyRootCerts string `json:"diun_pub_key_root_certs,omitempty" toml:"diun_pub_key_root_certs,omitempty"`
DiMfgStringTypeMacIface string `json:"di_mfg_string_type_mac_iface,omitempty" toml:"di_mfg_string_type_mac_iface,omitempty"`
}
type KernelCustomization struct {
Name string `json:"name,omitempty" toml:"name,omitempty"`
Append string `json:"append" toml:"append"`
}
type SSHKeyCustomization struct {
User string `json:"user" toml:"user"`
Key string `json:"key" toml:"key"`
}
type UserCustomization struct {
Name string `json:"name" toml:"name"`
Description *string `json:"description,omitempty" toml:"description,omitempty"`
Password *string `json:"password,omitempty" toml:"password,omitempty"`
Key *string `json:"key,omitempty" toml:"key,omitempty"`
Home *string `json:"home,omitempty" toml:"home,omitempty"`
Shell *string `json:"shell,omitempty" toml:"shell,omitempty"`
Groups []string `json:"groups,omitempty" toml:"groups,omitempty"`
UID *int `json:"uid,omitempty" toml:"uid,omitempty"`
GID *int `json:"gid,omitempty" toml:"gid,omitempty"`
ExpireDate *int `json:"expiredate,omitempty" toml:"expiredate,omitempty"`
ForcePasswordReset *bool `json:"force_password_reset,omitempty" toml:"force_password_reset,omitempty"`
}
type GroupCustomization struct {
Name string `json:"name" toml:"name"`
GID *int `json:"gid,omitempty" toml:"gid,omitempty"`
}
type TimezoneCustomization struct {
Timezone *string `json:"timezone,omitempty" toml:"timezone,omitempty"`
NTPServers []string `json:"ntpservers,omitempty" toml:"ntpservers,omitempty"`
}
type LocaleCustomization struct {
Languages []string `json:"languages,omitempty" toml:"languages,omitempty"`
Keyboard *string `json:"keyboard,omitempty" toml:"keyboard,omitempty"`
}
type FirewallCustomization struct {
Ports []string `json:"ports,omitempty" toml:"ports,omitempty"`
Services *FirewallServicesCustomization `json:"services,omitempty" toml:"services,omitempty"`
Zones []FirewallZoneCustomization `json:"zones,omitempty" toml:"zones,omitempty"`
}
type FirewallZoneCustomization struct {
Name *string `json:"name,omitempty" toml:"name,omitempty"`
Sources []string `json:"sources,omitempty" toml:"sources,omitempty"`
}
type FirewallServicesCustomization struct {
Enabled []string `json:"enabled,omitempty" toml:"enabled,omitempty"`
Disabled []string `json:"disabled,omitempty" toml:"disabled,omitempty"`
}
type ServicesCustomization struct {
Enabled []string `json:"enabled,omitempty" toml:"enabled,omitempty"`
Disabled []string `json:"disabled,omitempty" toml:"disabled,omitempty"`
Masked []string `json:"masked,omitempty" toml:"masked,omitempty"`
}
type OpenSCAPCustomization struct {
DataStream string `json:"datastream,omitempty" toml:"datastream,omitempty"`
ProfileID string `json:"profile_id,omitempty" toml:"profile_id,omitempty"`
Tailoring *OpenSCAPTailoringCustomizations `json:"tailoring,omitempty" toml:"tailoring,omitempty"`
JSONTailoring *OpenSCAPJSONTailoringCustomizations `json:"json_tailoring,omitempty" toml:"json_tailoring,omitempty"`
}
type OpenSCAPTailoringCustomizations struct {
Selected []string `json:"selected,omitempty" toml:"selected,omitempty"`
Unselected []string `json:"unselected,omitempty" toml:"unselected,omitempty"`
}
type OpenSCAPJSONTailoringCustomizations struct {
ProfileID string `json:"profile_id,omitempty" toml:"profile_id,omitempty"`
Filepath string `json:"filepath,omitempty" toml:"filepath,omitempty"`
}
// Configure the container storage separately from containers, since we most likely would
// like to use the same storage path for all of the containers.
type ContainerStorageCustomization struct {
// destination is always `containers-storage`, so we won't expose this
StoragePath *string `json:"destination-path,omitempty" toml:"destination-path,omitempty"`
}
type CACustomization struct {
PEMCerts []string `json:"pem_certs,omitempty" toml:"pem_certs,omitempty"`
}
type CustomizationError struct {
Message string
}
func (e *CustomizationError) Error() string {
return e.Message
}
// CheckCustomizations returns an error of type `CustomizationError`
// if `c` has any customizations not specified in `allowed`
func (c *Customizations) CheckAllowed(allowed ...string) error {
if c == nil {
return nil
}
allowMap := make(map[string]bool)
for _, a := range allowed {
allowMap[a] = true
}
t := reflect.TypeOf(*c)
v := reflect.ValueOf(*c)
for i := 0; i < t.NumField(); i++ {
empty := false
field := v.Field(i)
switch field.Kind() {
case reflect.String:
if field.String() == "" {
empty = true
}
case reflect.Array, reflect.Slice:
if field.Len() == 0 {
empty = true
}
case reflect.Ptr:
if field.IsNil() {
empty = true
}
default:
panic(fmt.Sprintf("unhandled customization field type %s, %s", v.Kind(), t.Field(i).Name))
}
if !empty && !allowMap[t.Field(i).Name] {
return &CustomizationError{fmt.Sprintf("'%s' is not allowed", t.Field(i).Name)}
}
}
return nil
}
func (c *Customizations) GetHostname() *string {
if c == nil {
return nil
}
return c.Hostname
}
func (c *Customizations) GetPrimaryLocale() (*string, *string) {
if c == nil {
return nil, nil
}
if c.Locale == nil {
return nil, nil
}
if len(c.Locale.Languages) == 0 {
return nil, c.Locale.Keyboard
}
return &c.Locale.Languages[0], c.Locale.Keyboard
}
func (c *Customizations) GetTimezoneSettings() (*string, []string) {
if c == nil {
return nil, nil
}
if c.Timezone == nil {
return nil, nil
}
return c.Timezone.Timezone, c.Timezone.NTPServers
}
// GetUsers returns the user customizations with home directories sanitized:
// a trailing slash is stripped, since it can prevent the directory from
// receiving the correct selinux labels.
func (c *Customizations) GetUsers() []UserCustomization {
	if c == nil || c.User == nil {
		return nil
	}
	users := c.User
	for idx, u := range users {
		if u.Home == nil {
			continue
		}
		trimmed := strings.TrimRight(*u.Home, "/")
		u.Home = &trimmed
		users[idx] = u
	}
	return users
}
// GetGroups returns the group customizations (nil-receiver safe).
func (c *Customizations) GetGroups() []GroupCustomization {
	if c != nil {
		return c.Group
	}
	return nil
}
// GetKernel returns the kernel customization, falling back to the default
// package name "kernel" when no name was configured. Never returns nil.
func (c *Customizations) GetKernel() *KernelCustomization {
	kernel := &KernelCustomization{Name: "kernel"}
	if c != nil && c.Kernel != nil {
		kernel.Append = c.Kernel.Append
		if c.Kernel.Name != "" {
			kernel.Name = c.Kernel.Name
		}
	}
	return kernel
}
// GetFirewall returns the firewall customization (nil-receiver safe).
func (c *Customizations) GetFirewall() *FirewallCustomization {
	if c != nil {
		return c.Firewall
	}
	return nil
}
// GetServices returns the services customization (nil-receiver safe).
func (c *Customizations) GetServices() *ServicesCustomization {
	if c != nil {
		return c.Services
	}
	return nil
}
// GetFilesystems returns the filesystem customizations (nil-receiver safe).
func (c *Customizations) GetFilesystems() []FilesystemCustomization {
	if c != nil {
		return c.Filesystem
	}
	return nil
}
// GetFilesystemsMinSize returns the sum of all filesystem minimum sizes,
// rounded up to the next multiple of the sector size (512 bytes).
func (c *Customizations) GetFilesystemsMinSize() uint64 {
	if c == nil {
		return 0
	}
	var total uint64
	for _, fs := range c.Filesystem {
		total += fs.MinSize
	}
	// Round up so the aggregate size is always sector-aligned.
	if rem := total % 512; rem != 0 {
		total += 512 - rem
	}
	return total
}
// GetPartitioning validates and returns the disk (partitioning)
// customization. Returns an error when the customization is invalid.
func (c *Customizations) GetPartitioning() (*DiskCustomization, error) {
	if c == nil {
		return nil, nil
	}
	err := c.Disk.Validate()
	if err != nil {
		return nil, err
	}
	return c.Disk, nil
}
// GetInstallationDevice returns the target installation device, or the empty
// string when unset.
func (c *Customizations) GetInstallationDevice() string {
	if c == nil {
		return ""
	}
	return c.InstallationDevice
}
// GetFDO returns the FDO customization (nil-receiver safe).
func (c *Customizations) GetFDO() *FDOCustomization {
	if c != nil {
		return c.FDO
	}
	return nil
}
// GetOpenSCAP returns the OpenSCAP customization (nil-receiver safe).
func (c *Customizations) GetOpenSCAP() *OpenSCAPCustomization {
	if c != nil {
		return c.OpenSCAP
	}
	return nil
}
// GetIgnition returns the Ignition customization (nil-receiver safe).
func (c *Customizations) GetIgnition() *IgnitionCustomization {
	if c != nil {
		return c.Ignition
	}
	return nil
}
// GetDirectories returns the directory customizations (nil-receiver safe).
func (c *Customizations) GetDirectories() []DirectoryCustomization {
	if c != nil {
		return c.Directories
	}
	return nil
}
// GetFiles returns the file customizations (nil-receiver safe).
func (c *Customizations) GetFiles() []FileCustomization {
	if c != nil {
		return c.Files
	}
	return nil
}
// GetRepositories validates and returns the custom repository
// customizations. Returns an error on the first invalid repository.
func (c *Customizations) GetRepositories() ([]RepositoryCustomization, error) {
	if c == nil {
		return nil, nil
	}
	for _, repo := range c.Repositories {
		repo := repo
		if err := validateCustomRepository(&repo); err != nil {
			return nil, err
		}
	}
	return c.Repositories, nil
}
// GetFIPS reports whether FIPS mode was requested; defaults to false.
func (c *Customizations) GetFIPS() bool {
	if c != nil && c.FIPS != nil {
		return *c.FIPS
	}
	return false
}
// GetContainerStorage returns the container storage customization, or nil
// when unset or when no storage path was configured.
func (c *Customizations) GetContainerStorage() *ContainerStorageCustomization {
	if c == nil || c.ContainersStorage == nil {
		return nil
	}
	// Guard against a nil StoragePath before dereferencing: the previous
	// code panicked when the customization was present but the path field
	// was omitted. A nil or empty path both mean "not configured".
	if c.ContainersStorage.StoragePath == nil || *c.ContainersStorage.StoragePath == "" {
		return nil
	}
	return c.ContainersStorage
}
// GetInstaller returns the installer customization after checking for
// conflicting options. Returns an error when installer options are combined
// with custom kickstart contents, or when the Anaconda Users module is
// disabled while user/group customizations are present.
func (c *Customizations) GetInstaller() (*InstallerCustomization, error) {
	if c == nil || c.Installer == nil {
		return nil, nil
	}

	inst := c.Installer

	// Installer options aren't supported when the user supplies their own
	// kickstart content.
	if inst.Kickstart != nil && len(inst.Kickstart.Contents) > 0 {
		if inst.Unattended {
			return nil, fmt.Errorf("installer.unattended is not supported when adding custom kickstart contents")
		}
		if len(inst.SudoNopasswd) > 0 {
			return nil, fmt.Errorf("installer.sudo-nopasswd is not supported when adding custom kickstart contents")
		}
	}

	// User and group customizations require the Anaconda Users module.
	if inst.Modules != nil && len(c.User)+len(c.Group) > 0 &&
		slices.Contains(inst.Modules.Disable, anaconda.ModuleUsers) {
		return nil, fmt.Errorf("blueprint contains user or group customizations but disables the required Users Anaconda module")
	}

	return inst, nil
}
// GetRPM returns the RPM customization (nil-receiver safe).
func (c *Customizations) GetRPM() *RPMCustomization {
	if c != nil {
		return c.RPM
	}
	return nil
}
// GetRHSM returns the RHSM customization (nil-receiver safe).
func (c *Customizations) GetRHSM() *RHSMCustomization {
	if c != nil {
		return c.RHSM
	}
	return nil
}
// checkCACerts verifies that every configured CA certificate bundle is
// parseable PEM; returns the first parse error encountered.
func (c *Customizations) checkCACerts() error {
	if c == nil || c.CACerts == nil {
		return nil
	}
	for _, bundle := range c.CACerts.PEMCerts {
		if _, err := cert.ParseCerts(bundle); err != nil {
			return err
		}
	}
	return nil
}
// GetCACerts returns the CA certificate customization after validating that
// every configured PEM bundle parses.
func (c *Customizations) GetCACerts() (*CACustomization, error) {
	if c == nil {
		return nil, nil
	}
	err := c.checkCACerts()
	if err != nil {
		return nil, err
	}
	return c.CACerts, nil
}

View file

@ -1,750 +0,0 @@
package blueprint
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"path/filepath"
"regexp"
"slices"
"strings"
"unicode/utf16"
"github.com/google/uuid"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/pathpolicy"
)
// DiskCustomization describes a custom disk/partition layout for the image.
type DiskCustomization struct {
	// Type of the partition table: gpt or dos.
	// Optional, the default depends on the distro and image type.
	Type string
	// MinSize is the minimum total size of the disk, in bytes.
	MinSize uint64
	// Partitions to create on the disk.
	Partitions []PartitionCustomization
}
// diskCustomizationMarshaler mirrors DiskCustomization for (de)serialization,
// using datasizes.Size so minsize accepts human-readable strings (e.g. "2 GiB").
type diskCustomizationMarshaler struct {
	Type string `json:"type,omitempty" toml:"type,omitempty"`
	MinSize datasizes.Size `json:"minsize,omitempty" toml:"minsize,omitempty"`
	Partitions []PartitionCustomization `json:"partitions,omitempty" toml:"partitions,omitempty"`
}
// UnmarshalJSON decodes a DiskCustomization, converting the human-readable
// minsize representation into bytes.
func (dc *DiskCustomization) UnmarshalJSON(data []byte) error {
	var aux diskCustomizationMarshaler
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	*dc = DiskCustomization{
		Type:       aux.Type,
		MinSize:    aux.MinSize.Uint64(),
		Partitions: aux.Partitions,
	}
	return nil
}
// UnmarshalTOML decodes a DiskCustomization from TOML by round-tripping the
// data through JSON, reusing the custom JSON unmarshaller.
func (dc *DiskCustomization) UnmarshalTOML(data any) error {
	return unmarshalTOMLviaJSON(dc, data)
}
// PartitionCustomization defines a single partition on a disk. The Type
// defines the kind of "payload" for the partition: plain, lvm, or btrfs.
// - plain: the payload will be a filesystem on a partition (e.g. xfs, ext4).
// See [FilesystemTypedCustomization] for extra fields.
// - lvm: the payload will be an LVM volume group. See [VGCustomization] for
// extra fields
// - btrfs: the payload will be a btrfs volume. See
// [BtrfsVolumeCustomization] for extra fields.
type PartitionCustomization struct {
	// The type of payload for the partition (optional, defaults to "plain").
	Type string `json:"type" toml:"type"`
	// Minimum size of the partition that contains the filesystem (for "plain"
	// filesystem), volume group ("lvm"), or btrfs volume ("btrfs"). The final
	// size of the partition will be larger than the minsize if the sum of the
	// contained volumes (logical volumes or subvolumes) is larger. In
	// addition, certain mountpoints have required minimum sizes. See
	// https://osbuild.org/docs/user-guide/partitioning for more details.
	// (optional, defaults depend on payload and mountpoints).
	MinSize uint64 `json:"minsize" toml:"minsize"`
	// The partition type GUID for GPT partitions. For DOS partitions, this
	// field can be used to set the (2 hex digit) partition type.
	// If not set, the type will be automatically set based on the mountpoint
	// or the payload type.
	PartType string `json:"part_type,omitempty" toml:"part_type,omitempty"`
	// The partition label for GPT partitions, not supported for dos partitions.
	// Note: This is not the same as the label, which can be set in "Label"
	PartLabel string `json:"part_label,omitempty" toml:"part_label,omitempty"`
	// The partition GUID for GPT partitions, not supported for dos partitions.
	// Note: This is the unique uuid, not the type guid, that is PartType
	PartUUID string `json:"part_uuid,omitempty" toml:"part_uuid,omitempty"`
	// Exactly one of the embedded payload customizations is populated,
	// selected by Type; the custom unmarshallers enforce this.
	BtrfsVolumeCustomization
	VGCustomization
	FilesystemTypedCustomization
}
// A filesystem on a plain partition or LVM logical volume.
// Note the differences from [FilesystemCustomization]:
// - Adds a label.
// - Adds a filesystem type (fs_type).
// - Does not define a size. The size is defined by its container: a
// partition ([PartitionCustomization]) or LVM logical volume
// ([LVCustomization]).
//
// Setting the FSType to "swap" creates a swap area (and the Mountpoint must be
// empty).
type FilesystemTypedCustomization struct {
	Mountpoint string `json:"mountpoint" toml:"mountpoint"`
	Label string `json:"label,omitempty" toml:"label,omitempty"`
	FSType string `json:"fs_type,omitempty" toml:"fs_type,omitempty"`
}
// An LVM volume group with one or more logical volumes.
type VGCustomization struct {
	// Volume group name (optional, default will be automatically generated).
	Name string `json:"name" toml:"name"`
	// Logical volumes to create inside the volume group.
	LogicalVolumes []LVCustomization `json:"logical_volumes,omitempty" toml:"logical_volumes,omitempty"`
}
// LVCustomization describes a single LVM logical volume and the filesystem
// it carries.
type LVCustomization struct {
	// Logical volume name
	Name string `json:"name,omitempty" toml:"name,omitempty"`
	// Minimum size of the logical volume
	MinSize uint64 `json:"minsize,omitempty" toml:"minsize,omitempty"`
	FilesystemTypedCustomization
}
// UnmarshalJSON decodes an LVCustomization, converting data sizes (minsize)
// expressed either as numbers or strings (e.g. "1 GiB") to uint64 bytes.
// minsize is required.
func (lv *LVCustomization) UnmarshalJSON(data []byte) error {
	var aux struct {
		Name    string `json:"name,omitempty" toml:"name,omitempty"`
		MinSize any    `json:"minsize,omitempty" toml:"minsize,omitempty"`
		FilesystemTypedCustomization
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	lv.Name = aux.Name
	lv.FilesystemTypedCustomization = aux.FilesystemTypedCustomization
	if aux.MinSize == nil {
		return fmt.Errorf("minsize is required")
	}
	size, err := decodeSize(aux.MinSize)
	if err != nil {
		return err
	}
	lv.MinSize = size
	return nil
}
// A btrfs volume consisting of one or more subvolumes.
type BtrfsVolumeCustomization struct {
	Subvolumes []BtrfsSubvolumeCustomization
}
// BtrfsSubvolumeCustomization describes one subvolume of a btrfs volume.
type BtrfsSubvolumeCustomization struct {
	// The name of the subvolume, which defines the location (path) on the
	// root volume (required).
	// See https://btrfs.readthedocs.io/en/latest/Subvolumes.html
	Name string `json:"name" toml:"name"`
	// Mountpoint for the subvolume.
	Mountpoint string `json:"mountpoint" toml:"mountpoint"`
}
// Custom JSON unmarshaller that first reads the value of the "type" field and
// then deserialises the whole object into a struct that only contains the
// fields valid for that partition type. This ensures that no fields are set
// for the substructure of a different type than the one defined in the "type"
// fields.
func (v *PartitionCustomization) UnmarshalJSON(data []byte) error {
	errPrefix := "JSON unmarshal:"
	// First pass: read only the common fields, leaving minsize untyped so it
	// can be either a number or a size string.
	var typeSniffer struct {
		Type string `json:"type"`
		MinSize any `json:"minsize"`
		PartType string `json:"part_type"`
		PartLabel string `json:"part_label"`
		PartUUID string `json:"part_uuid"`
	}
	if err := json.Unmarshal(data, &typeSniffer); err != nil {
		return fmt.Errorf("%s %w", errPrefix, err)
	}
	// An absent type defaults to a plain filesystem partition.
	partType := "plain"
	if typeSniffer.Type != "" {
		partType = typeSniffer.Type
	}
	// Second pass: decode into the type-specific payload with unknown fields
	// disallowed, so fields of the other payload types are rejected.
	switch partType {
	case "plain":
		if err := decodePlain(v, data); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	case "btrfs":
		if err := decodeBtrfs(v, data); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	case "lvm":
		if err := decodeLVM(v, data); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	default:
		return fmt.Errorf("%s unknown partition type: %s", errPrefix, partType)
	}
	v.Type = partType
	v.PartType = typeSniffer.PartType
	v.PartLabel = typeSniffer.PartLabel
	v.PartUUID = typeSniffer.PartUUID
	// minsize is mandatory and may be a number or a data-size string.
	if typeSniffer.MinSize == nil {
		return fmt.Errorf("minsize is required")
	}
	minsize, err := decodeSize(typeSniffer.MinSize)
	if err != nil {
		return fmt.Errorf("%s error decoding minsize for partition: %w", errPrefix, err)
	}
	v.MinSize = minsize
	return nil
}
// decodePlain decodes the data into a struct that only embeds the
// FilesystemTypedCustomization with DisallowUnknownFields, ensuring that when
// the type is "plain", none of the fields for btrfs or lvm are accepted.
func decodePlain(v *PartitionCustomization, data []byte) error {
	var plain struct {
		// Type, minsize, and part_* are decoded by the caller; they are
		// listed here only so DisallowUnknownFields accepts them.
		Type string `json:"type"`
		MinSize any `json:"minsize"`
		PartType string `json:"part_type"`
		PartLabel string `json:"part_label"`
		PartUUID string `json:"part_uuid"`
		FilesystemTypedCustomization
	}
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()
	if err := dec.Decode(&plain); err != nil {
		return fmt.Errorf("error decoding partition with type \"plain\": %w", err)
	}
	v.FilesystemTypedCustomization = plain.FilesystemTypedCustomization
	return nil
}
// decodeBtrfs decodes the data into a struct that only embeds the
// BtrfsVolumeCustomization with DisallowUnknownFields, ensuring that when
// the type is btrfs, none of the fields for plain or lvm are accepted.
func decodeBtrfs(v *PartitionCustomization, data []byte) error {
	var btrfs struct {
		// Type, minsize, and part_* are decoded by the caller; they are
		// listed here only so DisallowUnknownFields accepts them.
		Type string `json:"type"`
		MinSize any `json:"minsize"`
		PartType string `json:"part_type"`
		PartLabel string `json:"part_label"`
		PartUUID string `json:"part_uuid"`
		BtrfsVolumeCustomization
	}
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()
	if err := dec.Decode(&btrfs); err != nil {
		return fmt.Errorf("error decoding partition with type \"btrfs\": %w", err)
	}
	v.BtrfsVolumeCustomization = btrfs.BtrfsVolumeCustomization
	return nil
}
// decodeLVM decodes the data into a struct that only embeds the
// VGCustomization with DisallowUnknownFields, ensuring that when the type
// is lvm, none of the fields for plain or btrfs are accepted.
func decodeLVM(v *PartitionCustomization, data []byte) error {
	var vg struct {
		// Type, minsize, and part_* are decoded by the caller; they are
		// listed here only so DisallowUnknownFields accepts them.
		Type string `json:"type"`
		MinSize any `json:"minsize"`
		PartType string `json:"part_type"`
		PartLabel string `json:"part_label"`
		PartUUID string `json:"part_uuid"`
		VGCustomization
	}
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()
	if err := dec.Decode(&vg); err != nil {
		return fmt.Errorf("error decoding partition with type \"lvm\": %w", err)
	}
	v.VGCustomization = vg.VGCustomization
	return nil
}
// Custom TOML unmarshaller that first reads the value of the "type" field and
// then deserialises the whole object into a struct that only contains the
// fields valid for that partition type. This ensures that no fields are set
// for the substructure of a different type than the one defined in the "type"
// fields.
func (v *PartitionCustomization) UnmarshalTOML(data any) error {
	errPrefix := "TOML unmarshal:"
	d, ok := data.(map[string]any)
	if !ok {
		return fmt.Errorf("%s customizations.partition is not an object", errPrefix)
	}
	// An absent type defaults to a plain filesystem partition.
	partType := "plain"
	if typeField, ok := d["type"]; ok {
		typeStr, ok := typeField.(string)
		if !ok {
			return fmt.Errorf("%s type must be a string, got \"%v\" of type %T", errPrefix, typeField, typeField)
		}
		partType = typeStr
	}
	// serialise the data to JSON and reuse the subobject decoders
	dataJSON, err := json.Marshal(data)
	if err != nil {
		return fmt.Errorf("%s error while decoding partition customization: %w", errPrefix, err)
	}
	switch partType {
	case "plain":
		if err := decodePlain(v, dataJSON); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	case "btrfs":
		if err := decodeBtrfs(v, dataJSON); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	case "lvm":
		if err := decodeLVM(v, dataJSON); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	default:
		return fmt.Errorf("%s unknown partition type: %s", errPrefix, partType)
	}
	v.Type = partType
	// minsize is mandatory and may be a number or a data-size string.
	minsizeField, ok := d["minsize"]
	if !ok {
		return fmt.Errorf("minsize is required")
	}
	minsize, err := decodeSize(minsizeField)
	if err != nil {
		return fmt.Errorf("%s error decoding minsize for partition: %w", errPrefix, err)
	}
	v.MinSize = minsize
	return nil
}
// Validate checks for customization combinations that are generally not
// supported or can create conflicts, regardless of specific distro or image
// type policies. The validator ensures all of the following properties:
// - All mountpoints are valid
// - All mountpoints are unique
// - All LVM volume group names are unique
// - All LVM logical volume names are unique within a given volume group
// - All btrfs subvolume names are unique within a given btrfs volume
// - All btrfs subvolume names are valid and non-empty
// - All filesystems are valid for their mountpoints (e.g. xfs or ext4 for /boot)
// - No LVM logical volume has an invalid mountpoint (/boot or /boot/efi)
// - Plain filesystem types are valid for the partition type
// - All non-empty properties are valid for the partition type (e.g.
// LogicalVolumes is empty when the type is "plain" or "btrfs")
// - Filesystems with FSType set to "none" or "swap" do not specify a mountpoint.
//
// Note that in *addition* consumers should also call
// ValidateLayoutConstraints() to validate that the policy for disk
// customizations is met.
func (p *DiskCustomization) Validate() error {
	if p == nil {
		return nil
	}
	switch p.Type {
	case "gpt", "":
	case "dos":
		// dos/mbr only supports 4 partitions
		// Unfortunately, at this stage it's unknown whether we will need extra
		// partitions (bios boot, root, esp), so this check is just to catch
		// obvious invalid customizations early. The final partition table is
		// checked after it's created.
		if len(p.Partitions) > 4 {
			return fmt.Errorf("invalid partitioning customizations: \"dos\" partition table type only supports up to 4 partitions: got %d", len(p.Partitions))
		}
	default:
		return fmt.Errorf("unknown partition table type: %s (valid: gpt, dos)", p.Type)
	}
	// Shared across all partitions so duplicates are caught globally.
	mountpoints := make(map[string]bool)
	vgnames := make(map[string]bool)
	var errs []error
	for _, part := range p.Partitions {
		if err := part.ValidatePartitionTypeID(p.Type); err != nil {
			errs = append(errs, err)
		}
		if err := part.ValidatePartitionID(p.Type); err != nil {
			errs = append(errs, err)
		}
		if err := part.ValidatePartitionLabel(p.Type); err != nil {
			errs = append(errs, err)
		}
		// Delegate payload validation to the type-specific validator.
		switch part.Type {
		case "plain", "":
			errs = append(errs, part.validatePlain(mountpoints))
		case "lvm":
			errs = append(errs, part.validateLVM(mountpoints, vgnames))
		case "btrfs":
			errs = append(errs, part.validateBtrfs(mountpoints))
		default:
			errs = append(errs, fmt.Errorf("unknown partition type: %s", part.Type))
		}
	}
	// will discard all nil errors
	if err := errors.Join(errs...); err != nil {
		return fmt.Errorf("invalid partitioning customizations:\n%w", err)
	}
	return nil
}
// validateMountpoint checks that path is a non-empty, absolute, canonical
// filesystem path suitable for use as a mountpoint.
func validateMountpoint(path string) error {
	switch {
	case path == "":
		return fmt.Errorf("mountpoint is empty")
	case !strings.HasPrefix(path, "/"):
		return fmt.Errorf("mountpoint %q is not an absolute path", path)
	}
	if clean := filepath.Clean(path); clean != path {
		return fmt.Errorf("mountpoint %q is not a canonical path (did you mean %q?)", path, clean)
	}
	return nil
}
// ValidateLayoutConstraints checks that at most one LVM Volume Group or btrfs
// volume is defined. Returns an error if both LVM and btrfs are set and if
// either has more than one element.
//
// Note that this is a *policy* validation, in theory the "disk" code
// does support the constraints but we choose not to allow them for
// now. Each consumer of "DiskCustomization" should call this
// *unless* it's very low-level and not end-user-facing.
func (p *DiskCustomization) ValidateLayoutConstraints() error {
	if p == nil {
		return nil
	}
	var btrfsVols, lvmVGs uint
	for _, part := range p.Partitions {
		switch part.Type {
		case "lvm":
			lvmVGs++
		case "btrfs":
			btrfsVols++
		}
	}
	// The combination error takes precedence over the count errors.
	if lvmVGs > 0 && btrfsVols > 0 {
		return fmt.Errorf("btrfs and lvm partitioning cannot be combined")
	}
	if btrfsVols > 1 {
		return fmt.Errorf("multiple btrfs volumes are not yet supported")
	}
	if lvmVGs > 1 {
		return fmt.Errorf("multiple LVM volume groups are not yet supported")
	}
	return nil
}
// validateFilesystemType checks that the filesystem type is valid for the
// given mountpoint; only /boot and /boot/efi are constrained.
func validateFilesystemType(path, fstype string) error {
	const badfsMsgFmt = "unsupported filesystem type for %q: %s"
	var valid []string
	switch path {
	case "/boot":
		valid = []string{"xfs", "ext4"}
	case "/boot/efi":
		valid = []string{"vfat"}
	default:
		return nil
	}
	if !slices.Contains(valid, fstype) {
		return fmt.Errorf(badfsMsgFmt, path, fstype)
	}
	return nil
}
// These mountpoints must be on a plain partition (i.e. not on LVM or btrfs).
var plainOnlyMountpoints = []string{
	"/boot",
	"/boot/efi", // not allowed by our global policies, but that might change
}

// Filesystem types accepted for "plain" partitions and LVM logical volumes.
var validPlainFSTypes = []string{
	"ext4",
	"vfat",
	"xfs",
}

// exactly 2 hex digits
var validDosPartitionType = regexp.MustCompile(`^[0-9a-fA-F]{2}$`)
// ValidatePartitionTypeID returns an error if the partition type ID is not
// valid given the partition table type. If the partition table type is an
// empty string, the function returns an error only if the partition type ID is
// invalid for both gpt and dos partition tables.
func (p *PartitionCustomization) ValidatePartitionTypeID(ptType string) error {
	// Empty PartType is fine, it will be selected automatically
	if p.PartType == "" {
		return nil
	}
	// Check both representations up front; which one matters depends on the
	// partition table type below.
	_, uuidErr := uuid.Parse(p.PartType)
	validDosType := validDosPartitionType.MatchString(p.PartType)
	switch ptType {
	case "gpt":
		if uuidErr != nil {
			return fmt.Errorf("invalid partition part_type %q for partition table type %q (must be a valid UUID): %w", p.PartType, ptType, uuidErr)
		}
	case "dos":
		if !validDosType {
			return fmt.Errorf("invalid partition part_type %q for partition table type %q (must be a 2-digit hex number)", p.PartType, ptType)
		}
	case "":
		// We don't know the partition table type yet, the fallback is controlled
		// by the CustomPartitionTableOptions, so return an error if it fails both.
		if uuidErr != nil && !validDosType {
			return fmt.Errorf("invalid part_type %q: must be a valid UUID for GPT partition tables or a 2-digit hex number for DOS partition tables", p.PartType)
		}
	default:
		// ignore: handled elsewhere
	}
	return nil
}
// ValidatePartitionID returns an error if the partition GUID (part_uuid) is
// not valid for the given partition table type. Partition GUIDs are only
// supported on gpt partition tables; on dos tables any non-empty value is an
// error.
func (p *PartitionCustomization) ValidatePartitionID(ptType string) error {
	// Empty PartUUID is fine, it will be selected automatically if needed
	if p.PartUUID == "" {
		return nil
	}
	if ptType == "dos" {
		// Fixed: this message previously said "part_type", but this function
		// validates the partition GUID (part_uuid).
		return fmt.Errorf("part_uuid is not supported for dos partition tables")
	}
	_, uuidErr := uuid.Parse(p.PartUUID)
	if uuidErr != nil {
		return fmt.Errorf("invalid partition part_uuid %q (must be a valid UUID): %w", p.PartUUID, uuidErr)
	}
	return nil
}
// ValidatePartitionLabel returns an error if the partition label (part_label)
// is not valid given the partition table type: labels are only supported on
// gpt partition tables and are limited to 36 UTF-16 code units.
func (p *PartitionCustomization) ValidatePartitionLabel(ptType string) error {
	// Empty PartLabel is fine
	if p.PartLabel == "" {
		return nil
	}
	if ptType == "dos" {
		return fmt.Errorf("part_label is not supported for dos partition tables")
	}
	// GPT Labels are up to 36 utf-16 chars
	if len(utf16.Encode([]rune(p.PartLabel))) > 36 {
		return fmt.Errorf("part_label is not a valid GPT label, it is too long")
	}
	return nil
}
// validatePlain checks a "plain" partition: swap/none payloads must not have
// a mountpoint; everything else needs a valid, unique mountpoint and an
// allowed filesystem type. Valid mountpoints are recorded in mountpoints.
func (p *PartitionCustomization) validatePlain(mountpoints map[string]bool) error {
	// "none" and "swap" partitions carry no mountpoint; nothing else to check.
	switch p.FSType {
	case "none":
		if p.Mountpoint != "" {
			return fmt.Errorf("mountpoint for none partition must be empty (got %q)", p.Mountpoint)
		}
		return nil
	case "swap":
		if p.Mountpoint != "" {
			return fmt.Errorf("mountpoint for swap partition must be empty (got %q)", p.Mountpoint)
		}
		return nil
	}
	if err := validateMountpoint(p.Mountpoint); err != nil {
		return err
	}
	if mountpoints[p.Mountpoint] {
		return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", p.Mountpoint)
	}
	// TODO: allow empty fstype with default from distro
	if !slices.Contains(validPlainFSTypes, p.FSType) {
		return fmt.Errorf("unknown or invalid filesystem type (fs_type) for mountpoint %q: %s", p.Mountpoint, p.FSType)
	}
	if err := validateFilesystemType(p.Mountpoint, p.FSType); err != nil {
		return err
	}
	mountpoints[p.Mountpoint] = true
	return nil
}
// validateLVM checks an "lvm" partition: the volume group name must be
// unique, btrfs/plain-only properties must be unset, and every logical
// volume needs a unique name and a valid, unique mountpoint with an allowed
// filesystem type (swap LVs must have no mountpoint). Valid names and
// mountpoints are recorded in vgnames and mountpoints.
func (p *PartitionCustomization) validateLVM(mountpoints, vgnames map[string]bool) error {
	if p.Name != "" && vgnames[p.Name] { // VGs with no name get autogenerated names
		return fmt.Errorf("duplicate LVM volume group name %q in partitioning customizations", p.Name)
	}
	// check for invalid property usage
	if len(p.Subvolumes) > 0 {
		return fmt.Errorf("subvolumes defined for LVM volume group (partition type \"lvm\")")
	}
	if p.Label != "" {
		return fmt.Errorf("label %q defined for LVM volume group (partition type \"lvm\")", p.Label)
	}
	vgnames[p.Name] = true
	lvnames := make(map[string]bool)
	for _, lv := range p.LogicalVolumes {
		if lv.Name != "" && lvnames[lv.Name] { // LVs with no name get autogenerated names
			return fmt.Errorf("duplicate LVM logical volume name %q in volume group %q in partitioning customizations", lv.Name, p.Name)
		}
		lvnames[lv.Name] = true
		if lv.FSType == "swap" {
			// swap LVs must not define a mountpoint
			if lv.Mountpoint != "" {
				return fmt.Errorf("mountpoint for swap logical volume with name %q in volume group %q must be empty", lv.Name, p.Name)
			}
			// Fixed: this was previously `return nil`, which silently skipped
			// validation of all remaining logical volumes in the group.
			continue
		}
		if err := validateMountpoint(lv.Mountpoint); err != nil {
			return fmt.Errorf("invalid logical volume customization: %w", err)
		}
		if mountpoints[lv.Mountpoint] {
			return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", lv.Mountpoint)
		}
		mountpoints[lv.Mountpoint] = true
		if slices.Contains(plainOnlyMountpoints, lv.Mountpoint) {
			return fmt.Errorf("invalid mountpoint %q for logical volume", lv.Mountpoint)
		}
		// TODO: allow empty fstype with default from distro
		if !slices.Contains(validPlainFSTypes, lv.FSType) {
			return fmt.Errorf("unknown or invalid filesystem type (fs_type) for logical volume with mountpoint %q: %s", lv.Mountpoint, lv.FSType)
		}
	}
	return nil
}
// validateBtrfs checks a "btrfs" partition: the volume itself must have no
// mountpoint and no LVM properties, and every subvolume needs a non-empty,
// unique name and a valid, unique mountpoint. Valid mountpoints are recorded
// in mountpoints.
func (p *PartitionCustomization) validateBtrfs(mountpoints map[string]bool) error {
	if p.Mountpoint != "" {
		return fmt.Errorf(`"mountpoint" is not supported for btrfs volumes (only subvolumes can have mountpoints)`)
	}
	if len(p.Subvolumes) == 0 {
		return fmt.Errorf("btrfs volume requires subvolumes")
	}
	if len(p.LogicalVolumes) > 0 {
		return fmt.Errorf("LVM logical volumes defined for btrfs volume (partition type \"btrfs\")")
	}
	seen := make(map[string]bool, len(p.Subvolumes))
	for _, sv := range p.Subvolumes {
		switch {
		case sv.Name == "":
			return fmt.Errorf("btrfs subvolume with empty name in partitioning customizations")
		case seen[sv.Name]:
			return fmt.Errorf("duplicate btrfs subvolume name %q in partitioning customizations", sv.Name)
		}
		seen[sv.Name] = true
		if err := validateMountpoint(sv.Mountpoint); err != nil {
			return fmt.Errorf("invalid btrfs subvolume customization: %w", err)
		}
		if mountpoints[sv.Mountpoint] {
			return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", sv.Mountpoint)
		}
		if slices.Contains(plainOnlyMountpoints, sv.Mountpoint) {
			return fmt.Errorf("invalid mountpoint %q for btrfs subvolume", sv.Mountpoint)
		}
		mountpoints[sv.Mountpoint] = true
	}
	return nil
}
// CheckDiskMountpointsPolicy checks if the mountpoints under a
// [DiskCustomization] (including LVM logical volumes and btrfs subvolumes)
// are allowed by the policy, collecting all violations into a single error.
func CheckDiskMountpointsPolicy(partitioning *DiskCustomization, mountpointAllowList *pathpolicy.PathPolicies) error {
	if partitioning == nil {
		return nil
	}
	var errs []error
	check := func(mp string) {
		if err := mountpointAllowList.Check(mp); err != nil {
			errs = append(errs, err)
		}
	}
	for _, part := range partitioning.Partitions {
		if part.Mountpoint != "" {
			check(part.Mountpoint)
		}
		for _, lv := range part.LogicalVolumes {
			if lv.Mountpoint != "" {
				check(lv.Mountpoint)
			}
		}
		for _, subvol := range part.Subvolumes {
			check(subvol.Mountpoint)
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("The following errors occurred while setting up custom mountpoints:\n%w", errors.Join(errs...))
	}
	return nil
}

View file

@ -1,78 +0,0 @@
package blueprint
import (
"encoding/json"
"errors"
"fmt"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/pathpolicy"
)
// FilesystemCustomization describes a mountpoint and the minimum size (in
// bytes) of the filesystem mounted there.
type FilesystemCustomization struct {
	Mountpoint string
	MinSize uint64
}
// filesystemCustomizationMarshaling mirrors FilesystemCustomization for
// (de)serialization, using datasizes.Size so minsize accepts human-readable
// strings (e.g. "2 GiB").
type filesystemCustomizationMarshaling struct {
	Mountpoint string `json:"mountpoint,omitempty" toml:"mountpoint,omitempty"`
	MinSize datasizes.Size `json:"minsize,omitempty" toml:"minsize,omitempty"`
}
// UnmarshalJSON decodes a FilesystemCustomization, converting the
// human-readable minsize representation into bytes.
func (fsc *FilesystemCustomization) UnmarshalJSON(data []byte) error {
	var aux filesystemCustomizationMarshaling
	if err := json.Unmarshal(data, &aux); err != nil {
		// When the mountpoint was decoded before the failure, include it in
		// the message (a bad minsize is the usual culprit).
		if aux.Mountpoint != "" {
			return fmt.Errorf("error decoding minsize value for mountpoint %q: %w", aux.Mountpoint, err)
		}
		return err
	}
	fsc.Mountpoint = aux.Mountpoint
	fsc.MinSize = aux.MinSize.Uint64()
	return nil
}
// UnmarshalTOML decodes a FilesystemCustomization from TOML by round-tripping
// the data through JSON, reusing the custom JSON unmarshaller.
func (fsc *FilesystemCustomization) UnmarshalTOML(data any) error {
	return unmarshalTOMLviaJSON(fsc, data)
}
// CheckMountpointsPolicy checks if the mountpoints are allowed by the policy,
// collecting all violations into a single error.
func CheckMountpointsPolicy(mountpoints []FilesystemCustomization, mountpointAllowList *pathpolicy.PathPolicies) error {
	var errs []error
	for _, mp := range mountpoints {
		if err := mountpointAllowList.Check(mp.Mountpoint); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("The following errors occurred while setting up custom mountpoints:\n%w", errors.Join(errs...))
}
// decodeSize takes an integer or string representing a data size (with a data
// suffix) and returns the uint64 representation.
// JSON numbers arrive as float64 and TOML integers as int64, hence the
// multiple numeric cases; negative values are rejected.
func decodeSize(size any) (uint64, error) {
	switch s := size.(type) {
	case string:
		return datasizes.Parse(s)
	case int64:
		if s < 0 {
			return 0, fmt.Errorf("cannot be negative")
		}
		return uint64(s), nil
	case float64:
		if s < 0 {
			return 0, fmt.Errorf("cannot be negative")
		}
		// TODO: emit warning of possible truncation?
		return uint64(s), nil
	case uint64:
		return s, nil
	default:
		return 0, fmt.Errorf("failed to convert value \"%v\" to number", size)
	}
}

View file

@ -1,498 +0,0 @@
package blueprint
import (
"encoding/json"
"fmt"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/pathpolicy"
)
// validModeRegexp matches a 3-digit octal mode with an optional leading zero
// (e.g. "644" or "0644"). Compiled once at package scope so validation does
// not recompile the pattern on every call.
var validModeRegexp = regexp.MustCompile(`^[0]{0,1}[0-7]{3}$`)

// validateModeString checks that the given string is a valid mode octal number
func validateModeString(mode string) error {
	if validModeRegexp.MatchString(mode) {
		return nil
	}
	return fmt.Errorf("invalid mode %s: must be an octal number", mode)
}
// DirectoryCustomization represents a directory to be created in the image
type DirectoryCustomization struct {
	// Absolute path to the directory
	Path string `json:"path" toml:"path"`
	// Owner of the directory specified as a string (user name), int64 (UID) or nil
	User interface{} `json:"user,omitempty" toml:"user,omitempty"`
	// Group of the directory specified as a string (group name), int64 (GID) or nil
	Group interface{} `json:"group,omitempty" toml:"group,omitempty"`
	// Permissions of the directory specified as an octal number
	Mode string `json:"mode,omitempty" toml:"mode,omitempty"`
	// EnsureParents ensures that all parent directories of the directory exist
	EnsureParents bool `json:"ensure_parents,omitempty" toml:"ensure_parents,omitempty"`
}
// Custom TOML unmarshalling for DirectoryCustomization with validation.
// Each field is type-checked explicitly because user/group may legitimately
// be either a string or an integer; the populated value is then validated by
// converting it to an fsnode.Directory before being committed to *d.
func (d *DirectoryCustomization) UnmarshalTOML(data interface{}) error {
	var dir DirectoryCustomization
	dataMap, _ := data.(map[string]interface{})
	switch path := dataMap["path"].(type) {
	case string:
		dir.Path = path
	default:
		return fmt.Errorf("UnmarshalTOML: path must be a string")
	}
	switch user := dataMap["user"].(type) {
	case string:
		dir.User = user
	case int64:
		dir.User = user
	case nil:
		break
	default:
		return fmt.Errorf("UnmarshalTOML: user must be a string or an integer, got %T", user)
	}
	switch group := dataMap["group"].(type) {
	case string:
		dir.Group = group
	case int64:
		dir.Group = group
	case nil:
		break
	default:
		return fmt.Errorf("UnmarshalTOML: group must be a string or an integer")
	}
	switch mode := dataMap["mode"].(type) {
	case string:
		dir.Mode = mode
	case nil:
		break
	default:
		return fmt.Errorf("UnmarshalTOML: mode must be a string")
	}
	switch ensureParents := dataMap["ensure_parents"].(type) {
	case bool:
		dir.EnsureParents = ensureParents
	case nil:
		break
	default:
		return fmt.Errorf("UnmarshalTOML: ensure_parents must be a bool")
	}
	// try converting to fsnode.Directory to validate all values
	_, err := dir.ToFsNodeDirectory()
	if err != nil {
		return err
	}
	*d = dir
	return nil
}
// UnmarshalJSON implements custom JSON unmarshalling for
// DirectoryCustomization with validation.
func (d *DirectoryCustomization) UnmarshalJSON(data []byte) error {
	// Decode into an alias type so this method is not invoked recursively.
	type directoryCustomization DirectoryCustomization
	var decoded directoryCustomization
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	dir := DirectoryCustomization(decoded)

	// encoding/json decodes all numbers as float64; normalize integral
	// UID/GID values to int64 and reject fractional ones.
	switch uid := dir.User.(type) {
	case float64:
		if uid != float64(int64(uid)) {
			return fmt.Errorf("invalid user %f: must be an integer", uid)
		}
		dir.User = int64(uid)
	}
	switch gid := dir.Group.(type) {
	case float64:
		if gid != float64(int64(gid)) {
			return fmt.Errorf("invalid group %f: must be an integer", gid)
		}
		dir.Group = int64(gid)
	}

	// Converting to fsnode.Directory validates all values together.
	if _, err := dir.ToFsNodeDirectory(); err != nil {
		return err
	}
	*d = dir
	return nil
}
// ToFsNodeDirectory converts the DirectoryCustomization to an
// fsnode.Directory, validating and parsing the octal Mode string on the way.
func (d DirectoryCustomization) ToFsNodeDirectory() (*fsnode.Directory, error) {
	var mode *os.FileMode
	if d.Mode != "" {
		if err := validateModeString(d.Mode); err != nil {
			return nil, err
		}
		modeNum, err := strconv.ParseUint(d.Mode, 8, 32)
		if err != nil {
			return nil, fmt.Errorf("invalid mode %s: %v", d.Mode, err)
		}
		mode = common.ToPtr(os.FileMode(modeNum))
	}
	return fsnode.NewDirectory(d.Path, mode, d.User, d.Group, d.EnsureParents)
}
// DirectoryCustomizationsToFsNodeDirectories converts a slice of
// DirectoryCustomizations to a slice of fsnode.Directories. All
// conversion errors are collected and reported together.
func DirectoryCustomizationsToFsNodeDirectories(dirs []DirectoryCustomization) ([]*fsnode.Directory, error) {
	if len(dirs) == 0 {
		return nil, nil
	}
	fsDirs := make([]*fsnode.Directory, 0, len(dirs))
	var errs []error
	for _, dir := range dirs {
		fsDir, err := dir.ToFsNodeDirectory()
		if err != nil {
			errs = append(errs, err)
		}
		fsDirs = append(fsDirs, fsDir)
	}
	if len(errs) > 0 {
		return nil, fmt.Errorf("invalid directory customizations: %v", errs)
	}
	return fsDirs, nil
}
// FileCustomization represents a file to be created in the image
type FileCustomization struct {
	// Absolute path to the file
	Path string `json:"path" toml:"path"`
	// Owner of the file specified as a string (user name), int64 (UID) or nil
	User interface{} `json:"user,omitempty" toml:"user,omitempty"`
	// Group of the file specified as a string (group name), int64 (GID) or nil
	Group interface{} `json:"group,omitempty" toml:"group,omitempty"`
	// Permissions of the file specified as an octal number string
	Mode string `json:"mode,omitempty" toml:"mode,omitempty"`
	// Data is the file content in plain text; mutually exclusive with URI
	Data string `json:"data,omitempty" toml:"data,omitempty"`
	// URI references the given URI, this makes the manifest alone
	// no-longer portable (but future offline manifest bundles
	// will fix that). It will still be reproducible as the
	// manifest will include all the hashes of the content so any
	// change will make the build fail.
	//
	// Initially only single files are supported, but this can be
	// expanded to dirs (which will just be added recursively) and
	// http{,s}.
	URI string `json:"uri,omitempty" toml:"uri,omitempty"`
}
// UnmarshalTOML implements custom TOML unmarshalling for
// FileCustomization, validating each field's type and then the combined
// values via conversion to fsnode.File.
func (f *FileCustomization) UnmarshalTOML(data interface{}) error {
	var file FileCustomization
	// The toml decoder hands us a pre-decoded value; a file customization
	// must be a table. The previous code ignored a failed assertion, which
	// surfaced as a misleading "path must be a string" error.
	dataMap, ok := data.(map[string]interface{})
	if !ok {
		return fmt.Errorf("UnmarshalTOML: file customization must be a table, got %T", data)
	}
	switch path := dataMap["path"].(type) {
	case string:
		file.Path = path
	default:
		return fmt.Errorf("UnmarshalTOML: path must be a string")
	}
	switch user := dataMap["user"].(type) {
	case string:
		file.User = user
	case int64:
		file.User = user
	case nil:
		break
	default:
		// include the offending type, consistent with DirectoryCustomization
		return fmt.Errorf("UnmarshalTOML: user must be a string or an integer, got %T", user)
	}
	switch group := dataMap["group"].(type) {
	case string:
		file.Group = group
	case int64:
		file.Group = group
	case nil:
		break
	default:
		return fmt.Errorf("UnmarshalTOML: group must be a string or an integer, got %T", group)
	}
	switch mode := dataMap["mode"].(type) {
	case string:
		file.Mode = mode
	case nil:
		break
	default:
		return fmt.Errorf("UnmarshalTOML: mode must be a string")
	}
	// named "contents" to avoid shadowing the function's data parameter
	switch contents := dataMap["data"].(type) {
	case string:
		file.Data = contents
	case nil:
		break
	default:
		return fmt.Errorf("UnmarshalTOML: data must be a string")
	}
	switch uri := dataMap["uri"].(type) {
	case string:
		file.URI = uri
	case nil:
		break
	default:
		return fmt.Errorf("UnmarshalTOML: uri must be a string")
	}
	// try converting to fsnode.File to validate all values
	if _, err := file.ToFsNodeFile(); err != nil {
		return err
	}
	*f = file
	return nil
}
// UnmarshalJSON implements custom JSON unmarshalling for
// FileCustomization with validation.
func (f *FileCustomization) UnmarshalJSON(data []byte) error {
	// Decode into an alias type so this method is not invoked recursively.
	type fileCustomization FileCustomization
	var decoded fileCustomization
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	file := FileCustomization(decoded)

	// encoding/json decodes all numbers as float64; normalize integral
	// UID/GID values to int64 and reject fractional ones.
	switch uid := file.User.(type) {
	case float64:
		if uid != float64(int64(uid)) {
			return fmt.Errorf("invalid user %f: must be an integer", uid)
		}
		file.User = int64(uid)
	}
	switch gid := file.Group.(type) {
	case float64:
		if gid != float64(int64(gid)) {
			return fmt.Errorf("invalid group %f: must be an integer", gid)
		}
		file.Group = int64(gid)
	}

	// Converting to fsnode.File validates all values together.
	if _, err := file.ToFsNodeFile(); err != nil {
		return err
	}
	*f = file
	return nil
}
// ToFsNodeFile converts the FileCustomization to an fsnode.File. Data
// and URI are mutually exclusive; URI-backed files are created via
// fsnode.NewFileForURI.
func (f FileCustomization) ToFsNodeFile() (*fsnode.File, error) {
	if f.Data != "" && f.URI != "" {
		return nil, fmt.Errorf("cannot specify both data %q and URI %q", f.Data, f.URI)
	}

	var mode *os.FileMode
	if f.Mode != "" {
		if err := validateModeString(f.Mode); err != nil {
			return nil, err
		}
		modeNum, err := strconv.ParseUint(f.Mode, 8, 32)
		if err != nil {
			return nil, fmt.Errorf("invalid mode %s: %v", f.Mode, err)
		}
		mode = common.ToPtr(os.FileMode(modeNum))
	}

	if f.URI != "" {
		return fsnode.NewFileForURI(f.Path, mode, f.User, f.Group, f.URI)
	}

	var data []byte
	if f.Data != "" {
		data = []byte(f.Data)
	}
	return fsnode.NewFile(f.Path, mode, f.User, f.Group, data)
}
// FileCustomizationsToFsNodeFiles converts a slice of FileCustomization
// to a slice of *fsnode.File. All conversion errors are collected and
// reported together.
func FileCustomizationsToFsNodeFiles(files []FileCustomization) ([]*fsnode.File, error) {
	if len(files) == 0 {
		return nil, nil
	}
	fsFiles := make([]*fsnode.File, 0, len(files))
	var errs []error
	for _, file := range files {
		fsFile, err := file.ToFsNodeFile()
		if err != nil {
			errs = append(errs, err)
		}
		fsFiles = append(fsFiles, fsFile)
	}
	if len(errs) > 0 {
		return nil, fmt.Errorf("invalid file customizations: %v", errs)
	}
	return fsFiles, nil
}
// ValidateDirFileCustomizations validates the given Directory and File customizations.
// If the customizations are invalid, an error is returned. Otherwise, nil is returned.
//
// It currently ensures that:
//   - No file path is a prefix of another file or directory path
//   - There are no duplicate file or directory paths in the customizations
func ValidateDirFileCustomizations(dirs []DirectoryCustomization, files []FileCustomization) error {
	// Map from path to its customization (Directory or File), plus the
	// flat list of all paths used for the parent/prefix check below.
	fsNodesMap := make(map[string]interface{}, len(dirs)+len(files))
	nodesPaths := make([]string, 0, len(dirs)+len(files))

	// First check for duplicate paths
	duplicatePaths := make([]string, 0)
	for _, dir := range dirs {
		if _, ok := fsNodesMap[dir.Path]; ok {
			duplicatePaths = append(duplicatePaths, dir.Path)
		}
		fsNodesMap[dir.Path] = dir
		nodesPaths = append(nodesPaths, dir.Path)
	}
	for _, file := range files {
		if _, ok := fsNodesMap[file.Path]; ok {
			duplicatePaths = append(duplicatePaths, file.Path)
		}
		fsNodesMap[file.Path] = file
		nodesPaths = append(nodesPaths, file.Path)
	}

	// There is no point in continuing if there are duplicate paths,
	// since the fsNodesMap will not be valid.
	if len(duplicatePaths) > 0 {
		return fmt.Errorf("duplicate files / directory customization paths: %v", duplicatePaths)
	}

	invalidFSNodes := make([]string, 0)
	checkedPaths := make(map[string]bool)

	// Sort the paths so that we always check the longest paths first. This
	// ensures that we don't check a parent path before we check the child
	// path. Reverse sort the slice based on directory depth.
	sort.Slice(nodesPaths, func(i, j int) bool {
		return strings.Count(nodesPaths[i], "/") > strings.Count(nodesPaths[j], "/")
	})

	for _, nodePath := range nodesPaths {
		// Skip paths that we have already checked
		if checkedPaths[nodePath] {
			continue
		}

		// Check all parent paths of the current path. If any of them have
		// already been checked, then we do not need to check them again.
		// This is because we always check the longest paths first. If a parent
		// path exists in the filesystem nodes map and it is a File,
		// then it is an error because it is a parent of a Directory or File.
		// Parent paths can be only Directories.
		parentPath := nodePath
		for {
			parentPath = path.Dir(parentPath)
			// "." is returned only when the path is relative and we reached
			// the root directory. This should never happen because File
			// and Directory customization paths are validated as part of
			// the unmarshalling process from JSON and TOML.
			if parentPath == "." {
				panic("filesystem node has relative path set.")
			}
			if parentPath == "/" {
				break
			}
			if checkedPaths[parentPath] {
				break
			}
			// If the node is not a Directory, then it is an error because
			// it is a parent of a Directory or File.
			if node, ok := fsNodesMap[parentPath]; ok {
				switch node.(type) {
				case DirectoryCustomization:
					// directories are valid parents; nothing to do
					break
				case FileCustomization:
					invalidFSNodes = append(invalidFSNodes, nodePath)
				default:
					panic(fmt.Sprintf("unexpected filesystem node customization type: %T", node))
				}
			}
			checkedPaths[parentPath] = true
		}
		checkedPaths[nodePath] = true
	}
	if len(invalidFSNodes) > 0 {
		return fmt.Errorf("the following filesystem nodes are parents of another node and are not directories: %s", invalidFSNodes)
	}
	return nil
}
// CheckFileCustomizationsPolicy checks if the given File customizations
// are allowed by the path policy. It returns an error listing all
// disallowed paths, or nil when everything passes.
func CheckFileCustomizationsPolicy(files []FileCustomization, pathPolicy *pathpolicy.PathPolicies) error {
	var invalidPaths []string
	for _, file := range files {
		if pathPolicy.Check(file.Path) != nil {
			invalidPaths = append(invalidPaths, file.Path)
		}
	}
	if len(invalidPaths) == 0 {
		return nil
	}
	return fmt.Errorf("the following custom files are not allowed: %+q", invalidPaths)
}
// CheckDirectoryCustomizationsPolicy checks if the given Directory
// customizations are allowed by the path policy. It returns an error
// listing all disallowed paths, or nil when everything passes.
func CheckDirectoryCustomizationsPolicy(dirs []DirectoryCustomization, pathPolicy *pathpolicy.PathPolicies) error {
	var invalidPaths []string
	for _, dir := range dirs {
		if pathPolicy.Check(dir.Path) != nil {
			invalidPaths = append(invalidPaths, dir.Path)
		}
	}
	if len(invalidPaths) == 0 {
		return nil
	}
	return fmt.Errorf("the following custom directories are not allowed: %+q", invalidPaths)
}

View file

@ -1,17 +0,0 @@
package blueprint
// InstallerCustomization holds customizations for the installer
// environment.
type InstallerCustomization struct {
	// Unattended installation toggle
	Unattended bool `json:"unattended,omitempty" toml:"unattended,omitempty"`
	// Passwordless sudo entries to configure
	SudoNopasswd []string `json:"sudo-nopasswd,omitempty" toml:"sudo-nopasswd,omitempty"`
	// User-provided kickstart contents
	Kickstart *Kickstart `json:"kickstart,omitempty" toml:"kickstart,omitempty"`
	// Anaconda modules to enable/disable
	Modules *AnacondaModules `json:"modules,omitempty" toml:"modules,omitempty"`
}

// Kickstart holds raw kickstart file contents.
type Kickstart struct {
	Contents string `json:"contents" toml:"contents"`
}

// AnacondaModules lists Anaconda module names to explicitly enable or
// disable.
type AnacondaModules struct {
	Enable  []string `json:"enable,omitempty" toml:"enable,omitempty"`
	Disable []string `json:"disable,omitempty" toml:"disable,omitempty"`
}

View file

@ -1,154 +0,0 @@
package blueprint
import (
"fmt"
"net/url"
"regexp"
"strings"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/rpmmd"
)
// RepositoryCustomization describes a custom repository to configure in
// the image. At least one of BaseURLs, Metalink or Mirrorlist must be
// set; the Id also serves as the default .repo file name.
type RepositoryCustomization struct {
	Id             string   `json:"id" toml:"id"`
	BaseURLs       []string `json:"baseurls,omitempty" toml:"baseurls,omitempty"`
	// GPG keys given either inline (PGP public key block) or as URLs
	GPGKeys        []string `json:"gpgkeys,omitempty" toml:"gpgkeys,omitempty"`
	Metalink       string   `json:"metalink,omitempty" toml:"metalink,omitempty"`
	Mirrorlist     string   `json:"mirrorlist,omitempty" toml:"mirrorlist,omitempty"`
	Name           string   `json:"name,omitempty" toml:"name,omitempty"`
	Priority       *int     `json:"priority,omitempty" toml:"priority,omitempty"`
	Enabled        *bool    `json:"enabled,omitempty" toml:"enabled,omitempty"`
	GPGCheck       *bool    `json:"gpgcheck,omitempty" toml:"gpgcheck,omitempty"`
	RepoGPGCheck   *bool    `json:"repo_gpgcheck,omitempty" toml:"repo_gpgcheck,omitempty"`
	SSLVerify      *bool    `json:"sslverify,omitempty" toml:"sslverify,omitempty"`
	ModuleHotfixes *bool    `json:"module_hotfixes,omitempty" toml:"module_hotfixes,omitempty"`
	// Filename overrides the default "<Id>.repo" file name
	Filename       string   `json:"filename,omitempty" toml:"filename,omitempty"`
	// When set the repository will be used during the depsolve of
	// payload repositories to install packages from it.
	InstallFrom bool `json:"install_from" toml:"install_from"`
}
// repoFilenameRegex matches a valid repo file name: up to 250 word, dot
// or dash characters followed by the ".repo" extension.
const repoFilenameRegex = "^[\\w.-]{1,250}\\.repo$"

// repoFilenamePattern is compiled once at package scope so repeated
// validations do not recompile the expression on every call.
var repoFilenamePattern = regexp.MustCompile(repoFilenameRegex)

// validateCustomRepository checks that a custom repository has an ID, a
// valid file name, at least one source (base URL, mirrorlist or
// metalink) and consistent GPG settings.
func validateCustomRepository(repo *RepositoryCustomization) error {
	if repo.Id == "" {
		return fmt.Errorf("Repository ID is required")
	}

	if !repoFilenamePattern.MatchString(repo.getFilename()) {
		return fmt.Errorf("Repository filename %q is invalid", repo.getFilename())
	}

	if len(repo.BaseURLs) == 0 && repo.Mirrorlist == "" && repo.Metalink == "" {
		return fmt.Errorf("Repository base URL, mirrorlist or metalink is required")
	}

	if repo.GPGCheck != nil && *repo.GPGCheck && len(repo.GPGKeys) == 0 {
		return fmt.Errorf("Repository gpg check is set to true but no gpg keys are provided")
	}

	for _, key := range repo.GPGKeys {
		// check for a valid GPG key prefix & contains GPG suffix
		keyIsGPGKey := strings.HasPrefix(key, "-----BEGIN PGP PUBLIC KEY BLOCK-----") && strings.Contains(key, "-----END PGP PUBLIC KEY BLOCK-----")

		// check for a valid URL
		keyIsURL := false
		_, err := url.ParseRequestURI(key)
		if err == nil {
			keyIsURL = true
		}

		if !keyIsGPGKey && !keyIsURL {
			return fmt.Errorf("Repository gpg key is not a valid URL or a valid gpg key")
		}
	}

	return nil
}
// getFilename returns the repo file name: the explicit Filename when
// set (with ".repo" appended if missing), otherwise "<Id>.repo".
func (rc *RepositoryCustomization) getFilename() string {
	switch {
	case rc.Filename == "":
		return rc.Id + ".repo"
	case !strings.HasSuffix(rc.Filename, ".repo"):
		return rc.Filename + ".repo"
	default:
		return rc.Filename
	}
}
// RepoCustomizationsInstallFromOnly returns the repo configs for only
// those custom repositories that have InstallFrom set.
func RepoCustomizationsInstallFromOnly(repos []RepositoryCustomization) []rpmmd.RepoConfig {
	var res []rpmmd.RepoConfig
	for _, repo := range repos {
		if repo.InstallFrom {
			res = append(res, repo.customRepoToRepoConfig())
		}
	}
	return res
}
// RepoCustomizationsToRepoConfigAndGPGKeyFiles converts the custom
// repositories to repo configs grouped by .repo file name. Inline GPG
// keys are turned into files under /etc/pki/rpm-gpg/ and the configs
// are rewritten to reference them by file:// URL.
func RepoCustomizationsToRepoConfigAndGPGKeyFiles(repos []RepositoryCustomization) (map[string][]rpmmd.RepoConfig, []*fsnode.File, error) {
	if len(repos) == 0 {
		return nil, nil, nil
	}

	repoMap := make(map[string][]rpmmd.RepoConfig, len(repos))
	var gpgKeyFiles []*fsnode.File
	for _, repo := range repos {
		convertedRepo := repo.customRepoToRepoConfig()

		for idx, gpgkey := range repo.GPGKeys {
			// Keys that do not parse as URLs are inline key blocks:
			// write each one to a file and reference it by path.
			if _, err := url.ParseRequestURI(gpgkey); err != nil {
				keyPath := fmt.Sprintf("/etc/pki/rpm-gpg/RPM-GPG-KEY-%s-%d", repo.Id, idx)
				convertedRepo.GPGKeys[idx] = fmt.Sprintf("file://%s", keyPath)
				keyFile, err := fsnode.NewFile(keyPath, nil, nil, nil, []byte(gpgkey))
				if err != nil {
					return nil, nil, err
				}
				gpgKeyFiles = append(gpgKeyFiles, keyFile)
			}
		}

		filename := repo.getFilename()
		repoMap[filename] = append(repoMap[filename], convertedRepo)
	}

	return repoMap, gpgKeyFiles, nil
}
// customRepoToRepoConfig converts the customization to an
// rpmmd.RepoConfig, copying the URL and key slices so the config does
// not alias the customization's backing arrays.
func (repo RepositoryCustomization) customRepoToRepoConfig() rpmmd.RepoConfig {
	urls := make([]string, len(repo.BaseURLs))
	copy(urls, repo.BaseURLs)

	keys := make([]string, len(repo.GPGKeys))
	copy(keys, repo.GPGKeys)

	rc := rpmmd.RepoConfig{
		Id:             repo.Id,
		Name:           repo.Name,
		BaseURLs:       urls,
		GPGKeys:        keys,
		Metalink:       repo.Metalink,
		MirrorList:     repo.Mirrorlist,
		CheckGPG:       repo.GPGCheck,
		CheckRepoGPG:   repo.RepoGPGCheck,
		Priority:       repo.Priority,
		ModuleHotfixes: repo.ModuleHotfixes,
		Enabled:        repo.Enabled,
	}

	// RepoConfig expresses SSL verification inverted, as IgnoreSSL.
	if v := repo.SSLVerify; v != nil {
		rc.IgnoreSSL = common.ToPtr(!*v)
	}

	return rc
}

View file

@ -1,36 +0,0 @@
package blueprint
// SubManRHSMConfig is the Subscription Manager [rhsm] section
// configuration.
type SubManRHSMConfig struct {
	ManageRepos          *bool `json:"manage_repos,omitempty" toml:"manage_repos,omitempty"`
	AutoEnableYumPlugins *bool `json:"auto_enable_yum_plugins,omitempty" toml:"auto_enable_yum_plugins,omitempty"`
}

// SubManRHSMCertdConfig is the Subscription Manager [rhsmcertd] section
// configuration.
type SubManRHSMCertdConfig struct {
	AutoRegistration *bool `json:"auto_registration,omitempty" toml:"auto_registration,omitempty"`
}

// SubManConfig is the Subscription Manager 'rhsm.conf' configuration,
// grouping the [rhsm] and [rhsmcertd] sections.
type SubManConfig struct {
	RHSMConfig      *SubManRHSMConfig      `json:"rhsm,omitempty" toml:"rhsm,omitempty"`
	RHSMCertdConfig *SubManRHSMCertdConfig `json:"rhsmcertd,omitempty" toml:"rhsmcertd,omitempty"`
}

// DNFPluginConfig toggles a single DNF plugin.
type DNFPluginConfig struct {
	Enabled *bool `json:"enabled,omitempty" toml:"enabled,omitempty"`
}

// SubManDNFPluginsConfig configures the subscription-related DNF
// plugins.
type SubManDNFPluginsConfig struct {
	ProductID           *DNFPluginConfig `json:"product_id,omitempty" toml:"product_id,omitempty"`
	SubscriptionManager *DNFPluginConfig `json:"subscription_manager,omitempty" toml:"subscription_manager,omitempty"`
}

// RHSMConfig combines the DNF plugin and Subscription Manager
// configuration.
type RHSMConfig struct {
	DNFPlugins          *SubManDNFPluginsConfig `json:"dnf_plugins,omitempty" toml:"dnf_plugins,omitempty"`
	SubscriptionManager *SubManConfig           `json:"subscription_manager,omitempty" toml:"subscription_manager,omitempty"`
}

// RHSMCustomization is the top-level RHSM customization wrapper.
type RHSMCustomization struct {
	Config *RHSMConfig `json:"config,omitempty" toml:"config,omitempty"`
}

View file

@ -1,10 +0,0 @@
package blueprint
// RPMImportKeys configures keys to import for RPM.
type RPMImportKeys struct {
	// File paths in the image to import keys from
	Files []string `json:"files,omitempty" toml:"files,omitempty"`
}

// RPMCustomization groups RPM-related customizations.
type RPMCustomization struct {
	ImportKeys *RPMImportKeys `json:"import_keys,omitempty" toml:"import_keys,omitempty"`
}

View file

@ -1,24 +0,0 @@
package blueprint
import (
"encoding/json"
"fmt"
)
// XXX: move to interal/common ?
func unmarshalTOMLviaJSON(u json.Unmarshaler, data any) error {
// This is the most efficient way to reuse code when unmarshaling
// structs in toml, it leaks json errors which is a bit sad but
// because the toml unmarshaler gives us not "[]byte" but an
// already pre-processed "any" we cannot just unmarshal into our
// "fooMarshaling" struct and reuse the result so we resort to
// this workaround (but toml will go away long term anyway).
dataJSON, err := json.Marshal(data)
if err != nil {
return fmt.Errorf("error unmarshaling TOML data %v: %w", data, err)
}
if err := u.UnmarshalJSON(dataJSON); err != nil {
return fmt.Errorf("error decoding TOML %v: %w", data, err)
}
return nil
}

View file

@ -1,6 +1,6 @@
package fdo package fdo
import "github.com/osbuild/images/pkg/blueprint" import "github.com/osbuild/blueprint/pkg/blueprint"
type Options struct { type Options struct {
ManufacturingServerURL string ManufacturingServerURL string

View file

@ -4,7 +4,7 @@ import (
"encoding/base64" "encoding/base64"
"errors" "errors"
"github.com/osbuild/images/pkg/blueprint" "github.com/osbuild/blueprint/pkg/blueprint"
) )
type FirstBootOptions struct { type FirstBootOptions struct {

View file

@ -3,7 +3,7 @@ package kickstart
import ( import (
"fmt" "fmt"
"github.com/osbuild/images/pkg/blueprint" "github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/customizations/users" "github.com/osbuild/images/pkg/customizations/users"
) )

View file

@ -5,7 +5,7 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/osbuild/images/pkg/blueprint" "github.com/osbuild/blueprint/pkg/blueprint"
) )
type Profile string type Profile string

View file

@ -1,8 +1,8 @@
package subscription package subscription
import ( import (
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/internal/common" "github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/blueprint"
) )
// The ImageOptions specify subscription-specific image options // The ImageOptions specify subscription-specific image options

View file

@ -1,6 +1,6 @@
package users package users
import "github.com/osbuild/images/pkg/blueprint" import "github.com/osbuild/blueprint/pkg/blueprint"
type User struct { type User struct {
Name string Name string

View file

@ -7,9 +7,9 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/internal/common" "github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch" "github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/datasizes" "github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/disk/partition" "github.com/osbuild/images/pkg/disk/partition"
"github.com/osbuild/images/pkg/platform" "github.com/osbuild/images/pkg/platform"

View file

@ -3,7 +3,7 @@ package distro
import ( import (
"math/rand" "math/rand"
"github.com/osbuild/images/pkg/blueprint" "github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/customizations/subscription" "github.com/osbuild/images/pkg/customizations/subscription"
"github.com/osbuild/images/pkg/disk" "github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/disk/partition" "github.com/osbuild/images/pkg/disk/partition"

View file

@ -5,8 +5,8 @@ import (
"math/rand" "math/rand"
"strings" "strings"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/internal/workload" "github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/container" "github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/customizations/anaconda" "github.com/osbuild/images/pkg/customizations/anaconda"
"github.com/osbuild/images/pkg/customizations/bootc" "github.com/osbuild/images/pkg/customizations/bootc"

View file

@ -6,9 +6,9 @@ import (
"math/rand" "math/rand"
"slices" "slices"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/internal/common" "github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/workload" "github.com/osbuild/images/internal/workload"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/container" "github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/datasizes" "github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/disk" "github.com/osbuild/images/pkg/disk"

View file

@ -5,9 +5,9 @@ import (
"slices" "slices"
"strings" "strings"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/internal/common" "github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch" "github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/customizations/oscap" "github.com/osbuild/images/pkg/customizations/oscap"
"github.com/osbuild/images/pkg/distro" "github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/policies" "github.com/osbuild/images/pkg/policies"

View file

@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"sort" "sort"
"github.com/osbuild/images/pkg/blueprint" "github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/disk" "github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro" "github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/manifest" "github.com/osbuild/images/pkg/manifest"

5
vendor/modules.txt vendored
View file

@ -957,11 +957,11 @@ github.com/oracle/oci-go-sdk/v54/identity
github.com/oracle/oci-go-sdk/v54/objectstorage github.com/oracle/oci-go-sdk/v54/objectstorage
github.com/oracle/oci-go-sdk/v54/objectstorage/transfer github.com/oracle/oci-go-sdk/v54/objectstorage/transfer
github.com/oracle/oci-go-sdk/v54/workrequests github.com/oracle/oci-go-sdk/v54/workrequests
# github.com/osbuild/blueprint v1.11.0 # github.com/osbuild/blueprint v1.12.0
## explicit; go 1.23.9 ## explicit; go 1.23.9
github.com/osbuild/blueprint/internal/common github.com/osbuild/blueprint/internal/common
github.com/osbuild/blueprint/pkg/blueprint github.com/osbuild/blueprint/pkg/blueprint
# github.com/osbuild/images v0.171.0 # github.com/osbuild/images v0.172.0
## explicit; go 1.23.9 ## explicit; go 1.23.9
github.com/osbuild/images/data/dependencies github.com/osbuild/images/data/dependencies
github.com/osbuild/images/data/distrodefs github.com/osbuild/images/data/distrodefs
@ -971,7 +971,6 @@ github.com/osbuild/images/internal/environment
github.com/osbuild/images/internal/workload github.com/osbuild/images/internal/workload
github.com/osbuild/images/pkg/arch github.com/osbuild/images/pkg/arch
github.com/osbuild/images/pkg/artifact github.com/osbuild/images/pkg/artifact
github.com/osbuild/images/pkg/blueprint
github.com/osbuild/images/pkg/cert github.com/osbuild/images/pkg/cert
github.com/osbuild/images/pkg/cloud/azure github.com/osbuild/images/pkg/cloud/azure
github.com/osbuild/images/pkg/container github.com/osbuild/images/pkg/container