Delete internal/blueprint/ and import from osbuild/blueprint

Import osbuild/blueprint v1.6.0
Achilleas Koutsou 2025-03-25 17:15:30 +01:00
parent 362712a71d
commit cf956ff5a6
93 changed files with 2300 additions and 4163 deletions


@@ -19,7 +19,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"github.com/osbuild/osbuild-composer/internal/blueprint"
+	"github.com/osbuild/blueprint/pkg/blueprint"
 	"github.com/osbuild/osbuild-composer/internal/weldr"
 )
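This hunk is representative of most of the 93 changed files: only the import path changes, while the package identifier remains blueprint, so call sites compile unchanged. A minimal, hypothetical consumer sketch (illustrative, not taken from the diff):

package consumer // hypothetical package, for illustration only

import (
	// was: "github.com/osbuild/osbuild-composer/internal/blueprint"
	"github.com/osbuild/blueprint/pkg/blueprint"
)

// newBlueprint shows that code using the blueprint types keeps compiling
// unchanged after the import swap.
func newBlueprint(name string) blueprint.Blueprint {
	return blueprint.Blueprint{Name: name, Version: "0.0.1"}
}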


@@ -9,6 +9,7 @@ import (
 	"github.com/google/uuid"
+	"github.com/osbuild/blueprint/pkg/blueprint"
 	"github.com/osbuild/images/pkg/arch"
 	"github.com/osbuild/images/pkg/distro"
 	"github.com/osbuild/images/pkg/distro/fedora"
@@ -18,7 +19,6 @@ import (
 	"github.com/osbuild/images/pkg/reporegistry"
 	"github.com/osbuild/images/pkg/rpmmd"
 	"github.com/osbuild/images/pkg/sbom"
-	"github.com/osbuild/osbuild-composer/internal/blueprint"
 	"github.com/osbuild/osbuild-composer/internal/store"
 	"github.com/osbuild/osbuild-composer/internal/target"
 )

go.mod (7 changed lines)

@@ -15,7 +15,7 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
 	github.com/Azure/go-autorest/autorest v0.11.30
 	github.com/Azure/go-autorest/autorest/azure/auth v0.5.13
-	github.com/BurntSushi/toml v1.4.0
+	github.com/BurntSushi/toml v1.5.1-0.20250403130103-3d3abc24416a
 	github.com/aws/aws-sdk-go-v2 v1.32.7
 	github.com/aws/aws-sdk-go-v2/config v1.28.7
 	github.com/aws/aws-sdk-go-v2/credentials v1.17.48
@@ -25,7 +25,6 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.0
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0
 	github.com/aws/smithy-go v1.22.1
-	github.com/coreos/go-semver v0.3.1
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
 	github.com/getkin/kin-openapi v0.131.0
 	github.com/getsentry/sentry-go v0.28.1
@@ -45,7 +44,8 @@ require (
 	github.com/oapi-codegen/runtime v1.1.1
 	github.com/openshift-online/ocm-sdk-go v0.1.438
 	github.com/oracle/oci-go-sdk/v54 v54.0.0
-	github.com/osbuild/images v0.128.0
+	github.com/osbuild/blueprint v1.6.0
+	github.com/osbuild/images v0.131.0
 	github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d
 	github.com/osbuild/pulp-client v0.1.0
 	github.com/prometheus/client_golang v1.20.5
@@ -118,6 +118,7 @@ require (
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/containers/ocicrypt v1.2.1 // indirect
 	github.com/containers/storage v1.57.1 // indirect
+	github.com/coreos/go-semver v0.3.1 // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
 	github.com/cyphar/filepath-securejoin v0.3.6 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect

go.sum (10 changed lines)

@@ -83,8 +83,8 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mo
 github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
 github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
-github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.1-0.20250403130103-3d3abc24416a h1:pRZNZLyCUkX30uKttIh5ihOtsqCgugM+a4WTxUULiMw=
+github.com/BurntSushi/toml v1.5.1-0.20250403130103-3d3abc24416a/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 h1:o90wcURuxekmXrtxmYWTyNla0+ZEHhud6DI1ZTxd1vI=
@@ -575,8 +575,10 @@ github.com/openshift-online/ocm-sdk-go v0.1.438 h1:tsLCCUzbLCTL4RZG02y9RuopmGCXp
 github.com/openshift-online/ocm-sdk-go v0.1.438/go.mod h1:CiAu2jwl3ITKOxkeV0Qnhzv4gs35AmpIzVABQLtcI2Y=
 github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
 github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
-github.com/osbuild/images v0.128.0 h1:zYq/0Bd3eeBfBtBRJiJ/WklmsgqQrTQmm+XnmiUDnI0=
-github.com/osbuild/images v0.128.0/go.mod h1:Ag87vmyxooiPQBJEDILbypG8/SRIear75YA78NwLix0=
+github.com/osbuild/blueprint v1.6.0 h1:HUV1w/dMxpgqOgVtHhfTZE3zRmWQkuW/qTfx9smKImI=
+github.com/osbuild/blueprint v1.6.0/go.mod h1:0d3dlY8aSJ6jM6NHwBmJFF1VIySsp/GsDpcJQ0yrOqM=
+github.com/osbuild/images v0.131.0 h1:UbAS2OtJa4iKJYIBE+TRhODGsA4N5hxZLHnevnImLmQ=
+github.com/osbuild/images v0.131.0/go.mod h1:Ag87vmyxooiPQBJEDILbypG8/SRIear75YA78NwLix0=
 github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d h1:r9BFPDv0uuA9k1947Jybcxs36c/pTywWS1gjeizvtcQ=
 github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d/go.mod h1:zR1iu/hOuf+OQNJlk70tju9IqzzM4ycq0ectkFBm94U=
 github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8=

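The paired go.mod and go.sum hunks above are consistent with bumping the module graph for the new dependency, roughly with the following commands (an assumption; the commit does not record the exact invocation):

go get github.com/osbuild/blueprint@v1.6.0
go get github.com/osbuild/images@v0.131.0
go get github.com/BurntSushi/toml@3d3abc24416a
go mod tidy

The tidy step would also demote github.com/coreos/go-semver to an indirect requirement, presumably because the deleted internal/blueprint code was its last direct user.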

@@ -1,636 +0,0 @@
package blueprint
import (
"testing"
iblueprint "github.com/osbuild/images/pkg/blueprint"
"github.com/stretchr/testify/assert"
"github.com/osbuild/osbuild-composer/internal/common"
)
func TestConvert(t *testing.T) {
tests := []struct {
name string
src Blueprint
expected iblueprint.Blueprint
}{
{
name: "empty",
src: Blueprint{},
expected: iblueprint.Blueprint{},
},
{
name: "everything",
src: Blueprint{
Name: "name",
Description: "desc",
Version: "version",
Packages: []Package{
{
Name: "package-name",
Version: "package-version",
},
},
Modules: []Package{
{
Name: "module-name",
Version: "module-version",
},
},
Groups: []Group{
{
Name: "group-name",
},
},
Containers: []Container{
{
Source: "source",
Name: "name",
TLSVerify: common.ToPtr(true),
},
},
Customizations: &Customizations{
Hostname: common.ToPtr("hostname"),
Kernel: &KernelCustomization{
Name: "kernel-name",
Append: "kernel-append",
},
SSHKey: []SSHKeyCustomization{
{
User: "ssh-user",
Key: "ssh-key",
},
},
User: []UserCustomization{
{
Name: "user-name",
Description: common.ToPtr("user-desc"),
Password: common.ToPtr("user-password"),
Key: common.ToPtr("user-key"),
Home: common.ToPtr("/home/user"),
Shell: common.ToPtr("fish"),
Groups: []string{"wheel"},
UID: common.ToPtr(42),
GID: common.ToPtr(2023),
},
},
Group: []GroupCustomization{
{
Name: "group",
GID: common.ToPtr(7),
},
},
Timezone: &TimezoneCustomization{
Timezone: common.ToPtr("timezone"),
NTPServers: []string{"ntp-server"},
},
Locale: &LocaleCustomization{
Languages: []string{"language"},
Keyboard: common.ToPtr("keyboard"),
},
Firewall: &FirewallCustomization{
Ports: []string{"80"},
Services: &FirewallServicesCustomization{
Enabled: []string{"ssh"},
Disabled: []string{"ntp"},
},
Zones: []FirewallZoneCustomization{
{
Name: common.ToPtr("name"),
Sources: []string{"src"},
},
},
},
Services: &ServicesCustomization{
Enabled: []string{"osbuild-composer.service"},
Disabled: []string{"lorax-composer.service"},
},
Filesystem: []FilesystemCustomization{
{
Mountpoint: "/usr",
MinSize: 1024,
},
},
Disk: &DiskCustomization{
MinSize: 10240,
Type: "gpt",
Partitions: []PartitionCustomization{
{
// this partition is invalid, since only one of
// btrfs, vg, or filesystem should be set, but
// the converter copies everything
// unconditionally, so let's test the full
// thing
Type: "plain",
MinSize: 1024,
PartType: "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
BtrfsVolumeCustomization: BtrfsVolumeCustomization{
Subvolumes: []BtrfsSubvolumeCustomization{
{
Name: "subvol1",
Mountpoint: "/subvol1",
},
{
Name: "subvol2",
Mountpoint: "/subvol2",
},
},
},
VGCustomization: VGCustomization{
Name: "vg1",
LogicalVolumes: []LVCustomization{
{
Name: "vg1lv1",
MinSize: 0,
FilesystemTypedCustomization: FilesystemTypedCustomization{
Mountpoint: "/one",
Label: "one",
FSType: "xfs",
},
},
{
Name: "vg1lv2",
MinSize: 0,
FilesystemTypedCustomization: FilesystemTypedCustomization{
Mountpoint: "/two",
Label: "two",
FSType: "ext4",
},
},
},
},
FilesystemTypedCustomization: FilesystemTypedCustomization{
Mountpoint: "/root",
Label: "roothome",
FSType: "xfs",
},
},
{
Type: "plain",
MinSize: 1024,
FilesystemTypedCustomization: FilesystemTypedCustomization{
Mountpoint: "/root",
Label: "roothome",
FSType: "xfs",
},
},
{
Type: "lvm",
MinSize: 1024,
VGCustomization: VGCustomization{
Name: "vg1",
LogicalVolumes: []LVCustomization{
{
Name: "vg1lv1",
MinSize: 0,
FilesystemTypedCustomization: FilesystemTypedCustomization{
Mountpoint: "/one",
Label: "one",
FSType: "xfs",
},
},
{
Name: "vg1lv2",
MinSize: 0,
FilesystemTypedCustomization: FilesystemTypedCustomization{
Mountpoint: "/two",
Label: "two",
FSType: "ext4",
},
},
},
},
},
{
Type: "btrfs",
MinSize: 1024,
BtrfsVolumeCustomization: BtrfsVolumeCustomization{
Subvolumes: []BtrfsSubvolumeCustomization{
{
Name: "subvol1",
Mountpoint: "/subvol1",
},
{
Name: "subvol2",
Mountpoint: "/subvol2",
},
},
},
},
},
},
InstallationDevice: "/dev/sda",
FDO: &FDOCustomization{
ManufacturingServerURL: "http://manufacturing.fdo",
DiunPubKeyInsecure: "insecure-pubkey",
DiunPubKeyHash: "hash-pubkey",
DiunPubKeyRootCerts: "root-certs",
DiMfgStringTypeMacIface: "iface",
},
OpenSCAP: &OpenSCAPCustomization{
DataStream: "stream",
ProfileID: "profile",
Tailoring: &OpenSCAPTailoringCustomizations{
Selected: []string{"cloth"},
Unselected: []string{"leather"},
},
},
Ignition: &IgnitionCustomization{
Embedded: &EmbeddedIgnitionCustomization{
Config: "ignition-config",
},
FirstBoot: &FirstBootIgnitionCustomization{
ProvisioningURL: "http://provisioning.edge",
},
},
Directories: []DirectoryCustomization{
{
Path: "/dir",
User: common.ToPtr("dir-user"),
Group: common.ToPtr("dir-group"),
Mode: "0777",
EnsureParents: true,
},
},
Files: []FileCustomization{
{
Path: "/file",
User: common.ToPtr("file-user`"),
Group: common.ToPtr("file-group"),
Mode: "0755",
Data: "literal easter egg",
},
},
Repositories: []RepositoryCustomization{
{
Id: "repoid",
BaseURLs: []string{"http://baseurl"},
GPGKeys: []string{"repo-gpgkey"},
Metalink: "http://metalink",
Mirrorlist: "http://mirrorlist",
Name: "reponame",
Priority: common.ToPtr(987),
Enabled: common.ToPtr(true),
GPGCheck: common.ToPtr(true),
RepoGPGCheck: common.ToPtr(true),
SSLVerify: common.ToPtr(true),
Filename: "repofile",
},
},
Installer: &InstallerCustomization{
Unattended: true,
SudoNopasswd: []string{"%group", "user"},
Kickstart: &Kickstart{
Contents: "# test kickstart addition created by osbuild-composer",
},
Modules: &AnacondaModules{
Enable: []string{
"org.fedoraproject.Anaconda.Modules.Localization",
"org.fedoraproject.Anaconda.Modules.Users",
},
Disable: []string{
"org.fedoraproject.Anaconda.Modules.Network",
},
},
},
RPM: &RPMCustomization{
ImportKeys: &RPMImportKeys{
Files: []string{"/root/gpg-key"},
},
},
RHSM: &RHSMCustomization{
Config: &RHSMConfig{
DNFPlugins: &SubManDNFPluginsConfig{
ProductID: &DNFPluginConfig{
Enabled: common.ToPtr(true),
},
SubscriptionManager: &DNFPluginConfig{
Enabled: common.ToPtr(false),
},
},
SubscriptionManager: &SubManConfig{
RHSMConfig: &SubManRHSMConfig{
ManageRepos: common.ToPtr(true),
},
RHSMCertdConfig: &SubManRHSMCertdConfig{
AutoRegistration: common.ToPtr(false),
},
},
},
},
CACerts: &CACustomization{
PEMCerts: []string{"pem-cert"},
},
},
Distro: "distro",
},
expected: iblueprint.Blueprint{
Name: "name",
Description: "desc",
Version: "version",
Packages: []iblueprint.Package{
{
Name: "package-name",
Version: "package-version",
},
},
Modules: []iblueprint.Package{
{
Name: "module-name",
Version: "module-version",
},
},
Groups: []iblueprint.Group{
{
Name: "group-name",
},
},
Containers: []iblueprint.Container{
{
Source: "source",
Name: "name",
TLSVerify: common.ToPtr(true),
},
},
Customizations: &iblueprint.Customizations{
Hostname: common.ToPtr("hostname"),
Kernel: &iblueprint.KernelCustomization{
Name: "kernel-name",
Append: "kernel-append",
},
User: []iblueprint.UserCustomization{
{
Name: "ssh-user", // converted from sshkey
Key: common.ToPtr("ssh-key"),
},
{
Name: "user-name",
Description: common.ToPtr("user-desc"),
Password: common.ToPtr("user-password"),
Key: common.ToPtr("user-key"),
Home: common.ToPtr("/home/user"),
Shell: common.ToPtr("fish"),
Groups: []string{"wheel"},
UID: common.ToPtr(42),
GID: common.ToPtr(2023),
},
},
Group: []iblueprint.GroupCustomization{
{
Name: "group",
GID: common.ToPtr(7),
},
},
Timezone: &iblueprint.TimezoneCustomization{
Timezone: common.ToPtr("timezone"),
NTPServers: []string{"ntp-server"},
},
Locale: &iblueprint.LocaleCustomization{
Languages: []string{"language"},
Keyboard: common.ToPtr("keyboard"),
},
Firewall: &iblueprint.FirewallCustomization{
Ports: []string{"80"},
Services: &iblueprint.FirewallServicesCustomization{
Enabled: []string{"ssh"},
Disabled: []string{"ntp"},
},
Zones: []iblueprint.FirewallZoneCustomization{
{
Name: common.ToPtr("name"),
Sources: []string{"src"},
},
},
},
Services: &iblueprint.ServicesCustomization{
Enabled: []string{"osbuild-composer.service"},
Disabled: []string{"lorax-composer.service"},
},
Filesystem: []iblueprint.FilesystemCustomization{
{
Mountpoint: "/usr",
MinSize: 1024,
},
},
Disk: &iblueprint.DiskCustomization{
MinSize: 10240,
Type: "gpt",
Partitions: []iblueprint.PartitionCustomization{
{
// this partition is invalid, since only one of
// btrfs, vg, or filesystem should be set, but
// the converter copies everything
// unconditionally, so let's test the full
// thing
Type: "plain",
MinSize: 1024,
PartType: "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
BtrfsVolumeCustomization: iblueprint.BtrfsVolumeCustomization{
Subvolumes: []iblueprint.BtrfsSubvolumeCustomization{
{
Name: "subvol1",
Mountpoint: "/subvol1",
},
{
Name: "subvol2",
Mountpoint: "/subvol2",
},
},
},
VGCustomization: iblueprint.VGCustomization{
Name: "vg1",
LogicalVolumes: []iblueprint.LVCustomization{
{
Name: "vg1lv1",
MinSize: 0,
FilesystemTypedCustomization: iblueprint.FilesystemTypedCustomization{
Mountpoint: "/one",
Label: "one",
FSType: "xfs",
},
},
{
Name: "vg1lv2",
MinSize: 0,
FilesystemTypedCustomization: iblueprint.FilesystemTypedCustomization{
Mountpoint: "/two",
Label: "two",
FSType: "ext4",
},
},
},
},
FilesystemTypedCustomization: iblueprint.FilesystemTypedCustomization{
Mountpoint: "/root",
Label: "roothome",
FSType: "xfs",
},
},
{
Type: "plain",
MinSize: 1024,
FilesystemTypedCustomization: iblueprint.FilesystemTypedCustomization{
Mountpoint: "/root",
Label: "roothome",
FSType: "xfs",
},
},
{
Type: "lvm",
MinSize: 1024,
VGCustomization: iblueprint.VGCustomization{
Name: "vg1",
LogicalVolumes: []iblueprint.LVCustomization{
{
Name: "vg1lv1",
MinSize: 0,
FilesystemTypedCustomization: iblueprint.FilesystemTypedCustomization{
Mountpoint: "/one",
Label: "one",
FSType: "xfs",
},
},
{
Name: "vg1lv2",
MinSize: 0,
FilesystemTypedCustomization: iblueprint.FilesystemTypedCustomization{
Mountpoint: "/two",
Label: "two",
FSType: "ext4",
},
},
},
},
},
{
Type: "btrfs",
MinSize: 1024,
BtrfsVolumeCustomization: iblueprint.BtrfsVolumeCustomization{
Subvolumes: []iblueprint.BtrfsSubvolumeCustomization{
{
Name: "subvol1",
Mountpoint: "/subvol1",
},
{
Name: "subvol2",
Mountpoint: "/subvol2",
},
},
},
},
},
},
InstallationDevice: "/dev/sda",
FDO: &iblueprint.FDOCustomization{
ManufacturingServerURL: "http://manufacturing.fdo",
DiunPubKeyInsecure: "insecure-pubkey",
DiunPubKeyHash: "hash-pubkey",
DiunPubKeyRootCerts: "root-certs",
DiMfgStringTypeMacIface: "iface",
},
OpenSCAP: &iblueprint.OpenSCAPCustomization{
DataStream: "stream",
ProfileID: "profile",
Tailoring: &iblueprint.OpenSCAPTailoringCustomizations{
Selected: []string{"cloth"},
Unselected: []string{"leather"},
},
},
Ignition: &iblueprint.IgnitionCustomization{
Embedded: &iblueprint.EmbeddedIgnitionCustomization{
Config: "ignition-config",
},
FirstBoot: &iblueprint.FirstBootIgnitionCustomization{
ProvisioningURL: "http://provisioning.edge",
},
},
Directories: []iblueprint.DirectoryCustomization{
{
Path: "/dir",
User: common.ToPtr("dir-user"),
Group: common.ToPtr("dir-group"),
Mode: "0777",
EnsureParents: true,
},
},
Files: []iblueprint.FileCustomization{
{
Path: "/file",
User: common.ToPtr("file-user`"),
Group: common.ToPtr("file-group"),
Mode: "0755",
Data: "literal easter egg",
},
},
Repositories: []iblueprint.RepositoryCustomization{
{
Id: "repoid",
BaseURLs: []string{"http://baseurl"},
GPGKeys: []string{"repo-gpgkey"},
Metalink: "http://metalink",
Mirrorlist: "http://mirrorlist",
Name: "reponame",
Priority: common.ToPtr(987),
Enabled: common.ToPtr(true),
GPGCheck: common.ToPtr(true),
RepoGPGCheck: common.ToPtr(true),
SSLVerify: common.ToPtr(true),
Filename: "repofile",
},
},
Installer: &iblueprint.InstallerCustomization{
Unattended: true,
SudoNopasswd: []string{"%group", "user"},
Kickstart: &iblueprint.Kickstart{
Contents: "# test kickstart addition created by osbuild-composer",
},
Modules: &iblueprint.AnacondaModules{
Enable: []string{
"org.fedoraproject.Anaconda.Modules.Localization",
"org.fedoraproject.Anaconda.Modules.Users",
},
Disable: []string{
"org.fedoraproject.Anaconda.Modules.Network",
},
},
},
RPM: &iblueprint.RPMCustomization{
ImportKeys: &iblueprint.RPMImportKeys{
Files: []string{"/root/gpg-key"},
},
},
RHSM: &iblueprint.RHSMCustomization{
Config: &iblueprint.RHSMConfig{
DNFPlugins: &iblueprint.SubManDNFPluginsConfig{
ProductID: &iblueprint.DNFPluginConfig{
Enabled: common.ToPtr(true),
},
SubscriptionManager: &iblueprint.DNFPluginConfig{
Enabled: common.ToPtr(false),
},
},
SubscriptionManager: &iblueprint.SubManConfig{
RHSMConfig: &iblueprint.SubManRHSMConfig{
ManageRepos: common.ToPtr(true),
},
RHSMCertdConfig: &iblueprint.SubManRHSMCertdConfig{
AutoRegistration: common.ToPtr(false),
},
},
},
},
CACerts: &iblueprint.CACustomization{
PEMCerts: []string{"pem-cert"},
},
},
Distro: "distro",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.expected, Convert(tt.src))
})
}
}

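The file deleted above tested internal/blueprint's Convert, which copied every blueprint field into the equivalent osbuild/images type; with the internal package removed, that conversion layer in osbuild-composer goes with it. A condensed, hypothetical sketch of the copying pattern the test pinned down (convertPackages is illustrative, not a function from the diff):

package main // illustrative sketch, not part of the diff

import (
	"fmt"

	"github.com/osbuild/blueprint/pkg/blueprint"
	iblueprint "github.com/osbuild/images/pkg/blueprint"
)

// convertPackages mirrors one field the way the deleted Convert mirrored all
// of them: a plain structural copy between the two blueprint packages.
func convertPackages(in []blueprint.Package) []iblueprint.Package {
	out := make([]iblueprint.Package, 0, len(in))
	for _, p := range in {
		out = append(out, iblueprint.Package{Name: p.Name, Version: p.Version})
	}
	return out
}

func main() {
	fmt.Println(convertPackages([]blueprint.Package{{Name: "tmux", Version: "1.2"}}))
}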

@@ -1,251 +0,0 @@
package blueprint
import (
"encoding/json"
"strings"
"testing"
"github.com/BurntSushi/toml"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBlueprintParse(t *testing.T) {
blueprint := `
name = "test"
description = "Test"
version = "0.0.0"
[[packages]]
name = "httpd"
version = "2.4.*"
[[customizations.filesystem]]
mountpoint = "/var"
size = 2147483648
[[customizations.filesystem]]
mountpoint = "/opt"
size = "20 GB"
`
var bp Blueprint
err := toml.Unmarshal([]byte(blueprint), &bp)
require.Nil(t, err)
assert.Equal(t, bp.Name, "test")
assert.Equal(t, "/var", bp.Customizations.Filesystem[0].Mountpoint)
assert.Equal(t, uint64(2147483648), bp.Customizations.Filesystem[0].MinSize)
assert.Equal(t, "/opt", bp.Customizations.Filesystem[1].Mountpoint)
assert.Equal(t, uint64(20*1000*1000*1000), bp.Customizations.Filesystem[1].MinSize)
blueprint = `{
"name": "test",
"customizations": {
"filesystem": [{
"mountpoint": "/opt",
"minsize": "20 GiB"
}]
}
}`
err = json.Unmarshal([]byte(blueprint), &bp)
require.Nil(t, err)
assert.Equal(t, bp.Name, "test")
assert.Equal(t, "/opt", bp.Customizations.Filesystem[0].Mountpoint)
assert.Equal(t, uint64(20*1024*1024*1024), bp.Customizations.Filesystem[0].MinSize)
}
func TestDeepCopy(t *testing.T) {
bpOrig := Blueprint{
Name: "deepcopy-test",
Description: "Testing DeepCopy function",
Version: "0.0.1",
Packages: []Package{
{Name: "dep-package1", Version: "*"}},
Modules: []Package{
{Name: "dep-package2", Version: "*"}},
}
bpCopy := bpOrig.DeepCopy()
require.Equalf(t, bpOrig, bpCopy, "Blueprints.DeepCopy is different from original.")
// Modify the copy
bpCopy.Packages[0].Version = "1.2.3"
require.Equalf(t, bpOrig.Packages[0].Version, "*", "Blueprint.DeepCopy failed, original modified")
// Modify the original
bpOrig.Packages[0].Version = "42.0"
require.Equalf(t, bpCopy.Packages[0].Version, "1.2.3", "Blueprint.DeepCopy failed, copy modified.")
}
func TestBlueprintInitialize(t *testing.T) {
cases := []struct {
NewBlueprint Blueprint
ExpectedError bool
}{
{Blueprint{Name: "bp-test-1", Description: "Empty version", Version: ""}, false},
{Blueprint{Name: "bp-test-2", Description: "Invalid version 1", Version: "0"}, true},
{Blueprint{Name: "bp-test-2", Description: "Invalid version 2", Version: "0.0"}, true},
{Blueprint{Name: "bp-test-3", Description: "Invalid version 3", Version: "0.0.0.0"}, true},
{Blueprint{Name: "bp-test-4", Description: "Invalid version 4", Version: "0.a.0"}, true},
{Blueprint{Name: "bp-test-5", Description: "Invalid version 5", Version: "foo"}, true},
{Blueprint{Name: "bp-test-7", Description: "Zero version", Version: "0.0.0"}, false},
{Blueprint{Name: "bp-test-8", Description: "X.Y.Z version", Version: "2.1.3"}, false},
}
for _, c := range cases {
bp := c.NewBlueprint
err := bp.Initialize()
assert.Equalf(t, (err != nil), c.ExpectedError, "Initialize(%#v) returned an unexpected error: %#v", c.NewBlueprint, err)
}
}
func TestBumpVersion(t *testing.T) {
cases := []struct {
NewBlueprint Blueprint
OldVersion string
ExpectedVersion string
}{
{Blueprint{Name: "bp-test-1", Description: "Empty version", Version: "0.0.1"}, "", "0.0.1"},
{Blueprint{Name: "bp-test-2", Description: "Invalid version 1", Version: "0.0.1"}, "0", "0.0.1"},
{Blueprint{Name: "bp-test-3", Description: "Invalid version 2", Version: "0.0.1"}, "0.0.0.0", "0.0.1"},
{Blueprint{Name: "bp-test-4", Description: "Invalid version 3", Version: "0.0.1"}, "0.a.0", "0.0.1"},
{Blueprint{Name: "bp-test-5", Description: "Invalid version 4", Version: "0.0.1"}, "foo", "0.0.1"},
{Blueprint{Name: "bp-test-6", Description: "Invalid version 5", Version: "0.0.1"}, "0.0", "0.0.1"},
{Blueprint{Name: "bp-test-8", Description: "Same version", Version: "4.2.0"}, "4.2.0", "4.2.1"},
}
for _, c := range cases {
bp := c.NewBlueprint
err := bp.Initialize()
require.NoError(t, err)
bp.BumpVersion(c.OldVersion)
assert.Equalf(t, c.ExpectedVersion, bp.Version, "BumpVersion(%#v) is expected to return %#v, but instead returned %#v.", c.OldVersion, c.ExpectedVersion, bp.Version)
}
}
func TestGetPackages(t *testing.T) {
bp := Blueprint{
Name: "packages-test",
Description: "Testing GetPackages function",
Version: "0.0.1",
Packages: []Package{
{Name: "tmux", Version: "1.2"}},
Modules: []Package{
{Name: "openssh-server", Version: "*"}},
Groups: []Group{
{Name: "anaconda-tools"}},
}
Received_packages := bp.GetPackages()
assert.ElementsMatch(t, []string{"tmux-1.2", "openssh-server", "@anaconda-tools", "kernel"}, Received_packages)
}
func TestKernelNameCustomization(t *testing.T) {
kernels := []string{"kernel", "kernel-debug", "kernel-rt"}
for _, k := range kernels {
// kernel in customizations
bp := Blueprint{
Name: "kernel-test",
Description: "Testing GetPackages function with custom Kernel",
Version: "0.0.1",
Packages: []Package{
{Name: "tmux", Version: "1.2"}},
Modules: []Package{
{Name: "openssh-server", Version: "*"}},
Groups: []Group{
{Name: "anaconda-tools"}},
Customizations: &Customizations{
Kernel: &KernelCustomization{
Name: k,
},
},
}
Received_packages := bp.GetPackages()
assert.ElementsMatch(t, []string{"tmux-1.2", "openssh-server", "@anaconda-tools", k}, Received_packages)
}
for _, k := range kernels {
// kernel in packages
bp := Blueprint{
Name: "kernel-test",
Description: "Testing GetPackages function with custom Kernel",
Version: "0.0.1",
Packages: []Package{
{Name: "tmux", Version: "1.2"},
{Name: k},
},
Modules: []Package{
{Name: "openssh-server", Version: "*"}},
Groups: []Group{
{Name: "anaconda-tools"}},
}
Received_packages := bp.GetPackages()
// adds default kernel as well
assert.ElementsMatch(t, []string{"tmux-1.2", k, "openssh-server", "@anaconda-tools", "kernel"}, Received_packages)
}
for _, bk := range kernels {
for _, ck := range kernels {
// all combos of both kernels
bp := Blueprint{
Name: "kernel-test",
Description: "Testing GetPackages function with custom Kernel",
Version: "0.0.1",
Packages: []Package{
{Name: "tmux", Version: "1.2"},
{Name: bk},
},
Modules: []Package{
{Name: "openssh-server", Version: "*"}},
Groups: []Group{
{Name: "anaconda-tools"}},
Customizations: &Customizations{
Kernel: &KernelCustomization{
Name: ck,
},
},
}
Received_packages := bp.GetPackages()
// both kernels are included, even if they're the same
assert.ElementsMatch(t, []string{"tmux-1.2", bk, "openssh-server", "@anaconda-tools", ck}, Received_packages)
}
}
}
// TestBlueprintPasswords checks to make sure all passwords are hashed
func TestBlueprintPasswords(t *testing.T) {
blueprint := `
name = "test"
description = "Test"
version = "0.0.0"
[[customizations.user]]
name = "bart"
password = "nobodysawmedoit"
[[customizations.user]]
name = "lisa"
password = "$6$RWdHzrPfoM6BMuIP$gKYlBXQuJgP.G2j2twbOyxYjFDPUQw8Jp.gWe1WD/obX0RMyfgw5vt.Mn/tLLX4mQjaklSiIzoAW3HrVQRg4Q."
[[customizations.user]]
name = "maggie"
password = ""
`
var bp Blueprint
err := toml.Unmarshal([]byte(blueprint), &bp)
require.Nil(t, err)
require.Nil(t, bp.Initialize())
// Note: User entries are in the same order as the toml
users := bp.Customizations.GetUsers()
assert.Equal(t, "bart", users[0].Name)
assert.True(t, strings.HasPrefix(*users[0].Password, "$6$"))
assert.Equal(t, "lisa", users[1].Name)
assert.Equal(t, "$6$RWdHzrPfoM6BMuIP$gKYlBXQuJgP.G2j2twbOyxYjFDPUQw8Jp.gWe1WD/obX0RMyfgw5vt.Mn/tLLX4mQjaklSiIzoAW3HrVQRg4Q.", *users[1].Password)
assert.Equal(t, "maggie", users[2].Name)
assert.Nil(t, users[2].Password)
}

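The deleted tests above cover behaviour callers keep relying on through the external package (assuming, as this commit does, that github.com/osbuild/blueprint/pkg/blueprint exports the same API): TOML/JSON parsing, version validation in Initialize, password hashing, and package-list expansion. A small sketch of that flow:

package main // illustrative sketch, not part of the diff

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	src := `
name = "example"
version = "0.0.1"

[[packages]]
name = "tmux"
version = "1.2"

[[customizations.user]]
name = "bart"
password = "plaintext-gets-hashed"
`
	var bp blueprint.Blueprint
	if err := toml.Unmarshal([]byte(src), &bp); err != nil {
		panic(err)
	}
	// Initialize validates the version and hashes plain-text passwords,
	// which is what the deleted TestBlueprintPasswords verified.
	if err := bp.Initialize(); err != nil {
		panic(err)
	}
	// GetPackages folds packages, modules, groups and the default kernel
	// into a single list, e.g. [tmux-1.2 kernel] for this blueprint.
	fmt.Println(bp.GetPackages())
}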

@@ -1,345 +0,0 @@
package blueprint
import (
"testing"
"github.com/osbuild/images/pkg/disk"
"github.com/stretchr/testify/assert"
)
func TestCheckAllowed(t *testing.T) {
Desc := "Test descritpion"
Pass := "testpass"
Key := "testkey"
Home := "Home"
Shell := "Shell"
Groups := []string{
"Group",
}
UID := 123
GID := 321
expectedUsers := []UserCustomization{
{
Name: "John",
Description: &Desc,
Password: &Pass,
Key: &Key,
Home: &Home,
Shell: &Shell,
Groups: Groups,
UID: &UID,
GID: &GID,
},
}
expectedHostname := "Hostname"
x := Customizations{Hostname: &expectedHostname, User: expectedUsers}
err := x.CheckAllowed("Hostname", "User")
assert.NoError(t, err)
// "User" not allowed anymore
err = x.CheckAllowed("Hostname")
assert.Error(t, err)
// "Hostname" not allowed anymore
err = x.CheckAllowed("User")
assert.Error(t, err)
}
func TestGetHostname(t *testing.T) {
expectedHostname := "Hostname"
TestCustomizations := Customizations{
Hostname: &expectedHostname,
}
retHostname := TestCustomizations.GetHostname()
assert.Equal(t, &expectedHostname, retHostname)
}
func TestGetKernel(t *testing.T) {
expectedKernel := KernelCustomization{
Append: "--test",
Name: "kernel",
}
TestCustomizations := Customizations{
Kernel: &expectedKernel,
}
retKernel := TestCustomizations.GetKernel()
assert.Equal(t, &expectedKernel, retKernel)
}
func TestSSHKey(t *testing.T) {
expectedSSHKeys := []SSHKeyCustomization{
{
User: "test-user",
Key: "test-key",
},
}
TestCustomizations := Customizations{
SSHKey: expectedSSHKeys,
}
retUser := TestCustomizations.GetUsers()[0].Name
retKey := *TestCustomizations.GetUsers()[0].Key
assert.Equal(t, expectedSSHKeys[0].User, retUser)
assert.Equal(t, expectedSSHKeys[0].Key, retKey)
}
func TestGetUsers(t *testing.T) {
Desc := "Test descritpion"
Pass := "testpass"
Key := "testkey"
Home := "Home"
Shell := "Shell"
Groups := []string{
"Group",
}
UID := 123
GID := 321
ExpireDate := 12345
expectedUsers := []UserCustomization{
{
Name: "John",
Description: &Desc,
Password: &Pass,
Key: &Key,
Home: &Home,
Shell: &Shell,
Groups: Groups,
UID: &UID,
GID: &GID,
ExpireDate: &ExpireDate,
},
}
TestCustomizations := Customizations{
User: expectedUsers,
}
retUsers := TestCustomizations.GetUsers()
assert.ElementsMatch(t, expectedUsers, retUsers)
}
func TestGetGroups(t *testing.T) {
GID := 1234
expectedGroups := []GroupCustomization{
{
Name: "TestGroup",
GID: &GID,
},
}
TestCustomizations := Customizations{
Group: expectedGroups,
}
retGroups := TestCustomizations.GetGroups()
assert.ElementsMatch(t, expectedGroups, retGroups)
}
func TestGetTimezoneSettings(t *testing.T) {
expectedTimezone := "testZONE"
expectedNTPServers := []string{
"server",
}
expectedTimezoneCustomization := TimezoneCustomization{
Timezone: &expectedTimezone,
NTPServers: expectedNTPServers,
}
TestCustomizations := Customizations{
Timezone: &expectedTimezoneCustomization,
}
retTimezone, retNTPServers := TestCustomizations.GetTimezoneSettings()
assert.Equal(t, expectedTimezone, *retTimezone)
assert.Equal(t, expectedNTPServers, retNTPServers)
}
func TestGetPrimaryLocale(t *testing.T) {
expectedLanguages := []string{
"enUS",
}
expectedKeyboard := "en"
expectedLocaleCustomization := LocaleCustomization{
Languages: expectedLanguages,
Keyboard: &expectedKeyboard,
}
TestCustomizations := Customizations{
Locale: &expectedLocaleCustomization,
}
retLanguage, retKeyboard := TestCustomizations.GetPrimaryLocale()
assert.Equal(t, expectedLanguages[0], *retLanguage)
assert.Equal(t, expectedKeyboard, *retKeyboard)
}
func TestGetFirewall(t *testing.T) {
expectedPorts := []string{"22", "9090"}
expectedServices := FirewallServicesCustomization{
Enabled: []string{"cockpit", "osbuild-composer"},
Disabled: []string{"TCP", "httpd"},
}
expectedFirewall := FirewallCustomization{
Ports: expectedPorts,
Services: &expectedServices,
}
TestCustomizations := Customizations{
Firewall: &expectedFirewall,
}
retFirewall := TestCustomizations.GetFirewall()
assert.ElementsMatch(t, expectedFirewall.Ports, retFirewall.Ports)
assert.ElementsMatch(t, expectedFirewall.Services.Enabled, retFirewall.Services.Enabled)
assert.ElementsMatch(t, expectedFirewall.Services.Disabled, retFirewall.Services.Disabled)
}
func TestGetServices(t *testing.T) {
expectedServices := ServicesCustomization{
Enabled: []string{"cockpit", "osbuild-composer"},
Disabled: []string{"sshd", "ftp"},
Masked: []string{"firewalld"},
}
TestCustomizations := Customizations{
Services: &expectedServices,
}
retServices := TestCustomizations.GetServices()
assert.ElementsMatch(t, expectedServices.Enabled, retServices.Enabled)
assert.ElementsMatch(t, expectedServices.Disabled, retServices.Disabled)
assert.ElementsMatch(t, expectedServices.Masked, retServices.Masked)
}
func TestError(t *testing.T) {
expectedError := CustomizationError{
Message: "test error",
}
retError := expectedError.Error()
assert.Equal(t, expectedError.Message, retError)
}
// This tests calling all the functions on a Blueprint with no Customizations
func TestNoCustomizationsInBlueprint(t *testing.T) {
TestBP := Blueprint{}
assert.Nil(t, TestBP.Customizations.GetHostname())
assert.Nil(t, TestBP.Customizations.GetUsers())
assert.Nil(t, TestBP.Customizations.GetGroups())
assert.Equal(t, &KernelCustomization{Name: "kernel"}, TestBP.Customizations.GetKernel())
assert.Nil(t, TestBP.Customizations.GetFirewall())
assert.Nil(t, TestBP.Customizations.GetServices())
nilLanguage, nilKeyboard := TestBP.Customizations.GetPrimaryLocale()
assert.Nil(t, nilLanguage)
assert.Nil(t, nilKeyboard)
nilTimezone, nilNTPServers := TestBP.Customizations.GetTimezoneSettings()
assert.Nil(t, nilTimezone)
assert.Nil(t, nilNTPServers)
}
// This tests additional scenarios where GetPrimaryLocale() returns nil values
func TestNilGetPrimaryLocale(t *testing.T) {
// Case empty Customization
TestCustomizationsEmpty := Customizations{}
retLanguage, retKeyboard := TestCustomizationsEmpty.GetPrimaryLocale()
assert.Nil(t, retLanguage)
assert.Nil(t, retKeyboard)
// Case empty Languages
expectedKeyboard := "en"
expectedLocaleCustomization := LocaleCustomization{
Keyboard: &expectedKeyboard,
}
TestCustomizations := Customizations{
Locale: &expectedLocaleCustomization,
}
retLanguage, retKeyboard = TestCustomizations.GetPrimaryLocale()
assert.Nil(t, retLanguage)
assert.Equal(t, expectedKeyboard, *retKeyboard)
}
// This tests an additional scenario where GetTimezoneSettings() returns nil values
func TestNilGetTimezoneSettings(t *testing.T) {
TestCustomizationsEmpty := Customizations{}
retTimezone, retNTPServers := TestCustomizationsEmpty.GetTimezoneSettings()
assert.Nil(t, retTimezone)
assert.Nil(t, retNTPServers)
}
func TestGetOpenSCAPConfig(t *testing.T) {
expectedOscap := OpenSCAPCustomization{
DataStream: "test-data-stream.xml",
ProfileID: "test_profile",
}
TestCustomizations := Customizations{
OpenSCAP: &expectedOscap,
}
retOpenSCAPCustomizations := TestCustomizations.GetOpenSCAP()
assert.EqualValues(t, expectedOscap, *retOpenSCAPCustomizations)
}
func TestGetPartitioningMode(t *testing.T) {
// No customizations returns Default which is actually AutoLVM,
// but that is handled by the images code
var c *Customizations
pm, err := c.GetPartitioningMode()
assert.NoError(t, err)
assert.Equal(t, disk.DefaultPartitioningMode, pm)
// Empty defaults to Default which is actually AutoLVM,
// but that is handled by the images code
c = &Customizations{}
_, err = c.GetPartitioningMode()
assert.NoError(t, err)
assert.Equal(t, disk.DefaultPartitioningMode, pm)
// Unknown mode returns an error
c = &Customizations{
PartitioningMode: "all-of-them",
}
_, err = c.GetPartitioningMode()
assert.Error(t, err)
// And a known mode returns the correct type
c = &Customizations{
PartitioningMode: "lvm",
}
pm, err = c.GetPartitioningMode()
assert.NoError(t, err)
assert.Equal(t, disk.LVMPartitioningMode, pm)
}

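The deleted customization tests above document two API behaviours worth noting: CheckAllowed rejects any populated customization outside an allow-list, and GetPartitioningMode maps the string setting onto a disk.PartitioningMode (a default when unset, an error on unknown values). A short sketch, assuming the same API in github.com/osbuild/blueprint/pkg/blueprint:

package main // illustrative sketch, not part of the diff

import (
	"fmt"

	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	hostname := "build-host"
	c := blueprint.Customizations{Hostname: &hostname, PartitioningMode: "lvm"}

	// Errors if a customization field outside the listed names is set.
	if err := c.CheckAllowed("Hostname", "PartitioningMode"); err != nil {
		panic(err)
	}

	// "lvm" maps to disk.LVMPartitioningMode; "all-of-them" would error.
	mode, err := c.GetPartitioningMode()
	fmt.Println(mode, err)
}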

@@ -1,363 +0,0 @@
package blueprint
import (
"bytes"
"encoding/json"
"fmt"
"github.com/osbuild/images/pkg/datasizes"
)
type DiskCustomization struct {
Type string
MinSize uint64
Partitions []PartitionCustomization
}
type diskCustomizationMarshaler struct {
MinSize datasizes.Size `json:"minsize,omitempty" toml:"minsize,omitempty"`
Partitions []PartitionCustomization `json:"partitions,omitempty" toml:"partitions,omitempty"`
}
func (dc *DiskCustomization) UnmarshalJSON(data []byte) error {
var dcm diskCustomizationMarshaler
if err := json.Unmarshal(data, &dcm); err != nil {
return err
}
dc.MinSize = dcm.MinSize.Uint64()
dc.Partitions = dcm.Partitions
return nil
}
func (dc *DiskCustomization) UnmarshalTOML(data any) error {
return unmarshalTOMLviaJSON(dc, data)
}
// PartitionCustomization defines a single partition on a disk. The Type
// defines the kind of "payload" for the partition: plain, lvm, or btrfs.
// - plain: the payload will be a filesystem on a partition (e.g. xfs, ext4).
// See [FilesystemTypedCustomization] for extra fields.
// - lvm: the payload will be an LVM volume group. See [VGCustomization] for
// extra fields
// - btrfs: the payload will be a btrfs volume. See
// [BtrfsVolumeCustomization] for extra fields.
type PartitionCustomization struct {
// The type of payload for the partition (optional, defaults to "plain").
Type string `json:"type" toml:"type"`
// Minimum size of the partition that contains the filesystem (for "plain"
// filesystem), volume group ("lvm"), or btrfs volume ("btrfs"). The final
// size of the partition will be larger than the minsize if the sum of the
// contained volumes (logical volumes or subvolumes) is larger. In
// addition, certain mountpoints have required minimum sizes. See
// https://osbuild.org/docs/user-guide/partitioning for more details.
// (optional, defaults depend on payload and mountpoints).
MinSize uint64 `json:"minsize" toml:"minsize"`
// The partition type GUID for GPT partitions. For DOS partitions, this
// field can be used to set the (2 hex digit) partition type.
// If not set, the type will be automatically set based on the mountpoint
// or the payload type.
PartType string `json:"part_type,omitempty" toml:"part_type,omitempty"`
BtrfsVolumeCustomization
VGCustomization
FilesystemTypedCustomization
}
// A filesystem on a plain partition or LVM logical volume.
// Note the differences from [FilesystemCustomization]:
// - Adds a label.
// - Adds a filesystem type (fs_type).
// - Does not define a size. The size is defined by its container: a
// partition ([PartitionCustomization]) or LVM logical volume
// ([LVCustomization]).
//
// Setting the FSType to "swap" creates a swap area (and the Mountpoint must be
// empty).
type FilesystemTypedCustomization struct {
Mountpoint string `json:"mountpoint,omitempty" toml:"mountpoint,omitempty"`
Label string `json:"label,omitempty" toml:"label,omitempty"`
FSType string `json:"fs_type,omitempty" toml:"fs_type,omitempty"`
}
// An LVM volume group with one or more logical volumes.
type VGCustomization struct {
// Volume group name (optional, default will be automatically generated).
Name string `json:"name,omitempty" toml:"name,omitempty"`
LogicalVolumes []LVCustomization `json:"logical_volumes,omitempty" toml:"logical_volumes,omitempty"`
}
type LVCustomization struct {
// Logical volume name
Name string `json:"name,omitempty" toml:"name,omitempty"`
// Minimum size of the logical volume
MinSize uint64 `json:"minsize,omitempty" toml:"minsize,omitempty"`
FilesystemTypedCustomization
}
// Custom JSON unmarshaller for LVCustomization for handling the conversion of
// data sizes (minsize) expressed as strings to uint64.
func (lv *LVCustomization) UnmarshalJSON(data []byte) error {
var lvAnySize struct {
Name string `json:"name,omitempty" toml:"name,omitempty"`
MinSize any `json:"minsize,omitempty" toml:"minsize,omitempty"`
FilesystemTypedCustomization
}
if err := json.Unmarshal(data, &lvAnySize); err != nil {
return err
}
lv.Name = lvAnySize.Name
lv.FilesystemTypedCustomization = lvAnySize.FilesystemTypedCustomization
if lvAnySize.MinSize == nil {
return fmt.Errorf("minsize is required")
}
size, err := decodeSize(lvAnySize.MinSize)
if err != nil {
return err
}
lv.MinSize = size
return nil
}
// A btrfs volume consisting of one or more subvolumes.
type BtrfsVolumeCustomization struct {
Subvolumes []BtrfsSubvolumeCustomization `json:"subvolumes,omitempty" toml:"subvolumes,omitempty"`
}
type BtrfsSubvolumeCustomization struct {
// The name of the subvolume, which defines the location (path) on the
// root volume (required).
// See https://btrfs.readthedocs.io/en/latest/Subvolumes.html
Name string `json:"name" toml:"name"`
// Mountpoint for the subvolume.
Mountpoint string `json:"mountpoint" toml:"mountpoint"`
}
// Custom JSON unmarshaller that first reads the value of the "type" field and
// then deserialises the whole object into a struct that only contains the
// fields valid for that partition type. This ensures that no fields are set
// for the substructure of a different type than the one defined in the "type"
// field.
func (v *PartitionCustomization) UnmarshalJSON(data []byte) error {
errPrefix := "JSON unmarshal:"
var typeSniffer struct {
Type string `json:"type"`
MinSize any `json:"minsize"`
}
if err := json.Unmarshal(data, &typeSniffer); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
partType := "plain"
if typeSniffer.Type != "" {
partType = typeSniffer.Type
}
switch partType {
case "plain":
if err := decodePlain(v, data); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "btrfs":
if err := decodeBtrfs(v, data); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "lvm":
if err := decodeLVM(v, data); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
default:
return fmt.Errorf("%s unknown partition type: %s", errPrefix, partType)
}
v.Type = partType
if typeSniffer.MinSize == nil {
return fmt.Errorf("minsize is required")
}
minsize, err := decodeSize(typeSniffer.MinSize)
if err != nil {
return fmt.Errorf("%s error decoding minsize for partition: %w", errPrefix, err)
}
v.MinSize = minsize
return nil
}
// decodePlain decodes the data into a struct that only embeds the
// FilesystemCustomization with DisallowUnknownFields. This ensures that when
// the type is "plain", none of the fields for btrfs or lvm are used.
func decodePlain(v *PartitionCustomization, data []byte) error {
var plain struct {
// Type and minsize are handled by the caller. These are added here to
// satisfy "DisallowUnknownFields" when decoding.
Type string `json:"type"`
MinSize any `json:"minsize"`
FilesystemTypedCustomization
}
decoder := json.NewDecoder(bytes.NewReader(data))
decoder.DisallowUnknownFields()
err := decoder.Decode(&plain)
if err != nil {
return fmt.Errorf("error decoding partition with type \"plain\": %w", err)
}
v.FilesystemTypedCustomization = plain.FilesystemTypedCustomization
return nil
}
// decodeBtrfs decodes the data into a struct that only embeds the
// BtrfsVolumeCustomization with DisallowUnknownFields. This ensures that when
// the type is btrfs, none of the fields for plain or lvm are used.
func decodeBtrfs(v *PartitionCustomization, data []byte) error {
var btrfs struct {
// Type and minsize are handled by the caller. These are added here to
// satisfy "DisallowUnknownFields" when decoding.
Type string `json:"type"`
MinSize any `json:"minsize"`
BtrfsVolumeCustomization
}
decoder := json.NewDecoder(bytes.NewReader(data))
decoder.DisallowUnknownFields()
err := decoder.Decode(&btrfs)
if err != nil {
return fmt.Errorf("error decoding partition with type \"btrfs\": %w", err)
}
v.BtrfsVolumeCustomization = btrfs.BtrfsVolumeCustomization
return nil
}
// decodeLVM decodes the data into a struct that only embeds the
// VGCustomization with DisallowUnknownFields. This ensures that when the type
// is lvm, none of the fields for plain or btrfs are used.
func decodeLVM(v *PartitionCustomization, data []byte) error {
var vg struct {
// Type and minsize are handled by the caller. These are added here to
// satisfy "DisallowUnknownFields" when decoding.
Type string `json:"type"`
MinSize any `json:"minsize"`
VGCustomization
}
decoder := json.NewDecoder(bytes.NewReader(data))
decoder.DisallowUnknownFields()
if err := decoder.Decode(&vg); err != nil {
return fmt.Errorf("error decoding partition with type \"lvm\": %w", err)
}
v.VGCustomization = vg.VGCustomization
return nil
}
// Custom TOML unmarshaller that first reads the value of the "type" field and
// then deserialises the whole object into a struct that only contains the
// fields valid for that partition type. This ensures that no fields are set
// for the substructure of a different type than the one defined in the "type"
// field.
func (v *PartitionCustomization) UnmarshalTOML(data any) error {
errPrefix := "TOML unmarshal:"
d, ok := data.(map[string]any)
if !ok {
return fmt.Errorf("%s customizations.partition is not an object", errPrefix)
}
partType := "plain"
if typeField, ok := d["type"]; ok {
typeStr, ok := typeField.(string)
if !ok {
return fmt.Errorf("%s type must be a string, got \"%v\" of type %T", errPrefix, typeField, typeField)
}
partType = typeStr
}
// serialise the data to JSON and reuse the subobject decoders
dataJSON, err := json.Marshal(data)
if err != nil {
return fmt.Errorf("%s error while decoding partition customization: %w", errPrefix, err)
}
switch partType {
case "plain":
if err := decodePlain(v, dataJSON); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "btrfs":
if err := decodeBtrfs(v, dataJSON); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "lvm":
if err := decodeLVM(v, dataJSON); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
default:
return fmt.Errorf("%s unknown partition type: %s", errPrefix, partType)
}
v.Type = partType
minsizeField, ok := d["minsize"]
if !ok {
return fmt.Errorf("minsize is required")
}
minsize, err := decodeSize(minsizeField)
if err != nil {
return fmt.Errorf("%s error decoding minsize for partition: %w", errPrefix, err)
}
v.MinSize = minsize
return nil
}
func unmarshalTOMLviaJSON(u json.Unmarshaler, data any) error {
// This is the most efficient way to reuse code when unmarshaling structs
// from TOML. It leaks JSON errors, which is a bit unfortunate, but because
// the TOML unmarshaler hands us a pre-processed "any" rather than raw
// "[]byte", we cannot simply unmarshal into our "fooMarshaling" struct and
// reuse the result, so we resort to this workaround (TOML support will go
// away in the long term anyway).
dataJSON, err := json.Marshal(data)
if err != nil {
return fmt.Errorf("error unmarshaling TOML data %v: %w", data, err)
}
if err := u.UnmarshalJSON(dataJSON); err != nil {
return fmt.Errorf("error decoding TOML %v: %w", data, err)
}
return nil
}
// decodeSize takes an integer or string representing a data size (with a data
// suffix) and returns the uint64 representation.
func decodeSize(size any) (uint64, error) {
switch s := size.(type) {
case string:
return datasizes.Parse(s)
case int64:
if s < 0 {
return 0, fmt.Errorf("cannot be negative")
}
return uint64(s), nil
case float64:
if s < 0 {
return 0, fmt.Errorf("cannot be negative")
}
// TODO: emit warning of possible truncation?
return uint64(s), nil
case uint64:
return s, nil
default:
return 0, fmt.Errorf("failed to convert value \"%v\" to number", size)
}
}

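The deleted file above carried the disk customization types and their custom unmarshallers; the same logic now comes from the external module (an assumption based on this commit's premise that github.com/osbuild/blueprint/pkg/blueprint exports identical types). A minimal sketch of what the "type" dispatch and size parsing do in practice:

package main // illustrative sketch, not part of the diff

import (
	"encoding/json"
	"fmt"

	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	// "type" selects which embedded customization gets decoded (plain, lvm,
	// btrfs), and "minsize" accepts an integer or a string with a data-size
	// suffix, as implemented by the unmarshallers above.
	data := []byte(`{
		"type": "lvm",
		"minsize": "10 GiB",
		"name": "vg0",
		"logical_volumes": [
			{"name": "rootlv", "minsize": "5 GiB", "mountpoint": "/", "fs_type": "xfs"}
		]
	}`)

	var part blueprint.PartitionCustomization
	if err := json.Unmarshal(data, &part); err != nil {
		panic(err)
	}
	fmt.Println(part.Type, part.MinSize, part.LogicalVolumes[0].Mountpoint) // lvm 10737418240 /
}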

@@ -1,159 +0,0 @@
package blueprint
import (
"testing"
"github.com/BurntSushi/toml"
"github.com/stretchr/testify/assert"
)
func TestGetFilesystems(t *testing.T) {
expectedFilesystems := []FilesystemCustomization{
{
MinSize: 1024,
Mountpoint: "/",
},
}
TestCustomizations := Customizations{
Filesystem: expectedFilesystems,
}
retFilesystems := TestCustomizations.GetFilesystems()
assert.ElementsMatch(t, expectedFilesystems, retFilesystems)
}
func TestGetFilesystemsMinSize(t *testing.T) {
expectedFilesystems := []FilesystemCustomization{
{
MinSize: 1024,
Mountpoint: "/",
},
{
MinSize: 4096,
Mountpoint: "/var",
},
}
TestCustomizations := Customizations{
Filesystem: expectedFilesystems,
}
retFilesystemsSize := TestCustomizations.GetFilesystemsMinSize()
assert.EqualValues(t, uint64(5120), retFilesystemsSize)
}
func TestGetFilesystemsMinSizeNonSectorSize(t *testing.T) {
expectedFilesystems := []FilesystemCustomization{
{
MinSize: 1025,
Mountpoint: "/",
},
{
MinSize: 4097,
Mountpoint: "/var",
},
}
TestCustomizations := Customizations{
Filesystem: expectedFilesystems,
}
retFilesystemsSize := TestCustomizations.GetFilesystemsMinSize()
assert.EqualValues(t, uint64(5632), retFilesystemsSize)
}
func TestGetFilesystemsMinSizeTOML(t *testing.T) {
tests := []struct {
Name string
TOML string
Want []FilesystemCustomization
Error bool
}{
{
Name: "size set, no minsize",
TOML: `
[[customizations.filesystem]]
mountpoint = "/var"
size = 1024
`,
Want: []FilesystemCustomization{{MinSize: 1024, Mountpoint: "/var"}},
Error: false,
},
{
Name: "size set (string), no minsize",
TOML: `
[[customizations.filesystem]]
mountpoint = "/var"
size = "1KiB"
`,
Want: []FilesystemCustomization{{MinSize: 1024, Mountpoint: "/var"}},
Error: false,
},
{
Name: "minsize set, no size",
TOML: `
[[customizations.filesystem]]
mountpoint = "/var"
minsize = 1024
`,
Want: []FilesystemCustomization{{MinSize: 1024, Mountpoint: "/var"}},
Error: false,
},
{
Name: "minsize set (string), no size",
TOML: `
[[customizations.filesystem]]
mountpoint = "/var"
minsize = "1KiB"
`,
Want: []FilesystemCustomization{{MinSize: 1024, Mountpoint: "/var"}},
Error: false,
},
{
Name: "size and minsize set",
TOML: `
[[customizations.filesystem]]
mountpoint = "/var"
size = 1024
minsize = 1024
`,
Want: []FilesystemCustomization{},
Error: true,
},
{
Name: "size and minsize not set",
TOML: `
[[customizations.filesystem]]
mountpoint = "/var"
`,
Want: []FilesystemCustomization{},
Error: true,
},
}
for _, tt := range tests {
t.Run(tt.Name, func(t *testing.T) {
var blueprint Blueprint
err := toml.Unmarshal([]byte(tt.TOML), &blueprint)
if tt.Error {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.NotNil(t, blueprint.Customizations)
assert.Equal(t, tt.Want, blueprint.Customizations.Filesystem)
}
})
}
}

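The deleted filesystem tests above pin the size-handling rules: minsize (or the legacy size key) may be an integer or a string with a data-size suffix, and specifying both on one entry is an error. A short sketch, assuming the same behaviour in the external package:

package main // illustrative sketch, not part of the diff

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	src := `
[[customizations.filesystem]]
mountpoint = "/var"
minsize = "2 GiB"
`
	var bp blueprint.Blueprint
	if err := toml.Unmarshal([]byte(src), &bp); err != nil {
		panic(err)
	}
	fmt.Println(bp.Customizations.Filesystem[0].MinSize) // 2147483648
}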

@@ -1,977 +0,0 @@
package blueprint
import (
"encoding/json"
"os"
"testing"
"github.com/BurntSushi/toml"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/fsnode"
"github.com/stretchr/testify/assert"
)
func TestDirectoryCustomizationToFsNodeDirectory(t *testing.T) {
ensureDirCreation := func(dir *fsnode.Directory, err error) *fsnode.Directory {
t.Helper()
assert.NoError(t, err)
assert.NotNil(t, dir)
return dir
}
testCases := []struct {
Name string
Dir DirectoryCustomization
WantDir *fsnode.Directory
Error bool
}{
{
Name: "empty",
Dir: DirectoryCustomization{},
Error: true,
},
{
Name: "path-only",
Dir: DirectoryCustomization{
Path: "/etc/dir",
},
WantDir: ensureDirCreation(fsnode.NewDirectory("/etc/dir", nil, nil, nil, false)),
},
{
Name: "path-invalid",
Dir: DirectoryCustomization{
Path: "etc/dir",
},
Error: true,
},
{
Name: "path-and-mode",
Dir: DirectoryCustomization{
Path: "/etc/dir",
Mode: "0700",
},
WantDir: ensureDirCreation(fsnode.NewDirectory("/etc/dir", common.ToPtr(os.FileMode(0700)), nil, nil, false)),
},
{
Name: "path-and-mode-no-leading-zero",
Dir: DirectoryCustomization{
Path: "/etc/dir",
Mode: "700",
},
WantDir: ensureDirCreation(fsnode.NewDirectory("/etc/dir", common.ToPtr(os.FileMode(0700)), nil, nil, false)),
},
{
Name: "path-and-mode-invalid",
Dir: DirectoryCustomization{
Path: "/etc/dir",
Mode: "12345",
},
Error: true,
},
{
Name: "path-user-group-string",
Dir: DirectoryCustomization{
Path: "/etc/dir",
User: "root",
Group: "root",
},
WantDir: ensureDirCreation(fsnode.NewDirectory("/etc/dir", nil, "root", "root", false)),
},
{
Name: "path-user-group-int64",
Dir: DirectoryCustomization{
Path: "/etc/dir",
User: int64(0),
Group: int64(0),
},
WantDir: ensureDirCreation(fsnode.NewDirectory("/etc/dir", nil, int64(0), int64(0), false)),
},
{
Name: "path-and-user-invalid-string",
Dir: DirectoryCustomization{
Path: "/etc/dir",
User: "r@@t",
},
Error: true,
},
{
Name: "path-and-user-invalid-int64",
Dir: DirectoryCustomization{
Path: "/etc/dir",
User: -1,
},
Error: true,
},
{
Name: "path-and-group-invalid-string",
Dir: DirectoryCustomization{
Path: "/etc/dir",
Group: "r@@t",
},
Error: true,
},
{
Name: "path-and-group-invalid-int64",
Dir: DirectoryCustomization{
Path: "/etc/dir",
Group: -1,
},
Error: true,
},
{
Name: "path-and-ensure-parent-dirs",
Dir: DirectoryCustomization{
Path: "/etc/dir",
EnsureParents: true,
},
WantDir: ensureDirCreation(fsnode.NewDirectory("/etc/dir", nil, nil, nil, true)),
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
dir, err := tc.Dir.ToFsNodeDirectory()
if tc.Error {
assert.Error(t, err)
assert.Nil(t, dir)
} else {
assert.NoError(t, err)
assert.EqualValues(t, tc.WantDir, dir)
}
})
}
}
func TestDirectoryCustomizationsToFsNodeDirectories(t *testing.T) {
ensureDirCreation := func(dir *fsnode.Directory, err error) *fsnode.Directory {
t.Helper()
assert.NoError(t, err)
assert.NotNil(t, dir)
return dir
}
testCases := []struct {
Name string
Dirs []DirectoryCustomization
WantDirs []*fsnode.Directory
Error bool
}{
{
Name: "empty",
Dirs: []DirectoryCustomization{},
WantDirs: nil,
},
{
Name: "single-directory",
Dirs: []DirectoryCustomization{
{
Path: "/etc/dir",
User: "root",
Group: "root",
Mode: "0700",
EnsureParents: true,
},
},
WantDirs: []*fsnode.Directory{
ensureDirCreation(fsnode.NewDirectory(
"/etc/dir",
common.ToPtr(os.FileMode(0700)),
"root",
"root",
true,
)),
},
},
{
Name: "multiple-directories",
Dirs: []DirectoryCustomization{
{
Path: "/etc/dir",
User: "root",
Group: "root",
},
{
Path: "/etc/dir2",
User: int64(0),
Group: int64(0),
},
},
WantDirs: []*fsnode.Directory{
ensureDirCreation(fsnode.NewDirectory("/etc/dir", nil, "root", "root", false)),
ensureDirCreation(fsnode.NewDirectory("/etc/dir2", nil, int64(0), int64(0), false)),
},
},
{
Name: "multiple-directories-with-errors",
Dirs: []DirectoryCustomization{
{
Path: "/etc/../dir",
},
{
Path: "/etc/dir2",
User: "r@@t",
},
},
Error: true,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
dirs, err := DirectoryCustomizationsToFsNodeDirectories(tc.Dirs)
if tc.Error {
assert.Error(t, err)
assert.Nil(t, dirs)
} else {
assert.NoError(t, err)
assert.EqualValues(t, tc.WantDirs, dirs)
}
})
}
}
func TestDirectoryCustomizationUnmarshalTOML(t *testing.T) {
testCases := []struct {
Name string
TOML string
Want []DirectoryCustomization
Error bool
}{
{
Name: "directory-with-path",
TOML: `
name = "test"
description = "Test"
version = "0.0.0"
[[customizations.directories]]
path = "/etc/dir"
`,
Want: []DirectoryCustomization{
{
Path: "/etc/dir",
},
},
},
{
Name: "multiple-directories",
TOML: `
name = "test"
description = "Test"
version = "0.0.0"
[[customizations.directories]]
path = "/etc/dir1"
mode = "0700"
user = "root"
group = "root"
ensure_parents = true
[[customizations.directories]]
path = "/etc/dir2"
mode = "0755"
user = 0
group = 0
ensure_parents = true
[[customizations.directories]]
path = "/etc/dir3"
`,
Want: []DirectoryCustomization{
{
Path: "/etc/dir1",
Mode: "0700",
User: "root",
Group: "root",
EnsureParents: true,
},
{
Path: "/etc/dir2",
Mode: "0755",
User: int64(0),
Group: int64(0),
EnsureParents: true,
},
{
Path: "/etc/dir3",
},
},
},
{
Name: "invalid-directories",
TOML: `
name = "test"
description = "Test"
version = "0.0.0"
[[customizations.directories]]
path = "/etc/../dir1"
[[customizations.directories]]
path = "/etc/dir2"
mode = "12345"
[[customizations.directories]]
path = "/etc/dir3"
user = "r@@t"
[[customizations.directories]]
path = "/etc/dir4"
group = "r@@t"
[[customizations.directories]]
path = "/etc/dir5"
user = -1
[[customizations.directories]]
path = "/etc/dir6"
group = -1
[[customizations.directories]]
`,
Error: true,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
var blueprint Blueprint
err := toml.Unmarshal([]byte(tc.TOML), &blueprint)
if tc.Error {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.NotNil(t, blueprint.Customizations)
assert.Len(t, blueprint.Customizations.Directories, len(tc.Want))
assert.EqualValues(t, tc.Want, blueprint.Customizations.GetDirectories())
}
})
}
}
func TestDirectoryCustomizationUnmarshalJSON(t *testing.T) {
testCases := []struct {
Name string
JSON string
Want []DirectoryCustomization
Error bool
}{
{
Name: "directory-with-path",
JSON: `
{
"name": "test",
"description": "Test",
"version": "0.0.0",
"customizations": {
"directories": [
{
"path": "/etc/dir"
}
]
}
}`,
Want: []DirectoryCustomization{
{
Path: "/etc/dir",
},
},
},
{
Name: "multiple-directories",
JSON: `
{
"name": "test",
"description": "Test",
"version": "0.0.0",
"customizations": {
"directories": [
{
"path": "/etc/dir1",
"mode": "0700",
"user": "root",
"group": "root",
"ensure_parents": true
},
{
"path": "/etc/dir2",
"mode": "0755",
"user": 0,
"group": 0,
"ensure_parents": true
},
{
"path": "/etc/dir3"
}
]
}
}`,
Want: []DirectoryCustomization{
{
Path: "/etc/dir1",
Mode: "0700",
User: "root",
Group: "root",
EnsureParents: true,
},
{
Path: "/etc/dir2",
Mode: "0755",
User: int64(0),
Group: int64(0),
EnsureParents: true,
},
{
Path: "/etc/dir3",
},
},
},
{
Name: "invalid-directories",
JSON: `
{
"name": "test",
"description": "Test",
"version": "0.0.0",
"customizations": {
"directories": [
{
"path": "/etc/../dir1"
},
{
"path": "/etc/dir2",
"mode": "12345"
},
{
"path": "/etc/dir3",
"user": "r@@t"
},
{
"path": "/etc/dir4",
"group": "r@@t"
},
{
"path": "/etc/dir5",
"user": -1
},
{
"path": "/etc/dir6",
"group": -1
}
{}
]
}
}`,
Error: true,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
var blueprint Blueprint
err := json.Unmarshal([]byte(tc.JSON), &blueprint)
if tc.Error {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.NotNil(t, blueprint.Customizations)
assert.Len(t, blueprint.Customizations.Directories, len(tc.Want))
assert.EqualValues(t, tc.Want, blueprint.Customizations.GetDirectories())
}
})
}
}
func TestFileCustomizationToFsNodeFile(t *testing.T) {
ensureFileCreation := func(file *fsnode.File, err error) *fsnode.File {
t.Helper()
assert.NoError(t, err)
assert.NotNil(t, file)
return file
}
testCases := []struct {
Name string
File FileCustomization
Want *fsnode.File
Error bool
}{
{
Name: "empty",
File: FileCustomization{},
Error: true,
},
{
Name: "path-only",
File: FileCustomization{
Path: "/etc/file",
},
Want: ensureFileCreation(fsnode.NewFile("/etc/file", nil, nil, nil, nil)),
},
{
Name: "path-invalid",
File: FileCustomization{
Path: "../etc/file",
},
Error: true,
},
{
Name: "path-and-mode",
File: FileCustomization{
Path: "/etc/file",
Mode: "0700",
},
Want: ensureFileCreation(fsnode.NewFile("/etc/file", common.ToPtr(os.FileMode(0700)), nil, nil, nil)),
},
{
Name: "path-and-mode-no-leading-zero",
File: FileCustomization{
Path: "/etc/file",
Mode: "700",
},
Want: ensureFileCreation(fsnode.NewFile("/etc/file", common.ToPtr(os.FileMode(0700)), nil, nil, nil)),
},
{
Name: "path-and-mode-invalid",
File: FileCustomization{
Path: "/etc/file",
Mode: "12345",
},
Error: true,
},
{
Name: "path-user-group-string",
File: FileCustomization{
Path: "/etc/file",
User: "root",
Group: "root",
},
Want: ensureFileCreation(fsnode.NewFile("/etc/file", nil, "root", "root", nil)),
},
{
Name: "path-user-group-int64",
File: FileCustomization{
Path: "/etc/file",
User: int64(0),
Group: int64(0),
},
Want: ensureFileCreation(fsnode.NewFile("/etc/file", nil, int64(0), int64(0), nil)),
},
{
Name: "path-and-user-invalid-string",
File: FileCustomization{
Path: "/etc/file",
User: "r@@t",
},
Error: true,
},
{
Name: "path-and-user-invalid-int64",
File: FileCustomization{
Path: "/etc/file",
User: int64(-1),
},
Error: true,
},
{
Name: "path-and-group-string",
File: FileCustomization{
Path: "/etc/file",
Group: "root",
},
Want: ensureFileCreation(fsnode.NewFile("/etc/file", nil, nil, "root", nil)),
},
{
Name: "path-and-group-int64",
File: FileCustomization{
Path: "/etc/file",
Group: int64(0),
},
Want: ensureFileCreation(fsnode.NewFile("/etc/file", nil, nil, int64(0), nil)),
},
{
Name: "path-and-group-invalid-string",
File: FileCustomization{
Path: "/etc/file",
Group: "r@@t",
},
Error: true,
},
{
Name: "path-and-group-invalid-int64",
File: FileCustomization{
Path: "/etc/file",
Group: int64(-1),
},
Error: true,
},
{
Name: "path-and-data",
File: FileCustomization{
Path: "/etc/file",
Data: "hello world",
},
Want: ensureFileCreation(fsnode.NewFile("/etc/file", nil, nil, nil, []byte("hello world"))),
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
file, err := tc.File.ToFsNodeFile()
if tc.Error {
assert.Error(t, err)
assert.Nil(t, file)
} else {
assert.NoError(t, err)
assert.EqualValues(t, tc.Want, file)
}
})
}
}
func TestFileCustomizationsToFsNodeFiles(t *testing.T) {
ensureFileCreation := func(file *fsnode.File, err error) *fsnode.File {
t.Helper()
assert.NoError(t, err)
assert.NotNil(t, file)
return file
}
testCases := []struct {
Name string
Files []FileCustomization
Want []*fsnode.File
Error bool
}{
{
Name: "empty",
Files: []FileCustomization{},
Want: nil,
},
{
Name: "single-file",
Files: []FileCustomization{
{
Path: "/etc/file",
User: "root",
Group: "root",
Mode: "0700",
Data: "hello world",
},
},
Want: []*fsnode.File{
ensureFileCreation(fsnode.NewFile(
"/etc/file",
common.ToPtr(os.FileMode(0700)),
"root",
"root",
[]byte("hello world"),
)),
},
},
{
Name: "multiple-files",
Files: []FileCustomization{
{
Path: "/etc/file",
Data: "hello world",
User: "root",
Group: "root",
},
{
Path: "/etc/file2",
Data: "hello world",
User: int64(0),
Group: int64(0),
},
},
Want: []*fsnode.File{
ensureFileCreation(fsnode.NewFile("/etc/file", nil, "root", "root", []byte("hello world"))),
ensureFileCreation(fsnode.NewFile("/etc/file2", nil, int64(0), int64(0), []byte("hello world"))),
},
},
{
Name: "multiple-files-with-errors",
Files: []FileCustomization{
{
Path: "/etc/../file",
Data: "hello world",
},
{
Path: "/etc/file2",
Data: "hello world",
User: "r@@t",
},
},
Error: true,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
files, err := FileCustomizationsToFsNodeFiles(tc.Files)
if tc.Error {
assert.Error(t, err)
assert.Nil(t, files)
} else {
assert.NoError(t, err)
assert.EqualValues(t, tc.Want, files)
}
})
}
}
func TestFileCustomizationUnmarshalTOML(t *testing.T) {
testCases := []struct {
Name string
TOML string
Want []FileCustomization
Error bool
}{
{
Name: "file-with-path",
TOML: `
name = "test"
description = "Test"
version = "0.0.0"
[[customizations.files]]
path = "/etc/file"
`,
Want: []FileCustomization{
{
Path: "/etc/file",
},
},
},
{
Name: "multiple-files",
TOML: `
name = "test"
description = "Test"
version = "0.0.0"
[[customizations.files]]
path = "/etc/file1"
mode = "0600"
user = "root"
group = "root"
data = "hello world"
[[customizations.files]]
path = "/etc/file2"
mode = "0644"
data = "hello world 2"
[[customizations.files]]
path = "/etc/file3"
user = 0
group = 0
data = "hello world 3"
`,
Want: []FileCustomization{
{
Path: "/etc/file1",
Mode: "0600",
User: "root",
Group: "root",
Data: "hello world",
},
{
Path: "/etc/file2",
Mode: "0644",
Data: "hello world 2",
},
{
Path: "/etc/file3",
User: int64(0),
Group: int64(0),
Data: "hello world 3",
},
},
},
{
Name: "invalid-files",
TOML: `
name = "test"
description = "Test"
version = "0.0.0"
[[customizations.files]]
path = "/etc/../file1"
[[customizations.files]]
path = "/etc/file2"
mode = "12345"
[[customizations.files]]
path = "/etc/file3"
user = "r@@t"
[[customizations.files]]
path = "/etc/file4"
group = "r@@t"
[[customizations.files]]
path = "/etc/file5"
user = -1
[[customizations.files]]
path = "/etc/file6"
group = -1
`,
Error: true,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
var blueprint Blueprint
err := toml.Unmarshal([]byte(tc.TOML), &blueprint)
if tc.Error {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.NotNil(t, blueprint.Customizations)
assert.Len(t, blueprint.Customizations.Files, len(tc.Want))
assert.EqualValues(t, tc.Want, blueprint.Customizations.Files)
}
})
}
}
func TestFileCustomizationUnmarshalJSON(t *testing.T) {
testCases := []struct {
Name string
JSON string
Want []FileCustomization
Error bool
}{
{
Name: "file-with-path",
JSON: `
{
"name": "test",
"description": "Test",
"version": "0.0.0",
"customizations": {
"files": [
{
"path": "/etc/file"
}
]
}
}`,
Want: []FileCustomization{
{
Path: "/etc/file",
},
},
},
{
Name: "multiple-files",
JSON: `
{
"name": "test",
"description": "Test",
"version": "0.0.0",
"customizations": {
"files": [
{
"path": "/etc/file1",
"mode": "0600",
"user": "root",
"group": "root",
"data": "hello world"
},
{
"path": "/etc/file2",
"mode": "0644",
"data": "hello world 2"
},
{
"path": "/etc/file3",
"user": 0,
"group": 0,
"data": "hello world 3"
}
]
}
}`,
Want: []FileCustomization{
{
Path: "/etc/file1",
Mode: "0600",
User: "root",
Group: "root",
Data: "hello world",
},
{
Path: "/etc/file2",
Mode: "0644",
Data: "hello world 2",
},
{
Path: "/etc/file3",
User: int64(0),
Group: int64(0),
Data: "hello world 3",
},
},
},
{
Name: "invalid-files",
JSON: `
{
"name": "test",
"description": "Test",
"version": "0.0.0",
"customizations": {
"files": [
{
"path": "/etc/../file1"
},
{
"path": "/etc/file2",
"mode": "12345"
},
{
"path": "/etc/file3",
"user": "r@@t"
},
{
"path": "/etc/file4",
"group": "r@@t"
},
{
"path": "/etc/file5",
"user": -1
},
{
"path": "/etc/file6",
"group": -1
}
]
}
}`,
Error: true,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
var blueprint Blueprint
err := json.Unmarshal([]byte(tc.JSON), &blueprint)
if tc.Error {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.NotNil(t, blueprint.Customizations)
assert.Len(t, blueprint.Customizations.Files, len(tc.Want))
assert.EqualValues(t, tc.Want, blueprint.Customizations.Files)
}
})
}
}
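
Editor's note: the TOML and JSON cases above exercise the same decoding path that composer follows when a client submits a blueprint, now via the imported osbuild/blueprint package. A minimal standalone sketch of that path, using only the Blueprint and Customizations.Files fields already referenced by the tests (the file path and contents are illustrative):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	// A blueprint with a single file customization; user can be a name
	// (string) or a UID (integer), exactly as the test cases above show.
	const data = `
name = "test"
description = "Test"
version = "0.0.0"

[[customizations.files]]
path = "/etc/motd"
mode = "0644"
user = "root"
data = "hello world"
`
	var bp blueprint.Blueprint
	if err := toml.Unmarshal([]byte(data), &bp); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", bp.Customizations.Files)
}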

View file

@ -1,157 +0,0 @@
package blueprint
import (
"fmt"
"testing"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/stretchr/testify/assert"
)
func TestGetCustomRepositories(t *testing.T) {
testCases := []struct {
name string
expectedCustomizations Customizations
wantErr error
}{
{
name: "Test no errors",
expectedCustomizations: Customizations{
Repositories: []RepositoryCustomization{
{
Id: "example-1",
BaseURLs: []string{"http://example-1.com"},
},
{
Id: "example-2",
BaseURLs: []string{"http://example-2.com"},
},
},
},
wantErr: nil,
},
{
name: "Test empty id error",
expectedCustomizations: Customizations{
Repositories: []RepositoryCustomization{
{},
},
},
wantErr: fmt.Errorf("Repository ID is required"),
},
{
name: "Test empty baseurl, mirrorlist or metalink error",
expectedCustomizations: Customizations{
Repositories: []RepositoryCustomization{
{
Id: "example-1",
},
},
},
wantErr: fmt.Errorf("Repository base URL, mirrorlist or metalink is required"),
},
{
name: "Test missing GPG keys error",
expectedCustomizations: Customizations{
Repositories: []RepositoryCustomization{
{
Id: "example-1",
BaseURLs: []string{"http://example-1.com"},
GPGCheck: common.ToPtr(true),
},
},
},
wantErr: fmt.Errorf("Repository gpg check is set to true but no gpg keys are provided"),
},
{
name: "Test invalid GPG keys error",
expectedCustomizations: Customizations{
Repositories: []RepositoryCustomization{
{
Id: "example-1",
BaseURLs: []string{"http://example-1.com"},
GPGKeys: []string{"invalid"},
GPGCheck: common.ToPtr(true),
},
},
},
wantErr: fmt.Errorf("Repository gpg key is not a valid URL or a valid gpg key"),
},
{
name: "Test invalid repository filename error",
expectedCustomizations: Customizations{
Repositories: []RepositoryCustomization{
{
Id: "example-1",
BaseURLs: []string{"http://example-1.com"},
Filename: "!nval!d",
},
},
},
wantErr: fmt.Errorf("Repository filename %q is invalid", "!nval!d.repo"),
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
if tt.wantErr == nil {
retCustomizations, err := tt.expectedCustomizations.GetRepositories()
assert.NoError(t, err)
assert.EqualValues(t, tt.expectedCustomizations.Repositories, retCustomizations)
} else {
_, err := tt.expectedCustomizations.GetRepositories()
assert.Equal(t, tt.wantErr, err)
}
})
}
}
func TestCustomRepoFilename(t *testing.T) {
testCases := []struct {
Name string
Repo RepositoryCustomization
WantFilename string
}{
{
Name: "Test default filename #1",
Repo: RepositoryCustomization{
Id: "example-1",
BaseURLs: []string{"http://example-1.com"},
},
WantFilename: "example-1.repo",
},
{
Name: "Test default filename #2",
Repo: RepositoryCustomization{
Id: "example-2",
BaseURLs: []string{"http://example-1.com"},
},
WantFilename: "example-2.repo",
},
{
Name: "Test custom filename",
Repo: RepositoryCustomization{
Id: "example-1",
BaseURLs: []string{"http://example-1.com"},
Filename: "test.repo",
},
WantFilename: "test.repo",
},
{
Name: "Test custom filename without extension",
Repo: RepositoryCustomization{
Id: "example-1",
BaseURLs: []string{"http://example-1.com"},
Filename: "test",
},
WantFilename: "test.repo",
},
}
for _, tt := range testCases {
t.Run(tt.Name, func(t *testing.T) {
got := tt.Repo.getFilename()
assert.Equal(t, tt.WantFilename, got)
})
}
}

View file

@ -8,7 +8,7 @@ import (
"net/http"
"strings"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/osbuild-composer/internal/weldr"
)

View file

@ -10,13 +10,13 @@ import (
"github.com/google/uuid"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/customizations/subscription"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distrofactory"
"github.com/osbuild/images/pkg/reporegistry"
"github.com/osbuild/images/pkg/rhsm/facts"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/target"
)

View file

@ -5,13 +5,13 @@ import (
"io/fs"
"testing"
"github.com/osbuild/blueprint/pkg/blueprint"
repos "github.com/osbuild/images/data/repositories"
"github.com/osbuild/images/pkg/customizations/subscription"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distrofactory"
"github.com/osbuild/images/pkg/reporegistry"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/target"

View file

@ -17,12 +17,12 @@ import (
"github.com/google/uuid"
"github.com/labstack/echo/v4"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/images/pkg/sbom"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/jsondb"
"github.com/osbuild/osbuild-composer/internal/target"

View file

@ -7,11 +7,11 @@ import (
ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/google/uuid"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/ostree"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/cloud/gcp"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/target"

View file

@ -3,10 +3,10 @@ package v2
import (
"testing"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro/rhel/rhel9"
"github.com/osbuild/images/pkg/distro/test_distro"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/target"

View file

@ -22,6 +22,7 @@ import (
"github.com/osbuild/osbuild-composer/pkg/jobqueue"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/distrofactory"
"github.com/osbuild/images/pkg/dnfjson"
@ -30,7 +31,6 @@ import (
"github.com/osbuild/images/pkg/reporegistry"
"github.com/osbuild/images/pkg/sbom"
"github.com/osbuild/osbuild-composer/internal/auth"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/prometheus"
"github.com/osbuild/osbuild-composer/internal/target"

View file

@ -4,10 +4,10 @@ import (
"time"
"github.com/google/uuid"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/target"
)

View file

@ -5,11 +5,11 @@ import (
"time"
"github.com/google/uuid"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/test_distro"
"github.com/osbuild/images/pkg/distrofactory"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/target"
)

View file

@ -8,12 +8,12 @@ import (
"time"
"github.com/google/uuid"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distrofactory"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/target"
)

View file

@ -12,13 +12,13 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/fedora"
"github.com/osbuild/images/pkg/distro/test_distro"
"github.com/osbuild/images/pkg/distrofactory"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/target"
)

View file

@ -22,8 +22,8 @@ import (
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/osbuild-composer/internal/jsondb"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/target"

View file

@ -7,13 +7,13 @@ import (
"github.com/google/uuid"
"github.com/stretchr/testify/suite"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/test_distro"
"github.com/osbuild/images/pkg/distrofactory"
"github.com/osbuild/images/pkg/manifest"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/target"
)

View file

@ -31,6 +31,7 @@ import (
"github.com/julienschmidt/httprouter"
"github.com/osbuild/osbuild-composer/pkg/jobqueue"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/distro"
@ -43,7 +44,6 @@ import (
"github.com/osbuild/images/pkg/rhsm/facts"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/images/pkg/sbom"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/store"
"github.com/osbuild/osbuild-composer/internal/target"

View file

@ -18,6 +18,7 @@ import (
"time"
ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/test_distro"
@ -27,7 +28,6 @@ import (
"github.com/osbuild/images/pkg/ostree/mock_ostree_repo"
"github.com/osbuild/images/pkg/reporegistry"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
dnfjson_mock "github.com/osbuild/osbuild-composer/internal/mocks/dnfjson"
rpmmd_mock "github.com/osbuild/osbuild-composer/internal/mocks/rpmmd"

View file

@ -5,8 +5,8 @@ package weldr
import (
"github.com/google/uuid"
"github.com/osbuild/blueprint/pkg/blueprint"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/store"
)

View file

@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Documentation: https://godocs.io/github.com/BurntSushi/toml
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show

View file

@ -196,6 +196,26 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
return md.unify(primValue.undecoded, rvalue(v))
}
// markDecodedRecursive is a helper to mark any key under the given tmap as
// decoded, recursing as needed
func markDecodedRecursive(md *MetaData, tmap map[string]any) {
for key := range tmap {
md.decoded[md.context.add(key).String()] = struct{}{}
if tmap, ok := tmap[key].(map[string]any); ok {
md.context = append(md.context, key)
markDecodedRecursive(md, tmap)
md.context = md.context[0 : len(md.context)-1]
}
if tarr, ok := tmap[key].([]map[string]any); ok {
for _, elm := range tarr {
md.context = append(md.context, key)
markDecodedRecursive(md, elm)
md.context = md.context[0 : len(md.context)-1]
}
}
}
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
@ -222,6 +242,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error {
if err != nil {
return md.parseErr(err)
}
// Assume the Unmarshaler decoded everything, so mark all keys under
// this table as decoded.
if tmap, ok := data.(map[string]any); ok {
markDecodedRecursive(md, tmap)
}
if aot, ok := data.([]map[string]any); ok {
for _, tmap := range aot {
markDecodedRecursive(md, tmap)
}
}
return nil
}
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
@ -540,12 +570,14 @@ func (md *MetaData) badtype(dst string, data any) error {
func (md *MetaData) parseErr(err error) error {
k := md.context.String()
d := string(md.data)
return ParseError{
LastKey: k,
Position: md.keyInfo[k].pos,
Line: md.keyInfo[k].pos.Line,
Message: err.Error(),
err: err,
input: string(md.data),
LastKey: k,
Position: md.keyInfo[k].pos.withCol(d),
Line: md.keyInfo[k].pos.Line,
input: d,
}
}
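
Editor's note: the markDecodedRecursive change above marks every key consumed by a custom toml.Unmarshaler as decoded, so MetaData.Undecoded() should no longer report those keys as unused. A small sketch of the behaviour this enables, assuming the standard toml.Decode / MetaData.Undecoded API; the box type is hypothetical:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// box swallows its entire table via a custom unmarshaller.
type box struct {
	raw map[string]any
}

func (b *box) UnmarshalTOML(data any) error {
	b.raw, _ = data.(map[string]any)
	return nil
}

func main() {
	var cfg struct {
		Box box `toml:"box"`
	}
	md, err := toml.Decode("[box]\nkey = 1\n[box.nested]\na = 2\n", &cfg)
	if err != nil {
		panic(err)
	}
	// With markDecodedRecursive, "box.key" and "box.nested.a" are marked as
	// decoded and should no longer show up here.
	fmt.Println(md.Undecoded())
}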

View file

@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
var mapKeysDirect, mapKeysSub []reflect.Value
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
mapKeysSub = append(mapKeysSub, k)
mapKeysSub = append(mapKeysSub, mapKey)
} else {
mapKeysDirect = append(mapKeysDirect, k)
mapKeysDirect = append(mapKeysDirect, mapKey)
}
}
var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys)
writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
for i, mapKey := range mapKeys {
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
val := eindirect(rv.MapIndex(mapKey))
if isNil(val) {
continue
}
if inline {
enc.writeKeyValue(Key{mapKey}, val, true)
enc.writeKeyValue(Key{mapKey.String()}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(mapKey), val)
enc.encode(key.add(mapKey.String()), val)
}
}
}
@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
}
const is32Bit = (32 << (^uint(0) >> 63)) == 32
func pointerTo(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
return pointerTo(t.Elem())
@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
frv := eindirect(rv.Field(i))
if is32Bit {
// Copy so it works correct on 32bit archs; not clear why this
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
// This also works fine on 64bit, but 32bit archs are somewhat
// rare and this is a wee bit faster.
// Need to make a copy because ... ehm, I don't know why... I guess
// allocating a new array can cause it to fail(?)
//
// Done for: https://github.com/BurntSushi/toml/issues/430
// Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
copyStart := make([]int, len(start))
copy(copyStart, start)
start = copyStart
}
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
}
addFields(rt, rv, nil)
writeFields := func(fields [][]int) {
writeFields := func(fields [][]int, totalFields int) {
for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex)
fieldVal := rv.FieldByIndex(fieldIndex)
@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != len(fields)-1 {
if fieldIndex[0] != totalFields-1 {
enc.wf(", ")
}
} else {
@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.wf("{")
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
l := len(fieldsDirect) + len(fieldsSub)
writeFields(fieldsDirect, l)
writeFields(fieldsSub, l)
if inline {
enc.wf("}")
}
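
Editor's note: switching eMap from string keys to reflect.Value keys appears to allow encoding maps whose key type is a defined string type (the old reflect.ValueOf(string) lookup was not assignable to such a key type), and the totalFields count keeps comma placement correct for inline tables that mix direct and sub fields. A small sketch under that assumption; the name type is hypothetical:

package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

// name is a defined string type used as a map key.
type name string

func main() {
	var buf bytes.Buffer
	m := map[name]int{"alpha": 1, "beta": 2}
	if err := toml.NewEncoder(&buf).Encode(m); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // keys are emitted in sorted order
}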

View file

@ -67,21 +67,36 @@ type ParseError struct {
// Position of an error.
type Position struct {
Line int // Line number, starting at 1.
Col int // Error column, starting at 1.
Start int // Start of error, as byte offset starting at 0.
Len int // Lenght in bytes.
Len int // Length of the error in bytes.
}
func (p Position) withCol(tomlFile string) Position {
var (
pos int
lines = strings.Split(tomlFile, "\n")
)
for i := range lines {
ll := len(lines[i]) + 1 // +1 for the removed newline
if pos+ll >= p.Start {
p.Col = p.Start - pos + 1
if p.Col < 1 { // Should never happen, but just in case.
p.Col = 1
}
break
}
pos += ll
}
return p
}
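
Editor's note: withCol translates the byte offset of an error (Position.Start) into a 1-based column by walking the input line by line, so the column now lives on Position itself instead of being recomputed by the removed ParseError.column helper. A hedged sketch of how a caller might observe it; the broken input is illustrative only:

package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]any
	_, err := toml.Decode("good = 1\nbad = \n", &v) // missing value on line 2
	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Position.Col is filled in via withCol() when the error is built.
		fmt.Println(perr.Position.Line, perr.Position.Col)
		fmt.Println(perr.ErrorWithPosition())
	}
}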
func (pe ParseError) Error() string {
msg := pe.Message
if msg == "" { // Error from errorf()
msg = pe.err.Error()
}
if pe.LastKey == "" {
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
}
return fmt.Sprintf("toml: line %d (last key %q): %s",
pe.Position.Line, pe.LastKey, msg)
pe.Position.Line, pe.LastKey, pe.Message)
}
// ErrorWithPosition returns the error with detailed location context.
@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string {
return pe.Error()
}
var (
lines = strings.Split(pe.input, "\n")
col = pe.column(lines)
b = new(strings.Builder)
)
msg := pe.Message
if msg == "" {
msg = pe.err.Error()
}
// TODO: don't show control characters as literals? This may not show up
// well everywhere.
var (
lines = strings.Split(pe.input, "\n")
b = new(strings.Builder)
)
if pe.Position.Len == 1 {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
msg, pe.Position.Line, col+1)
pe.Message, pe.Position.Line, pe.Position.Col)
} else {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
msg, pe.Position.Line, col, col+pe.Position.Len)
pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
}
if pe.Position.Line > 2 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string {
diff := len(expanded) - len(lines[pe.Position.Line-1])
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
return b.String()
}
@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string {
return m
}
func (pe ParseError) column(lines []string) int {
var pos, col int
for i := range lines {
ll := len(lines[i]) + 1 // +1 for the removed newline
if pos+ll >= pe.Position.Start {
col = pe.Position.Start - pos
if col < 0 { // Should never happen, but just in case.
col = 0
}
break
}
pos += ll
}
return col
}
func expandTab(s string) string {
var (
b strings.Builder

View file

@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
func (lx *lexer) errorf(format string, values ...any) stateFn {
if lx.atEOF {
pos := lx.getPos()
if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
pos.Line--
}
pos.Len = 1
pos.Start = lx.pos - 1
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
lx.emit(itemKeyEnd)
return lexSkip(lx, lexValue)
default:
if r == '\n' {
return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
}
return lx.errorf("expected '.' or '=', but got %q instead", r)
}
}
@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
if r == eof {
return lx.errorf("unexpected EOF; expected value")
}
if r == '\n' {
return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
}
return lx.errorf("expected value but found %q instead", r)
}
@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
case 'x':
r = lx.peek()
if !isHex(r) {
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
}
return lexHexInteger
}
@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
func isBareKeyChar(r rune, tomlNext bool) bool {
if tomlNext {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-' ||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
(r >= 0x037f && r <= 0x1fff) ||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
(r >= 0x10000 && r <= 0xeffff)
}
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-'
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') || r == '_' || r == '-'
}

View file

@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string {
// Like append(), but only increase the cap by 1.
func (k Key) add(piece string) Key {
if cap(k) > len(k) {
return append(k, piece)
}
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece

View file

@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) {
// it anyway.
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
data = data[2:]
//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
data = data[3:]
}
@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) {
if i := strings.IndexRune(data[:ex], 0); i > -1 {
return nil, ParseError{
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
Position: Position{Line: 1, Start: i, Len: 1},
Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
Line: 1,
input: data,
}
@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) {
func (p *parser) panicErr(it item, err error) {
panic(ParseError{
Message: err.Error(),
err: err,
Position: it.pos,
Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) {
func (p *parser) panicItemf(it item, format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: it.pos,
Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) {
func (p *parser) panicf(format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: p.pos,
Position: p.pos.withCol(p.lx.input),
Line: p.pos.Line,
LastKey: p.current(),
})
@ -123,10 +123,11 @@ func (p *parser) next() item {
if it.typ == itemError {
if it.err != nil {
panic(ParseError{
Position: it.pos,
Message: it.err.Error(),
err: it.err,
Position: it.pos.withCol(p.lx.input),
Line: it.pos.Line,
LastKey: p.current(),
err: it.err,
})
}
@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool {
}
}
// isHexis a superset of all the permissable characters surrounding an
// isHex is a superset of all the permissible characters surrounding an
// underscore.
accept = isHex(r)
}

View file

@ -0,0 +1,5 @@
package common
func ToPtr[T any](x T) *T {
return &x
}

View file

@ -5,8 +5,8 @@ import (
"encoding/json"
"fmt"
"github.com/osbuild/blueprint/internal/common"
"github.com/osbuild/images/pkg/crypt"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/coreos/go-semver/semver"
iblueprint "github.com/osbuild/images/pkg/blueprint"
@ -19,14 +19,19 @@ type Blueprint struct {
Version string `json:"version,omitempty" toml:"version,omitempty"`
Packages []Package `json:"packages" toml:"packages"`
Modules []Package `json:"modules" toml:"modules"`
// Note: this is called "enabled modules" because "modules" already exists, but
// "modules" refers to packages, while "enabled modules" refers to modularity modules.
EnabledModules []EnabledModule `json:"enabled_modules" toml:"enabled_modules"`
Groups []Group `json:"groups" toml:"groups"`
Containers []Container `json:"containers,omitempty" toml:"containers,omitempty"`
Customizations *Customizations `json:"customizations,omitempty" toml:"customizations"`
Distro string `json:"distro" toml:"distro"`
Arch string `json:"architecture,omitempty" toml:"architecture,omitempty"`
// EXPERIMENTAL
Minimal bool `json:"minimal,omitempty" toml:"minimal,omitempty"`
}
type Change struct {
@ -55,7 +60,7 @@ type Group struct {
}
type Container struct {
Source string `json:"source,omitempty" toml:"source"`
Source string `json:"source" toml:"source"`
Name string `json:"name,omitempty" toml:"name,omitempty"`
TLSVerify *bool `json:"tls-verify,omitempty" toml:"tls-verify,omitempty"`
@ -349,14 +354,12 @@ func Convert(bp Blueprint) iblueprint.Blueprint {
}
if disk := c.Disk; disk != nil {
idisk := &iblueprint.DiskCustomization{
Type: disk.Type,
MinSize: disk.MinSize,
Partitions: make([]iblueprint.PartitionCustomization, len(disk.Partitions)),
}
for idx, part := range disk.Partitions {
ipart := iblueprint.PartitionCustomization{
Type: part.Type,
PartType: part.PartType,
MinSize: part.MinSize,
BtrfsVolumeCustomization: iblueprint.BtrfsVolumeCustomization{},
VGCustomization: iblueprint.VGCustomization{

View file

@ -3,8 +3,11 @@ package blueprint
import (
"fmt"
"reflect"
"slices"
"strings"
"github.com/osbuild/images/pkg/cert"
"github.com/osbuild/images/pkg/customizations/anaconda"
"github.com/osbuild/images/pkg/disk"
)
@ -33,6 +36,7 @@ type Customizations struct {
RPM *RPMCustomization `json:"rpm,omitempty" toml:"rpm,omitempty"`
RHSM *RHSMCustomization `json:"rhsm,omitempty" toml:"rhsm,omitempty"`
CACerts *CACustomization `json:"cacerts,omitempty" toml:"cacerts,omitempty"`
ContainersStorage *ContainerStorageCustomization `json:"containers-storage,omitempty" toml:"containers-storage,omitempty"`
}
type IgnitionCustomization struct {
@ -141,6 +145,13 @@ type CACustomization struct {
PEMCerts []string `json:"pem_certs,omitempty" toml:"pem_certs,omitempty"`
}
// Configures the container storage separately from the containers themselves, since
// the same storage path is most likely shared by all of the containers.
type ContainerStorageCustomization struct {
// destination is always `containers-storage`, so we won't expose this
StoragePath *string `json:"destination-path,omitempty" toml:"destination-path,omitempty"`
}
type CustomizationError struct {
Message string
}
@ -227,11 +238,11 @@ func (c *Customizations) GetTimezoneSettings() (*string, []string) {
}
func (c *Customizations) GetUsers() []UserCustomization {
if c == nil {
if c == nil || (c.User == nil && c.SSHKey == nil) {
return nil
}
users := []UserCustomization{}
var users []UserCustomization
// prepend sshkey for backwards compat (overridden by users)
if len(c.SSHKey) > 0 {
@ -268,20 +279,19 @@ func (c *Customizations) GetGroups() []GroupCustomization {
}
func (c *Customizations) GetKernel() *KernelCustomization {
var name string
var append string
var kernelName, kernelAppend string
if c != nil && c.Kernel != nil {
name = c.Kernel.Name
append = c.Kernel.Append
kernelName = c.Kernel.Name
kernelAppend = c.Kernel.Append
}
if name == "" {
name = "kernel"
if kernelName == "" {
kernelName = "kernel"
}
return &KernelCustomization{
Name: name,
Append: append,
Name: kernelName,
Append: kernelAppend,
}
}
@ -324,6 +334,17 @@ func (c *Customizations) GetFilesystemsMinSize() uint64 {
return agg
}
func (c *Customizations) GetPartitioning() (*DiskCustomization, error) {
if c == nil {
return nil, nil
}
if err := c.Disk.Validate(); err != nil {
return nil, err
}
return c.Disk, nil
}
// GetPartitioningMode converts the string to a disk.PartitioningMode type
func (c *Customizations) GetPartitioningMode() (disk.PartitioningMode, error) {
if c == nil {
@ -391,8 +412,8 @@ func (c *Customizations) GetRepositories() ([]RepositoryCustomization, error) {
return nil, nil
}
for idx := range c.Repositories {
err := validateCustomRepository(&c.Repositories[idx])
for _, repo := range c.Repositories {
err := validateCustomRepository(&repo)
if err != nil {
return nil, err
}
@ -407,3 +428,81 @@ func (c *Customizations) GetFIPS() bool {
}
return *c.FIPS
}
func (c *Customizations) GetContainerStorage() *ContainerStorageCustomization {
if c == nil || c.ContainersStorage == nil {
return nil
}
if *c.ContainersStorage.StoragePath == "" {
return nil
}
return c.ContainersStorage
}
func (c *Customizations) GetInstaller() (*InstallerCustomization, error) {
if c == nil || c.Installer == nil {
return nil, nil
}
// Validate conflicting customizations: Installer options aren't supported
// when the user adds their own kickstart content
if c.Installer.Kickstart != nil && len(c.Installer.Kickstart.Contents) > 0 {
if c.Installer.Unattended {
return nil, fmt.Errorf("installer.unattended is not supported when adding custom kickstart contents")
}
if len(c.Installer.SudoNopasswd) > 0 {
return nil, fmt.Errorf("installer.sudo-nopasswd is not supported when adding custom kickstart contents")
}
}
// Disabling the user module isn't supported when users or groups are
// defined
if c.Installer.Modules != nil &&
slices.Contains(c.Installer.Modules.Disable, anaconda.ModuleUsers) &&
len(c.User)+len(c.Group) > 0 {
return nil, fmt.Errorf("blueprint contains user or group customizations but disables the required Users Anaconda module")
}
return c.Installer, nil
}
func (c *Customizations) GetRPM() *RPMCustomization {
if c == nil {
return nil
}
return c.RPM
}
func (c *Customizations) GetRHSM() *RHSMCustomization {
if c == nil {
return nil
}
return c.RHSM
}
func (c *Customizations) checkCACerts() error {
if c == nil || c.CACerts == nil {
return nil
}
for _, bundle := range c.CACerts.PEMCerts {
_, err := cert.ParseCerts(bundle)
if err != nil {
return err
}
}
return nil
}
func (c *Customizations) GetCACerts() (*CACustomization, error) {
if c == nil {
return nil, nil
}
if err := c.checkCACerts(); err != nil {
return nil, err
}
return c.CACerts, nil
}
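
Editor's note: the getters in this file follow a nil-receiver convention — each is safe to call on a nil *Customizations and falls back to a sensible default, as the renamed kernelName/kernelAppend defaulting in GetKernel shows. A short sketch of that convention:

package main

import (
	"fmt"

	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	// A blueprint with no customizations at all: the getters are nil-safe.
	var c *blueprint.Customizations

	k := c.GetKernel()
	fmt.Println(k.Name) // "kernel" — the default when nothing is set

	disk, err := c.GetPartitioning()
	fmt.Println(disk, err) // <nil> <nil>
}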

View file

@ -0,0 +1,689 @@
package blueprint
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"path/filepath"
"regexp"
"slices"
"strings"
"github.com/google/uuid"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/pathpolicy"
)
type DiskCustomization struct {
// Type of the partition table: gpt or dos.
// Optional, the default depends on the distro and image type.
Type string `json:"type,omitempty" toml:"type,omitempty"`
MinSize uint64 `json:"minsize,omitempty" toml:"minsize,omitempty"`
Partitions []PartitionCustomization `json:"partitions,omitempty" toml:"partitions,omitempty"`
}
type diskCustomizationMarshaler struct {
Type string `json:"type,omitempty" toml:"type,omitempty"`
MinSize datasizes.Size `json:"minsize,omitempty" toml:"minsize,omitempty"`
Partitions []PartitionCustomization `json:"partitions,omitempty" toml:"partitions,omitempty"`
}
func (dc *DiskCustomization) UnmarshalJSON(data []byte) error {
var dcm diskCustomizationMarshaler
if err := json.Unmarshal(data, &dcm); err != nil {
return err
}
dc.Type = dcm.Type
dc.MinSize = dcm.MinSize.Uint64()
dc.Partitions = dcm.Partitions
return nil
}
func (dc *DiskCustomization) UnmarshalTOML(data any) error {
return unmarshalTOMLviaJSON(dc, data)
}
// PartitionCustomization defines a single partition on a disk. The Type
// defines the kind of "payload" for the partition: plain, lvm, or btrfs.
// - plain: the payload will be a filesystem on a partition (e.g. xfs, ext4).
// See [FilesystemTypedCustomization] for extra fields.
// - lvm: the payload will be an LVM volume group. See [VGCustomization] for
// extra fields
// - btrfs: the payload will be a btrfs volume. See
// [BtrfsVolumeCustomization] for extra fields.
type PartitionCustomization struct {
// The type of payload for the partition (optional, defaults to "plain").
Type string `json:"type,omitempty" toml:"type,omitempty"`
// Minimum size of the partition that contains the filesystem (for "plain"
// filesystem), volume group ("lvm"), or btrfs volume ("btrfs"). The final
// size of the partition will be larger than the minsize if the sum of the
// contained volumes (logical volumes or subvolumes) is larger. In
// addition, certain mountpoints have required minimum sizes. See
// https://osbuild.org/docs/user-guide/partitioning for more details.
// (optional, defaults depend on payload and mountpoints).
MinSize uint64 `json:"minsize" toml:"minsize"`
// The partition type GUID for GPT partitions. For DOS partitions, this
// field can be used to set the (2 hex digit) partition type.
// If not set, the type will be automatically set based on the mountpoint
// or the payload type.
PartType string `json:"part_type,omitempty" toml:"part_type,omitempty"`
BtrfsVolumeCustomization
VGCustomization
FilesystemTypedCustomization
}
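
Editor's note: as the comment above describes, the type field selects which embedded customization is meaningful. A hedged sketch of a blueprint combining a plain partition with an LVM volume group, validated through GetPartitioning from the customizations file above; the sizes and names are illustrative, and "customizations.disk" is assumed to map onto Customizations.Disk:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	const data = `
name = "disk-example"
version = "0.0.0"

[customizations.disk]
type = "gpt"

[[customizations.disk.partitions]]
type = "plain"
mountpoint = "/data"
fs_type = "xfs"
minsize = "2 GiB"

[[customizations.disk.partitions]]
type = "lvm"
name = "mainvg"
minsize = "10 GiB"

[[customizations.disk.partitions.logical_volumes]]
name = "rootlv"
mountpoint = "/"
fs_type = "ext4"
minsize = "8 GiB"
`
	var bp blueprint.Blueprint
	if err := toml.Unmarshal([]byte(data), &bp); err != nil {
		panic(err)
	}
	disk, err := bp.Customizations.GetPartitioning() // runs Validate()
	if err != nil {
		panic(err)
	}
	// Policy check on top of Validate(): at most one VG or btrfs volume.
	fmt.Println(disk.Type, len(disk.Partitions), disk.ValidateLayoutConstraints())
}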
// A filesystem on a plain partition or LVM logical volume.
// Note the differences from [FilesystemCustomization]:
// - Adds a label.
// - Adds a filesystem type (fs_type).
// - Does not define a size. The size is defined by its container: a
// partition ([PartitionCustomization]) or LVM logical volume
// ([LVCustomization]).
//
// Setting the FSType to "swap" creates a swap area (and the Mountpoint must be
// empty).
type FilesystemTypedCustomization struct {
// Note that it is marked omitempty because the fields of the embedded
// structs are optional in the scope of the [PartitionCustomization].
Mountpoint string `json:"mountpoint,omitempty" toml:"mountpoint,omitempty"`
// Filesystem label
Label string `json:"label,omitempty" toml:"label,omitempty"`
// Filesystem type (ext4, xfs, vfat)
FSType string `json:"fs_type,omitempty" toml:"fs_type,omitempty"`
}
// An LVM volume group with one or more logical volumes.
type VGCustomization struct {
// Volume group name (optional, default will be automatically generated).
Name string `json:"name,omitempty" toml:"name,omitempty"`
// One or more logical volumes for this volume group (required).
// Note that it is marked omitempty because the fields of the embedded
// structs are optional in the scope of the [PartitionCustomization].
LogicalVolumes []LVCustomization `json:"logical_volumes,omitempty" toml:"logical_volumes,omitempty"`
}
type LVCustomization struct {
// Logical volume name
Name string `json:"name,omitempty" toml:"name,omitempty"`
// Minimum size of the logical volume
MinSize uint64 `json:"minsize,omitempty" toml:"minsize,omitempty"`
FilesystemTypedCustomization
}
// Custom JSON unmarshaller for LVCustomization for handling the conversion of
// data sizes (minsize) expressed as strings to uint64.
func (lv *LVCustomization) UnmarshalJSON(data []byte) error {
var lvAnySize struct {
Name string `json:"name,omitempty" toml:"name,omitempty"`
MinSize any `json:"minsize,omitempty" toml:"minsize,omitempty"`
FilesystemTypedCustomization
}
if err := json.Unmarshal(data, &lvAnySize); err != nil {
return err
}
lv.Name = lvAnySize.Name
lv.FilesystemTypedCustomization = lvAnySize.FilesystemTypedCustomization
if lvAnySize.MinSize == nil {
return fmt.Errorf("minsize is required")
}
size, err := decodeSize(lvAnySize.MinSize)
if err != nil {
return err
}
lv.MinSize = size
return nil
}
// A btrfs volume consisting of one or more subvolumes.
type BtrfsVolumeCustomization struct {
Subvolumes []BtrfsSubvolumeCustomization `json:"subvolumes,omitempty" toml:"subvolumes,omitempty"`
}
type BtrfsSubvolumeCustomization struct {
// The name of the subvolume, which defines the location (path) on the
// root volume (required).
// See https://btrfs.readthedocs.io/en/latest/Subvolumes.html
// Note that it is marked omitempty because the fields of the embedded
// structs are optional in the scope of the [PartitionCustomization].
Name string `json:"name,omitempty" toml:"name,omitempty"`
// Mountpoint for the subvolume.
// Note that it is marked omitempty because the fields of the embedded
// structs are optional in the scope of the [PartitionCustomization].
Mountpoint string `json:"mountpoint,omitempty" toml:"mountpoint,omitempty"`
}
// Custom JSON unmarshaller that first reads the value of the "type" field and
// then deserialises the whole object into a struct that only contains the
// fields valid for that partition type. This ensures that no fields are set
// for the substructure of a different type than the one defined in the "type"
// fields.
func (v *PartitionCustomization) UnmarshalJSON(data []byte) error {
errPrefix := "JSON unmarshal:"
var typeSniffer struct {
Type string `json:"type"`
MinSize any `json:"minsize"`
PartType string `json:"part_type"`
}
if err := json.Unmarshal(data, &typeSniffer); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
partType := "plain"
if typeSniffer.Type != "" {
partType = typeSniffer.Type
}
switch partType {
case "plain":
if err := decodePlain(v, data); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "btrfs":
if err := decodeBtrfs(v, data); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "lvm":
if err := decodeLVM(v, data); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
default:
return fmt.Errorf("%s unknown partition type: %s", errPrefix, partType)
}
v.Type = partType
v.PartType = typeSniffer.PartType
if typeSniffer.MinSize == nil {
return fmt.Errorf("minsize is required")
}
minsize, err := decodeSize(typeSniffer.MinSize)
if err != nil {
return fmt.Errorf("%s error decoding minsize for partition: %w", errPrefix, err)
}
v.MinSize = minsize
return nil
}
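
Editor's note: because each payload type is decoded with DisallowUnknownFields, fields that belong to a different payload type are rejected rather than silently dropped. A short sketch of that behaviour; the values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	var p blueprint.PartitionCustomization

	// "subvolumes" belongs to btrfs, so it is rejected on a "plain" partition.
	bad := []byte(`{"type": "plain", "minsize": "1 GiB", "subvolumes": [{"name": "x"}]}`)
	fmt.Println(json.Unmarshal(bad, &p) != nil) // true

	// The same field is accepted once the type matches.
	good := []byte(`{"type": "btrfs", "minsize": "1 GiB", "subvolumes": [{"name": "x", "mountpoint": "/x"}]}`)
	fmt.Println(json.Unmarshal(good, &p)) // <nil>
}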
// decodePlain decodes the data into a struct that only embeds the
// FilesystemCustomization with DisallowUnknownFields. This ensures that when
// the type is "plain", none of the fields for btrfs or lvm are used.
func decodePlain(v *PartitionCustomization, data []byte) error {
var plain struct {
// Type, minsize, and part_type are handled by the caller. These are added here to
// satisfy "DisallowUnknownFields" when decoding.
Type string `json:"type"`
MinSize any `json:"minsize"`
PartType string `json:"part_type"`
FilesystemTypedCustomization
}
decoder := json.NewDecoder(bytes.NewReader(data))
decoder.DisallowUnknownFields()
err := decoder.Decode(&plain)
if err != nil {
return fmt.Errorf("error decoding partition with type \"plain\": %w", err)
}
v.FilesystemTypedCustomization = plain.FilesystemTypedCustomization
return nil
}
// decodeBtrfs decodes the data into a struct that only embeds the
// BtrfsVolumeCustomization with DisallowUnknownFields. This ensures that when
// the type is btrfs, none of the fields for plain or lvm are used.
func decodeBtrfs(v *PartitionCustomization, data []byte) error {
var btrfs struct {
// Type, minsize, and part_type are handled by the caller. These are added here to
// satisfy "DisallowUnknownFields" when decoding.
Type string `json:"type"`
MinSize any `json:"minsize"`
PartType string `json:"part_type"`
BtrfsVolumeCustomization
}
decoder := json.NewDecoder(bytes.NewReader(data))
decoder.DisallowUnknownFields()
err := decoder.Decode(&btrfs)
if err != nil {
return fmt.Errorf("error decoding partition with type \"btrfs\": %w", err)
}
v.BtrfsVolumeCustomization = btrfs.BtrfsVolumeCustomization
return nil
}
// decodeLVM decodes the data into a struct that only embeds the
// VGCustomization with DisallowUnknownFields. This ensures that when the type
// is lvm, none of the fields for plain or btrfs are used.
func decodeLVM(v *PartitionCustomization, data []byte) error {
var vg struct {
// Type, minsize, and part_type are handled by the caller. These are added here to
// satisfy "DisallowUnknownFields" when decoding.
Type string `json:"type"`
MinSize any `json:"minsize"`
PartType string `json:"part_type"`
VGCustomization
}
decoder := json.NewDecoder(bytes.NewReader(data))
decoder.DisallowUnknownFields()
if err := decoder.Decode(&vg); err != nil {
return fmt.Errorf("error decoding partition with type \"lvm\": %w", err)
}
v.VGCustomization = vg.VGCustomization
return nil
}
// Custom TOML unmarshaller that first reads the value of the "type" field and
// then deserialises the whole object into a struct that only contains the
// fields valid for that partition type. This ensures that no fields are set
// for the substructure of a different type than the one defined in the "type"
// fields.
func (v *PartitionCustomization) UnmarshalTOML(data any) error {
errPrefix := "TOML unmarshal:"
d, ok := data.(map[string]any)
if !ok {
return fmt.Errorf("%s customizations.partition is not an object", errPrefix)
}
partType := "plain"
if typeField, ok := d["type"]; ok {
typeStr, ok := typeField.(string)
if !ok {
return fmt.Errorf("%s type must be a string, got \"%v\" of type %T", errPrefix, typeField, typeField)
}
partType = typeStr
}
// serialise the data to JSON and reuse the subobject decoders
dataJSON, err := json.Marshal(data)
if err != nil {
return fmt.Errorf("%s error while decoding partition customization: %w", errPrefix, err)
}
switch partType {
case "plain":
if err := decodePlain(v, dataJSON); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "btrfs":
if err := decodeBtrfs(v, dataJSON); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "lvm":
if err := decodeLVM(v, dataJSON); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
default:
return fmt.Errorf("%s unknown partition type: %s", errPrefix, partType)
}
v.Type = partType
minsizeField, ok := d["minsize"]
if !ok {
return fmt.Errorf("minsize is required")
}
minsize, err := decodeSize(minsizeField)
if err != nil {
return fmt.Errorf("%s error decoding minsize for partition: %w", errPrefix, err)
}
v.MinSize = minsize
return nil
}
// Validate checks for customization combinations that are generally not
// supported or can create conflicts, regardless of specific distro or image
// type policies. The validator ensures all of the following properties:
// - All mountpoints are valid
// - All mountpoints are unique
// - All LVM volume group names are unique
// - All LVM logical volume names are unique within a given volume group
// - All btrfs subvolume names are unique within a given btrfs volume
// - All btrfs subvolume names are valid and non-empty
// - All filesystems are valid for their mountpoints (e.g. xfs or ext4 for /boot)
// - No LVM logical volume has an invalid mountpoint (/boot or /boot/efi)
// - Plain filesystem types are valid for the partition type
// - All non-empty properties are valid for the partition type (e.g.
// LogicalVolumes is empty when the type is "plain" or "btrfs")
// - Filesystems with FSType set to "swap" do not specify a mountpoint.
//
// Note that in *addition* consumers should also call
// ValidateLayoutConstraints() to validate that the policy for disk
// customizations is met.
func (p *DiskCustomization) Validate() error {
if p == nil {
return nil
}
switch p.Type {
case "gpt", "":
case "dos":
// dos/mbr only supports 4 partitions
// Unfortunately, at this stage it's unknown whether we will need extra
// partitions (bios boot, root, esp), so this check is just to catch
// obvious invalid customizations early. The final partition table is
// checked after it's created.
if len(p.Partitions) > 4 {
return fmt.Errorf("invalid partitioning customizations: \"dos\" partition table type only supports up to 4 partitions: got %d", len(p.Partitions))
}
default:
return fmt.Errorf("unknown partition table type: %s (valid: gpt, dos)", p.Type)
}
mountpoints := make(map[string]bool)
vgnames := make(map[string]bool)
var errs []error
for _, part := range p.Partitions {
if err := part.ValidatePartitionTypeID(p.Type); err != nil {
errs = append(errs, err)
}
switch part.Type {
case "plain", "":
errs = append(errs, part.validatePlain(mountpoints))
case "lvm":
errs = append(errs, part.validateLVM(mountpoints, vgnames))
case "btrfs":
errs = append(errs, part.validateBtrfs(mountpoints))
default:
errs = append(errs, fmt.Errorf("unknown partition type: %s", part.Type))
}
}
// will discard all nil errors
if err := errors.Join(errs...); err != nil {
return fmt.Errorf("invalid partitioning customizations:\n%w", err)
}
return nil
}
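
Editor's note: a quick sketch of the kind of conflict Validate catches, here a duplicate mountpoint across two plain partitions constructed directly rather than decoded; the mountpoint is illustrative:

package main

import (
	"fmt"

	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	dup := blueprint.FilesystemTypedCustomization{Mountpoint: "/data", FSType: "xfs"}
	disk := &blueprint.DiskCustomization{
		Partitions: []blueprint.PartitionCustomization{
			{FilesystemTypedCustomization: dup},
			{FilesystemTypedCustomization: dup}, // same mountpoint again
		},
	}
	// Reports a duplicate mountpoint "/data" in the partitioning customizations.
	fmt.Println(disk.Validate())
}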
func validateMountpoint(path string) error {
if path == "" {
return fmt.Errorf("mountpoint is empty")
}
if !strings.HasPrefix(path, "/") {
return fmt.Errorf("mountpoint %q is not an absolute path", path)
}
if cleanPath := filepath.Clean(path); path != cleanPath {
return fmt.Errorf("mountpoint %q is not a canonical path (did you mean %q?)", path, cleanPath)
}
return nil
}
// ValidateLayoutConstraints checks that at most one LVM Volume Group or btrfs
// volume is defined. Returns an error if both LVM and btrfs are set and if
// either has more than one element.
//
// Note that this is a *policy* validation: in theory the "disk" code
// supports these layouts, but we choose not to allow them for now.
// Each consumer of "DiskCustomization" should call this *unless* it
// is very low-level and not end-user-facing.
func (p *DiskCustomization) ValidateLayoutConstraints() error {
if p == nil {
return nil
}
var btrfsVols, lvmVGs uint
for _, part := range p.Partitions {
switch part.Type {
case "lvm":
lvmVGs++
case "btrfs":
btrfsVols++
}
if lvmVGs > 0 && btrfsVols > 0 {
return fmt.Errorf("btrfs and lvm partitioning cannot be combined")
}
}
if btrfsVols > 1 {
return fmt.Errorf("multiple btrfs volumes are not yet supported")
}
if lvmVGs > 1 {
return fmt.Errorf("multiple LVM volume groups are not yet supported")
}
return nil
}
// Check that the fs type is valid for the mountpoint.
func validateFilesystemType(path, fstype string) error {
badfsMsgFmt := "unsupported filesystem type for %q: %s"
switch path {
case "/boot":
switch fstype {
case "xfs", "ext4":
default:
return fmt.Errorf(badfsMsgFmt, path, fstype)
}
case "/boot/efi":
switch fstype {
case "vfat":
default:
return fmt.Errorf(badfsMsgFmt, path, fstype)
}
}
return nil
}
// These mountpoints must be on a plain partition (i.e. not on LVM or btrfs).
var plainOnlyMountpoints = []string{
"/boot",
"/boot/efi", // not allowed by our global policies, but that might change
}
var validPlainFSTypes = []string{
"ext4",
"vfat",
"xfs",
}
// exactly 2 hex digits
var validDosPartitionType = regexp.MustCompile(`^[0-9a-fA-F]{2}$`)
// ValidatePartitionTypeID returns an error if the partition type ID is not
// valid given the partition table type. If the partition table type is an
// empty string, the function returns an error only if the partition type ID is
// invalid for both gpt and dos partition tables.
func (p *PartitionCustomization) ValidatePartitionTypeID(ptType string) error {
// Empty PartType is fine, it will be selected automatically
if p.PartType == "" {
return nil
}
_, uuidErr := uuid.Parse(p.PartType)
validDosType := validDosPartitionType.MatchString(p.PartType)
switch ptType {
case "gpt":
if uuidErr != nil {
return fmt.Errorf("invalid partition part_type %q for partition table type %q (must be a valid UUID): %w", p.PartType, ptType, uuidErr)
}
case "dos":
if !validDosType {
return fmt.Errorf("invalid partition part_type %q for partition table type %q (must be a 2-digit hex number)", p.PartType, ptType)
}
case "":
// We don't know the partition table type yet, the fallback is controlled
// by the CustomPartitionTableOptions, so return an error if it fails both.
if uuidErr != nil && !validDosType {
return fmt.Errorf("invalid part_type %q: must be a valid UUID for GPT partition tables or a 2-digit hex number for DOS partition tables", p.PartType)
}
default:
// ignore: handled elsewhere
}
return nil
}
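
Editor's note: for instance, the same two-hex-digit value passes for a dos table but fails the UUID requirement of gpt; the value below is only illustrative:

package main

import (
	"fmt"

	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	p := blueprint.PartitionCustomization{PartType: "ef"}
	fmt.Println(p.ValidatePartitionTypeID("dos")) // <nil>: two hex digits are fine for dos
	fmt.Println(p.ValidatePartitionTypeID("gpt")) // error: not a valid UUID
}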
func (p *PartitionCustomization) validatePlain(mountpoints map[string]bool) error {
if p.FSType == "swap" {
// make sure the mountpoint is empty and return
if p.Mountpoint != "" {
return fmt.Errorf("mountpoint for swap partition must be empty (got %q)", p.Mountpoint)
}
return nil
}
if err := validateMountpoint(p.Mountpoint); err != nil {
return err
}
if mountpoints[p.Mountpoint] {
return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", p.Mountpoint)
}
// TODO: allow empty fstype with default from distro
if !slices.Contains(validPlainFSTypes, p.FSType) {
return fmt.Errorf("unknown or invalid filesystem type (fs_type) for mountpoint %q: %s", p.Mountpoint, p.FSType)
}
if err := validateFilesystemType(p.Mountpoint, p.FSType); err != nil {
return err
}
mountpoints[p.Mountpoint] = true
return nil
}
func (p *PartitionCustomization) validateLVM(mountpoints, vgnames map[string]bool) error {
if p.Name != "" && vgnames[p.Name] { // VGs with no name get autogenerated names
return fmt.Errorf("duplicate LVM volume group name %q in partitioning customizations", p.Name)
}
// check for invalid property usage
if len(p.Subvolumes) > 0 {
return fmt.Errorf("subvolumes defined for LVM volume group (partition type \"lvm\")")
}
if p.Label != "" {
return fmt.Errorf("label %q defined for LVM volume group (partition type \"lvm\")", p.Label)
}
vgnames[p.Name] = true
lvnames := make(map[string]bool)
for _, lv := range p.LogicalVolumes {
if lv.Name != "" && lvnames[lv.Name] { // LVs with no name get autogenerated names
return fmt.Errorf("duplicate LVM logical volume name %q in volume group %q in partitioning customizations", lv.Name, p.Name)
}
lvnames[lv.Name] = true
if lv.FSType == "swap" {
// make sure the mountpoint is empty and return
if lv.Mountpoint != "" {
return fmt.Errorf("mountpoint for swap logical volume with name %q in volume group %q must be empty", lv.Name, p.Name)
}
return nil
}
if err := validateMountpoint(lv.Mountpoint); err != nil {
return fmt.Errorf("invalid logical volume customization: %w", err)
}
if mountpoints[lv.Mountpoint] {
return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", lv.Mountpoint)
}
mountpoints[lv.Mountpoint] = true
if slices.Contains(plainOnlyMountpoints, lv.Mountpoint) {
return fmt.Errorf("invalid mountpoint %q for logical volume", lv.Mountpoint)
}
// TODO: allow empty fstype with default from distro
if !slices.Contains(validPlainFSTypes, lv.FSType) {
return fmt.Errorf("unknown or invalid filesystem type (fs_type) for logical volume with mountpoint %q: %s", lv.Mountpoint, lv.FSType)
}
}
return nil
}
func (p *PartitionCustomization) validateBtrfs(mountpoints map[string]bool) error {
if p.Mountpoint != "" {
return fmt.Errorf(`"mountpoint" is not supported for btrfs volumes (only subvolumes can have mountpoints)`)
}
if len(p.Subvolumes) == 0 {
return fmt.Errorf("btrfs volume requires subvolumes")
}
if len(p.LogicalVolumes) > 0 {
return fmt.Errorf("LVM logical volumes defined for btrfs volume (partition type \"btrfs\")")
}
subvolnames := make(map[string]bool)
for _, subvol := range p.Subvolumes {
if subvol.Name == "" {
return fmt.Errorf("btrfs subvolume with empty name in partitioning customizations")
}
if subvolnames[subvol.Name] {
return fmt.Errorf("duplicate btrfs subvolume name %q in partitioning customizations", subvol.Name)
}
subvolnames[subvol.Name] = true
if err := validateMountpoint(subvol.Mountpoint); err != nil {
return fmt.Errorf("invalid btrfs subvolume customization: %w", err)
}
if mountpoints[subvol.Mountpoint] {
return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", subvol.Mountpoint)
}
if slices.Contains(plainOnlyMountpoints, subvol.Mountpoint) {
return fmt.Errorf("invalid mountpoint %q for btrfs subvolume", subvol.Mountpoint)
}
mountpoints[subvol.Mountpoint] = true
}
return nil
}
// CheckDiskMountpointsPolicy checks if the mountpoints under a [DiskCustomization] are allowed by the policy.
func CheckDiskMountpointsPolicy(partitioning *DiskCustomization, mountpointAllowList *pathpolicy.PathPolicies) error {
if partitioning == nil {
return nil
}
// collect all mountpoints
var mountpoints []string
for _, part := range partitioning.Partitions {
if part.Mountpoint != "" {
mountpoints = append(mountpoints, part.Mountpoint)
}
for _, lv := range part.LogicalVolumes {
if lv.Mountpoint != "" {
mountpoints = append(mountpoints, lv.Mountpoint)
}
}
for _, subvol := range part.Subvolumes {
mountpoints = append(mountpoints, subvol.Mountpoint)
}
}
var errs []error
for _, mp := range mountpoints {
if err := mountpointAllowList.Check(mp); err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
return fmt.Errorf("The following errors occurred while setting up custom mountpoints:\n%w", errors.Join(errs...))
}
return nil
}
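A hedged sketch of the call site: the *pathpolicy.PathPolicies value is assumed to be supplied by the caller (its construction is not part of this change), and note that the collection loop above only gathers non-empty mountpoints, so swap partitions and swap logical volumes are never checked against the policy. The wrapper function name is made up.

package main

import (
	"fmt"

	"github.com/osbuild/blueprint/pkg/blueprint"
	"github.com/osbuild/images/pkg/pathpolicy"
)

// validateDiskPolicy rejects a disk customization whose mountpoints are not
// allowed by the given path policy.
func validateDiskPolicy(disk *blueprint.DiskCustomization, policy *pathpolicy.PathPolicies) error {
	if err := blueprint.CheckDiskMountpointsPolicy(disk, policy); err != nil {
		return fmt.Errorf("invalid disk customization: %w", err)
	}
	return nil
}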

View file

@ -4,7 +4,8 @@ import (
"encoding/json"
"fmt"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/pathpolicy"
)
type FilesystemCustomization struct {
@ -37,7 +38,7 @@ func (fsc *FilesystemCustomization) UnmarshalTOML(data interface{}) error {
case int64:
size = uint64(d["size"].(int64))
case string:
s, err := common.DataSizeToUint64(d["size"].(string))
s, err := datasizes.Parse(d["size"].(string))
if err != nil {
return fmt.Errorf("TOML unmarshal: size is not valid filesystem size (%w)", err)
}
@ -52,7 +53,7 @@ func (fsc *FilesystemCustomization) UnmarshalTOML(data interface{}) error {
case int64:
minsize = uint64(d["minsize"].(int64))
case string:
s, err := common.DataSizeToUint64(d["minsize"].(string))
s, err := datasizes.Parse(d["minsize"].(string))
if err != nil {
return fmt.Errorf("TOML unmarshal: minsize is not valid filesystem size (%w)", err)
}
@ -104,7 +105,7 @@ func (fsc *FilesystemCustomization) UnmarshalJSON(data []byte) error {
// Note that it uses different key than the TOML version
fsc.MinSize = uint64(d["minsize"].(float64))
case string:
size, err := common.DataSizeToUint64(d["minsize"].(string))
size, err := datasizes.Parse(d["minsize"].(string))
if err != nil {
return fmt.Errorf("JSON unmarshal: size is not valid filesystem size (%w)", err)
}
@ -115,3 +116,44 @@ func (fsc *FilesystemCustomization) UnmarshalJSON(data []byte) error {
return nil
}
// decodeSize takes an integer or a string representing a data size (with a
// data size suffix) and returns the uint64 representation.
func decodeSize(size any) (uint64, error) {
switch s := size.(type) {
case string:
return datasizes.Parse(s)
case int64:
if s < 0 {
return 0, fmt.Errorf("cannot be negative")
}
return uint64(s), nil
case float64:
if s < 0 {
return 0, fmt.Errorf("cannot be negative")
}
// TODO: emit warning of possible truncation?
return uint64(s), nil
case uint64:
return s, nil
default:
return 0, fmt.Errorf("failed to convert value \"%v\" to number", size)
}
}
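For illustration, a sketch of the three input shapes decodeSize accepts, written as if inside this package (the function name decodeSizeExamples is hypothetical). The unit string is handed to datasizes.Parse, so the exact accepted spellings come from that package; "1 MiB" is assumed to parse to 1048576.

func decodeSizeExamples() {
	for _, v := range []any{int64(1048576), float64(1048576), "1 MiB"} {
		n, err := decodeSize(v)
		fmt.Println(n, err) // expected: 1048576 <nil> for each input
	}
	// Negative numbers and unsupported types are rejected.
	if _, err := decodeSize(int64(-1)); err != nil {
		fmt.Println("negative sizes are an error:", err)
	}
}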
// CheckMountpointsPolicy checks if the mountpoints are allowed by the policy
func CheckMountpointsPolicy(mountpoints []FilesystemCustomization, mountpointAllowList *pathpolicy.PathPolicies) error {
invalidMountpoints := []string{}
for _, m := range mountpoints {
err := mountpointAllowList.Check(m.Mountpoint)
if err != nil {
invalidMountpoints = append(invalidMountpoints, m.Mountpoint)
}
}
if len(invalidMountpoints) > 0 {
return fmt.Errorf("The following custom mountpoints are not supported %+q", invalidMountpoints)
}
return nil
}

View file

@ -4,11 +4,15 @@ import (
"encoding/json"
"fmt"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/fsnode"
"github.com/osbuild/blueprint/internal/common"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/pathpolicy"
)
// validateModeString checks that the given string is a valid mode octal number
@ -334,3 +338,138 @@ func FileCustomizationsToFsNodeFiles(files []FileCustomization) ([]*fsnode.File,
return fsFiles, nil
}
// ValidateDirFileCustomizations validates the given Directory and File customizations.
// If the customizations are invalid, an error is returned. Otherwise, nil is returned.
//
// It currently ensures that:
// - No file path is a prefix of another file or directory path
// - There are no duplicate file or directory paths in the customizations
func ValidateDirFileCustomizations(dirs []DirectoryCustomization, files []FileCustomization) error {
fsNodesMap := make(map[string]interface{}, len(dirs)+len(files))
nodesPaths := make([]string, 0, len(dirs)+len(files))
// First check for duplicate paths
duplicatePaths := make([]string, 0)
for _, dir := range dirs {
if _, ok := fsNodesMap[dir.Path]; ok {
duplicatePaths = append(duplicatePaths, dir.Path)
}
fsNodesMap[dir.Path] = dir
nodesPaths = append(nodesPaths, dir.Path)
}
for _, file := range files {
if _, ok := fsNodesMap[file.Path]; ok {
duplicatePaths = append(duplicatePaths, file.Path)
}
fsNodesMap[file.Path] = file
nodesPaths = append(nodesPaths, file.Path)
}
// There is no point in continuing if there are duplicate paths,
// since the fsNodesMap will not be valid.
if len(duplicatePaths) > 0 {
return fmt.Errorf("duplicate files / directory customization paths: %v", duplicatePaths)
}
invalidFSNodes := make([]string, 0)
checkedPaths := make(map[string]bool)
// Sort the paths so that we always check the longest paths first. This
// ensures that we don't check a parent path before we check the child
// path. Reverse sort the slice based on directory depth.
sort.Slice(nodesPaths, func(i, j int) bool {
return strings.Count(nodesPaths[i], "/") > strings.Count(nodesPaths[j], "/")
})
for _, nodePath := range nodesPaths {
// Skip paths that we have already checked
if checkedPaths[nodePath] {
continue
}
// Check all parent paths of the current path. If any of them have
// already been checked, then we do not need to check them again.
// This is because we always check the longest paths first. If a parent
// path exists in the filesystem nodes map and it is a File,
// then it is an error because it is a parent of a Directory or File.
// Parent paths can be only Directories.
parentPath := nodePath
for {
parentPath = path.Dir(parentPath)
// "." is returned only when the path is relative and we reached
// the root directory. This should never happen because File
// and Directory customization paths are validated as part of
// the unmarshalling process from JSON and TOML.
if parentPath == "." {
panic("filesystem node has relative path set.")
}
if parentPath == "/" {
break
}
if checkedPaths[parentPath] {
break
}
// If the node is not a Directory, then it is an error because
// it is a parent of a Directory or File.
if node, ok := fsNodesMap[parentPath]; ok {
switch node.(type) {
case DirectoryCustomization:
break
case FileCustomization:
invalidFSNodes = append(invalidFSNodes, nodePath)
default:
panic(fmt.Sprintf("unexpected filesystem node customization type: %T", node))
}
}
checkedPaths[parentPath] = true
}
checkedPaths[nodePath] = true
}
if len(invalidFSNodes) > 0 {
return fmt.Errorf("the following filesystem nodes are parents of another node and are not directories: %s", invalidFSNodes)
}
return nil
}
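A small sketch of the rule this enforces: a path declared as a file that is also the parent of another customization path is rejected. The paths used here are made up.

package main

import (
	"fmt"

	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	dirs := []blueprint.DirectoryCustomization{{Path: "/etc/myapp/conf.d"}}
	files := []blueprint.FileCustomization{{Path: "/etc/myapp"}}
	// "/etc/myapp" is declared as a file but is a parent of the directory
	// above, so validation returns an error.
	fmt.Println(blueprint.ValidateDirFileCustomizations(dirs, files))
}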
// CheckFileCustomizationsPolicy checks if the given File customizations are allowed by the path policy.
// If any of the customizations are not allowed by the path policy, an error is returned. Otherwise, nil is returned.
func CheckFileCustomizationsPolicy(files []FileCustomization, pathPolicy *pathpolicy.PathPolicies) error {
var invalidPaths []string
for _, file := range files {
if err := pathPolicy.Check(file.Path); err != nil {
invalidPaths = append(invalidPaths, file.Path)
}
}
if len(invalidPaths) > 0 {
return fmt.Errorf("the following custom files are not allowed: %+q", invalidPaths)
}
return nil
}
// CheckDirectoryCustomizationsPolicy checks if the given Directory customizations are allowed by the path policy.
// If any of the customizations are not allowed by the path policy, an error is returned. Otherwise, nil is returned.
func CheckDirectoryCustomizationsPolicy(dirs []DirectoryCustomization, pathPolicy *pathpolicy.PathPolicies) error {
var invalidPaths []string
for _, dir := range dirs {
if err := pathPolicy.Check(dir.Path); err != nil {
invalidPaths = append(invalidPaths, dir.Path)
}
}
if len(invalidPaths) > 0 {
return fmt.Errorf("the following custom directories are not allowed: %+q", invalidPaths)
}
return nil
}

View file

@ -5,6 +5,10 @@ import (
"net/url"
"regexp"
"strings"
"github.com/osbuild/blueprint/internal/common"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/rpmmd"
)
type RepositoryCustomization struct {
@ -75,3 +79,76 @@ func (rc *RepositoryCustomization) getFilename() string {
}
return rc.Filename
}
func RepoCustomizationsInstallFromOnly(repos []RepositoryCustomization) []rpmmd.RepoConfig {
var res []rpmmd.RepoConfig
for _, repo := range repos {
if !repo.InstallFrom {
continue
}
res = append(res, repo.customRepoToRepoConfig())
}
return res
}
func RepoCustomizationsToRepoConfigAndGPGKeyFiles(repos []RepositoryCustomization) (map[string][]rpmmd.RepoConfig, []*fsnode.File, error) {
if len(repos) == 0 {
return nil, nil, nil
}
repoMap := make(map[string][]rpmmd.RepoConfig, len(repos))
var gpgKeyFiles []*fsnode.File
for _, repo := range repos {
filename := repo.getFilename()
convertedRepo := repo.customRepoToRepoConfig()
// convert any inline gpgkeys to fsnode.File and
// replace the gpgkey with the file path
for idx, gpgkey := range repo.GPGKeys {
if _, err := url.ParseRequestURI(gpgkey); err != nil {
// create the file path
path := fmt.Sprintf("/etc/pki/rpm-gpg/RPM-GPG-KEY-%s-%d", repo.Id, idx)
// replace the gpgkey with the file path
convertedRepo.GPGKeys[idx] = fmt.Sprintf("file://%s", path)
// create the fsnode for the gpgkey keyFile
keyFile, err := fsnode.NewFile(path, nil, nil, nil, []byte(gpgkey))
if err != nil {
return nil, nil, err
}
gpgKeyFiles = append(gpgKeyFiles, keyFile)
}
}
repoMap[filename] = append(repoMap[filename], convertedRepo)
}
return repoMap, gpgKeyFiles, nil
}
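A hedged sketch of the inline-key handling described above: a GPG key that is not a URL becomes a file under /etc/pki/rpm-gpg/ and the returned repo config points at it via a file:// URL. The repository values below are made up; only the fields shown in this change are set.

package main

import (
	"fmt"

	"github.com/osbuild/blueprint/pkg/blueprint"
)

func main() {
	repos := []blueprint.RepositoryCustomization{{
		Id:       "custom",
		BaseURLs: []string{"https://example.com/repo"},
		GPGKeys:  []string{"-----BEGIN PGP PUBLIC KEY BLOCK-----\n..."},
	}}
	repoMap, keyFiles, err := blueprint.RepoCustomizationsToRepoConfigAndGPGKeyFiles(repos)
	// The inline key is replaced by file:///etc/pki/rpm-gpg/RPM-GPG-KEY-custom-0
	// in the repo config, and keyFiles carries the key content as an fsnode.File.
	fmt.Println(len(repoMap), len(keyFiles), err)
}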
func (repo RepositoryCustomization) customRepoToRepoConfig() rpmmd.RepoConfig {
urls := make([]string, len(repo.BaseURLs))
copy(urls, repo.BaseURLs)
keys := make([]string, len(repo.GPGKeys))
copy(keys, repo.GPGKeys)
repoConfig := rpmmd.RepoConfig{
Id: repo.Id,
BaseURLs: urls,
GPGKeys: keys,
Name: repo.Name,
Metalink: repo.Metalink,
MirrorList: repo.Mirrorlist,
CheckGPG: repo.GPGCheck,
CheckRepoGPG: repo.RepoGPGCheck,
Priority: repo.Priority,
ModuleHotfixes: repo.ModuleHotfixes,
Enabled: repo.Enabled,
}
if repo.SSLVerify != nil {
repoConfig.IgnoreSSL = common.ToPtr(!*repo.SSLVerify)
}
return repoConfig
}

View file

@ -3,6 +3,7 @@ package blueprint
// Subscription Manager [rhsm] configuration
type SubManRHSMConfig struct {
ManageRepos *bool `json:"manage_repos,omitempty" toml:"manage_repos,omitempty"`
AutoEnableYumPlugins *bool `json:"auto_enable_yum_plugins,omitempty" toml:"auto_enable_yum_plugins,omitempty"`
}
// Subscription Manager [rhsmcertd] configuration

View file

@ -0,0 +1,24 @@
package blueprint
import (
"encoding/json"
"fmt"
)
// XXX: move to internal/common ?
func unmarshalTOMLviaJSON(u json.Unmarshaler, data any) error {
// This is the most efficient way to reuse code when unmarshaling
// structs in TOML. It leaks JSON errors, which is a bit sad, but
// because the TOML unmarshaler hands us a pre-processed "any"
// instead of a "[]byte", we cannot simply unmarshal into our
// "fooMarshaling" struct and reuse the result, so we resort to
// this workaround (TOML will go away long term anyway).
dataJSON, err := json.Marshal(data)
if err != nil {
return fmt.Errorf("error unmarshaling TOML data %v: %w", data, err)
}
if err := u.UnmarshalJSON(dataJSON); err != nil {
return fmt.Errorf("error decoding TOML %v: %w", data, err)
}
return nil
}
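To show the intended pattern (written as if inside this package; exampleCustomization is a hypothetical type): implement UnmarshalJSON once and forward TOML decoding through the helper.

type exampleCustomization struct {
	Name string `json:"name" toml:"name"`
}

func (e *exampleCustomization) UnmarshalJSON(data []byte) error {
	type alias exampleCustomization
	var a alias
	if err := json.Unmarshal(data, &a); err != nil {
		return err
	}
	*e = exampleCustomization(a)
	return nil
}

// The TOML decoder hands us an "any"; reuse the JSON path via the helper.
func (e *exampleCustomization) UnmarshalTOML(data any) error {
	return unmarshalTOMLviaJSON(e, data)
}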

View file

@ -15,6 +15,7 @@ type ImageOptions struct {
BaseUrl string `json:"base_url"`
Insights bool `json:"insights"`
Rhc bool `json:"rhc"`
Proxy string `json:"proxy"`
}
type RHSMStatus string

View file

@ -11,10 +11,10 @@ import (
const DefaultBtrfsCompression = "zstd:1"
type Btrfs struct {
UUID string
Label string
Mountpoint string
Subvolumes []BtrfsSubvolume
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
Label string `json:"label,omitempty" yaml:"label,omitempty"`
Mountpoint string `json:"mountpoint,omitempty" yaml:"mountpoint,omitempty"`
Subvolumes []BtrfsSubvolume `json:"subvolumes,omitempty" yaml:"subvolumes,omitempty"`
}
func init() {
@ -107,15 +107,15 @@ func (b *Btrfs) minSize(size uint64) uint64 {
}
type BtrfsSubvolume struct {
Name string
Size uint64
Mountpoint string
GroupID uint64
Compress string
ReadOnly bool
Name string `json:"name" yaml:"name"`
Size uint64 `json:"size" yaml:"size"`
Mountpoint string `json:"mountpoint,omitempty" yaml:"mountpoint,omitempty"`
GroupID uint64 `json:"group_id,omitempty" yaml:"group_id,omitempty"`
Compress string `json:"compress,omitempty" yaml:"compress,omitempty"`
ReadOnly bool `json:"read_only,omitempty" yaml:"read_only,omitempty"`
// UUID of the parent volume
UUID string
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
}
func (bs *BtrfsSubvolume) Clone() Entity {

View file

@ -24,11 +24,11 @@ import (
"io"
"math/rand"
"reflect"
"slices"
"strings"
"slices"
"github.com/google/uuid"
"github.com/osbuild/images/pkg/arch"
)
@ -248,6 +248,10 @@ func (t PartitionTableType) MarshalJSON() ([]byte, error) {
return json.Marshal(t.String())
}
func (t PartitionTableType) MarshalYAML() (interface{}, error) {
return t.String(), nil
}
func (t *PartitionTableType) UnmarshalJSON(data []byte) error {
var s string
if err := json.Unmarshal(data, &s); err != nil {

View file

@ -9,19 +9,19 @@ import (
// Filesystem related functions
type Filesystem struct {
Type string `json:"type"`
Type string `json:"type" yaml:"type"`
// ID of the filesystem, vfat doesn't use traditional UUIDs, therefore this
// is just a string.
UUID string `json:"uuid,omitempty"`
Label string `json:"label,omitempty"`
Mountpoint string `json:"mountpoint,omitempty"`
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
Label string `json:"label,omitempty" yaml:"label,omitempty"`
Mountpoint string `json:"mountpoint,omitempty" yaml:"mountpoint,omitempty"`
// The fourth field of fstab(5); fs_mntops
FSTabOptions string `json:"fstab_options,omitempty"`
FSTabOptions string `json:"fstab_options,omitempty" yaml:"fstab_options,omitempty"`
// The fifth field of fstab(5); fs_freq
FSTabFreq uint64 `json:"fstab_freq,omitempty"`
FSTabFreq uint64 `json:"fstab_freq,omitempty" yaml:"fstab_freq,omitempty"`
// The sixth field of fstab(5); fs_passno
FSTabPassNo uint64 `json:"fstab_passno,omitempty"`
FSTabPassNo uint64 `json:"fstab_passno,omitempty" yaml:"fstab_passno,omitempty"`
}
func init() {

View file

@ -1,6 +1,7 @@
package disk
import (
"encoding/json"
"fmt"
"math/rand"
"reflect"
@ -13,41 +14,41 @@ import (
// Argon2id defines parameters for the key derivation function for LUKS.
type Argon2id struct {
// Number of iterations to perform.
Iterations uint
Iterations uint `json:"iterations,omitempty" yaml:"iterations,omitempty"`
// Amount of memory to use (in KiB).
Memory uint
Memory uint `json:"memory,omitempty" yaml:"memory,omitempty"`
// Degree of parallelism (i.e. number of threads).
Parallelism uint
Parallelism uint `json:"parallelism,omitempty" yaml:"parallelism,omitempty"`
}
// ClevisBind defines parameters for binding a LUKS device with a given policy.
type ClevisBind struct {
Pin string
Policy string
Pin string `json:"pin,omitempty" yaml:"pin,omitempty"`
Policy string `json:"policy,omitempty" yaml:"policy,omitempty"`
// If enabled, the passphrase will be removed from the LUKS device at the
// end of the build (using the org.osbuild.luks2.remove-key stage).
RemovePassphrase bool
RemovePassphrase bool `json:"remove_passphrase,omitempty" yaml:"remove_passphrase,omitempty"`
}
// LUKSContainer represents a LUKS encrypted volume.
type LUKSContainer struct {
Passphrase string
UUID string
Cipher string
Label string
Subsystem string
SectorSize uint64
Passphrase string `json:"passphrase,omitempty" yaml:"passphrase,omitempty"`
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
Cipher string `json:"cipher,omitempty" yaml:"cipher,omitempty"`
Label string `json:"label,omitempty" yaml:"label,omitempty"`
Subsystem string `json:"subsystem,omitempty" yaml:"subsystem,omitempty"`
SectorSize uint64 `json:"sector_size,omitempty" yaml:"sector_size,omitempty"`
// The password-based key derivation function's parameters.
PBKDF Argon2id
PBKDF Argon2id `json:"pbkdf,omitempty" yaml:"pbkdf,omitempty"`
// Parameters for binding the LUKS device.
Clevis *ClevisBind
Clevis *ClevisBind `json:"clevis,omitempty" yaml:"clevis,omitempty"`
Payload Entity
Payload Entity `json:"payload,omitempty" yaml:"payload,omitempty"`
}
func init() {
@ -131,3 +132,24 @@ func (lc *LUKSContainer) minSize(size uint64) uint64 {
}
return minSize
}
func (lc *LUKSContainer) UnmarshalJSON(data []byte) (err error) {
// keep in sync with lvm.go,partition.go,luks.go
type alias LUKSContainer
var withoutPayload struct {
alias
Payload json.RawMessage `json:"payload" yaml:"payload"`
PayloadType string `json:"payload_type" yaml:"payload_type"`
}
if err := jsonUnmarshalStrict(data, &withoutPayload); err != nil {
return fmt.Errorf("cannot unmarshal %q: %w", data, err)
}
*lc = LUKSContainer(withoutPayload.alias)
lc.Payload, err = unmarshalJSONPayload(data)
return err
}
func (lc *LUKSContainer) UnmarshalYAML(unmarshal func(any) error) error {
return unmarshalYAMLviaJSON(lc, unmarshal)
}

View file

@ -1,6 +1,7 @@
package disk
import (
"encoding/json"
"fmt"
"reflect"
"strings"
@ -13,10 +14,10 @@ import (
const LVMDefaultExtentSize = 4 * datasizes.MebiByte
type LVMVolumeGroup struct {
Name string
Description string
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Description string `json:"description,omitempty" yaml:"description,omitempty"`
LogicalVolumes []LVMLogicalVolume
LogicalVolumes []LVMLogicalVolume `json:"logical_volumes,omitempty" yaml:"logical_volumes,omitempty"`
}
func init() {
@ -174,10 +175,20 @@ func (vg *LVMVolumeGroup) minSize(size uint64) uint64 {
return vg.AlignUp(size)
}
func (vg *LVMVolumeGroup) UnmarshalJSON(data []byte) error {
type alias LVMVolumeGroup
var tmp alias
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*vg = LVMVolumeGroup(tmp)
return nil
}
type LVMLogicalVolume struct {
Name string
Size uint64
Payload Entity
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Size uint64 `json:"size,omitempty" yaml:"size,omitempty"`
Payload Entity `json:"payload,omitempty" yaml:"payload,omitempty"`
}
func (lv *LVMLogicalVolume) Clone() Entity {
@ -232,3 +243,24 @@ func lvname(path string) string {
path = strings.TrimLeft(path, "/")
return strings.ReplaceAll(path, "/", "_") + "lv"
}
func (lv *LVMLogicalVolume) UnmarshalJSON(data []byte) (err error) {
// keep in sync with lvm.go,partition.go,luks.go
type alias LVMLogicalVolume
var withoutPayload struct {
alias
Payload json.RawMessage `json:"payload" yaml:"payload"`
PayloadType string `json:"payload_type" yaml:"payload_type"`
}
if err := jsonUnmarshalStrict(data, &withoutPayload); err != nil {
return fmt.Errorf("cannot unmarshal %q: %w", data, err)
}
*lv = LVMLogicalVolume(withoutPayload.alias)
lv.Payload, err = unmarshalJSONPayload(data)
return err
}
func (lv *LVMLogicalVolume) UnmarshalYAML(unmarshal func(any) error) error {
return unmarshalYAMLviaJSON(lv, unmarshal)
}

View file

@ -1,28 +1,26 @@
package disk
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
)
type Partition struct {
// Start of the partition in bytes
Start uint64 `json:"start"`
Start uint64 `json:"start,omitempty" yaml:"start,omitempty"`
// Size of the partition in bytes
Size uint64 `json:"size"`
Size uint64 `json:"size" yaml:"size"`
// Partition type, e.g. 0x83 for MBR or a UUID for gpt
Type string `json:"type,omitempty"`
Type string `json:"type,omitempty" yaml:"type,omitempty"`
// `Legacy BIOS bootable` (GPT) or `active` (DOS) flag
Bootable bool `json:"bootable,omitempty"`
Bootable bool `json:"bootable,omitempty" yaml:"bootable,omitempty"`
// ID of the partition, dos doesn't use traditional UUIDs, therefore this
// is just a string.
UUID string `json:"uuid,omitempty"`
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
// If nil, the partition is raw; It doesn't contain a payload.
Payload PayloadEntity `json:"payload,omitempty"`
Payload PayloadEntity `json:"payload,omitempty" yaml:"payload,omitempty"`
}
func (p *Partition) Clone() Entity {
@ -105,14 +103,14 @@ func (p *Partition) IsPReP() bool {
func (p *Partition) MarshalJSON() ([]byte, error) {
type partAlias Partition
entityName := "no-payload"
var entityName string
if p.Payload != nil {
entityName = p.Payload.EntityName()
}
partWithPayloadType := struct {
partAlias
PayloadType string `json:"payload_type,omitempty"`
PayloadType string `json:"payload_type,omitempty" yaml:"payload_type,omitempty"`
}{
partAlias(*p),
entityName,
@ -121,36 +119,23 @@ func (p *Partition) MarshalJSON() ([]byte, error) {
return json.Marshal(partWithPayloadType)
}
func (p *Partition) UnmarshalJSON(data []byte) error {
type partAlias Partition
var partWithoutPayload struct {
partAlias
Payload json.RawMessage `json:"payload"`
PayloadType string `json:"payload_type,omitempty"`
func (p *Partition) UnmarshalJSON(data []byte) (err error) {
// keep in sync with lvm.go,partition.go,luks.go
type alias Partition
var withoutPayload struct {
alias
Payload json.RawMessage `json:"payload" yaml:"payload"`
PayloadType string `json:"payload_type" yaml:"payload_type"`
}
if err := jsonUnmarshalStrict(data, &withoutPayload); err != nil {
return fmt.Errorf("cannot unmarshal %q: %w", data, err)
}
*p = Partition(withoutPayload.alias)
dec := json.NewDecoder(bytes.NewBuffer(data))
if err := dec.Decode(&partWithoutPayload); err != nil {
return fmt.Errorf("cannot build partition from %q: %w", data, err)
}
*p = Partition(partWithoutPayload.partAlias)
// no payload, e.g. bios partiton
if partWithoutPayload.PayloadType == "no-payload" {
return nil
}
entType := payloadEntityMap[partWithoutPayload.PayloadType]
if entType == nil {
return fmt.Errorf("cannot build partition from %q: unknown payload %q", data, partWithoutPayload.PayloadType)
}
entValP := reflect.New(entType).Elem().Addr()
ent := entValP.Interface()
if err := json.Unmarshal(partWithoutPayload.Payload, &ent); err != nil {
p.Payload, err = unmarshalJSONPayload(data)
return err
}
p.Payload = ent.(PayloadEntity)
return nil
}
func (t *Partition) UnmarshalYAML(unmarshal func(any) error) error {
return unmarshalYAMLviaJSON(t, unmarshal)
}

View file

@ -15,19 +15,19 @@ import (
type PartitionTable struct {
// Size of the disk (in bytes).
Size uint64 `json:"size"`
Size uint64 `json:"size,omitempty" yaml:"size,omitempty"`
// Unique identifier of the partition table (GPT only).
UUID string `json:"uuid,omitempty"`
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
// Partition table type, e.g. dos, gpt.
Type PartitionTableType `json:"type"`
Partitions []Partition `json:"partitions"`
Type PartitionTableType `json:"type" yaml:"type"`
Partitions []Partition `json:"partitions" yaml:"partitions"`
// Sector size in bytes
SectorSize uint64 `json:"sector_size,omitempty"`
SectorSize uint64 `json:"sector_size,omitempty" yaml:"sector_size,omitempty"`
// Extra space at the end of the partition table (sectors)
ExtraPadding uint64 `json:"extra_padding,omitempty"`
ExtraPadding uint64 `json:"extra_padding,omitempty" yaml:"extra_padding,omitempty"`
// Starting offset of the first partition in the table (Mb)
StartOffset uint64 `json:"start_offset,omitempty"`
StartOffset uint64 `json:"start_offset,omitempty" yaml:"start_offset,omitempty"`
}
type PartitioningMode string

View file

@ -1,8 +1,10 @@
package disk
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
)
// unmarshalYAMLviaJSON unmarshals via the JSON interface, this avoids code
@ -22,3 +24,35 @@ func unmarshalYAMLviaJSON(u json.Unmarshaler, unmarshal func(any) error) error {
}
return nil
}
func unmarshalJSONPayload(data []byte) (PayloadEntity, error) {
var payload struct {
Payload json.RawMessage `json:"payload"`
PayloadType string `json:"payload_type,omitempty"`
}
if err := json.Unmarshal(data, &payload); err != nil {
return nil, fmt.Errorf("cannot peek payload: %w", err)
}
if payload.PayloadType == "" {
if len(payload.Payload) > 0 {
return nil, fmt.Errorf("cannot build payload: empty payload type but payload is: %q", payload.Payload)
}
return nil, nil
}
entType := payloadEntityMap[payload.PayloadType]
if entType == nil {
return nil, fmt.Errorf("cannot build payload from %q: unknown payload type %q", data, payload.PayloadType)
}
entValP := reflect.New(entType).Elem().Addr()
ent := entValP.Interface()
if err := jsonUnmarshalStrict(payload.Payload, &ent); err != nil {
return nil, fmt.Errorf("cannot decode payload for %q: %w", data, err)
}
return ent.(PayloadEntity), nil
}
func jsonUnmarshalStrict(data []byte, v any) error {
dec := json.NewDecoder(bytes.NewBuffer(data))
dec.DisallowUnknownFields()
return dec.Decode(&v)
}
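A short sketch of what these helpers enable: the payload_type written by MarshalJSON selects the concrete PayloadEntity on the way back in. The JSON document below is made up but uses only fields and the "filesystem" payload type shown elsewhere in this change.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/osbuild/images/pkg/disk"
)

func main() {
	data := []byte(`{
		"size": 2147483648,
		"type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
		"payload_type": "filesystem",
		"payload": {"type": "ext4", "label": "root", "mountpoint": "/"}
	}`)
	var part disk.Partition
	if err := json.Unmarshal(data, &part); err != nil {
		panic(err)
	}
	// "payload_type" selected *disk.Filesystem via the entity map, and the
	// payload body was decoded strictly (unknown fields are an error).
	fs := part.Payload.(*disk.Filesystem)
	fmt.Println(fs.Mountpoint) // "/"
}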

View file

@ -12,8 +12,269 @@
- "geolite2-country"
- "plymouth"
partitioning:
ids:
- &prep_partition_dosid "41"
- &filesystem_linux_dosid "83"
- &fat16_bdosid "06"
guids:
- &bios_boot_partition_guid "21686148-6449-6E6F-744E-656564454649"
- &efi_system_partition_guid "C12A7328-F81F-11D2-BA4B-00A0C93EC93B"
- &filesystem_data_guid "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
- &xboot_ldr_partition_guid "BC13C2FF-59E6-4262-A352-B275FD6F7172"
# static UUIDs for partitions and filesystems
# NOTE(akoutsou): These are unnecessary and have stuck around since the
# beginning where (I believe) the goal was to have predictable,
# reproducible partition tables. They might be removed soon in favour of
# proper, random UUIDs, with reproducibility being controlled by fixing
# rng seeds.
uuids:
- &bios_boot_partition_uuid "FAC7F1FB-3E8D-4137-A512-961DE09A5549"
- &root_partition_uuid "6264D520-3FB9-423F-8AB8-7A0A8E3D3562"
- &data_partition_uuid "CB07C243-BC44-4717-853E-28852021225B"
- &efi_system_partition_uuid "68B2905B-DF3E-4FB3-80FA-49D1E773AA33"
- &efi_filesystem_uuid "7B77-95E7"
default_partition_tables: &default_partition_tables
x86_64:
uuid: "D209C89E-EA5E-4FBD-B161-B461CCE297E0"
type: "gpt"
partitions:
- size: 1_048_576 # 1 MiB
bootable: true
type: *bios_boot_partition_guid
uuid: *bios_boot_partition_uuid
- &default_partition_table_part_efi
size: 209_715_200 # 200 MiB
type: *efi_system_partition_guid
uuid: *efi_system_partition_uuid
payload_type: "filesystem"
payload:
type: vfat
uuid: *efi_filesystem_uuid
mountpoint: "/boot/efi"
label: "EFI-SYSTEM"
fstab_options: "defaults,uid=0,gid=0,umask=077,shortname=winnt"
fstab_freq: 0
fstab_passno: 2
- &default_partition_table_part_boot
size: 524_288_000 # 500 * MiB
type: *filesystem_data_guid
uuid: *data_partition_uuid
payload_type: "filesystem"
payload:
type: "ext4"
mountpoint: "/boot"
label: "boot"
fstab_options: "defaults"
fstab_freq: 0
fstab_passno: 0
- &default_partition_table_part_root
size: 2_147_483_648 # 2 * datasizes.GibiByte,
type: *filesystem_data_guid
uuid: *root_partition_uuid
payload_type: "filesystem"
payload:
type: "ext4"
label: "root"
mountpoint: "/"
fstab_options: "defaults"
fstab_freq: 0
fstab_passno: 0
aarch64: &default_partition_table_aarch64
uuid: "D209C89E-EA5E-4FBD-B161-B461CCE297E0"
type: "gpt"
partitions:
- *default_partition_table_part_efi
- *default_partition_table_part_boot
- *default_partition_table_part_root
ppc64le:
uuid: "0x14fc63d2"
type: "dos"
partitions:
- size: 4_194_304 # 4 MiB
bootable: true
type: *prep_partition_dosid
- &default_partition_table_part_boot_ppc64le
size: 524_288_000 # 500 * MiB
payload_type: "filesystem"
payload:
type: "ext4"
mountpoint: "/boot"
label: "boot"
fstab_options: "defaults"
fstab_freq: 0
fstab_passno: 0
- &default_partition_table_part_root_ppc64le
size: 2_147_483_648 # 2 * datasizes.GibiByte,
payload_type: "filesystem"
payload:
type: "ext4"
mountpoint: "/"
fstab_options: "defaults"
fstab_freq: 0
fstab_passno: 0
s390x:
uuid: "0x14fc63d2"
type: "dos"
partitions:
- *default_partition_table_part_boot_ppc64le
- <<: *default_partition_table_part_root_ppc64le
bootable: true
riscv64: *default_partition_table_aarch64
minimal_raw_partition_tables: &minimal_raw_partition_tables
x86_64:
uuid: "D209C89E-EA5E-4FBD-B161-B461CCE297E0"
type: "gpt"
start_offset: 8_388_608 # 8 * datasizes.MebiByte
partitions:
- *default_partition_table_part_efi
- &minimal_raw_partition_table_part_boot
<<: *default_partition_table_part_boot
size: 1_073_741_824 # 1 * datasizes.GibiByte,
type: *xboot_ldr_partition_guid
- &minimal_raw_partition_table_part_root
<<: *default_partition_table_part_root
aarch64: &minimal_raw_partition_table_aarch64
uuid: "0xc1748067"
type: "dos"
start_offset: 8_388_608 # 8 * datasizes.MebiByte
partitions:
- <<: *default_partition_table_part_efi
bootable: true
type: *fat16_bdosid
uuid: ""
- <<: *minimal_raw_partition_table_part_boot
type: *filesystem_linux_dosid
uuid: ""
- <<: *default_partition_table_part_root
type: *filesystem_linux_dosid
uuid: ""
riscv64: *minimal_raw_partition_table_aarch64
iot_base_partition_tables: &iot_base_partition_tables
x86_64:
uuid: "D209C89E-EA5E-4FBD-B161-B461CCE297E0"
type: "gpt"
start_offset: 8_388_608 # 8 * datasizes.MebiByte
partitions:
- &iot_base_partition_table_part_efi
size: 525_336_576 # 501 * datasizes.MebiByte
type: *efi_system_partition_guid
uuid: *efi_system_partition_uuid
payload_type: "filesystem"
payload:
type: vfat
uuid: *efi_filesystem_uuid
mountpoint: "/boot/efi"
label: "EFI-SYSTEM"
fstab_options: "umask=0077,shortname=winnt"
fstab_freq: 0
fstab_passno: 2
- &iot_base_partition_table_part_boot
size: 1_073_741_824 # 1 * datasizes.GibiByte,
type: *filesystem_data_guid
uuid: *data_partition_uuid
payload_type: "filesystem"
payload:
type: "ext4"
label: "boot"
mountpoint: "/boot"
fstab_options: "defaults"
fstab_freq: 1
fstab_passno: 2
- &iot_base_partition_table_part_root
size: 2_693_791_744 # 2569 * datasizes.MebiByte,
type: *filesystem_data_guid
uuid: *root_partition_uuid
payload_type: "filesystem"
payload:
type: "ext4"
label: "root"
mountpoint: "/"
fstab_options: "defaults,ro"
fstab_freq: 1
fstab_passno: 1
aarch64: &iot_base_partition_table_aarch64
uuid: "0xc1748067"
type: "dos"
start_offset: 8_388_608 # 8 * datasizes.MebiByte
partitions:
- <<: *iot_base_partition_table_part_efi
bootable: true
type: *fat16_bdosid
uuid: ""
- <<: *iot_base_partition_table_part_boot
type: *filesystem_linux_dosid
uuid: ""
- <<: *iot_base_partition_table_part_root
type: *filesystem_linux_dosid
uuid: ""
iot_simplified_installer_partition_tables: &iot_simplified_installer_partition_tables
x86_64: &iot_simplified_installer_partition_tables_x86
uuid: "D209C89E-EA5E-4FBD-B161-B461CCE297E0"
type: "gpt"
partitions:
- *iot_base_partition_table_part_efi
- size: 1_073_741_824 # 1 * datasizes.GibiByte,
type: *xboot_ldr_partition_guid
uuid: *data_partition_uuid
payload_type: "filesystem"
payload:
type: "ext4"
label: "boot"
mountpoint: "/boot"
fstab_options: "defaults"
fstab_freq: 1
fstab_passno: 1
- type: *filesystem_data_guid
uuid: *root_partition_uuid
payload_type: "luks"
payload:
label: "crypt_root"
cipher: "cipher_null"
passphrase: "osbuild"
pbkdf:
memory: 32
iterations: 4
parallelism: 1
clevis:
pin: "null"
policy: "{}"
remove_passphrase: true
payload_type: "lvm"
payload:
name: "rootvg"
description: "built with lvm2 and osbuild"
logical_volumes:
- size: 8_589_934_592 # 8 * datasizes.GibiByte,
name: "rootlv"
payload_type: "filesystem"
payload:
type: "ext4"
label: "root"
mountpoint: "/"
fstab_options: "defaults"
fstab_freq: 0
fstab_passno: 0
aarch64:
<<: *iot_simplified_installer_partition_tables_x86
image_config:
default:
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-fedora-ds.xml"
hostname: "localhost.localdomain"
install_weak_deps: true
locale: "C.UTF-8"
machine_id_uninitialized: true
timezone: "UTC"
image_types:
qcow2: &qcow2
partition_table:
<<: *default_partition_tables
package_sets:
- *cloud_base_pkgset
- include:
@ -23,12 +284,16 @@ image_types:
openstack: *qcow2
vhd:
partition_table:
<<: *default_partition_tables
package_sets:
- *cloud_base_pkgset
- include:
- "WALinuxAgent"
vmdk: &vmdk
partition_table:
<<: *default_partition_tables
package_sets:
- include:
- "@Fedora Cloud Server"
@ -172,6 +437,20 @@ image_types:
iot_container: *iot_commit
iot_raw_image:
partition_table:
<<: *iot_base_partition_tables
partition_table_override:
condition:
version_greater_or_equal:
"42":
- partition_index: 2
fstab_options: "defaults,ro"
iot_qcow2_image:
partition_table:
<<: *iot_base_partition_tables
iot_bootable_container:
package_sets:
- include:
@ -573,6 +852,8 @@ image_types:
- "fuse-libs"
minimal_raw: &minimal_raw
partition_table:
<<: *minimal_raw_partition_tables
package_sets:
- include:
- "@core"
@ -599,6 +880,8 @@ image_types:
minimal_raw_zst: *minimal_raw
iot_simplified_installer:
partition_table:
<<: *iot_simplified_installer_partition_tables
package_sets:
- *installer_pkgset
- include:

View file

@ -3,6 +3,7 @@ package defs
import (
"embed"
"errors"
"fmt"
"io/fs"
"os"
@ -15,38 +16,140 @@ import (
"gopkg.in/yaml.v3"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/experimentalflags"
"github.com/osbuild/images/pkg/rpmmd"
)
var (
ErrImageTypeNotFound = errors.New("image type not found")
ErrNoPartitionTableForImgType = errors.New("no partition table for image type")
ErrNoPartitionTableForArch = errors.New("no partition table for arch")
)
//go:embed */*.yaml
var data embed.FS
var DataFS fs.FS = data
type toplevelYAML struct {
ImageConfig imageConfig `yaml:"image_config,omitempty"`
ImageTypes map[string]imageType `yaml:"image_types"`
Common map[string]any `yaml:".common,omitempty"`
}
type imageConfig struct {
Default *distro.ImageConfig `yaml:"default"`
Condition *imageConfigConditions `yaml:"condition,omitempty"`
}
type imageConfigConditions struct {
DistroName map[string]*distro.ImageConfig `yaml:"distro_name,omitempty"`
}
type imageType struct {
PackageSets []packageSet `yaml:"package_sets"`
// archStr->partitionTable
PartitionTables map[string]*disk.PartitionTable `yaml:"partition_table"`
// override specific aspects of the partition table
PartitionTablesOverrides *partitionTablesOverrides `yaml:"partition_table_override"`
}
type packageSet struct {
Include []string `yaml:"include"`
Exclude []string `yaml:"exclude"`
Condition *conditions `yaml:"condition,omitempty"`
Condition *pkgSetConditions `yaml:"condition,omitempty"`
}
type conditions struct {
type pkgSetConditions struct {
Architecture map[string]packageSet `yaml:"architecture,omitempty"`
VersionLessThan map[string]packageSet `yaml:"version_less_than,omitempty"`
VersionGreaterOrEqual map[string]packageSet `yaml:"version_greater_or_equal,omitempty"`
DistroName map[string]packageSet `yaml:"distro_name,omitempty"`
}
type partitionTablesOverrides struct {
Conditional *partitionTablesOverwriteConditional `yaml:"condition"`
}
func (po *partitionTablesOverrides) Apply(it distro.ImageType, pt *disk.PartitionTable, replacements map[string]string) error {
if po == nil {
return nil
}
cond := po.Conditional
_, distroVersion := splitDistroNameVer(it.Arch().Distro().Name())
for gteqVer, geOverrides := range cond.VersionGreaterOrEqual {
if r, ok := replacements[gteqVer]; ok {
gteqVer = r
}
if common.VersionGreaterThanOrEqual(distroVersion, gteqVer) {
for _, overrideOp := range geOverrides {
if err := overrideOp.Apply(pt); err != nil {
return err
}
}
}
}
return nil
}
type partitionTablesOverwriteConditional struct {
VersionGreaterOrEqual map[string][]partitionTablesOverrideOp `yaml:"version_greater_or_equal,omitempty"`
}
type partitionTablesOverrideOp struct {
PartitionIndex int `yaml:"partition_index"`
Size uint64 `yaml:"size"`
FSTabOptions string `yaml:"fstab_options"`
}
func (op *partitionTablesOverrideOp) Apply(pt *disk.PartitionTable) error {
selectPart := op.PartitionIndex
if selectPart < 0 || selectPart >= len(pt.Partitions) {
return fmt.Errorf("override %q part %v outside of partitionTable %+v", op, selectPart, pt)
}
if op.Size > 0 {
pt.Partitions[selectPart].Size = op.Size
}
if op.FSTabOptions != "" {
part := pt.Partitions[selectPart]
fs, ok := part.Payload.(*disk.Filesystem)
if !ok {
return fmt.Errorf("override %q part %v for fstab_options expecting filesystem got %T", op, selectPart, part)
}
fs.FSTabOptions = op.FSTabOptions
}
return nil
}
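For illustration, the Go equivalent of the YAML override used later in this change (partition_index 2, fstab_options "defaults,ro"), written as if inside this package with a partition table already loaded. The function name is made up.

func makeRootReadOnly(pt *disk.PartitionTable) error {
	op := partitionTablesOverrideOp{
		PartitionIndex: 2,
		FSTabOptions:   "defaults,ro",
	}
	// Size is left at 0, so only the fstab options of partition 2 change,
	// assuming that partition carries a *disk.Filesystem payload.
	return op.Apply(pt)
}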
// DistroImageConfig returns the distro-wide ImageConfig.
//
// Each ImageType gets this as its default ImageConfig.
func DistroImageConfig(distroNameVer string) (*distro.ImageConfig, error) {
toplevel, err := load(distroNameVer)
if err != nil {
return nil, err
}
imgConfig := toplevel.ImageConfig.Default
cond := toplevel.ImageConfig.Condition
if cond != nil {
distroName, _ := splitDistroNameVer(distroNameVer)
// XXX: we should probably use a pattern similar to the one used
// for the partition table overrides (via
// findElementIndexByJSONTag) but this is fine for now
if distroNameCnf, ok := cond.DistroName[distroName]; ok {
imgConfig = distroNameCnf.InheritFrom(imgConfig)
}
}
return imgConfig, nil
}
// PackageSet loads the PackageSet from the yaml source file discovered via the
// imagetype. By default the imagetype name is used to load the packageset
// but with "overrideTypeName" this can be overriden (useful for e.g.
@ -62,61 +165,18 @@ func PackageSet(it distro.ImageType, overrideTypeName string, replacements map[s
archName := arch.Name()
distribution := arch.Distro()
distroNameVer := distribution.Name()
// we need to split from the right for "centos-stream-10" like
// distro names, sadly go has no rsplit() so we do it manually
// XXX: we cannot use distroidparser here because of import cycles
distroName := distroNameVer[:strings.LastIndex(distroNameVer, "-")]
distroVersion := strings.SplitN(distroNameVer, "-", 2)[1]
distroNameMajorVer := strings.SplitN(distroNameVer, ".", 2)[0]
// XXX: this is a short term measure, pass a set of
// searchPaths down the stack instead
var dataFS fs.FS = DataFS
if overrideDir := experimentalflags.String("yamldir"); overrideDir != "" {
logrus.Warnf("using experimental override dir %q", overrideDir)
dataFS = os.DirFS(overrideDir)
}
// XXX: this is only needed temporary until we have a "distros.yaml"
// that describes some high-level properties of each distro
// (like their yaml dirs)
var baseDir string
switch distroName {
case "rhel":
// rhel yaml files are under ./rhel-$majorVer
baseDir = distroNameMajorVer
case "centos":
// centos yaml is just rhel but we have (sadly) no symlinks
// in "go:embed" so we have to have this slightly ugly
// workaround
baseDir = fmt.Sprintf("rhel-%s", distroVersion)
case "fedora", "test-distro":
// our other distros just have a single yaml dir per distro
// and use condition.version_gt etc
baseDir = distroName
default:
return rpmmd.PackageSet{}, fmt.Errorf("unsupported distro in loader %q (add to loader.go)", distroName)
}
f, err := dataFS.Open(filepath.Join(baseDir, "distro.yaml"))
if err != nil {
return rpmmd.PackageSet{}, err
}
defer f.Close()
decoder := yaml.NewDecoder(f)
decoder.KnownFields(true)
distroName, distroVersion := splitDistroNameVer(distroNameVer)
// each imagetype can have multiple package sets, so that we can
// use yaml aliases/anchors to de-duplicate them
var toplevel toplevelYAML
if err := decoder.Decode(&toplevel); err != nil {
toplevel, err := load(distroNameVer)
if err != nil {
return rpmmd.PackageSet{}, err
}
imgType, ok := toplevel.ImageTypes[typeName]
if !ok {
return rpmmd.PackageSet{}, fmt.Errorf("unknown image type name %q", typeName)
return rpmmd.PackageSet{}, fmt.Errorf("%w: %q", ErrImageTypeNotFound, typeName)
}
var rpmmdPkgSet rpmmd.PackageSet
@ -172,3 +232,98 @@ func PackageSet(it distro.ImageType, overrideTypeName string, replacements map[s
return rpmmdPkgSet, nil
}
// PartitionTable returns the partition table for the given distro/imgType.
func PartitionTable(it distro.ImageType, replacements map[string]string) (*disk.PartitionTable, error) {
distroNameVer := it.Arch().Distro().Name()
typeName := strings.ReplaceAll(it.Name(), "-", "_")
toplevel, err := load(distroNameVer)
if err != nil {
return nil, err
}
imgType, ok := toplevel.ImageTypes[typeName]
if !ok {
return nil, fmt.Errorf("%w: %q", ErrImageTypeNotFound, typeName)
}
if imgType.PartitionTables == nil {
return nil, fmt.Errorf("%w: %q", ErrNoPartitionTableForImgType, typeName)
}
arch := it.Arch()
archName := arch.Name()
pt, ok := imgType.PartitionTables[archName]
if !ok {
return nil, fmt.Errorf("%w (%q): %q", ErrNoPartitionTableForArch, typeName, archName)
}
if err := imgType.PartitionTablesOverrides.Apply(it, pt, replacements); err != nil {
return nil, err
}
return pt, nil
}
func splitDistroNameVer(distroNameVer string) (string, string) {
// we need to split from the right for "centos-stream-10" like
// distro names, sadly go has no rsplit() so we do it manually
// XXX: we cannot use distroidparser here because of import cycles
idx := strings.LastIndex(distroNameVer, "-")
return distroNameVer[:idx], distroNameVer[idx+1:]
}
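A quick illustration of the split (as if inside this package): the cut happens at the last dash, so multi-dash distro names keep their prefix intact.

name, ver := splitDistroNameVer("centos-stream-10")
fmt.Println(name, ver) // "centos-stream" "10"
name, ver = splitDistroNameVer("fedora-42")
fmt.Println(name, ver) // "fedora" "42"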
func load(distroNameVer string) (*toplevelYAML, error) {
// we need to split from the right for "centos-stream-10" like
// distro names, sadly go has no rsplit() so we do it manually
// XXX: we cannot use distroidparser here because of import cycles
distroName, distroVersion := splitDistroNameVer(distroNameVer)
distroNameMajorVer := strings.SplitN(distroNameVer, ".", 2)[0]
// XXX: this is a short term measure, pass a set of
// searchPaths down the stack instead
var dataFS fs.FS = DataFS
if overrideDir := experimentalflags.String("yamldir"); overrideDir != "" {
logrus.Warnf("using experimental override dir %q", overrideDir)
dataFS = os.DirFS(overrideDir)
}
// XXX: this is only needed temporarily until we have a "distros.yaml"
// that describes some high-level properties of each distro
// (like their yaml dirs)
var baseDir string
switch distroName {
case "rhel":
// rhel yaml files are under ./rhel-$majorVer
baseDir = distroNameMajorVer
case "centos":
// centos yaml is just rhel but we have (sadly) no symlinks
// in "go:embed" so we have to have this slightly ugly
// workaround
baseDir = fmt.Sprintf("rhel-%s", distroVersion)
case "fedora", "test-distro":
// our other distros just have a single yaml dir per distro
// and use condition.version_gt etc
baseDir = distroName
default:
return nil, fmt.Errorf("unsupported distro in loader %q (add to loader.go)", distroName)
}
f, err := dataFS.Open(filepath.Join(baseDir, "distro.yaml"))
if err != nil {
return nil, err
}
defer f.Close()
decoder := yaml.NewDecoder(f)
decoder.KnownFields(true)
// each imagetype can have multiple package sets, so that we can
// use yaml aliases/anchors to de-duplicate them
var toplevel toplevelYAML
if err := decoder.Decode(&toplevel); err != nil {
return nil, err
}
return &toplevel, nil
}

View file

@ -152,6 +152,23 @@
- "grub2-efi-aa64"
- "shim-aa64"
image_config:
default:
default_kernel: "kernel"
# XXX: this needs to be conditional for centos and rhel
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-rhel10-ds.xml"
install_weak_deps: true
locale: "C.UTF-8"
sysconfig:
networking: true
no_zero_conf: true
timezone: "UTC"
update_default_kernel: true
condition:
distro_name:
centos:
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-cs10-ds.xml"
image_types:
# XXX: not a real pkgset but the "os" pipeline pkgset for image-installer
# find a nicer way to represent this

View file

@ -44,6 +44,23 @@
include:
- "insights-client"
image_config:
default:
timezone: "America/New_York"
locale: "en_US.UTF-8"
gpgkey_files:
- "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release"
sysconfig:
networking: true
no_zero_conf: true
create_default_network_scripts: true
default_kernel: "kernel"
update_default_kernel: true
kernel_options_bootloader: true
# RHEL 7 grub does not support BLS
no_bls: true
install_weak_deps: true
image_types:
azure_rhui:
package_sets:

View file

@ -524,6 +524,23 @@
- "insights-client"
- "subscription-manager-cockpit"
image_config:
default:
default_kernel: "kernel"
# XXX: this needs to be conditional for centos and rhel
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-rhel8-ds.xml"
install_weak_deps: true
kernel_options_bootloader: true
locale: "en_US.UTF-8"
sysconfig:
networking: true
no_zero_conf: true
timezone: "America/New_York"
update_default_kernel: true
condition:
distro_name:
centos:
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-centos8-ds.xml"
image_types:
# XXX: not a real pkgset but the "os" pipeline pkgset for image-installer

View file

@ -362,6 +362,22 @@
include:
- "dmidecode"
image_config:
default:
default_kernel: "kernel"
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-rhel9-ds.xml"
install_weak_deps: true
locale: "C.UTF-8"
sysconfig:
networking: true
no_zero_conf: true
timezone: "America/New_York"
update_default_kernel: true
condition:
distro_name:
centos:
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-cs9-ds.xml"
image_types:
# XXX: not a real pkgset but the "os" pipeline pkgset for image-installer
# find a nicer way to represent this

View file

@ -102,6 +102,10 @@ type ImageType interface {
// has no partition table. Only support for RHEL 8.5+
PartitionType() disk.PartitionTableType
// Return the base partition tabe for the given image type, will
// return `nil` if there is none
BasePartitionTable() (*disk.PartitionTable, error)
// Returns the corresponding boot mode ("legacy", "uefi", "hybrid") or "none"
BootMode() platform.BootMode

View file

@ -242,7 +242,6 @@ func mkIotSimplifiedInstallerImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"ostree-deployment", "image", "xz", "coi-tree", "efiboot-tree", "bootiso-tree", "bootiso"},
exports: []string{"bootiso"},
basePartitionTables: iotSimplifiedInstallerPartitionTables,
kernelOptions: ostreeDeploymentKernelOptions(),
requiredPartitionSizes: requiredDirectorySizes,
}
@ -272,7 +271,6 @@ func mkIotRawImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"ostree-deployment", "image", "xz"},
exports: []string{"xz"},
basePartitionTables: iotBasePartitionTables,
kernelOptions: ostreeDeploymentKernelOptions(),
// Passing an empty map into the required partition sizes disables the
@ -304,7 +302,6 @@ func mkIotQcow2ImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"ostree-deployment", "image", "qcow2"},
exports: []string{"qcow2"},
basePartitionTables: iotBasePartitionTables,
kernelOptions: ostreeDeploymentKernelOptions(),
requiredPartitionSizes: requiredDirectorySizes,
}
@ -329,7 +326,6 @@ func mkQcow2ImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},
basePartitionTables: defaultBasePartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
}
@ -362,7 +358,6 @@ func mkVmdkImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk"},
exports: []string{"vmdk"},
basePartitionTables: defaultBasePartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
}
@ -383,7 +378,6 @@ func mkOvaImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk", "ovf", "archive"},
exports: []string{"archive"},
basePartitionTables: defaultBasePartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
}
@ -438,10 +432,8 @@ func mkWslImgType(d distribution) imageType {
ExcludeDocs: common.ToPtr(true),
Locale: common.ToPtr("C.UTF-8"),
Timezone: common.ToPtr("Etc/UTC"),
WSLConfig: &osbuild.WSLConfStageOptions{
Boot: osbuild.WSLConfBootOptions{
Systemd: true,
},
WSLConfig: &distro.WSLConfig{
BootSystemd: true,
},
},
image: containerImage,
@ -481,7 +473,6 @@ func mkMinimalRawImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
basePartitionTables: minimalrawPartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
if common.VersionGreaterThanOrEqual(d.osVersion, "43") {
@ -508,16 +499,6 @@ type distribution struct {
defaultImageConfig *distro.ImageConfig
}
// Fedora based OS image configuration defaults
var defaultDistroImageConfig = &distro.ImageConfig{
Hostname: common.ToPtr("localhost.localdomain"),
Timezone: common.ToPtr("UTC"),
Locale: common.ToPtr("C.UTF-8"),
DefaultOSCAPDatastream: common.ToPtr(oscap.DefaultFedoraDatastream()),
InstallWeakDeps: common.ToPtr(true),
MachineIdUninitialized: common.ToPtr(true),
}
func defaultDistroInstallerConfig(d *distribution) *distro.InstallerConfig {
config := distro.InstallerConfig{}
// In Fedora 42 the ifcfg module was replaced by net-lib.
@ -543,15 +524,16 @@ func getDistro(version int) distribution {
if version < 0 {
panic("Invalid Fedora version (must be positive)")
}
nameVer := fmt.Sprintf("fedora-%d", version)
return distribution{
name: fmt.Sprintf("fedora-%d", version),
name: nameVer,
product: "Fedora",
osVersion: strconv.Itoa(version),
releaseVersion: strconv.Itoa(version),
modulePlatformID: fmt.Sprintf("platform:f%d", version),
ostreeRefTmpl: fmt.Sprintf("fedora/%d/%%s/iot", version),
runner: &runner.Fedora{Version: uint64(version)},
defaultImageConfig: defaultDistroImageConfig,
defaultImageConfig: common.Must(defs.DistroImageConfig(nameVer)),
}
}

View file

@ -210,7 +210,7 @@ func osCustomizations(
osc.ShellInit = imageConfig.ShellInit
osc.Grub2Config = imageConfig.Grub2Config
osc.Sysconfig = imageConfig.Sysconfig
osc.Sysconfig = imageConfig.SysconfigStageOptions()
osc.SystemdLogind = imageConfig.SystemdLogind
osc.CloudInit = imageConfig.CloudInit
osc.Modprobe = imageConfig.Modprobe
@ -226,7 +226,7 @@ func osCustomizations(
osc.SshdConfig = imageConfig.SshdConfig
osc.AuthConfig = imageConfig.Authconfig
osc.PwQuality = imageConfig.PwQuality
osc.WSLConfig = imageConfig.WSLConfig
osc.WSLConfig = imageConfig.WSLConfStageOptions()
osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...)

View file

@ -1,6 +1,7 @@
package fedora
import (
"errors"
"fmt"
"math/rand"
"strings"
@ -16,6 +17,7 @@ import (
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/defs"
"github.com/osbuild/images/pkg/experimentalflags"
"github.com/osbuild/images/pkg/image"
"github.com/osbuild/images/pkg/manifest"
@ -57,8 +59,6 @@ type imageType struct {
rpmOstree bool
// bootable image
bootable bool
// List of valid arches for the image type
basePartitionTables distro.BasePartitionTableMap
requiredPartitionSizes map[string]uint64
}
@ -139,14 +139,18 @@ func (t *imageType) BootMode() platform.BootMode {
return platform.BOOT_NONE
}
func (t *imageType) BasePartitionTable() (*disk.PartitionTable, error) {
return defs.PartitionTable(t, VersionReplacements())
}
func (t *imageType) getPartitionTable(
customizations *blueprint.Customizations,
options distro.ImageOptions,
rng *rand.Rand,
) (*disk.PartitionTable, error) {
basePartitionTable, exists := t.basePartitionTables[t.arch.Name()]
if !exists {
return nil, fmt.Errorf("unknown arch for partition table: %s", t.arch.Name())
basePartitionTable, err := t.BasePartitionTable()
if err != nil {
return nil, err
}
imageSize := t.Size(options.Size)
@ -185,7 +189,7 @@ func (t *imageType) getPartitionTable(
}
mountpoints := customizations.GetFilesystems()
return disk.NewPartitionTable(&basePartitionTable, mountpoints, imageSize, partitioningMode, t.platform.GetArch(), t.requiredPartitionSizes, rng)
return disk.NewPartitionTable(basePartitionTable, mountpoints, imageSize, partitioningMode, t.platform.GetArch(), t.requiredPartitionSizes, rng)
}
func (t *imageType) getDefaultImageConfig() *distro.ImageConfig {
@ -207,10 +211,13 @@ func (t *imageType) getDefaultInstallerConfig() (*distro.InstallerConfig, error)
}
func (t *imageType) PartitionType() disk.PartitionTableType {
basePartitionTable, exists := t.basePartitionTables[t.arch.Name()]
if !exists {
basePartitionTable, err := t.BasePartitionTable()
if errors.Is(err, defs.ErrNoPartitionTableForImgType) {
return disk.PT_NONE
}
if err != nil {
panic(err)
}
return basePartitionTable.Type
}

View file

@ -1,594 +0,0 @@
package fedora
import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
)
var defaultBasePartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
Partitions: []disk.Partition{
{
Size: 1 * datasizes.MebiByte,
Bootable: true,
Type: disk.BIOSBootPartitionGUID,
UUID: disk.BIOSBootPartitionUUID,
},
{
Size: 200 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * datasizes.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * datasizes.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_PPC64LE.String(): disk.PartitionTable{
UUID: "0x14fc63d2",
Type: disk.PT_DOS,
Partitions: []disk.Partition{
{
Size: 4 * datasizes.MebiByte,
Type: disk.PRepPartitionDOSID,
Bootable: true,
},
{
Size: 500 * datasizes.MebiByte,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_S390X.String(): disk.PartitionTable{
UUID: "0x14fc63d2",
Type: disk.PT_DOS,
Partitions: []disk.Partition{
{
Size: 500 * datasizes.MebiByte,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Bootable: true,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_RISCV64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * datasizes.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
}
var minimalrawPartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
StartOffset: 8 * datasizes.MebiByte,
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "0xc1748067",
Type: disk.PT_DOS,
StartOffset: 8 * datasizes.MebiByte,
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.FAT16BDOSID,
Bootable: true,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_RISCV64.String(): disk.PartitionTable{
UUID: "0xc1748067",
Type: disk.PT_DOS,
StartOffset: 8 * datasizes.MebiByte,
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.FAT16BDOSID,
Bootable: true,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
}
var iotBasePartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
StartOffset: 8 * datasizes.MebiByte,
Partitions: []disk.Partition{
{
Size: 501 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "umask=0077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 1,
FSTabPassNo: 2,
},
},
{
Size: 2569 * datasizes.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults,ro",
FSTabFreq: 1,
FSTabPassNo: 1,
},
},
},
},
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "0xc1748067",
Type: disk.PT_DOS,
StartOffset: 8 * datasizes.MebiByte,
Partitions: []disk.Partition{
{
Size: 501 * datasizes.MebiByte,
Type: disk.FAT16BDOSID,
Bootable: true,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "umask=0077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 1,
FSTabPassNo: 2,
},
},
{
Size: 2569 * datasizes.MebiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults,ro",
FSTabFreq: 1,
FSTabPassNo: 1,
},
},
},
},
}
var iotSimplifiedInstallerPartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
Partitions: []disk.Partition{
{
Size: 501 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "umask=0077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 1,
FSTabPassNo: 1,
},
},
{
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.LUKSContainer{
Label: "crypt_root",
Cipher: "cipher_null",
Passphrase: "osbuild",
PBKDF: disk.Argon2id{
Memory: 32,
Iterations: 4,
Parallelism: 1,
},
Clevis: &disk.ClevisBind{
Pin: "null",
Policy: "{}",
RemovePassphrase: true,
},
Payload: &disk.LVMVolumeGroup{
Name: "rootvg",
Description: "built with lvm2 and osbuild",
LogicalVolumes: []disk.LVMLogicalVolume{
{
Size: 8 * datasizes.GibiByte,
Name: "rootlv",
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
},
},
},
},
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
Partitions: []disk.Partition{
{
Size: 501 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "umask=0077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 1,
FSTabPassNo: 1,
},
},
{
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.LUKSContainer{
Label: "crypt_root",
Cipher: "cipher_null",
Passphrase: "osbuild",
PBKDF: disk.Argon2id{
Memory: 32,
Iterations: 4,
Parallelism: 1,
},
Clevis: &disk.ClevisBind{
Pin: "null",
Policy: "{}",
RemovePassphrase: true,
},
Payload: &disk.LVMVolumeGroup{
Name: "rootvg",
Description: "built with lvm2 and osbuild",
LogicalVolumes: []disk.LVMLogicalVolume{
{
Size: 8 * datasizes.GibiByte,
Name: "rootlv",
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
},
},
},
},
}

View file

@ -4,6 +4,7 @@ import (
"fmt"
"reflect"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/customizations/shell"
"github.com/osbuild/images/pkg/customizations/subscription"
@ -12,19 +13,22 @@ import (
// ImageConfig represents a (default) configuration applied to the image payload.
type ImageConfig struct {
Hostname *string
Timezone *string
Hostname *string `yaml:"hostname,omitempty"`
Timezone *string `yaml:"timezone,omitempty"`
TimeSynchronization *osbuild.ChronyStageOptions
Locale *string
Locale *string `yaml:"locale,omitempty"`
Keyboard *osbuild.KeymapStageOptions
EnabledServices []string
DisabledServices []string
MaskedServices []string
DefaultTarget *string
Sysconfig []*osbuild.SysconfigStageOptions
Sysconfig *Sysconfig `yaml:"sysconfig,omitempty"`
DefaultKernel *string `yaml:"default_kernel,omitempty"`
UpdateDefaultKernel *bool `yaml:"update_default_kernel,omitempty"`
// List of files from which to import GPG keys into the RPM database
GPGKeyFiles []string
GPGKeyFiles []string `yaml:"gpgkey_files,omitempty"`
// Disable SELinux labelling
NoSElinux *bool
@ -64,7 +68,8 @@ type ImageConfig struct {
Firewall *osbuild.FirewallStageOptions
UdevRules *osbuild.UdevRulesStageOptions
GCPGuestAgentConfig *osbuild.GcpGuestAgentConfigOptions
WSLConfig *osbuild.WSLConfStageOptions
WSLConfig *WSLConfig
Files []*fsnode.File
Directories []*fsnode.Directory
@ -75,15 +80,15 @@ type ImageConfig struct {
//
// This should only be used for old distros that use grub and it is
// applied on all architectures, except for s390x.
KernelOptionsBootloader *bool
KernelOptionsBootloader *bool `yaml:"kernel_options_bootloader,omitempty"`
// The default OSCAP datastream to use for the image as a fallback,
// if no datastream value is provided by the user.
DefaultOSCAPDatastream *string
DefaultOSCAPDatastream *string `yaml:"default_oscap_datastream,omitempty"`
// NoBLS configures the image bootloader with traditional menu entries
// instead of BLS. Required for legacy systems like RHEL 7.
NoBLS *bool
NoBLS *bool `yaml:"no_bls,omitempty"`
// OSTree specific configuration
@ -98,18 +103,22 @@ type ImageConfig struct {
// InstallWeakDeps enables installation of weak dependencies for packages
// that are statically defined for the pipeline.
InstallWeakDeps *bool
InstallWeakDeps *bool `yaml:"install_weak_deps,omitempty"`
// How to handle the /etc/machine-id file, when set to true it causes the
// machine id to be set to 'uninitialized' which causes ConditionFirstboot
// to be triggered in systemd
MachineIdUninitialized *bool
MachineIdUninitialized *bool `yaml:"machine_id_uninitialized,omitempty"`
// MountUnits creates systemd .mount units to describe the filesystem
// instead of writing to /etc/fstab
MountUnits *bool
}
type WSLConfig struct {
BootSystemd bool
}
// InheritFrom inherits unset values from the provided parent configuration and
// returns a new structure instance, which is a result of the inheritance.
func (c *ImageConfig) InheritFrom(parentConfig *ImageConfig) *ImageConfig {
@ -134,3 +143,76 @@ func (c *ImageConfig) InheritFrom(parentConfig *ImageConfig) *ImageConfig {
}
return &finalConfig
}
func (c *ImageConfig) WSLConfStageOptions() *osbuild.WSLConfStageOptions {
if c.WSLConfig == nil {
return nil
}
return &osbuild.WSLConfStageOptions{
Boot: osbuild.WSLConfBootOptions{
Systemd: c.WSLConfig.BootSystemd,
},
}
}
type Sysconfig struct {
Networking bool `yaml:"networking,omitempty"`
NoZeroConf bool `yaml:"no_zero_conf,omitempty"`
CreateDefaultNetworkScripts bool `yaml:"create_default_network_scripts,omitempty"`
}
func (c *ImageConfig) SysconfigStageOptions() []*osbuild.SysconfigStageOptions {
var opts *osbuild.SysconfigStageOptions
if c.DefaultKernel != nil {
if opts == nil {
opts = &osbuild.SysconfigStageOptions{}
}
if opts.Kernel == nil {
opts.Kernel = &osbuild.SysconfigKernelOptions{}
}
opts.Kernel.DefaultKernel = *c.DefaultKernel
}
if c.UpdateDefaultKernel != nil {
if opts == nil {
opts = &osbuild.SysconfigStageOptions{}
}
if opts.Kernel == nil {
opts.Kernel = &osbuild.SysconfigKernelOptions{}
}
opts.Kernel.UpdateDefault = *c.UpdateDefaultKernel
}
if c.Sysconfig != nil {
if c.Sysconfig.Networking {
if opts == nil {
opts = &osbuild.SysconfigStageOptions{}
}
if opts.Network == nil {
opts.Network = &osbuild.SysconfigNetworkOptions{}
}
opts.Network.Networking = c.Sysconfig.Networking
opts.Network.NoZeroConf = c.Sysconfig.NoZeroConf
if c.Sysconfig.CreateDefaultNetworkScripts {
opts.NetworkScripts = &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
}
}
}
}
if opts == nil {
return nil
}
return []*osbuild.SysconfigStageOptions{opts}
}
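The new SysconfigStageOptions helper collapses the declarative ImageConfig fields (DefaultKernel, UpdateDefaultKernel, Sysconfig) back into the single osbuild sysconfig stage that the distro defaults used to spell out by hand. A hedged usage sketch, assuming the images version vendored in this commit; the ptr helper and the concrete field values mirror the EC2-style defaults elsewhere in this diff and are illustrative only.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/osbuild/images/pkg/distro"
)

// ptr is a local stand-in for the module-internal common.ToPtr helper.
func ptr[T any](v T) *T { return &v }

func main() {
    // Kernel handling moves to the top-level DefaultKernel/UpdateDefaultKernel
    // fields, networking to the new distro.Sysconfig block.
    cfg := &distro.ImageConfig{
        DefaultKernel:       ptr("kernel"),
        UpdateDefaultKernel: ptr(true),
        Sysconfig: &distro.Sysconfig{
            Networking:                  true,
            NoZeroConf:                  true,
            CreateDefaultNetworkScripts: true,
        },
    }

    // SysconfigStageOptions rebuilds the osbuild sysconfig stage options,
    // including the default eth0 ifcfg file.
    opts := cfg.SysconfigStageOptions()
    out, _ := json.MarshalIndent(opts, "", "  ")
    fmt.Println(string(out))
}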

View file

@ -228,6 +228,9 @@ func osCustomizations(
var subscriptionStatus subscription.RHSMStatus
if options.Subscription != nil {
subscriptionStatus = subscription.RHSMConfigWithSubscription
if options.Subscription.Proxy != "" {
osc.InsightsClientConfig = &osbuild.InsightsClientConfigStageOptions{Proxy: options.Subscription.Proxy}
}
} else {
subscriptionStatus = subscription.RHSMConfigNoSubscription
}
@ -241,7 +244,7 @@ func osCustomizations(
osc.ShellInit = imageConfig.ShellInit
osc.Grub2Config = imageConfig.Grub2Config
osc.Sysconfig = imageConfig.Sysconfig
osc.Sysconfig = imageConfig.SysconfigStageOptions()
osc.SystemdLogind = imageConfig.SystemdLogind
osc.CloudInit = imageConfig.CloudInit
osc.Modprobe = imageConfig.Modprobe
@ -263,7 +266,7 @@ func osCustomizations(
osc.WAAgentConfig = imageConfig.WAAgentConfig
osc.UdevRules = imageConfig.UdevRules
osc.GCPGuestAgentConfig = imageConfig.GCPGuestAgentConfig
osc.WSLConfig = imageConfig.WSLConfig
osc.WSLConfig = imageConfig.WSLConfStageOptions()
osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...)

View file

@ -187,6 +187,21 @@ func (t *ImageType) BootMode() platform.BootMode {
return platform.BOOT_NONE
}
func (t *ImageType) BasePartitionTable() (*disk.PartitionTable, error) {
// XXX: simplify once https://github.com/osbuild/images/pull/1372
// (or something similar) lands, see pkg/distro/fedora. Once the
// YAML-based loading is in, we can drop the
// "BasePartitionTables BasePartitionTableFunc" field from ImageType.
if t.BasePartitionTables == nil {
return nil, nil
}
basePartitionTable, exists := t.BasePartitionTables(t)
if !exists {
return nil, nil
}
return &basePartitionTable, nil
}
func (t *ImageType) GetPartitionTable(
customizations *blueprint.Customizations,
options distro.ImageOptions,
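Unlike the fedora variant, the transitional rhel ImageType.BasePartitionTable above reports a missing table as (nil, nil) rather than an error, so callers must nil-check the returned table as well as the error. A self-contained sketch of that contract, using stand-in names (table, basePartitionTable, describe) rather than the real osbuild/images types:

package main

import "fmt"

// table is a stand-in for *disk.PartitionTable.
type table struct{ kind string }

// basePartitionTable mimics the transitional contract of the method above:
// (nil, nil) means "no partition table defined for this image type/arch".
func basePartitionTable(haveTable bool) (*table, error) {
    if !haveTable {
        return nil, nil
    }
    return &table{kind: "gpt"}, nil
}

func describe(haveTable bool) string {
    pt, err := basePartitionTable(haveTable)
    if err != nil {
        return "error: " + err.Error()
    }
    if pt == nil {
        // Callers must nil-check the table, not just the error.
        return "no partition table"
    }
    return "partition table: " + pt.kind
}

func main() {
    fmt.Println(describe(true))  // partition table: gpt
    fmt.Println(describe(false)) // no partition table
}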

View file

@ -204,18 +204,12 @@ func defaultEc2ImageConfig() *distro.ImageConfig {
"tuned",
},
DefaultTarget: common.ToPtr("multi-user.target"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
},
},
SystemdLogind: []*osbuild.SystemdLogindStageOptions{
{
Filename: "00-getty-fixes.conf",

View file

@ -321,18 +321,12 @@ func defaultAzureImageConfig(rd *rhel.Distribution) *distro.ImageConfig {
Layouts: []string{"us"},
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel-core",
},
Network: &osbuild.SysconfigNetworkOptions{
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel-core"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
},
},
EnabledServices: []string{
"firewalld",
"nm-cloud-setup.service",

View file

@ -8,8 +8,8 @@ import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/customizations/oscap"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/defs"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
@ -50,24 +50,7 @@ func distroISOLabelFunc(t *rhel.ImageType) string {
}
func defaultDistroImageConfig(d *rhel.Distribution) *distro.ImageConfig {
return &distro.ImageConfig{
Timezone: common.ToPtr("UTC"),
Locale: common.ToPtr("C.UTF-8"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
},
DefaultOSCAPDatastream: common.ToPtr(oscap.DefaultRHEL10Datastream(d.IsRHEL())),
InstallWeakDeps: common.ToPtr(true),
}
return common.Must(defs.DistroImageConfig(d.Name()))
}
func newDistro(name string, major, minor int) *rhel.Distribution {

View file

@ -108,14 +108,14 @@ func baseGCEImageConfig() *distro.ImageConfig {
PermitRootLogin: osbuild.PermitRootLoginValueNo,
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
DefaultKernel: "kernel-core",
UpdateDefault: true,
},
},
},
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel-core"),
// XXX: preserve the "old" behavior (which is likely a bug)
// where the GCE sysconfig network options are not set: the
// imageConfig merge is shallow and the previous setup changed
// the kernel options without also setting the network options.
Sysconfig: &distro.Sysconfig{},
Modprobe: []*osbuild.ModprobeStageOptions{
{
Filename: "blacklist-floppy.conf",

View file

@ -37,10 +37,8 @@ func mkWSLImgType() *rhel.ImageType {
},
},
NoSElinux: common.ToPtr(true),
WSLConfig: &osbuild.WSLConfStageOptions{
Boot: osbuild.WSLConfBootOptions{
Systemd: true,
},
WSLConfig: &distro.WSLConfig{
BootSystemd: true,
},
}

View file

@ -109,30 +109,12 @@ func ec2ImageConfig() *distro.ImageConfig {
"rsyslog",
},
DefaultTarget: common.ToPtr("multi-user.target"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
NetworkScripts: &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
},
},
CreateDefaultNetworkScripts: true,
},
SystemdLogind: []*osbuild.SystemdLogindStageOptions{
{

View file

@ -49,18 +49,13 @@ var azureDefaultImgConfig = &distro.ImageConfig{
},
SELinuxForceRelabel: common.ToPtr(true),
Authconfig: &osbuild.AuthconfigStageOptions{},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel-core",
},
Network: &osbuild.SysconfigNetworkOptions{
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel-core"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
},
},
EnabledServices: []string{
"cloud-config",
"cloud-final",

View file

@ -6,35 +6,14 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/defs"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
// RHEL-based OS image configuration defaults
func defaultDistroImageConfig(d *rhel.Distribution) *distro.ImageConfig {
return &distro.ImageConfig{
Timezone: common.ToPtr("America/New_York"),
Locale: common.ToPtr("en_US.UTF-8"),
GPGKeyFiles: []string{
"/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release",
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
},
KernelOptionsBootloader: common.ToPtr(true),
NoBLS: common.ToPtr(true), // RHEL 7 grub does not support BLS
InstallWeakDeps: common.ToPtr(true),
}
return common.Must(defs.DistroImageConfig(d.Name()))
}
func newDistro(name string, minor int) *rhel.Distribution {

View file

@ -38,30 +38,12 @@ func mkQcow2ImgType() *rhel.ImageType {
var qcow2DefaultImgConfig = &distro.ImageConfig{
DefaultTarget: common.ToPtr("multi-user.target"),
SELinuxForceRelabel: common.ToPtr(true),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
NetworkScripts: &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
},
},
CreateDefaultNetworkScripts: true,
},
RHSMConfig: map[subscription.RHSMStatus]*subscription.RHSMConfig{
subscription.RHSMConfigNoSubscription: {

View file

@ -198,30 +198,12 @@ func baseEc2ImageConfig() *distro.ImageConfig {
"reboot.target",
},
DefaultTarget: common.ToPtr("multi-user.target"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
NetworkScripts: &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
},
},
CreateDefaultNetworkScripts: true,
},
SystemdLogind: []*osbuild.SystemdLogindStageOptions{
{

View file

@ -370,18 +370,12 @@ var defaultAzureImageConfig = &distro.ImageConfig{
Layouts: []string{"us"},
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel-core",
},
Network: &osbuild.SysconfigNetworkOptions{
DefaultKernel: common.ToPtr("kernel-core"),
UpdateDefaultKernel: common.ToPtr(true),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
},
},
EnabledServices: []string{
"nm-cloud-setup.service",
"nm-cloud-setup.timer",

View file

@ -8,8 +8,8 @@ import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/customizations/oscap"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/defs"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
@ -37,25 +37,7 @@ var (
// RHEL-based OS image configuration defaults
func defaultDistroImageConfig(d *rhel.Distribution) *distro.ImageConfig {
return &distro.ImageConfig{
Timezone: common.ToPtr("America/New_York"),
Locale: common.ToPtr("en_US.UTF-8"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
},
KernelOptionsBootloader: common.ToPtr(true),
DefaultOSCAPDatastream: common.ToPtr(oscap.DefaultRHEL8Datastream(d.IsRHEL())),
InstallWeakDeps: common.ToPtr(true),
}
return common.Must(defs.DistroImageConfig(d.Name()))
}
func distroISOLabelFunc(t *rhel.ImageType) string {

View file

@ -129,14 +129,14 @@ func defaultGceByosImageConfig(rd distro.Distro) *distro.ImageConfig {
PermitRootLogin: osbuild.PermitRootLoginValueNo,
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
DefaultKernel: "kernel-core",
UpdateDefault: true,
},
},
},
DefaultKernel: common.ToPtr("kernel-core"),
UpdateDefaultKernel: common.ToPtr(true),
// XXX: preserve the "old" behavior (which is likely a bug)
// where the GCE sysconfig network options are not set: the
// imageConfig merge is shallow and the previous setup changed
// the kernel options without also setting the network options.
Sysconfig: &distro.Sysconfig{},
Modprobe: []*osbuild.ModprobeStageOptions{
{
Filename: "blacklist-floppy.conf",

View file

@ -4,7 +4,6 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
)
func mkWslImgType() *rhel.ImageType {
@ -24,10 +23,8 @@ func mkWslImgType() *rhel.ImageType {
it.DefaultImageConfig = &distro.ImageConfig{
Locale: common.ToPtr("en_US.UTF-8"),
NoSElinux: common.ToPtr(true),
WSLConfig: &osbuild.WSLConfStageOptions{
Boot: osbuild.WSLConfBootOptions{
Systemd: true,
},
WSLConfig: &distro.WSLConfig{
BootSystemd: true,
},
}

View file

@ -51,30 +51,13 @@ func defaultEc2ImageConfig() *distro.ImageConfig {
"tuned",
},
DefaultTarget: common.ToPtr("multi-user.target"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
NetworkScripts: &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
},
},
CreateDefaultNetworkScripts: true,
},
SystemdLogind: []*osbuild.SystemdLogindStageOptions{
{

View file

@ -334,18 +334,12 @@ func defaultAzureImageConfig(rd *rhel.Distribution) *distro.ImageConfig {
Layouts: []string{"us"},
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel-core",
},
Network: &osbuild.SysconfigNetworkOptions{
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel-core"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
},
},
EnabledServices: []string{
"firewalld",
"nm-cloud-setup.service",

View file

@ -8,8 +8,8 @@ import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/customizations/oscap"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/defs"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
@ -53,24 +53,7 @@ func distroISOLabelFunc(t *rhel.ImageType) string {
}
func defaultDistroImageConfig(d *rhel.Distribution) *distro.ImageConfig {
return &distro.ImageConfig{
Timezone: common.ToPtr("America/New_York"),
Locale: common.ToPtr("C.UTF-8"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
},
DefaultOSCAPDatastream: common.ToPtr(oscap.DefaultRHEL9Datastream(d.IsRHEL())),
InstallWeakDeps: common.ToPtr(true),
}
return common.Must(defs.DistroImageConfig(d.Name()))
}
func newDistro(name string, major, minor int) *rhel.Distribution {

View file

@ -105,14 +105,14 @@ func baseGCEImageConfig() *distro.ImageConfig {
PermitRootLogin: osbuild.PermitRootLoginValueNo,
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
DefaultKernel: "kernel-core",
UpdateDefault: true,
},
},
},
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel-core"),
// XXX: preserve the "old" behavior (which is likely a bug)
// where the GCE sysconfig network options are not set: the
// imageConfig merge is shallow and the previous setup changed
// the kernel options without also setting the network options.
Sysconfig: &distro.Sysconfig{},
Modprobe: []*osbuild.ModprobeStageOptions{
{
Filename: "blacklist-floppy.conf",

View file

@ -38,10 +38,8 @@ func mkWSLImgType() *rhel.ImageType {
},
Locale: common.ToPtr("en_US.UTF-8"),
NoSElinux: common.ToPtr(true),
WSLConfig: &osbuild.WSLConfStageOptions{
Boot: osbuild.WSLConfBootOptions{
Systemd: true,
},
WSLConfig: &distro.WSLConfig{
BootSystemd: true,
},
}

View file

@ -212,6 +212,10 @@ func (t *TestImageType) PartitionType() disk.PartitionTableType {
return disk.PT_NONE
}
func (t *TestImageType) BasePartitionTable() (*disk.PartitionTable, error) {
return nil, nil
}
func (t *TestImageType) BootMode() platform.BootMode {
return platform.BOOT_HYBRID
}

View file

@ -125,6 +125,7 @@ type OSCustomizations struct {
WAAgentConfig *osbuild.WAAgentConfStageOptions
UdevRules *osbuild.UdevRulesStageOptions
WSLConfig *osbuild.WSLConfStageOptions
InsightsClientConfig *osbuild.InsightsClientConfigStageOptions
LeapSecTZ *string
Presets []osbuild.Preset
ContainersStorage *string
@ -634,7 +635,11 @@ func (p *OS) serialize() osbuild.Pipeline {
}
if p.SshdConfig != nil {
pipeline.AddStage((osbuild.NewSshdConfigStage(p.SshdConfig)))
pipeline.AddStage(osbuild.NewSshdConfigStage(p.SshdConfig))
}
if p.InsightsClientConfig != nil {
pipeline.AddStage(osbuild.NewInsightsClientConfigStage(p.InsightsClientConfig))
}
if p.AuthConfig != nil {

View file

@ -0,0 +1,15 @@
package osbuild
type InsightsClientConfigStageOptions struct {
Proxy string `json:"proxy,omitempty"`
Path string `json:"path,omitempty"`
}
func (InsightsClientConfigStageOptions) isStageOptions() {}
func NewInsightsClientConfigStage(options *InsightsClientConfigStageOptions) *Stage {
return &Stage{
Type: "org.osbuild.insights-client.config",
Options: options,
}
}
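The stage is wired up in the OS pipeline only when a subscription proxy is configured (see the osCustomizations hunk earlier in this diff). A hedged sketch of constructing the stage and inspecting its JSON, assuming the vendored images version; the proxy URL is a placeholder and the printed shape is approximate.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/osbuild/images/pkg/osbuild"
)

func main() {
    // Placeholder proxy; in the pipeline code the stage is only created
    // when options.Subscription.Proxy is non-empty.
    stage := osbuild.NewInsightsClientConfigStage(&osbuild.InsightsClientConfigStageOptions{
        Proxy: "http://proxy.example.com:3128",
    })

    out, _ := json.Marshal(stage)
    fmt.Println(string(out))
    // Prints something like:
    // {"type":"org.osbuild.insights-client.config","options":{"proxy":"http://proxy.example.com:3128"}}
}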

View file

@ -1,11 +1,11 @@
package osbuild
type SysconfigStageOptions struct {
Kernel *SysconfigKernelOptions `json:"kernel,omitempty"`
Network *SysconfigNetworkOptions `json:"network,omitempty"`
NetworkScripts *NetworkScriptsOptions `json:"network-scripts,omitempty"`
Desktop *SysconfigDesktopOptions `json:"desktop,omitempty"`
LiveSys *SysconfigLivesysOptions `json:"livesys,omitempty"`
Kernel *SysconfigKernelOptions `json:"kernel,omitempty" yaml:"kernel,omitempty"`
Network *SysconfigNetworkOptions `json:"network,omitempty" yaml:"network,omitempty"`
NetworkScripts *NetworkScriptsOptions `json:"network-scripts,omitempty" yaml:"network-scripts,omitempty"`
Desktop *SysconfigDesktopOptions `json:"desktop,omitempty" yaml:"desktop,omitempty"`
LiveSys *SysconfigLivesysOptions `json:"livesys,omitempty" yaml:"livesys,omitempty"`
}
func (SysconfigStageOptions) isStageOptions() {}
@ -19,12 +19,14 @@ func NewSysconfigStage(options *SysconfigStageOptions) *Stage {
type SysconfigNetworkOptions struct {
Networking bool `json:"networking,omitempty"`
NoZeroConf bool `json:"no_zero_conf,omitempty"`
// XXX: ideally this would be "no_zeroconf" (because zeroconf
// is the program name) but we need to keep it for compatibility
NoZeroConf bool `json:"no_zero_conf,omitempty" yaml:"no_zero_conf,omitempty"`
}
type SysconfigKernelOptions struct {
UpdateDefault bool `json:"update_default,omitempty"`
DefaultKernel string `json:"default_kernel,omitempty"`
UpdateDefault bool `json:"update_default,omitempty" yaml:"update_default,omitempty"`
DefaultKernel string `json:"default_kernel,omitempty" yaml:"default_kernel,omitempty"`
}
type SysconfigDesktopOptions struct {

View file

@ -244,6 +244,7 @@ func GenSystemdMountStages(pt *disk.PartitionTable) ([]*Stage, error) {
}
options := &SystemdUnitCreateStageOptions{
UnitPath: EtcUnitPath, // create all mount units in /etc/systemd/
Config: SystemdUnit{
Unit: &UnitSection{
// Adds the following dependencies for mount units (systemd.mount(5)):

8
vendor/modules.txt vendored
View file

@ -173,7 +173,7 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version
github.com/AzureAD/microsoft-authentication-library-for-go/apps/public
# github.com/BurntSushi/toml v1.4.0
# github.com/BurntSushi/toml v1.5.1-0.20250403130103-3d3abc24416a
## explicit; go 1.18
github.com/BurntSushi/toml
github.com/BurntSushi/toml/internal
@ -1045,7 +1045,11 @@ github.com/oracle/oci-go-sdk/v54/identity
github.com/oracle/oci-go-sdk/v54/objectstorage
github.com/oracle/oci-go-sdk/v54/objectstorage/transfer
github.com/oracle/oci-go-sdk/v54/workrequests
# github.com/osbuild/images v0.128.0
# github.com/osbuild/blueprint v1.6.0
## explicit; go 1.22.8
github.com/osbuild/blueprint/internal/common
github.com/osbuild/blueprint/pkg/blueprint
# github.com/osbuild/images v0.131.0
## explicit; go 1.22.8
github.com/osbuild/images/data/dependencies
github.com/osbuild/images/data/repositories