deps: update images to 0.102

Lukas Zapletal 2024-11-27 08:47:58 +01:00 committed by Tomáš Hozza
parent 2e1afdc829
commit 6310115629
20 changed files with 1425 additions and 249 deletions

go.mod

@@ -46,7 +46,7 @@ require (
github.com/labstack/gommon v0.4.2
github.com/openshift-online/ocm-sdk-go v0.1.438
github.com/oracle/oci-go-sdk/v54 v54.0.0
-github.com/osbuild/images v0.99.0
+github.com/osbuild/images v0.102.0
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d
github.com/osbuild/pulp-client v0.1.0
github.com/prometheus/client_golang v1.20.2

go.sum

@@ -534,8 +534,8 @@ github.com/openshift-online/ocm-sdk-go v0.1.438 h1:tsLCCUzbLCTL4RZG02y9RuopmGCXp
github.com/openshift-online/ocm-sdk-go v0.1.438/go.mod h1:CiAu2jwl3ITKOxkeV0Qnhzv4gs35AmpIzVABQLtcI2Y=
github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
-github.com/osbuild/images v0.99.0 h1:+L1Di9oP8bK0faYM/Zb2VmxYfFHJq4XWU4KH36e7wkY=
-github.com/osbuild/images v0.99.0/go.mod h1:4bNmMQOVadIKVC1q8zsLO8tdEQFH90zIp+MQBQUnCiE=
+github.com/osbuild/images v0.102.0 h1:RQuxZM2w/afCa+Q8mrEG9S60Zbi4j9aSFoFUKFo/Tkk=
+github.com/osbuild/images v0.102.0/go.mod h1:4bNmMQOVadIKVC1q8zsLO8tdEQFH90zIp+MQBQUnCiE=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d h1:r9BFPDv0uuA9k1947Jybcxs36c/pTywWS1gjeizvtcQ=
github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20240814102216-0239db53236d/go.mod h1:zR1iu/hOuf+OQNJlk70tju9IqzzM4ycq0ectkFBm94U=
github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8=


@@ -0,0 +1,5 @@
package blueprint
type CACustomization struct {
PEMCerts []string `json:"pem_certs,omitempty" toml:"pem_certs,omitempty"`
}
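A minimal sketch of how the new section is meant to round-trip (not part of the commit; the import path is assumed to be the vendored blueprint package shown here, and the CACerts field it lands in is added to Customizations further down):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/osbuild/images/pkg/blueprint" // assumed path of the package touched here
)

func main() {
	// a blueprint fragment carrying one PEM bundle under "cacerts"
	raw := []byte(`{"cacerts": {"pem_certs": ["-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"]}}`)

	var c blueprint.Customizations
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	fmt.Println(len(c.CACerts.PEMCerts)) // 1
}

Plain unmarshalling only fills the field; the bundles are actually validated later, when GetCACerts runs them through cert.ParseCerts.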


@@ -6,6 +6,7 @@ import (
"slices"
"strings"
"github.com/osbuild/images/pkg/cert"
"github.com/osbuild/images/pkg/customizations/anaconda"
)
@@ -19,6 +20,7 @@ type Customizations struct {
Firewall *FirewallCustomization `json:"firewall,omitempty" toml:"firewall,omitempty"`
Services *ServicesCustomization `json:"services,omitempty" toml:"services,omitempty"`
Filesystem []FilesystemCustomization `json:"filesystem,omitempty" toml:"filesystem,omitempty"`
Disk *DiskCustomization `json:"disk,omitempty" toml:"disk,omitempty"`
InstallationDevice string `json:"installation_device,omitempty" toml:"installation_device,omitempty"`
FDO *FDOCustomization `json:"fdo,omitempty" toml:"fdo,omitempty"`
OpenSCAP *OpenSCAPCustomization `json:"openscap,omitempty" toml:"openscap,omitempty"`
@@ -31,6 +33,7 @@ type Customizations struct {
Installer *InstallerCustomization `json:"installer,omitempty" toml:"installer,omitempty"`
RPM *RPMCustomization `json:"rpm,omitempty" toml:"rpm,omitempty"`
RHSM *RHSMCustomization `json:"rhsm,omitempty" toml:"rhsm,omitempty"`
CACerts *CACustomization `json:"cacerts,omitempty" toml:"ca,omitempty"`
}
type IgnitionCustomization struct {
@@ -311,6 +314,17 @@ func (c *Customizations) GetFilesystemsMinSize() uint64 {
return agg
}
func (c *Customizations) GetPartitioning() (*DiskCustomization, error) {
if c == nil {
return nil, nil
}
if err := c.Disk.Validate(); err != nil {
return nil, err
}
return c.Disk, nil
}
func (c *Customizations) GetInstallationDevice() string {
if c == nil || c.InstallationDevice == "" {
return ""
@@ -425,3 +439,32 @@ func (c *Customizations) GetRHSM() *RHSMCustomization {
}
return c.RHSM
}
func (c *Customizations) checkCACerts() error {
if c == nil {
return nil
}
if c.CACerts != nil {
for _, bundle := range c.CACerts.PEMCerts {
_, err := cert.ParseCerts(bundle)
if err != nil {
return err
}
}
}
return nil
}
func (c *Customizations) GetCACerts() (*CACustomization, error) {
if c == nil {
return nil, nil
}
if err := c.checkCACerts(); err != nil {
return nil, err
}
return c.CACerts, nil
}


@@ -0,0 +1,582 @@
package blueprint
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"path/filepath"
"slices"
"strings"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/pathpolicy"
)
type DiskCustomization struct {
// TODO: Add partition table type: gpt or dos
MinSize uint64
Partitions []PartitionCustomization
}
type diskCustomizationMarshaler struct {
// TODO: Add partition table type: gpt or dos
MinSize datasizes.Size `json:"minsize,omitempty" toml:"minsize,omitempty"`
Partitions []PartitionCustomization `json:"partitions,omitempty" toml:"partitions,omitempty"`
}
func (dc *DiskCustomization) UnmarshalJSON(data []byte) error {
var dcm diskCustomizationMarshaler
if err := json.Unmarshal(data, &dcm); err != nil {
return err
}
dc.MinSize = dcm.MinSize.Uint64()
dc.Partitions = dcm.Partitions
return nil
}
func (dc *DiskCustomization) UnmarshalTOML(data any) error {
return unmarshalTOMLviaJSON(dc, data)
}
// PartitionCustomization defines a single partition on a disk. The Type
// defines the kind of "payload" for the partition: plain, lvm, or btrfs.
// - plain: the payload will be a filesystem on a partition (e.g. xfs, ext4).
// See [FilesystemTypedCustomization] for extra fields.
// - lvm: the payload will be an LVM volume group. See [VGCustomization] for
// extra fields
// - btrfs: the payload will be a btrfs volume. See
// [BtrfsVolumeCustomization] for extra fields.
type PartitionCustomization struct {
// The type of payload for the partition (optional, defaults to "plain").
Type string `json:"type" toml:"type"`
// Minimum size of the partition that contains the filesystem (for "plain"
// filesystem), volume group ("lvm"), or btrfs volume ("btrfs"). The final
// size of the partition will be larger than the minsize if the sum of the
// contained volumes (logical volumes or subvolumes) is larger. In
// addition, certain mountpoints have required minimum sizes. See
// https://osbuild.org/docs/user-guide/partitioning for more details.
// (optional, defaults depend on payload and mountpoints).
MinSize uint64 `json:"minsize" toml:"minsize"`
BtrfsVolumeCustomization
VGCustomization
FilesystemTypedCustomization
}
// A filesystem on a plain partition or LVM logical volume.
// Note the differences from [FilesystemCustomization]:
// - Adds a label.
// - Adds a filesystem type (fs_type).
// - Does not define a size. The size is defined by its container: a
// partition ([PartitionCustomization]) or LVM logical volume
// ([LVCustomization]).
type FilesystemTypedCustomization struct {
Mountpoint string `json:"mountpoint" toml:"mountpoint"`
Label string `json:"label,omitempty" toml:"label,omitempty"`
FSType string `json:"fs_type,omitempty" toml:"fs_type,omitempty"`
}
// An LVM volume group with one or more logical volumes.
type VGCustomization struct {
// Volume group name (optional, default will be automatically generated).
Name string `json:"name" toml:"name"`
LogicalVolumes []LVCustomization `json:"logical_volumes,omitempty" toml:"logical_volumes,omitempty"`
}
type LVCustomization struct {
// Logical volume name
Name string `json:"name,omitempty" toml:"name,omitempty"`
// Minimum size of the logical volume
MinSize uint64 `json:"minsize,omitempty" toml:"minsize,omitempty"`
FilesystemTypedCustomization
}
// Custom JSON unmarshaller for LVCustomization for handling the conversion of
// data sizes (minsize) expressed as strings to uint64.
func (lv *LVCustomization) UnmarshalJSON(data []byte) error {
var lvAnySize struct {
Name string `json:"name,omitempty" toml:"name,omitempty"`
MinSize any `json:"minsize,omitempty" toml:"minsize,omitempty"`
FilesystemTypedCustomization
}
if err := json.Unmarshal(data, &lvAnySize); err != nil {
return err
}
lv.Name = lvAnySize.Name
lv.FilesystemTypedCustomization = lvAnySize.FilesystemTypedCustomization
if lvAnySize.MinSize == nil {
return fmt.Errorf("minsize is required")
}
size, err := decodeSize(lvAnySize.MinSize)
if err != nil {
return err
}
lv.MinSize = size
return nil
}
// A btrfs volume consisting of one or more subvolumes.
type BtrfsVolumeCustomization struct {
Subvolumes []BtrfsSubvolumeCustomization
}
type BtrfsSubvolumeCustomization struct {
// The name of the subvolume, which defines the location (path) on the
// root volume (required).
// See https://btrfs.readthedocs.io/en/latest/Subvolumes.html
Name string `json:"name" toml:"name"`
// Mountpoint for the subvolume.
Mountpoint string `json:"mountpoint" toml:"mountpoint"`
}
// Custom JSON unmarshaller that first reads the value of the "type" field and
// then deserialises the whole object into a struct that only contains the
// fields valid for that partition type. This ensures that no fields are set
// for the substructure of a different type than the one defined in the "type"
// fields.
func (v *PartitionCustomization) UnmarshalJSON(data []byte) error {
errPrefix := "JSON unmarshal:"
var typeSniffer struct {
Type string `json:"type"`
MinSize any `json:"minsize"`
}
if err := json.Unmarshal(data, &typeSniffer); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
partType := "plain"
if typeSniffer.Type != "" {
partType = typeSniffer.Type
}
switch partType {
case "plain":
if err := decodePlain(v, data); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "btrfs":
if err := decodeBtrfs(v, data); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "lvm":
if err := decodeLVM(v, data); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
default:
return fmt.Errorf("%s unknown partition type: %s", errPrefix, partType)
}
v.Type = partType
if typeSniffer.MinSize == nil {
return fmt.Errorf("minsize is required")
}
minsize, err := decodeSize(typeSniffer.MinSize)
if err != nil {
return fmt.Errorf("%s error decoding minsize for partition: %w", errPrefix, err)
}
v.MinSize = minsize
return nil
}
// decodePlain decodes the data into a struct that only embeds the
// FilesystemCustomization with DisallowUnknownFields. This ensures that when
// the type is "plain", none of the fields for btrfs or lvm are used.
func decodePlain(v *PartitionCustomization, data []byte) error {
var plain struct {
// Type and minsize are handled by the caller. These are added here to
// satisfy "DisallowUnknownFields" when decoding.
Type string `json:"type"`
MinSize any `json:"minsize"`
FilesystemTypedCustomization
}
decoder := json.NewDecoder(bytes.NewReader(data))
decoder.DisallowUnknownFields()
err := decoder.Decode(&plain)
if err != nil {
return fmt.Errorf("error decoding partition with type \"plain\": %w", err)
}
v.FilesystemTypedCustomization = plain.FilesystemTypedCustomization
return nil
}
// decodeBtrfs decodes the data into a struct that only embeds the
// BtrfsVolumeCustomization with DisallowUnknownFields. This ensures that when
// the type is btrfs, none of the fields for plain or lvm are used.
func decodeBtrfs(v *PartitionCustomization, data []byte) error {
var btrfs struct {
// Type and minsize are handled by the caller. These are added here to
// satisfy "DisallowUnknownFields" when decoding.
Type string `json:"type"`
MinSize any `json:"minsize"`
BtrfsVolumeCustomization
}
decoder := json.NewDecoder(bytes.NewReader(data))
decoder.DisallowUnknownFields()
err := decoder.Decode(&btrfs)
if err != nil {
return fmt.Errorf("error decoding partition with type \"btrfs\": %w", err)
}
v.BtrfsVolumeCustomization = btrfs.BtrfsVolumeCustomization
return nil
}
// decodeLVM decodes the data into a struct that only embeds the
// VGCustomization with DisallowUnknownFields. This ensures that when the type
// is lvm, none of the fields for plain or btrfs are used.
func decodeLVM(v *PartitionCustomization, data []byte) error {
var vg struct {
// Type and minsize are handled by the caller. These are added here to
// satisfy "DisallowUnknownFields" when decoding.
Type string `json:"type"`
MinSize any `json:"minsize"`
VGCustomization
}
decoder := json.NewDecoder(bytes.NewReader(data))
decoder.DisallowUnknownFields()
if err := decoder.Decode(&vg); err != nil {
return fmt.Errorf("error decoding partition with type \"lvm\": %w", err)
}
v.VGCustomization = vg.VGCustomization
return nil
}
// Custom TOML unmarshaller that first reads the value of the "type" field and
// then deserialises the whole object into a struct that only contains the
// fields valid for that partition type. This ensures that no fields are set
// for the substructure of a different type than the one defined in the "type"
// fields.
func (v *PartitionCustomization) UnmarshalTOML(data any) error {
errPrefix := "TOML unmarshal:"
d, ok := data.(map[string]any)
if !ok {
return fmt.Errorf("%s customizations.partition is not an object", errPrefix)
}
partType := "plain"
if typeField, ok := d["type"]; ok {
typeStr, ok := typeField.(string)
if !ok {
return fmt.Errorf("%s type must be a string, got \"%v\" of type %T", errPrefix, typeField, typeField)
}
partType = typeStr
}
// serialise the data to JSON and reuse the subobject decoders
dataJSON, err := json.Marshal(data)
if err != nil {
return fmt.Errorf("%s error while decoding partition customization: %w", errPrefix, err)
}
switch partType {
case "plain":
if err := decodePlain(v, dataJSON); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "btrfs":
if err := decodeBtrfs(v, dataJSON); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
case "lvm":
if err := decodeLVM(v, dataJSON); err != nil {
return fmt.Errorf("%s %w", errPrefix, err)
}
default:
return fmt.Errorf("%s unknown partition type: %s", errPrefix, partType)
}
v.Type = partType
minsizeField, ok := d["minsize"]
if !ok {
return fmt.Errorf("minsize is required")
}
minsize, err := decodeSize(minsizeField)
if err != nil {
return fmt.Errorf("%s error decoding minsize for partition: %w", errPrefix, err)
}
v.MinSize = minsize
return nil
}
// Validate checks for customization combinations that are generally not
// supported or can create conflicts, regardless of specific distro or image
// type policies. The validator ensures all of the following properties:
// - All mountpoints are valid
// - All mountpoints are unique
// - All LVM volume group names are unique
// - All LVM logical volume names are unique within a given volume group
// - All btrfs subvolume names are unique within a given btrfs volume
// - All btrfs subvolume names are valid and non-empty
// - All filesystems are valid for their mountpoints (e.g. xfs or ext4 for /boot)
// - No LVM logical volume has an invalid mountpoint (/boot or /boot/efi)
// - Plain filesystem types are valid for the partition type
// - All non-empty properties are valid for the partition type (e.g.
// LogicalVolumes is empty when the type is "plain" or "btrfs")
//
// Note that in *addition* consumers should also call
// ValidateLayoutConstraints() to validate that the policy for disk
// customizations is met.
func (p *DiskCustomization) Validate() error {
if p == nil {
return nil
}
mountpoints := make(map[string]bool)
vgnames := make(map[string]bool)
var errs []error
for _, part := range p.Partitions {
switch part.Type {
case "plain", "":
errs = append(errs, part.validatePlain(mountpoints))
case "lvm":
errs = append(errs, part.validateLVM(mountpoints, vgnames))
case "btrfs":
errs = append(errs, part.validateBtrfs(mountpoints))
default:
errs = append(errs, fmt.Errorf("unknown partition type: %s", part.Type))
}
}
// will discard all nil errors
if err := errors.Join(errs...); err != nil {
return fmt.Errorf("invalid partitioning customizations:\n%w", err)
}
return nil
}
func validateMountpoint(path string) error {
if path == "" {
return fmt.Errorf("mountpoint is empty")
}
if !strings.HasPrefix(path, "/") {
return fmt.Errorf("mountpoint %q is not an absolute path", path)
}
if cleanPath := filepath.Clean(path); path != cleanPath {
return fmt.Errorf("mountpoint %q is not a canonical path (did you mean %q?)", path, cleanPath)
}
return nil
}
// ValidateLayoutConstraints checks that at most one LVM Volume Group or btrfs
// volume is defined. Returns an error if both LVM and btrfs are set, or if
// either has more than one element.
//
// Note that this is a *policy* validation, in theory the "disk" code
// does support the constraints but we choose not to allow them for
// now. Each consumer of "DiskCustomization" should call this
// *unless* it's very low-level and not end-user-facing.
func (p *DiskCustomization) ValidateLayoutConstraints() error {
if p == nil {
return nil
}
var btrfsVols, lvmVGs uint
for _, part := range p.Partitions {
switch part.Type {
case "lvm":
lvmVGs++
case "btrfs":
btrfsVols++
}
if lvmVGs > 0 && btrfsVols > 0 {
return fmt.Errorf("btrfs and lvm partitioning cannot be combined")
}
}
if btrfsVols > 1 {
return fmt.Errorf("multiple btrfs volumes are not yet supported")
}
if lvmVGs > 1 {
return fmt.Errorf("multiple LVM volume groups are not yet supported")
}
return nil
}
// Check that the fs type is valid for the mountpoint.
func validateFilesystemType(path, fstype string) error {
badfsMsgFmt := "unsupported filesystem type for %q: %s"
switch path {
case "/boot":
switch fstype {
case "xfs", "ext4":
default:
return fmt.Errorf(badfsMsgFmt, path, fstype)
}
case "/boot/efi":
switch fstype {
case "vfat":
default:
return fmt.Errorf(badfsMsgFmt, path, fstype)
}
}
return nil
}
// These mountpoints must be on a plain partition (i.e. not on LVM or btrfs).
var plainOnlyMountpoints = []string{
"/boot",
"/boot/efi", // not allowed by our global policies, but that might change
}
var validPlainFSTypes = []string{
"ext4",
"vfat",
"xfs",
}
func (p *PartitionCustomization) validatePlain(mountpoints map[string]bool) error {
if err := validateMountpoint(p.Mountpoint); err != nil {
return err
}
if mountpoints[p.Mountpoint] {
return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", p.Mountpoint)
}
// TODO: allow empty fstype with default from distro
if !slices.Contains(validPlainFSTypes, p.FSType) {
return fmt.Errorf("unknown or invalid filesystem type for mountpoint %q: %s", p.Mountpoint, p.FSType)
}
if err := validateFilesystemType(p.Mountpoint, p.FSType); err != nil {
return err
}
mountpoints[p.Mountpoint] = true
return nil
}
func (p *PartitionCustomization) validateLVM(mountpoints, vgnames map[string]bool) error {
if p.Name != "" && vgnames[p.Name] { // VGs with no name get autogenerated names
return fmt.Errorf("duplicate LVM volume group name %q in partitioning customizations", p.Name)
}
// check for invalid property usage
if len(p.Subvolumes) > 0 {
return fmt.Errorf("subvolumes defined for LVM volume group (partition type \"lvm\")")
}
if p.Label != "" {
return fmt.Errorf("label %q defined for LVM volume group (partition type \"lvm\")", p.Label)
}
vgnames[p.Name] = true
lvnames := make(map[string]bool)
for _, lv := range p.LogicalVolumes {
if lv.Name != "" && lvnames[lv.Name] { // LVs with no name get autogenerated names
return fmt.Errorf("duplicate LVM logical volume name %q in volume group %q in partitioning customizations", lv.Name, p.Name)
}
lvnames[lv.Name] = true
if err := validateMountpoint(lv.Mountpoint); err != nil {
return fmt.Errorf("invalid logical volume customization: %w", err)
}
if mountpoints[lv.Mountpoint] {
return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", lv.Mountpoint)
}
mountpoints[lv.Mountpoint] = true
if slices.Contains(plainOnlyMountpoints, lv.Mountpoint) {
return fmt.Errorf("invalid mountpoint %q for logical volume", lv.Mountpoint)
}
// TODO: allow empty fstype with default from distro
if !slices.Contains(validPlainFSTypes, lv.FSType) {
return fmt.Errorf("unknown or invalid filesystem type for logical volume with mountpoint %q: %s", lv.Mountpoint, lv.FSType)
}
}
return nil
}
func (p *PartitionCustomization) validateBtrfs(mountpoints map[string]bool) error {
if p.Mountpoint != "" {
return fmt.Errorf(`"mountpoint" is not supported for btrfs volumes (only subvolumes can have mountpoints)`)
}
if len(p.Subvolumes) == 0 {
return fmt.Errorf("btrfs volume requires subvolumes")
}
if len(p.LogicalVolumes) > 0 {
return fmt.Errorf("LVM logical volumes defined for btrfs volume (partition type \"btrfs\")")
}
subvolnames := make(map[string]bool)
for _, subvol := range p.Subvolumes {
if subvol.Name == "" {
return fmt.Errorf("btrfs subvolume with empty name in partitioning customizations")
}
if subvolnames[subvol.Name] {
return fmt.Errorf("duplicate btrfs subvolume name %q in partitioning customizations", subvol.Name)
}
subvolnames[subvol.Name] = true
if err := validateMountpoint(subvol.Mountpoint); err != nil {
return fmt.Errorf("invalid btrfs subvolume customization: %w", err)
}
if mountpoints[subvol.Mountpoint] {
return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", subvol.Mountpoint)
}
if slices.Contains(plainOnlyMountpoints, subvol.Mountpoint) {
return fmt.Errorf("invalid mountpoint %q for btrfs subvolume", subvol.Mountpoint)
}
mountpoints[subvol.Mountpoint] = true
}
return nil
}
// CheckDiskMountpointsPolicy checks if the mountpoints under a [DiskCustomization] are allowed by the policy.
func CheckDiskMountpointsPolicy(partitioning *DiskCustomization, mountpointAllowList *pathpolicy.PathPolicies) error {
if partitioning == nil {
return nil
}
// collect all mountpoints
var mountpoints []string
for _, part := range partitioning.Partitions {
if part.Mountpoint != "" {
mountpoints = append(mountpoints, part.Mountpoint)
}
for _, lv := range part.LogicalVolumes {
mountpoints = append(mountpoints, lv.Mountpoint)
}
for _, subvol := range part.Subvolumes {
mountpoints = append(mountpoints, subvol.Mountpoint)
}
}
var errs []error
for _, mp := range mountpoints {
if err := mountpointAllowList.Check(mp); err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
return fmt.Errorf("The following errors occurred while setting up custom mountpoints:\n%w", errors.Join(errs...))
}
return nil
}
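A hedged usage sketch for the new partitioning customizations (not part of the diff; the import path is assumed to be the vendored blueprint package above). It decodes a customization with string sizes and runs both validators:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/osbuild/images/pkg/blueprint" // assumed path
)

func main() {
	raw := []byte(`{
		"minsize": "20 GiB",
		"partitions": [
			{"type": "plain", "minsize": "500 MiB", "mountpoint": "/boot", "fs_type": "ext4"},
			{"type": "lvm", "minsize": "10 GiB", "logical_volumes": [
				{"name": "root", "minsize": "8 GiB", "mountpoint": "/", "fs_type": "xfs"}
			]}
		]
	}`)

	var disk blueprint.DiskCustomization
	if err := json.Unmarshal(raw, &disk); err != nil {
		panic(err)
	}
	// structural checks: mountpoints, filesystem types, unique names
	if err := disk.Validate(); err != nil {
		panic(err)
	}
	// policy check: at most one volume group or btrfs volume, not both
	if err := disk.ValidateLayoutConstraints(); err != nil {
		panic(err)
	}
	fmt.Println(disk.MinSize, len(disk.Partitions)) // 21474836480 2
}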


@@ -10,74 +10,33 @@ import (
)
type FilesystemCustomization struct {
-Mountpoint string `json:"mountpoint,omitempty" toml:"mountpoint,omitempty"`
-MinSize uint64 `json:"minsize,omitempty" toml:"minsize,omitempty"`
+Mountpoint string
+MinSize uint64
}
-func (fsc *FilesystemCustomization) UnmarshalTOML(data interface{}) error {
-d, ok := data.(map[string]interface{})
-if !ok {
-return fmt.Errorf("customizations.filesystem is not an object")
-}
-switch d["mountpoint"].(type) {
-case string:
-fsc.Mountpoint = d["mountpoint"].(string)
-default:
-return fmt.Errorf("TOML unmarshal: mountpoint must be string, got %v of type %T", d["mountpoint"], d["mountpoint"])
-}
-switch d["minsize"].(type) {
-case int64:
-minSize := d["minsize"].(int64)
-if minSize < 0 {
-return fmt.Errorf("TOML unmarshal: minsize cannot be negative")
-}
-fsc.MinSize = uint64(minSize)
-case string:
-minSize, err := datasizes.Parse(d["minsize"].(string))
-if err != nil {
-return fmt.Errorf("TOML unmarshal: minsize is not valid filesystem size (%w)", err)
-}
-fsc.MinSize = minSize
-default:
-return fmt.Errorf("TOML unmarshal: minsize must be integer or string, got %v of type %T", d["minsize"], d["minsize"])
-}
-return nil
-}
+type filesystemCustomizationMarshaling struct {
+Mountpoint string `json:"mountpoint,omitempty" toml:"mountpoint,omitempty"`
+MinSize datasizes.Size `json:"minsize,omitempty" toml:"minsize,omitempty"`
+}
func (fsc *FilesystemCustomization) UnmarshalJSON(data []byte) error {
-var v interface{}
-if err := json.Unmarshal(data, &v); err != nil {
+var fc filesystemCustomizationMarshaling
+if err := json.Unmarshal(data, &fc); err != nil {
+if fc.Mountpoint != "" {
+return fmt.Errorf("error decoding minsize value for mountpoint %q: %w", fc.Mountpoint, err)
+}
return err
}
-d, _ := v.(map[string]interface{})
-switch d["mountpoint"].(type) {
-case string:
-fsc.Mountpoint = d["mountpoint"].(string)
-default:
-return fmt.Errorf("JSON unmarshal: mountpoint must be string, got %v of type %T", d["mountpoint"], d["mountpoint"])
-}
-// The JSON specification only mentions float64 and Go defaults to it: https://go.dev/blog/json
-switch d["minsize"].(type) {
-case float64:
-fsc.MinSize = uint64(d["minsize"].(float64))
-case string:
-minSize, err := datasizes.Parse(d["minsize"].(string))
-if err != nil {
-return fmt.Errorf("JSON unmarshal: minsize is not valid filesystem size (%w)", err)
-}
-fsc.MinSize = minSize
-default:
-return fmt.Errorf("JSON unmarshal: minsize must be float64 number or string, got %v of type %T", d["minsize"], d["minsize"])
-}
+fsc.Mountpoint = fc.Mountpoint
+fsc.MinSize = fc.MinSize.Uint64()
return nil
}
+func (fsc *FilesystemCustomization) UnmarshalTOML(data any) error {
+return unmarshalTOMLviaJSON(fsc, data)
+}
// CheckMountpointsPolicy checks if the mountpoints are allowed by the policy
func CheckMountpointsPolicy(mountpoints []FilesystemCustomization, mountpointAllowList *pathpolicy.PathPolicies) error {
var errs []error
@@ -93,3 +52,27 @@ func CheckMountpointsPolicy(mountpoints []FilesystemCustomization, mountpointAll
return nil
}
// decodeSize takes an integer or string representing a data size (with a data
// suffix) and returns the uint64 representation.
func decodeSize(size any) (uint64, error) {
switch s := size.(type) {
case string:
return datasizes.Parse(s)
case int64:
if s < 0 {
return 0, fmt.Errorf("cannot be negative")
}
return uint64(s), nil
case float64:
if s < 0 {
return 0, fmt.Errorf("cannot be negative")
}
// TODO: emit warning of possible truncation?
return uint64(s), nil
case uint64:
return s, nil
default:
return 0, fmt.Errorf("failed to convert value \"%v\" to number", size)
}
}
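A short sketch of the behaviour the rewrite above enables (not part of the diff; import path assumed as before): minsize now accepts either a plain number or a string with units, because the parsing moved into datasizes.Size:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/osbuild/images/pkg/blueprint" // assumed path
)

func main() {
	var fs blueprint.FilesystemCustomization
	// minsize may be a plain number or a human-readable string
	if err := json.Unmarshal([]byte(`{"mountpoint": "/var", "minsize": "1 GiB"}`), &fs); err != nil {
		panic(err)
	}
	fmt.Println(fs.Mountpoint, fs.MinSize) // /var 1073741824
}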


@@ -0,0 +1,24 @@
package blueprint
import (
"encoding/json"
"fmt"
)
// XXX: move to internal/common ?
func unmarshalTOMLviaJSON(u json.Unmarshaler, data any) error {
// This is the most efficient way to reuse code when unmarshaling
// structs in TOML. It leaks JSON errors, which is a bit sad, but
// because the TOML unmarshaler gives us not "[]byte" but an
// already pre-processed "any", we cannot just unmarshal into our
// "fooMarshaling" struct and reuse the result, so we resort to
// this workaround (but TOML will go away long term anyway).
dataJSON, err := json.Marshal(data)
if err != nil {
return fmt.Errorf("error unmarshaling TOML data %v: %w", data, err)
}
if err := u.UnmarshalJSON(dataJSON); err != nil {
return fmt.Errorf("error decoding TOML %v: %w", data, err)
}
return nil
}
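To illustrate the workaround (not part of the diff): the TOML decoder hands UnmarshalTOML an already-decoded value, which is re-encoded to JSON and pushed through the type's UnmarshalJSON. From outside the package the effect looks like this:

package main

import (
	"fmt"

	"github.com/osbuild/images/pkg/blueprint" // assumed path
)

func main() {
	// what a TOML decoder would pass to UnmarshalTOML for:
	//   mountpoint = "/srv"
	//   minsize = "2 GiB"
	tomlValue := map[string]any{"mountpoint": "/srv", "minsize": "2 GiB"}

	var fs blueprint.FilesystemCustomization
	// internally this marshals tomlValue to JSON and calls UnmarshalJSON
	if err := fs.UnmarshalTOML(tomlValue); err != nil {
		panic(err)
	}
	fmt.Println(fs.Mountpoint, fs.MinSize) // /srv 2147483648
}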


@@ -0,0 +1,46 @@
package cert
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
)
var ErrNoValidPEMCertificatesFound = errors.New("no valid PEM certificates found")
// ParseCerts parses a PEM-encoded certificate chain formatted as concatenated
// strings and returns a slice of x509.Certificate. It returns an error when a
// certificate cannot be parsed or when no certificates are recognized in the
// input.
func ParseCerts(cert string) ([]*x509.Certificate, error) {
result := make([]*x509.Certificate, 0, 1)
block := []byte(cert)
var blocks [][]byte
for {
var certDERBlock *pem.Block
certDERBlock, block = pem.Decode(block)
if certDERBlock == nil {
break
}
if certDERBlock.Type == "CERTIFICATE" {
blocks = append(blocks, certDERBlock.Bytes)
}
}
for _, block := range blocks {
cert, err := x509.ParseCertificate(block)
if err != nil {
return nil, fmt.Errorf("failed to parse certificate: %w", err)
}
result = append(result, cert)
}
if len(result) == 0 {
return nil, fmt.Errorf("%w in: %s", ErrNoValidPEMCertificatesFound, cert)
}
return result, nil
}
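A usage sketch for the new helper (not part of the diff); the import path matches the one added to customizations.go above:

package main

import (
	"fmt"

	"github.com/osbuild/images/pkg/cert"
)

func main() {
	const bundle = `-----BEGIN CERTIFICATE-----
MIIB...
-----END CERTIFICATE-----`

	certs, err := cert.ParseCerts(bundle)
	if err != nil {
		// a truncated or garbage bundle fails either with a parse error or
		// with ErrNoValidPEMCertificatesFound when no PEM blocks decode
		fmt.Println("invalid bundle:", err)
		return
	}
	for _, c := range certs {
		fmt.Println(c.Subject)
	}
}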

vendor/github.com/osbuild/images/pkg/datasizes/size.go generated vendored Normal file

@@ -0,0 +1,72 @@
package datasizes
import (
"bytes"
"encoding/json"
"fmt"
)
// Size is a wrapper around uint64 with support for reading sizes from strings
// in yaml/toml, so {"size": 123}, {"size": "1234"}, and {"size": "1 GiB"} are
// all supported
type Size uint64
// Uint64 returns the size as uint64. This is a convenience function;
// it is strictly equivalent to a plain uint64() conversion.
func (si Size) Uint64() uint64 {
return uint64(si)
}
func (si *Size) UnmarshalTOML(data interface{}) error {
i, err := decodeSize(data)
if err != nil {
return fmt.Errorf("error decoding TOML size: %w", err)
}
*si = Size(i)
return nil
}
func (si *Size) UnmarshalJSON(data []byte) error {
dec := json.NewDecoder(bytes.NewBuffer(data))
dec.UseNumber()
var v interface{}
if err := dec.Decode(&v); err != nil {
return err
}
i, err := decodeSize(v)
if err != nil {
// if only we could do better here and include e.g. the field
// name where this happened but encoding/json does not
// support this, c.f. https://github.com/golang/go/issues/58655
return fmt.Errorf("error decoding size: %w", err)
}
*si = Size(i)
return nil
}
// decodeSize takes an integer or string representing a data size (with a data
// suffix) and returns the uint64 representation.
func decodeSize(size any) (uint64, error) {
switch s := size.(type) {
case string:
return Parse(s)
case json.Number:
i, err := s.Int64()
if i < 0 {
return 0, fmt.Errorf("cannot be negative")
}
return uint64(i), err
case int64:
if s < 0 {
return 0, fmt.Errorf("cannot be negative")
}
return uint64(s), nil
case uint64:
return s, nil
case float64, float32:
return 0, fmt.Errorf("cannot be float")
default:
return 0, fmt.Errorf("failed to convert value \"%v\" to number", size)
}
}
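A short sketch of what the wrapper accepts (not part of the diff), using the vendored import path from the file header above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/osbuild/images/pkg/datasizes"
)

func main() {
	var s datasizes.Size

	_ = json.Unmarshal([]byte(`123`), &s) // plain integer
	fmt.Println(s.Uint64())               // 123

	_ = json.Unmarshal([]byte(`"1234"`), &s) // numeric string
	fmt.Println(s.Uint64())                  // 1234

	if err := json.Unmarshal([]byte(`"1 GiB"`), &s); err != nil { // string with unit
		panic(err)
	}
	fmt.Println(s.Uint64()) // 1073741824
}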


@@ -72,6 +72,36 @@ const (
DosESPID = "ef00"
)
// pt type -> type -> ID mapping for convenience
var idMap = map[PartitionTableType]map[string]string{
PT_DOS: {
"bios": DosBIOSBootID,
"boot": DosLinuxTypeID,
"data": DosLinuxTypeID,
"esp": DosESPID,
"lvm": DosLinuxTypeID,
},
PT_GPT: {
"bios": BIOSBootPartitionGUID,
"boot": XBootLDRPartitionGUID,
"data": FilesystemDataGUID,
"esp": EFISystemPartitionGUID,
"lvm": LVMPartitionGUID,
},
}
func getPartitionTypeIDfor(ptType PartitionTableType, partTypeName string) (string, error) {
ptMap, ok := idMap[ptType]
if !ok {
return "", fmt.Errorf("unknown or unsupported partition table enum: %d", ptType)
}
id, ok := ptMap[partTypeName]
if !ok {
return "", fmt.Errorf("unknown or unsupported partition type name: %s", partTypeName)
}
return id, nil
}
// FSType is the filesystem type enum.
//
// There should always be one value for each filesystem type supported by


@@ -793,10 +793,9 @@ func (pt *PartitionTable) ensureBtrfs() error {
// reset the btrfs partition size - it will be grown later
part.Size = 0
-if pt.Type == PT_GPT {
-part.Type = FilesystemDataGUID
-} else {
-part.Type = DosLinuxTypeID
+part.Type, err = getPartitionTypeIDfor(pt.Type, "data")
+if err != nil {
+return fmt.Errorf("error converting partition table to btrfs: %w", err)
}
} else {
@@ -979,14 +978,9 @@ func EnsureRootFilesystem(pt *PartitionTable, defaultFsType FSType) error {
return fmt.Errorf("error creating root partition: %w", err)
}
-var partType string
-switch pt.Type {
-case PT_DOS:
-partType = DosLinuxTypeID
-case PT_GPT:
-partType = FilesystemDataGUID
-default:
-return fmt.Errorf("error creating root partition: unknown or unsupported partition table type: %s", pt.Type)
+partType, err := getPartitionTypeIDfor(pt.Type, "data")
+if err != nil {
+return fmt.Errorf("error creating root partition: %w", err)
}
rootpart := Partition{
Type: partType,
@@ -1002,45 +996,29 @@ func EnsureRootFilesystem(pt *PartitionTable, defaultFsType FSType) error {
return nil
}
-// EnsureBootPartition creates a boot partition if one does not already exist.
-// The function will append the boot partition to the end of the existing
-// partition table therefore it is best to call this function early to put boot
-// near the front (as is conventional).
-func EnsureBootPartition(pt *PartitionTable, bootFsType FSType) error {
-// collect all labels to avoid conflicts
-labels := make(map[string]bool)
-var foundBoot bool
-_ = pt.ForEachMountable(func(mnt Mountable, path []Entity) error {
-if mnt.GetMountpoint() == "/boot" {
-foundBoot = true
-return nil
-}
-labels[mnt.GetFSSpec().Label] = true
-return nil
-})
-if foundBoot {
-// nothing to do
-return nil
-}
+// addBootPartition creates a boot partition. The function will append the boot
+// partition to the end of the existing partition table therefore it is best to
+// call this function early to put boot near the front (as is conventional).
+func addBootPartition(pt *PartitionTable, bootFsType FSType) error {
if bootFsType == FS_NONE {
return fmt.Errorf("error creating boot partition: no filesystem type")
}
+// collect all labels to avoid conflicts
+labels := make(map[string]bool)
+_ = pt.ForEachMountable(func(mnt Mountable, path []Entity) error {
+labels[mnt.GetFSSpec().Label] = true
+return nil
+})
bootLabel, err := genUniqueString("boot", labels)
if err != nil {
return fmt.Errorf("error creating boot partition: %w", err)
}
-var partType string
-switch pt.Type {
-case PT_DOS:
-partType = DosLinuxTypeID
-case PT_GPT:
-partType = XBootLDRPartitionGUID
-default:
-return fmt.Errorf("error creating boot partition: unknown or unsupported partition table type: %s", pt.Type)
+partType, err := getPartitionTypeIDfor(pt.Type, "boot")
+if err != nil {
+return fmt.Errorf("error creating boot partition: %w", err)
}
bootPart := Partition{
Type: partType,
@@ -1056,7 +1034,7 @@ func EnsureBootPartition(pt *PartitionTable, bootFsType FSType) error {
return nil
}
-// AddPartitionsForBootMode creates partitions to satisfy the boot mode requirements:
+// addPartitionsForBootMode creates partitions to satisfy the boot mode requirements:
// - BIOS/legacy: adds a 1 MiB BIOS boot partition.
// - UEFI: adds a 200 MiB EFI system partition.
// - Hybrid: adds both.
@@ -1064,7 +1042,7 @@ func EnsureBootPartition(pt *PartitionTable, bootFsType FSType) error {
// The function will append the new partitions to the end of the existing
// partition table therefore it is best to call this function early to put them
// near the front (as is conventional).
-func AddPartitionsForBootMode(pt *PartitionTable, bootMode platform.BootMode) error {
+func addPartitionsForBootMode(pt *PartitionTable, bootMode platform.BootMode) error {
switch bootMode {
case platform.BOOT_LEGACY:
// add BIOS boot partition
@@ -1102,14 +1080,9 @@ func AddPartitionsForBootMode(pt *PartitionTable, bootMode platform.BootMode) er
}
func mkBIOSBoot(ptType PartitionTableType) (Partition, error) {
-var partType string
-switch ptType {
-case PT_DOS:
-partType = DosBIOSBootID
-case PT_GPT:
-partType = BIOSBootPartitionGUID
-default:
-return Partition{}, fmt.Errorf("error creating BIOS boot partition: unknown or unsupported partition table enum: %d", ptType)
+partType, err := getPartitionTypeIDfor(ptType, "bios")
+if err != nil {
+return Partition{}, fmt.Errorf("error creating BIOS boot partition: %w", err)
}
return Partition{
Size: 1 * datasizes.MiB,
@@ -1120,14 +1093,9 @@ func mkBIOSBoot(ptType PartitionTableType) (Partition, error) {
}
func mkESP(size uint64, ptType PartitionTableType) (Partition, error) {
-var partType string
-switch ptType {
-case PT_DOS:
-partType = DosESPID
-case PT_GPT:
-partType = EFISystemPartitionGUID
-default:
-return Partition{}, fmt.Errorf("error creating EFI system partition: unknown or unsupported partition table enum: %d", ptType)
+partType, err := getPartitionTypeIDfor(ptType, "esp")
+if err != nil {
+return Partition{}, fmt.Errorf("error creating EFI system partition: %w", err)
}
return Partition{
Size: size,
@@ -1144,3 +1112,292 @@ func mkESP(size uint64, ptType PartitionTableType) (Partition, error) {
},
}, nil
}
type CustomPartitionTableOptions struct {
// PartitionTableType must be either "dos" or "gpt". Defaults to "gpt".
PartitionTableType PartitionTableType
// BootMode determines the types of boot-related partitions that are
// automatically added, BIOS boot (legacy), ESP (UEFI), or both (hybrid).
// If none, no boot-related partitions are created.
BootMode platform.BootMode
// DefaultFSType determines the filesystem type for automatically created
// filesystems and custom mountpoints that don't specify a type.
// None is only valid if no partitions are created and all mountpoint
// partitions specify a type.
// The default type is also used for the automatically created /boot
// filesystem if it is a supported type for that mountpoint. If it is not,
// xfs is used as a fallback.
DefaultFSType FSType
// RequiredMinSizes defines a map of minimum sizes for specific
// directories. These indirectly control the minimum sizes of partitions. A
// directory with a required size will set the minimum size of the
// partition with the mountpoint that contains the directory. Additional
// directory requirements are additive, meaning the minimum size for a
// mountpoint's partition is the sum of all the required directory sizes it
// will contain.
RequiredMinSizes map[string]uint64
}
// Returns the default filesystem type if the fstype is empty. If both are
// empty/none, returns an error.
func (options *CustomPartitionTableOptions) getfstype(fstype string) (string, error) {
if fstype != "" {
return fstype, nil
}
if options.DefaultFSType == FS_NONE {
return "", fmt.Errorf("no filesystem type defined and no default set")
}
return options.DefaultFSType.String(), nil
}
func maybeAddBootPartition(pt *PartitionTable, disk *blueprint.DiskCustomization, defaultFSType FSType) error {
// The boot type will be the default only if it's a supported filesystem
// type for /boot (ext4 or xfs). Otherwise, we default to xfs.
// FS_NONE also falls back to xfs.
var bootFsType FSType
switch defaultFSType {
case FS_EXT4, FS_XFS:
bootFsType = defaultFSType
default:
bootFsType = FS_XFS
}
if needsBoot(disk) {
// we need a /boot partition to boot LVM or Btrfs, create boot
// partition if it does not already exist
if err := addBootPartition(pt, bootFsType); err != nil {
return err
}
}
return nil
}
// NewCustomPartitionTable creates a partition table based almost entirely on the disk customizations from a blueprint.
func NewCustomPartitionTable(customizations *blueprint.DiskCustomization, options *CustomPartitionTableOptions, rng *rand.Rand) (*PartitionTable, error) {
if options == nil {
options = &CustomPartitionTableOptions{}
}
if customizations == nil {
customizations = &blueprint.DiskCustomization{}
}
errPrefix := "error generating partition table:"
// validate the partitioning customizations before using them
if err := customizations.Validate(); err != nil {
return nil, fmt.Errorf("%s %w", errPrefix, err)
}
pt := &PartitionTable{}
// TODO: Handle partition table type in customizations
switch options.PartitionTableType {
case PT_GPT, PT_DOS:
pt.Type = options.PartitionTableType
case PT_NONE:
// default to "gpt"
pt.Type = PT_GPT
default:
return nil, fmt.Errorf("%s invalid partition table type enum value: %d", errPrefix, options.PartitionTableType)
}
// add any partition(s) that are needed for booting (like /boot/efi)
// if needed
//
// TODO: switch to ensure ESP in case customizations already include it
if err := addPartitionsForBootMode(pt, options.BootMode); err != nil {
return nil, fmt.Errorf("%s %w", errPrefix, err)
}
// add the /boot partition (if it is needed)
if err := maybeAddBootPartition(pt, customizations, options.DefaultFSType); err != nil {
return nil, fmt.Errorf("%s %w", errPrefix, err)
}
// add user customized partitions
for _, part := range customizations.Partitions {
switch part.Type {
case "plain", "":
if err := addPlainPartition(pt, part, options); err != nil {
return nil, fmt.Errorf("%s %w", errPrefix, err)
}
case "lvm":
if err := addLVMPartition(pt, part, options); err != nil {
return nil, fmt.Errorf("%s %w", errPrefix, err)
}
case "btrfs":
addBtrfsPartition(pt, part)
default:
return nil, fmt.Errorf("%s invalid partition type: %s", errPrefix, part.Type)
}
}
if err := EnsureRootFilesystem(pt, options.DefaultFSType); err != nil {
return nil, fmt.Errorf("%s %w", errPrefix, err)
}
if len(options.RequiredMinSizes) != 0 {
pt.EnsureDirectorySizes(options.RequiredMinSizes)
}
pt.relayout(customizations.MinSize)
pt.GenerateUUIDs(rng)
return pt, nil
}
func addPlainPartition(pt *PartitionTable, partition blueprint.PartitionCustomization, options *CustomPartitionTableOptions) error {
fstype, err := options.getfstype(partition.FSType)
if err != nil {
return fmt.Errorf("error creating partition with mountpoint %q: %w", partition.Mountpoint, err)
}
// all user-defined partitions are data partitions except boot
typeName := "data"
if partition.Mountpoint == "/boot" {
typeName = "boot"
}
partType, err := getPartitionTypeIDfor(pt.Type, typeName)
if err != nil {
return fmt.Errorf("error getting partition type ID for %q: %w", partition.Mountpoint, err)
}
newpart := Partition{
Type: partType,
Bootable: false,
Size: partition.MinSize,
Payload: &Filesystem{
Type: fstype,
Label: partition.Label,
Mountpoint: partition.Mountpoint,
FSTabOptions: "defaults", // TODO: add customization
},
}
pt.Partitions = append(pt.Partitions, newpart)
return nil
}
func addLVMPartition(pt *PartitionTable, partition blueprint.PartitionCustomization, options *CustomPartitionTableOptions) error {
vgname := partition.Name
if vgname == "" {
// get existing volume groups and generate unique name
existing := make(map[string]bool)
for _, part := range pt.Partitions {
vg, ok := part.Payload.(*LVMVolumeGroup)
if !ok {
continue
}
existing[vg.Name] = true
}
// unlike other unique name generation cases, here we want the first
// name to have the 00 suffix, so we add the base to the existing set
base := "vg"
existing[base] = true
uniqueName, err := genUniqueString(base, existing)
if err != nil {
return fmt.Errorf("error creating volume group: %w", err)
}
vgname = uniqueName
}
newvg := &LVMVolumeGroup{
Name: vgname,
Description: "created via lvm2 and osbuild",
}
for _, lv := range partition.LogicalVolumes {
fstype, err := options.getfstype(lv.FSType)
if err != nil {
return fmt.Errorf("error creating logical volume %q (%s): %w", lv.Name, lv.Mountpoint, err)
}
newfs := &Filesystem{
Type: fstype,
Label: lv.Label,
Mountpoint: lv.Mountpoint,
FSTabOptions: "defaults", // TODO: add customization
}
if _, err := newvg.CreateLogicalVolume(lv.Name, lv.MinSize, newfs); err != nil {
return fmt.Errorf("error creating logical volume %q (%s): %w", lv.Name, lv.Mountpoint, err)
}
}
// create partition for volume group
newpart := Partition{
Type: LVMPartitionGUID,
Size: partition.MinSize,
Bootable: false,
Payload: newvg,
}
pt.Partitions = append(pt.Partitions, newpart)
return nil
}
func addBtrfsPartition(pt *PartitionTable, partition blueprint.PartitionCustomization) {
subvols := make([]BtrfsSubvolume, len(partition.Subvolumes))
for idx, subvol := range partition.Subvolumes {
newsubvol := BtrfsSubvolume{
Name: subvol.Name,
Mountpoint: subvol.Mountpoint,
}
subvols[idx] = newsubvol
}
newvol := &Btrfs{
Subvolumes: subvols,
}
// create partition for btrfs volume
newpart := Partition{
Type: FilesystemDataGUID,
Bootable: false,
Payload: newvol,
Size: partition.MinSize,
}
pt.Partitions = append(pt.Partitions, newpart)
}
// Determine if a boot partition is needed based on the customizations. A boot
// partition is needed if any of the following conditions apply:
// - / is on LVM or btrfs and /boot is not defined.
// - / is not defined and btrfs or lvm volumes are defined.
//
// In the second case, a root partition will be created automatically on either
// btrfs or lvm.
func needsBoot(disk *blueprint.DiskCustomization) bool {
if disk == nil {
return false
}
var foundBtrfsOrLVM bool
for _, part := range disk.Partitions {
switch part.Type {
case "plain", "":
if part.Mountpoint == "/" {
return false
}
if part.Mountpoint == "/boot" {
return false
}
case "lvm":
foundBtrfsOrLVM = true
// check if any of the LVs is root
for _, lv := range part.LogicalVolumes {
if lv.Mountpoint == "/" {
return true
}
}
case "btrfs":
foundBtrfsOrLVM = true
// check if any of the subvols is root
for _, subvol := range part.Subvolumes {
if subvol.Mountpoint == "/" {
return true
}
}
default:
// NOTE: invalid types should be validated elsewhere
}
}
return foundBtrfsOrLVM
}
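A hedged end-to-end sketch of how a caller is expected to drive the new constructor (not part of the diff; the disk, blueprint and platform import paths are assumed from the vendored tree, and BOOT_LEGACY is the boot mode constant already referenced above):

package main

import (
	"fmt"
	"math/rand"

	"github.com/osbuild/images/pkg/blueprint" // assumed path
	"github.com/osbuild/images/pkg/disk"      // assumed path
	"github.com/osbuild/images/pkg/platform"  // assumed path
)

func main() {
	customizations := &blueprint.DiskCustomization{
		MinSize: 10 * 1024 * 1024 * 1024, // 10 GiB
		Partitions: []blueprint.PartitionCustomization{
			{
				Type:    "plain",
				MinSize: 2 * 1024 * 1024 * 1024,
				FilesystemTypedCustomization: blueprint.FilesystemTypedCustomization{
					Mountpoint: "/data",
					FSType:     "xfs",
				},
			},
		},
	}

	options := &disk.CustomPartitionTableOptions{
		PartitionTableType: disk.PT_GPT,
		BootMode:           platform.BOOT_LEGACY,
		DefaultFSType:      disk.FS_XFS,
		RequiredMinSizes:   map[string]uint64{"/": 1 * 1024 * 1024 * 1024},
	}

	// deterministic rng; UUID generation is seeded by the caller
	rng := rand.New(rand.NewSource(0))

	pt, err := disk.NewCustomPartitionTable(customizations, options, rng)
	if err != nil {
		panic(err)
	}
	// typically: BIOS boot partition + /data + an auto-created root
	fmt.Println(len(pt.Partitions))
}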


@@ -51,6 +51,12 @@ var (
oscap.Standard,
}
// Default directory size minimums for all image types.
requiredDirectorySizes = map[string]uint64{
"/": 1 * datasizes.GiB,
"/usr": 2 * datasizes.GiB,
}
// Services
iotServices = []string{
"NetworkManager.service",
@@ -92,10 +98,11 @@ var (
rpmOstree: false,
image: imageInstallerImage,
// We don't know the variant of the OS pipeline being installed
isoLabel: getISOLabelFunc("Unknown"),
buildPipelines: []string{"build"},
payloadPipelines: []string{"anaconda-tree", "rootfs-image", "efiboot-tree", "os", "bootiso-tree", "bootiso"},
exports: []string{"bootiso"},
requiredPartitionSizes: requiredDirectorySizes,
}
liveInstallerImgType = imageType{
@@ -106,14 +113,15 @@ var (
packageSets: map[string]packageSetFunc{
installerPkgsKey: liveInstallerPackageSet,
},
bootable: true,
bootISO: true,
rpmOstree: false,
image: liveInstallerImage,
isoLabel: getISOLabelFunc("Workstation"),
buildPipelines: []string{"build"},
payloadPipelines: []string{"anaconda-tree", "rootfs-image", "efiboot-tree", "bootiso-tree", "bootiso"},
exports: []string{"bootiso"},
requiredPartitionSizes: requiredDirectorySizes,
}
iotCommitImgType = imageType{
@@ -128,11 +136,12 @@ var (
EnabledServices: iotServices,
DracutConf: []*osbuild.DracutConfStageOptions{osbuild.FIPSDracutConfStageOptions},
},
rpmOstree: true,
image: iotCommitImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "ostree-commit", "commit-archive"},
exports: []string{"commit-archive"},
requiredPartitionSizes: requiredDirectorySizes,
}
iotBootableContainer = imageType{
@@ -142,11 +151,12 @@ var (
packageSets: map[string]packageSetFunc{
osPkgsKey: bootableContainerPackageSet,
},
rpmOstree: true,
image: bootableContainerImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "ostree-commit", "ostree-encapsulate"},
exports: []string{"ostree-encapsulate"},
requiredPartitionSizes: requiredDirectorySizes,
}
iotOCIImgType = imageType{
@@ -164,12 +174,13 @@ var (
EnabledServices: iotServices,
DracutConf: []*osbuild.DracutConfStageOptions{osbuild.FIPSDracutConfStageOptions},
},
rpmOstree: true,
bootISO: false,
image: iotContainerImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "ostree-commit", "container-tree", "container"},
exports: []string{"container"},
requiredPartitionSizes: requiredDirectorySizes,
}
iotInstallerImgType = imageType{
@@ -184,13 +195,14 @@ var (
Locale: common.ToPtr("en_US.UTF-8"),
EnabledServices: iotServices,
},
rpmOstree: true,
bootISO: true,
image: iotInstallerImage,
isoLabel: getISOLabelFunc("IoT"),
buildPipelines: []string{"build"},
payloadPipelines: []string{"anaconda-tree", "rootfs-image", "efiboot-tree", "bootiso-tree", "bootiso"},
exports: []string{"bootiso"},
requiredPartitionSizes: requiredDirectorySizes,
}
iotSimplifiedInstallerImgType = imageType{
@@ -210,17 +222,18 @@ var (
LockRootUser: common.ToPtr(true),
IgnitionPlatform: common.ToPtr("metal"),
},
defaultSize: 10 * datasizes.GibiByte,
rpmOstree: true,
bootable: true,
bootISO: true,
image: iotSimplifiedInstallerImage,
isoLabel: getISOLabelFunc("IoT"),
buildPipelines: []string{"build"},
payloadPipelines: []string{"ostree-deployment", "image", "xz", "coi-tree", "efiboot-tree", "bootiso-tree", "bootiso"},
exports: []string{"bootiso"},
basePartitionTables: iotSimplifiedInstallerPartitionTables,
kernelOptions: ostreeDeploymentKernelOptions,
requiredPartitionSizes: requiredDirectorySizes,
}
iotRawImgType = imageType{
@@ -269,15 +282,16 @@ var (
LockRootUser: common.ToPtr(true),
IgnitionPlatform: common.ToPtr("qemu"),
},
defaultSize: 10 * datasizes.GibiByte,
rpmOstree: true,
bootable: true,
image: iotImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"ostree-deployment", "image", "qcow2"},
exports: []string{"qcow2"},
basePartitionTables: iotBasePartitionTables,
kernelOptions: ostreeDeploymentKernelOptions,
requiredPartitionSizes: requiredDirectorySizes,
}
qcow2ImgType = imageType{
@@ -291,14 +305,15 @@ var (
defaultImageConfig: &distro.ImageConfig{
DefaultTarget: common.ToPtr("multi-user.target"),
},
kernelOptions: cloudKernelOptions,
bootable: true,
defaultSize: 5 * datasizes.GibiByte,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},
basePartitionTables: defaultBasePartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
vmdkDefaultImageConfig = &distro.ImageConfig{
@@ -318,15 +333,16 @@ var (
packageSets: map[string]packageSetFunc{
osPkgsKey: vmdkCommonPackageSet,
},
defaultImageConfig: vmdkDefaultImageConfig,
kernelOptions: cloudKernelOptions,
bootable: true,
defaultSize: 2 * datasizes.GibiByte,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk"},
exports: []string{"vmdk"},
basePartitionTables: defaultBasePartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
ovaImgType = imageType{
@@ -336,15 +352,16 @@ var (
packageSets: map[string]packageSetFunc{
osPkgsKey: vmdkCommonPackageSet,
},
defaultImageConfig: vmdkDefaultImageConfig,
kernelOptions: cloudKernelOptions,
bootable: true,
defaultSize: 2 * datasizes.GibiByte,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk", "ovf", "archive"},
exports: []string{"archive"},
basePartitionTables: defaultBasePartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
containerImgType = imageType{
@ -360,11 +377,12 @@ var (
Locale: common.ToPtr("C.UTF-8"), Locale: common.ToPtr("C.UTF-8"),
Timezone: common.ToPtr("Etc/UTC"), Timezone: common.ToPtr("Etc/UTC"),
}, },
image: containerImage, image: containerImage,
bootable: false, bootable: false,
buildPipelines: []string{"build"}, buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "container"}, payloadPipelines: []string{"os", "container"},
exports: []string{"container"}, exports: []string{"container"},
requiredPartitionSizes: requiredDirectorySizes,
} }
wslImgType = imageType{ wslImgType = imageType{
@ -385,11 +403,12 @@ var (
}, },
}, },
}, },
image: containerImage, image: containerImage,
bootable: false, bootable: false,
buildPipelines: []string{"build"}, buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "container"}, payloadPipelines: []string{"os", "container"},
exports: []string{"container"}, exports: []string{"container"},
requiredPartitionSizes: requiredDirectorySizes,
} }
minimalrawImgType = imageType{ minimalrawImgType = imageType{
@ -410,15 +429,16 @@ var (
Timeout: 5, Timeout: 5,
}, },
}, },
rpmOstree: false, rpmOstree: false,
kernelOptions: defaultKernelOptions, kernelOptions: defaultKernelOptions,
bootable: true, bootable: true,
defaultSize: 2 * datasizes.GibiByte, defaultSize: 2 * datasizes.GibiByte,
image: diskImage, image: diskImage,
buildPipelines: []string{"build"}, buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"}, payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"}, exports: []string{"xz"},
basePartitionTables: minimalrawPartitionTables, basePartitionTables: minimalrawPartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
} }
) )
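Every image type above now carries requiredPartitionSizes: requiredDirectorySizes, so the distro's minimum directory sizes feed into partition table generation for custom layouts as well as the automatic ones. A minimal, self-contained sketch of what such a map could look like; the mountpoints and values here are illustrative assumptions, not taken from this commit:

package main

import (
	"fmt"

	"github.com/osbuild/images/pkg/datasizes"
)

func main() {
	// Hypothetical example of a mountpoint -> minimum size map, in the shape
	// that requiredPartitionSizes/requiredDirectorySizes suggest.
	requiredDirectorySizes := map[string]uint64{
		"/":    1 * datasizes.GibiByte,
		"/usr": 2 * datasizes.GibiByte,
	}
	for mp, size := range requiredDirectorySizes {
		fmt.Printf("%s requires at least %d bytes\n", mp, size)
	}
}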

View file

@ -226,6 +226,14 @@ func osCustomizations(
osc.Files = append(osc.Files, imageConfig.Files...) osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...) osc.Directories = append(osc.Directories, imageConfig.Directories...)
ca, err := c.GetCACerts()
if err != nil {
panic(fmt.Sprintf("unexpected error checking CA certs: %v", err))
}
if ca != nil {
osc.CACerts = ca.PEMCerts
}
return osc, nil return osc, nil
} }
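osCustomizations now copies any user-supplied CA bundles into osc.CACerts. A minimal sketch of the blueprint side of that flow, assuming the customization behind GetCACerts is a CACustomization struct exposed as a CACerts field; only GetCACerts and PEMCerts are visible in this change, the other names are assumptions:

package main

import (
	"fmt"

	"github.com/osbuild/images/pkg/blueprint"
)

func main() {
	// Hypothetical blueprint carrying one PEM-encoded CA bundle.
	c := &blueprint.Customizations{
		CACerts: &blueprint.CACustomization{
			PEMCerts: []string{"-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"},
		},
	}

	ca, err := c.GetCACerts()
	if err != nil {
		panic(err)
	}
	if ca != nil {
		// This slice is what osCustomizations copies into osc.CACerts and
		// what the os pipeline later turns into trust-anchor files.
		fmt.Printf("%d CA bundle(s) to install\n", len(ca.PEMCerts))
	}
}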
@ -329,7 +337,7 @@ func diskImage(workload workload.Workload,
img.InstallWeakDeps = common.ToPtr(false) img.InstallWeakDeps = common.ToPtr(false)
} }
// TODO: move generation into LiveImage // TODO: move generation into LiveImage
pt, err := t.getPartitionTable(bp.Customizations.GetFilesystems(), options, rng) pt, err := t.getPartitionTable(bp.Customizations, options, rng)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -700,7 +708,7 @@ func iotImage(workload workload.Workload,
img.OSName = "fedora-iot" img.OSName = "fedora-iot"
// TODO: move generation into LiveImage // TODO: move generation into LiveImage
pt, err := t.getPartitionTable(customizations.GetFilesystems(), options, rng) pt, err := t.getPartitionTable(customizations, options, rng)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -741,7 +749,7 @@ func iotSimplifiedInstallerImage(workload workload.Workload,
rawImg.OSName = "fedora" rawImg.OSName = "fedora"
// TODO: move generation into LiveImage // TODO: move generation into LiveImage
pt, err := t.getPartitionTable(customizations.GetFilesystems(), options, rng) pt, err := t.getPartitionTable(customizations, options, rng)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -138,7 +138,7 @@ func (t *imageType) BootMode() platform.BootMode {
} }
func (t *imageType) getPartitionTable( func (t *imageType) getPartitionTable(
mountpoints []blueprint.FilesystemCustomization, customizations *blueprint.Customizations,
options distro.ImageOptions, options distro.ImageOptions,
rng *rand.Rand, rng *rand.Rand,
) (*disk.PartitionTable, error) { ) (*disk.PartitionTable, error) {
@ -148,6 +148,27 @@ func (t *imageType) getPartitionTable(
} }
imageSize := t.Size(options.Size) imageSize := t.Size(options.Size)
partitioning, err := customizations.GetPartitioning()
if err != nil {
return nil, err
}
if partitioning != nil {
// Use the new custom partition table to create a PT fully based on the user's customizations.
// This overrides FilesystemCustomizations, but we should never have both defined.
if options.Size > 0 {
// user specified a size on the command line, so let's override the
// customization with the calculated/rounded imageSize
partitioning.MinSize = imageSize
}
partOptions := &disk.CustomPartitionTableOptions{
PartitionTableType: basePartitionTable.Type, // PT type is not customizable, it is determined by the base PT for an image type or architecture
BootMode: t.BootMode(),
DefaultFSType: disk.FS_EXT4, // default fs type for Fedora
RequiredMinSizes: t.requiredPartitionSizes,
}
return disk.NewCustomPartitionTable(partitioning, partOptions, rng)
}
partitioningMode := options.PartitioningMode partitioningMode := options.PartitioningMode
if t.rpmOstree { if t.rpmOstree {
@ -160,6 +181,7 @@ func (t *imageType) getPartitionTable(
partitioningMode = disk.AutoLVMPartitioningMode partitioningMode = disk.AutoLVMPartitioningMode
} }
mountpoints := customizations.GetFilesystems()
return disk.NewPartitionTable(&basePartitionTable, mountpoints, imageSize, partitioningMode, t.requiredPartitionSizes, rng) return disk.NewPartitionTable(&basePartitionTable, mountpoints, imageSize, partitioningMode, t.requiredPartitionSizes, rng)
} }
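getPartitionTable now receives the full customizations and prefers an explicit partitioning customization, built with disk.NewCustomPartitionTable, over plain filesystem mountpoints. A simplified, self-contained sketch of that selection and of the MinSize override applied when a size was requested on the command line; the types below are stand-ins, only the behaviour mirrors the change above:

package main

import "fmt"

// Stand-in for the blueprint disk customization; only MinSize matters here.
type diskCustomization struct {
	MinSize uint64
}

// pickPartitioningPath mirrors the new decision in getPartitionTable:
// explicit disk customizations win, otherwise mountpoints (possibly empty)
// go through the classic NewPartitionTable path.
func pickPartitioningPath(part *diskCustomization, mountpoints []string, requestedSize, imageSize uint64) string {
	if part != nil {
		if requestedSize > 0 {
			// A size given on the command line overrides the customization's
			// minimum with the calculated/rounded image size.
			part.MinSize = imageSize
		}
		return "disk.NewCustomPartitionTable"
	}
	return fmt.Sprintf("disk.NewPartitionTable with %d custom mountpoints", len(mountpoints))
}

func main() {
	part := &diskCustomization{MinSize: 4 << 30}
	fmt.Println(pickPartitioningPath(part, nil, 10<<30, 10<<30), part.MinSize)
	fmt.Println(pickPartitioningPath(nil, []string{"/var"}, 0, 5<<30))
}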
@ -355,13 +377,21 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
} }
mountpoints := customizations.GetFilesystems() mountpoints := customizations.GetFilesystems()
partitioning, err := customizations.GetPartitioning()
if mountpoints != nil && t.rpmOstree { if err != nil {
return nil, fmt.Errorf("Custom mountpoints are not supported for ostree types") return nil, err
}
if (len(mountpoints) > 0 || partitioning != nil) && t.rpmOstree {
return nil, fmt.Errorf("Custom mountpoints and partitioning are not supported for ostree types")
}
if len(mountpoints) > 0 && partitioning != nil {
return nil, fmt.Errorf("partitioning customizations cannot be used with custom filesystems (mountpoints)")
} }
err := blueprint.CheckMountpointsPolicy(mountpoints, policies.MountpointPolicies) if err := blueprint.CheckMountpointsPolicy(mountpoints, policies.MountpointPolicies); err != nil {
if err != nil { return nil, err
}
if err := blueprint.CheckDiskMountpointsPolicy(partitioning, policies.MountpointPolicies); err != nil {
return nil, err return nil, err
} }
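checkOptions now treats filesystem (mountpoint) customizations and the new partitioning customizations as mutually exclusive, and rejects both for ostree image types. A small stand-alone sketch of those rules:

package main

import (
	"errors"
	"fmt"
)

// validateStorageCustomizations mirrors the checks added to checkOptions.
func validateStorageCustomizations(numMountpoints int, hasPartitioning, rpmOstree bool) error {
	if (numMountpoints > 0 || hasPartitioning) && rpmOstree {
		return errors.New("custom mountpoints and partitioning are not supported for ostree types")
	}
	if numMountpoints > 0 && hasPartitioning {
		return errors.New("partitioning customizations cannot be used with custom filesystems (mountpoints)")
	}
	return nil
}

func main() {
	fmt.Println(validateStorageCustomizations(0, true, true))   // rejected: ostree type
	fmt.Println(validateStorageCustomizations(2, true, false))  // rejected: both kinds used
	fmt.Println(validateStorageCustomizations(2, false, false)) // ok
}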
@ -438,5 +468,13 @@ func (t *imageType) checkOptions(bp *blueprint.Blueprint, options distro.ImageOp
} }
} }
diskc, err := customizations.GetPartitioning()
if err != nil {
return nil, err
}
if err := diskc.ValidateLayoutConstraints(); err != nil {
return nil, fmt.Errorf("cannot use disk customization: %w", err)
}
return nil, nil return nil, nil
} }

View file

@ -271,6 +271,14 @@ func osCustomizations(
osc.NoBLS = *imageConfig.NoBLS osc.NoBLS = *imageConfig.NoBLS
} }
ca, err := c.GetCACerts()
if err != nil {
panic(fmt.Sprintf("unexpected error checking CA certs: %v", err))
}
if ca != nil {
osc.CACerts = ca.PEMCerts
}
return osc, nil return osc, nil
} }

View file

@ -141,6 +141,8 @@ type OSCustomizations struct {
Directories []*fsnode.Directory Directories []*fsnode.Directory
Files []*fsnode.File Files []*fsnode.File
CACerts []string
FIPS bool FIPS bool
// NoBLS configures the image bootloader with traditional menu entries // NoBLS configures the image bootloader with traditional menu entries
@ -791,6 +793,21 @@ func (p *OS) serialize() osbuild.Pipeline {
})) }))
} }
if len(p.CACerts) > 0 {
for _, cc := range p.CACerts {
files, err := osbuild.NewCAFileNodes(cc)
if err != nil {
panic(err.Error())
}
if len(files) > 0 {
p.Files = append(p.Files, files...)
pipeline.AddStages(osbuild.GenFileNodesStages(files)...)
}
}
pipeline.AddStage(osbuild.NewCAStageStage())
}
if p.SElinux != "" { if p.SElinux != "" {
pipeline.AddStage(osbuild.NewSELinuxStage(&osbuild.SELinuxStageOptions{ pipeline.AddStage(osbuild.NewSELinuxStage(&osbuild.SELinuxStageOptions{
FileContexts: fmt.Sprintf("etc/selinux/%s/contexts/files/file_contexts", p.SElinux), FileContexts: fmt.Sprintf("etc/selinux/%s/contexts/files/file_contexts", p.SElinux),
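A note on the CA handling added to serialize above: each PEM bundle in p.CACerts is expanded into individual file nodes under /etc/pki/ca-trust/source/anchors (via osbuild.NewCAFileNodes and GenFileNodesStages), and a single org.osbuild.pki.update-ca-trust stage is appended after the loop, so the trust store is rebuilt once regardless of how many bundles were supplied.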

View file

@ -36,9 +36,9 @@ func NewOSTreeSourceItem(commit ostree.CommitSpec) *OSTreeSourceItem {
item := new(OSTreeSourceItem) item := new(OSTreeSourceItem)
item.Remote.URL = commit.URL item.Remote.URL = commit.URL
item.Remote.ContentURL = commit.ContentURL item.Remote.ContentURL = commit.ContentURL
if commit.Secrets == "org.osbuild.rhsm.consumer" { if commit.Secrets != "" {
item.Remote.Secrets = &OSTreeSourceRemoteSecrets{ item.Remote.Secrets = &OSTreeSourceRemoteSecrets{
Name: "org.osbuild.rhsm.consumer", Name: commit.Secrets,
} }
} }
return item return item
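NewOSTreeSourceItem previously forwarded only the hard-coded org.osbuild.rhsm.consumer secrets name; now any non-empty commit.Secrets value is passed through to the ostree source. A trivial sketch of the relaxed behaviour:

package main

import "fmt"

// secretsFor mirrors the relaxed check in NewOSTreeSourceItem: any non-empty
// secrets name is forwarded, not just the RHSM consumer one.
func secretsFor(commitSecrets string) (string, bool) {
	if commitSecrets != "" {
		return commitSecrets, true
	}
	return "", false
}

func main() {
	fmt.Println(secretsFor("org.osbuild.rhsm.consumer")) // still forwarded
	fmt.Println(secretsFor("org.osbuild.mtls"))          // hypothetical other name, now also forwarded
	fmt.Println(secretsFor(""))                          // nothing attached
}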

View file

@ -0,0 +1,35 @@
package osbuild
import (
"encoding/pem"
"fmt"
"path/filepath"
"github.com/osbuild/images/pkg/cert"
"github.com/osbuild/images/pkg/customizations/fsnode"
)
func NewCAStageStage() *Stage {
return &Stage{
Type: "org.osbuild.pki.update-ca-trust",
}
}
func NewCAFileNodes(bundle string) ([]*fsnode.File, error) {
var files []*fsnode.File
certs, err := cert.ParseCerts(bundle)
if err != nil {
return nil, fmt.Errorf("failed to parse CA certificates: %v", err)
}
for _, c := range certs {
path := filepath.Join("/etc/pki/ca-trust/source/anchors", filepath.Base(c.SerialNumber.Text(16))+".pem")
f, err := fsnode.NewFile(path, nil, "root", "root", pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: c.Raw}))
if err != nil {
panic(err)
}
files = append(files, f)
}
return files, nil
}
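The new ca_certs.go turns each certificate in a PEM bundle into a file node under /etc/pki/ca-trust/source/anchors, named after the certificate's serial number. A standard-library-only sketch of the same splitting and path derivation, without the fsnode wrapping; cert.ParseCerts is assumed to do the equivalent PEM/x509 decoding:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"path/filepath"
)

// splitBundle decodes every CERTIFICATE block in a PEM bundle and derives an
// anchor path from the certificate's serial number, as NewCAFileNodes does.
func splitBundle(bundle []byte) ([]string, error) {
	var paths []string
	for {
		block, rest := pem.Decode(bundle)
		if block == nil {
			break
		}
		bundle = rest
		if block.Type != "CERTIFICATE" {
			continue
		}
		c, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, fmt.Errorf("failed to parse CA certificate: %w", err)
		}
		path := filepath.Join("/etc/pki/ca-trust/source/anchors", c.SerialNumber.Text(16)+".pem")
		paths = append(paths, path)
	}
	return paths, nil
}

func main() {
	// Real input would be PEM data; an empty bundle simply yields no anchors.
	paths, err := splitBundle(nil)
	fmt.Println(paths, err)
}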

View file

@ -151,18 +151,9 @@ func verifyChecksum(commit string) bool {
return len(commit) > 0 && ostreeCommitRE.MatchString(commit) return len(commit) > 0 && ostreeCommitRE.MatchString(commit)
} }
// resolveRef resolves the URL path specified by the location and ref func httpClientForRef(scheme string, ss SourceSpec) (*http.Client, error) {
// (location+"refs/heads/"+ref) and returns the commit ID for the named ref. If
// there is an error, it will be of type ResolveRefError.
func resolveRef(ss SourceSpec) (string, error) {
u, err := url.Parse(ss.URL)
if err != nil {
return "", NewResolveRefError("error parsing ostree repository location: %v", err)
}
u.Path = path.Join(u.Path, "refs", "heads", ss.Ref)
transport := http.DefaultTransport.(*http.Transport).Clone() transport := http.DefaultTransport.(*http.Transport).Clone()
if u.Scheme == "https" { if scheme == "https" {
tlsConf := &tls.Config{ tlsConf := &tls.Config{
MinVersion: tls.VersionTLS12, MinVersion: tls.VersionTLS12,
} }
@ -171,18 +162,18 @@ func resolveRef(ss SourceSpec) (string, error) {
if ss.MTLS != nil && ss.MTLS.CA != "" { if ss.MTLS != nil && ss.MTLS.CA != "" {
caCertPEM, err := os.ReadFile(ss.MTLS.CA) caCertPEM, err := os.ReadFile(ss.MTLS.CA)
if err != nil { if err != nil {
return "", NewResolveRefError("error adding ca certificate when resolving ref: %s", err) return nil, NewResolveRefError("error adding ca certificate when resolving ref: %s", err)
} }
tlsConf.RootCAs = x509.NewCertPool() tlsConf.RootCAs = x509.NewCertPool()
if ok := tlsConf.RootCAs.AppendCertsFromPEM(caCertPEM); !ok { if ok := tlsConf.RootCAs.AppendCertsFromPEM(caCertPEM); !ok {
return "", NewResolveRefError("error adding ca certificate when resolving ref") return nil, NewResolveRefError("error adding ca certificate when resolving ref")
} }
} }
if ss.MTLS != nil && ss.MTLS.ClientCert != "" && ss.MTLS.ClientKey != "" { if ss.MTLS != nil && ss.MTLS.ClientCert != "" && ss.MTLS.ClientKey != "" {
cert, err := tls.LoadX509KeyPair(ss.MTLS.ClientCert, ss.MTLS.ClientKey) cert, err := tls.LoadX509KeyPair(ss.MTLS.ClientCert, ss.MTLS.ClientKey)
if err != nil { if err != nil {
return "", NewResolveRefError("error adding client certificate when resolving ref: %s", err) return nil, NewResolveRefError("error adding client certificate when resolving ref: %s", err)
} }
tlsConf.Certificates = []tls.Certificate{cert} tlsConf.Certificates = []tls.Certificate{cert}
} }
@ -193,12 +184,12 @@ func resolveRef(ss SourceSpec) (string, error) {
if ss.Proxy != "" { if ss.Proxy != "" {
host, port, err := net.SplitHostPort(ss.Proxy) host, port, err := net.SplitHostPort(ss.Proxy)
if err != nil { if err != nil {
return "", NewResolveRefError("error parsing MTLS proxy URL '%s': %v", ss.URL, err) return nil, NewResolveRefError("error parsing MTLS proxy URL '%s': %v", ss.URL, err)
} }
proxyURL, err := url.Parse("http://" + host + ":" + port) proxyURL, err := url.Parse("http://" + host + ":" + port)
if err != nil { if err != nil {
return "", NewResolveRefError("error parsing MTLS proxy URL '%s': %v", ss.URL, err) return nil, NewResolveRefError("error parsing MTLS proxy URL '%s': %v", ss.URL, err)
} }
transport.Proxy = func(request *http.Request) (*url.URL, error) { transport.Proxy = func(request *http.Request) (*url.URL, error) {
@ -206,9 +197,25 @@ func resolveRef(ss SourceSpec) (string, error) {
} }
} }
client := &http.Client{ return &http.Client{
Transport: transport, Transport: transport,
Timeout: 300 * time.Second, Timeout: 300 * time.Second,
}, nil
}
// resolveRef resolves the URL path specified by the location and ref
// (location+"refs/heads/"+ref) and returns the commit ID for the named ref. If
// there is an error, it will be of type ResolveRefError.
func resolveRef(ss SourceSpec) (string, error) {
u, err := url.Parse(ss.URL)
if err != nil {
return "", NewResolveRefError("error parsing ostree repository location: %v", err)
}
u.Path = path.Join(u.Path, "refs", "heads", ss.Ref)
client, err := httpClientForRef(u.Scheme, ss)
if err != nil {
return "", err
} }
req, err := http.NewRequest(http.MethodGet, u.String(), nil) req, err := http.NewRequest(http.MethodGet, u.String(), nil)
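resolveRef's client construction has been extracted into httpClientForRef so the TLS, mTLS and proxy setup can be reused. A trimmed-down, stdlib-only sketch of that helper; mTLS handling, the host:port proxy parsing and the ResolveRefError wrapping of the real helper are omitted here:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"net/url"
	"time"
)

// newRefClient builds an HTTP client with a TLS 1.2 floor for https and an
// optional proxy, roughly mirroring the extracted httpClientForRef helper.
func newRefClient(scheme, proxy string) (*http.Client, error) {
	transport := http.DefaultTransport.(*http.Transport).Clone()
	if scheme == "https" {
		transport.TLSClientConfig = &tls.Config{MinVersion: tls.VersionTLS12}
	}
	if proxy != "" {
		proxyURL, err := url.Parse(proxy)
		if err != nil {
			return nil, err
		}
		transport.Proxy = http.ProxyURL(proxyURL)
	}
	return &http.Client{Transport: transport, Timeout: 300 * time.Second}, nil
}

func main() {
	client, err := newRefClient("https", "")
	if err != nil {
		panic(err)
	}
	fmt.Println("client timeout:", client.Timeout)
}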

3
vendor/modules.txt vendored
View file

@ -1022,7 +1022,7 @@ github.com/oracle/oci-go-sdk/v54/identity
github.com/oracle/oci-go-sdk/v54/objectstorage github.com/oracle/oci-go-sdk/v54/objectstorage
github.com/oracle/oci-go-sdk/v54/objectstorage/transfer github.com/oracle/oci-go-sdk/v54/objectstorage/transfer
github.com/oracle/oci-go-sdk/v54/workrequests github.com/oracle/oci-go-sdk/v54/workrequests
# github.com/osbuild/images v0.99.0 # github.com/osbuild/images v0.102.0
## explicit; go 1.21.0 ## explicit; go 1.21.0
github.com/osbuild/images/internal/common github.com/osbuild/images/internal/common
github.com/osbuild/images/internal/environment github.com/osbuild/images/internal/environment
@ -1030,6 +1030,7 @@ github.com/osbuild/images/internal/workload
github.com/osbuild/images/pkg/arch github.com/osbuild/images/pkg/arch
github.com/osbuild/images/pkg/artifact github.com/osbuild/images/pkg/artifact
github.com/osbuild/images/pkg/blueprint github.com/osbuild/images/pkg/blueprint
github.com/osbuild/images/pkg/cert
github.com/osbuild/images/pkg/container github.com/osbuild/images/pkg/container
github.com/osbuild/images/pkg/crypt github.com/osbuild/images/pkg/crypt
github.com/osbuild/images/pkg/customizations/anaconda github.com/osbuild/images/pkg/customizations/anaconda