Delete internal/blueprint/ and import from osbuild/blueprint

Import osbuild/blueprint v1.6.0
This commit is contained in:
Achilleas Koutsou 2025-03-25 17:15:30 +01:00
parent 362712a71d
commit cf956ff5a6
93 changed files with 2300 additions and 4163 deletions

View file

@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Documentation: https://godocs.io/github.com/BurntSushi/toml
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show

View file

@ -196,6 +196,26 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
return md.unify(primValue.undecoded, rvalue(v))
}
// markDecodedRecursive is a helper to mark any key under the given tmap as
// decoded, recursing as needed.
//
// It records every key path (relative to md.context) in md.decoded,
// descending into sub-tables (map[string]any) and arrays of tables
// ([]map[string]any).
func markDecodedRecursive(md *MetaData, tmap map[string]any) {
	for key := range tmap {
		// Mark the key itself as decoded at the current context.
		md.decoded[md.context.add(key).String()] = struct{}{}
		if tmap, ok := tmap[key].(map[string]any); ok {
			// Sub-table: push the key onto the context, recurse, pop it again.
			md.context = append(md.context, key)
			markDecodedRecursive(md, tmap)
			md.context = md.context[0 : len(md.context)-1]
		}
		if tarr, ok := tmap[key].([]map[string]any); ok {
			// Array of tables: every element shares the same key context.
			for _, elm := range tarr {
				md.context = append(md.context, key)
				markDecodedRecursive(md, elm)
				md.context = md.context[0 : len(md.context)-1]
			}
		}
	}
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
@ -222,6 +242,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error {
if err != nil {
return md.parseErr(err)
}
// Assume the Unmarshaler decoded everything, so mark all keys under
// this table as decoded.
if tmap, ok := data.(map[string]any); ok {
markDecodedRecursive(md, tmap)
}
if aot, ok := data.([]map[string]any); ok {
for _, tmap := range aot {
markDecodedRecursive(md, tmap)
}
}
return nil
}
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
@ -540,12 +570,14 @@ func (md *MetaData) badtype(dst string, data any) error {
func (md *MetaData) parseErr(err error) error {
k := md.context.String()
d := string(md.data)
return ParseError{
LastKey: k,
Position: md.keyInfo[k].pos,
Line: md.keyInfo[k].pos.Line,
Message: err.Error(),
err: err,
input: string(md.data),
LastKey: k,
Position: md.keyInfo[k].pos.withCol(d),
Line: md.keyInfo[k].pos.Line,
input: d,
}
}

View file

@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
var mapKeysDirect, mapKeysSub []reflect.Value
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
mapKeysSub = append(mapKeysSub, k)
mapKeysSub = append(mapKeysSub, mapKey)
} else {
mapKeysDirect = append(mapKeysDirect, k)
mapKeysDirect = append(mapKeysDirect, mapKey)
}
}
var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys)
writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
for i, mapKey := range mapKeys {
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
val := eindirect(rv.MapIndex(mapKey))
if isNil(val) {
continue
}
if inline {
enc.writeKeyValue(Key{mapKey}, val, true)
enc.writeKeyValue(Key{mapKey.String()}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(mapKey), val)
enc.encode(key.add(mapKey.String()), val)
}
}
}
@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
}
const is32Bit = (32 << (^uint(0) >> 63)) == 32
func pointerTo(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr {
return pointerTo(t.Elem())
@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
frv := eindirect(rv.Field(i))
if is32Bit {
// Copy so it works correct on 32bit archs; not clear why this
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
// This also works fine on 64bit, but 32bit archs are somewhat
// rare and this is a wee bit faster.
copyStart := make([]int, len(start))
copy(copyStart, start)
start = copyStart
}
// Need to make a copy because ... ehm, I don't know why... I guess
// allocating a new array can cause it to fail(?)
//
// Done for: https://github.com/BurntSushi/toml/issues/430
// Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
copyStart := make([]int, len(start))
copy(copyStart, start)
start = copyStart
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
}
addFields(rt, rv, nil)
writeFields := func(fields [][]int) {
writeFields := func(fields [][]int, totalFields int) {
for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex)
fieldVal := rv.FieldByIndex(fieldIndex)
@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != len(fields)-1 {
if fieldIndex[0] != totalFields-1 {
enc.wf(", ")
}
} else {
@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.wf("{")
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
l := len(fieldsDirect) + len(fieldsSub)
writeFields(fieldsDirect, l)
writeFields(fieldsSub, l)
if inline {
enc.wf("}")
}

View file

@ -67,21 +67,36 @@ type ParseError struct {
// Position of an error.
type Position struct {
Line int // Line number, starting at 1.
Col int // Error column, starting at 1.
Start int // Start of error, as byte offset starting at 0.
Len int // Lenght in bytes.
Len int // Length of the error in bytes.
}
// withCol returns a copy of p with the 1-based Col field filled in, derived
// from p.Start (a byte offset into tomlFile) by walking the file's lines
// until the line containing the offset is found.
func (p Position) withCol(tomlFile string) Position {
	var (
		pos   int
		lines = strings.Split(tomlFile, "\n")
	)
	for i := range lines {
		ll := len(lines[i]) + 1 // +1 for the removed newline
		if pos+ll >= p.Start {
			// Offset falls on this line; column is the remainder, 1-based.
			p.Col = p.Start - pos + 1
			if p.Col < 1 { // Should never happen, but just in case.
				p.Col = 1
			}
			break
		}
		pos += ll
	}
	return p
}
func (pe ParseError) Error() string {
msg := pe.Message
if msg == "" { // Error from errorf()
msg = pe.err.Error()
}
if pe.LastKey == "" {
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
}
return fmt.Sprintf("toml: line %d (last key %q): %s",
pe.Position.Line, pe.LastKey, msg)
pe.Position.Line, pe.LastKey, pe.Message)
}
// ErrorWithPosition returns the error with detailed location context.
@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string {
return pe.Error()
}
var (
lines = strings.Split(pe.input, "\n")
col = pe.column(lines)
b = new(strings.Builder)
)
msg := pe.Message
if msg == "" {
msg = pe.err.Error()
}
// TODO: don't show control characters as literals? This may not show up
// well everywhere.
var (
lines = strings.Split(pe.input, "\n")
b = new(strings.Builder)
)
if pe.Position.Len == 1 {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
msg, pe.Position.Line, col+1)
pe.Message, pe.Position.Line, pe.Position.Col)
} else {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
msg, pe.Position.Line, col, col+pe.Position.Len)
pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
}
if pe.Position.Line > 2 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string {
diff := len(expanded) - len(lines[pe.Position.Line-1])
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
return b.String()
}
@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string {
return m
}
// column computes the 0-based column of the error from pe.Position.Start
// (a byte offset into the input) by walking lines until the line containing
// the offset is found and subtracting that line's starting offset.
func (pe ParseError) column(lines []string) int {
	var pos, col int
	for i := range lines {
		ll := len(lines[i]) + 1 // +1 for the removed newline
		if pos+ll >= pe.Position.Start {
			col = pe.Position.Start - pos
			if col < 0 { // Should never happen, but just in case.
				col = 0
			}
			break
		}
		pos += ll
	}
	return col
}
func expandTab(s string) string {
var (
b strings.Builder

View file

@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
func (lx *lexer) errorf(format string, values ...any) stateFn {
if lx.atEOF {
pos := lx.getPos()
pos.Line--
if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
pos.Line--
}
pos.Len = 1
pos.Start = lx.pos - 1
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
lx.emit(itemKeyEnd)
return lexSkip(lx, lexValue)
default:
if r == '\n' {
return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
}
return lx.errorf("expected '.' or '=', but got %q instead", r)
}
}
@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
if r == eof {
return lx.errorf("unexpected EOF; expected value")
}
if r == '\n' {
return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
}
return lx.errorf("expected value but found %q instead", r)
}
@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
case 'x':
r = lx.peek()
if !isHex(r) {
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
}
return lexHexInteger
}
@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
func isBareKeyChar(r rune, tomlNext bool) bool {
if tomlNext {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-' ||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
(r >= 0x037f && r <= 0x1fff) ||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
(r >= 0x10000 && r <= 0xeffff)
}
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-'
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') || r == '_' || r == '-'
}

View file

@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string {
// Like append(), but only increase the cap by 1.
func (k Key) add(piece string) Key {
if cap(k) > len(k) {
return append(k, piece)
}
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece

View file

@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) {
// it anyway.
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
data = data[2:]
//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
data = data[3:]
}
@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) {
if i := strings.IndexRune(data[:ex], 0); i > -1 {
return nil, ParseError{
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
Position: Position{Line: 1, Start: i, Len: 1},
Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
Line: 1,
input: data,
}
@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) {
func (p *parser) panicErr(it item, err error) {
panic(ParseError{
Message: err.Error(),
err: err,
Position: it.pos,
Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) {
func (p *parser) panicItemf(it item, format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: it.pos,
Position: it.pos.withCol(p.lx.input),
Line: it.pos.Len,
LastKey: p.current(),
})
@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) {
func (p *parser) panicf(format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: p.pos,
Position: p.pos.withCol(p.lx.input),
Line: p.pos.Line,
LastKey: p.current(),
})
@ -123,10 +123,11 @@ func (p *parser) next() item {
if it.typ == itemError {
if it.err != nil {
panic(ParseError{
Position: it.pos,
Message: it.err.Error(),
err: it.err,
Position: it.pos.withCol(p.lx.input),
Line: it.pos.Line,
LastKey: p.current(),
err: it.err,
})
}
@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool {
}
}
// isHexis a superset of all the permissable characters surrounding an
// isHex is a superset of all the permissible characters surrounding an
// underscore.
accept = isHex(r)
}

View file

@ -0,0 +1,5 @@
package common
// ToPtr returns a pointer to a copy of x. It is a convenience helper for
// obtaining *T values from literals and expressions, e.g. when populating
// optional (pointer-typed) struct fields.
func ToPtr[T any](x T) *T {
	return &x
}

View file

@ -0,0 +1,509 @@
// Package blueprint contains primitives for representing weldr blueprints
package blueprint
import (
"encoding/json"
"fmt"
"github.com/osbuild/blueprint/internal/common"
"github.com/osbuild/images/pkg/crypt"
"github.com/coreos/go-semver/semver"
iblueprint "github.com/osbuild/images/pkg/blueprint"
)
// A Blueprint is a high-level description of an image: the packages,
// modules, groups, and containers to include, plus optional customizations.
// See Initialize for defaulting and validation rules.
type Blueprint struct {
	Name        string `json:"name" toml:"name"`
	Description string `json:"description" toml:"description"`
	// Version must be valid semver when set; Initialize defaults it to "0.0.0".
	Version  string    `json:"version,omitempty" toml:"version,omitempty"`
	Packages []Package `json:"packages" toml:"packages"`
	Modules  []Package `json:"modules" toml:"modules"`
	// Note, this is called "enabled modules" because we already have "modules" except
	// the "modules" refers to packages and "enabled modules" refers to modularity modules.
	EnabledModules []EnabledModule `json:"enabled_modules" toml:"enabled_modules"`
	Groups         []Group         `json:"groups" toml:"groups"`
	Containers     []Container     `json:"containers,omitempty" toml:"containers,omitempty"`
	Customizations *Customizations `json:"customizations,omitempty" toml:"customizations"`
	Distro         string          `json:"distro" toml:"distro"`
	Arch           string          `json:"architecture,omitempty" toml:"architecture,omitempty"`

	// EXPERIMENTAL
	Minimal bool `json:"minimal,omitempty" toml:"minimal,omitempty"`
}
// A Change records one entry in a blueprint's change history: the commit
// id, a message, an optional revision number, and a timestamp, together
// with the blueprint state itself (excluded from (un)marshaling via "-").
type Change struct {
	Commit    string    `json:"commit" toml:"commit"`
	Message   string    `json:"message" toml:"message"`
	Revision  *int      `json:"revision" toml:"revision"`
	Timestamp string    `json:"timestamp" toml:"timestamp"`
	Blueprint Blueprint `json:"-" toml:"-"`
}
// A Package specifies an RPM package. Version is optional; see
// ToNameVersion for how the two fields combine into a package spec.
type Package struct {
	Name    string `json:"name" toml:"name"`
	Version string `json:"version,omitempty" toml:"version,omitempty"`
}
// An EnabledModule specifies a modularity module stream to enable; see
// ToNameStream for the "name:stream" form.
type EnabledModule struct {
	Name   string `json:"name" toml:"name"`
	Stream string `json:"stream,omitempty" toml:"stream,omitempty"`
}
// A Group specifies a package group.
type Group struct {
	Name string `json:"name" toml:"name"`
}
// A Container specifies a container image to embed, identified by its
// Source reference with an optional local Name. TLSVerify and LocalStorage
// control how the image is fetched — semantics are defined by the consumer
// of the converted blueprint (see Convert).
type Container struct {
	Source string `json:"source" toml:"source"`
	Name   string `json:"name,omitempty" toml:"name,omitempty"`

	TLSVerify    *bool `json:"tls-verify,omitempty" toml:"tls-verify,omitempty"`
	LocalStorage bool  `json:"local-storage,omitempty" toml:"local-storage,omitempty"`
}
// DeepCopy returns a deep copy of the blueprint.
// This uses json.Marshal and Unmarshal which are not very efficient, but
// copies every exported, json-visible field without per-field code.
// It panics on (un)marshal failure, which is not expected for a
// well-formed Blueprint.
func (b *Blueprint) DeepCopy() Blueprint {
	bpJSON, err := json.Marshal(b)
	if err != nil {
		panic(err)
	}

	var bp Blueprint
	err = json.Unmarshal(bpJSON, &bp)
	if err != nil {
		panic(err)
	}

	return bp
}
// Initialize ensures that the blueprint has sane defaults for any missing
// fields and validates it: the name must be non-empty, the version must be
// valid semver (defaulting to "0.0.0" when unset), user passwords are
// hashed via CryptPasswords, and every package entry must have a name.
func (b *Blueprint) Initialize() error {
	if len(b.Name) == 0 {
		return fmt.Errorf("empty blueprint name not allowed")
	}

	// Replace nil slices with empty ones so they marshal as empty lists
	// rather than null.
	if b.Packages == nil {
		b.Packages = []Package{}
	}
	if b.Modules == nil {
		b.Modules = []Package{}
	}
	if b.EnabledModules == nil {
		b.EnabledModules = []EnabledModule{}
	}
	if b.Groups == nil {
		b.Groups = []Group{}
	}
	if b.Containers == nil {
		b.Containers = []Container{}
	}
	if b.Version == "" {
		b.Version = "0.0.0"
	}
	// Return an error if the version is not valid
	_, err := semver.NewVersion(b.Version)
	if err != nil {
		return fmt.Errorf("Invalid 'version', must use Semantic Versioning: %s", err.Error())
	}

	// Hash any plaintext user passwords in the customizations.
	err = b.CryptPasswords()
	if err != nil {
		return fmt.Errorf("Error hashing passwords: %s", err.Error())
	}

	// Every package entry needs at least a name.
	for i, pkg := range b.Packages {
		if pkg.Name == "" {
			var errMsg string
			if pkg.Version == "" {
				errMsg = fmt.Sprintf("Entry #%d has no name.", i+1)
			} else {
				errMsg = fmt.Sprintf("Entry #%d has version '%v' but no name.", i+1, pkg.Version)
			}
			return fmt.Errorf("All package entries need to contain the name of the package. %s", errMsg)
		}
	}

	return nil
}
// BumpVersion increments the previous blueprint's version (patch level).
// If the old version string is not valid semver it will use the new
// version as-is.
// This assumes that the new blueprint's version has already been validated
// via Initialize.
func (b *Blueprint) BumpVersion(old string) {
	var ver *semver.Version
	ver, err := semver.NewVersion(old)
	if err != nil {
		// old is not valid semver: keep b.Version unchanged.
		return
	}

	ver.BumpPatch()
	b.Version = ver.String()
}
// packages, modules, and groups all resolve to rpm packages right now. This
// function returns a combined list of "name-version" strings.
// Equivalent to GetPackagesEx(true), i.e. the kernel package is included.
func (b *Blueprint) GetPackages() []string {
	return b.GetPackagesEx(true)
}
// GetPackagesEx returns the combined list of packages, modules
// ("name-version" specs), and groups ("@name"). When bootable is true, the
// kernel package taken from the customizations is appended as well.
func (b *Blueprint) GetPackagesEx(bootable bool) []string {
	specs := []string{}
	for _, pkg := range b.Packages {
		specs = append(specs, pkg.ToNameVersion())
	}
	for _, mod := range b.Modules {
		specs = append(specs, mod.ToNameVersion())
	}
	for _, grp := range b.Groups {
		specs = append(specs, "@"+grp.Name)
	}
	if !bootable {
		return specs
	}
	// NOTE(review): assumes GetKernel handles a nil Customizations receiver
	// (defined elsewhere) — confirm.
	kernelPkg := Package{Name: b.Customizations.GetKernel().Name}
	return append(specs, kernelPkg.ToNameVersion())
}
// ToNameVersion returns the package as a "name-version" spec. A missing or
// wildcard ("*") version yields just the name, so that a bare name does not
// match every package sharing that prefix.
func (p Package) ToNameVersion() string {
	switch p.Version {
	case "", "*":
		// Omit version to prevent all packages with prefix of name to be installed
		return p.Name
	}
	return p.Name + "-" + p.Version
}
// GetEnabledModules returns the enabled modularity modules as
// "name:stream" strings.
func (b *Blueprint) GetEnabledModules() []string {
	names := make([]string, 0, len(b.EnabledModules))
	for _, m := range b.EnabledModules {
		names = append(names, m.ToNameStream())
	}
	return names
}
// ToNameStream returns the module in "name:stream" form.
func (p EnabledModule) ToNameStream() string {
	return fmt.Sprintf("%s:%s", p.Name, p.Stream)
}
// CryptPasswords ensures that all blueprint passwords are hashed.
// Empty passwords are reset to nil so they are never hashed, and passwords
// already in crypted form (per crypt.PasswordIsCrypted) are left untouched.
func (b *Blueprint) CryptPasswords() error {
	if b.Customizations == nil {
		return nil
	}

	// Any passwords for users?
	for i := range b.Customizations.User {
		// Missing or empty password
		if b.Customizations.User[i].Password == nil {
			continue
		}

		// Prevent empty password from being hashed
		if len(*b.Customizations.User[i].Password) == 0 {
			b.Customizations.User[i].Password = nil
			continue
		}

		if !crypt.PasswordIsCrypted(*b.Customizations.User[i].Password) {
			pw, err := crypt.CryptSHA512(*b.Customizations.User[i].Password)
			if err != nil {
				return err
			}

			// Replace the plaintext password with the hashed value.
			b.Customizations.User[i].Password = &pw
		}
	}
	return nil
}
// Convert translates a weldr Blueprint into the osbuild/images
// representation (iblueprint.Blueprint).
//
// Slice fields are only allocated when the source slice is non-empty, so
// empty inputs stay nil in the result. Simple sub-structs are converted by
// direct type conversion; nested structures (disk partitions, RHSM config)
// are rebuilt field by field.
//
// NOTE(review): bp.Arch and bp.Minimal are not propagated to the result,
// and of the customizations PartitioningMode, JSONTailoring/PolicyID, and
// ContainersStorage are not converted (SSHKey is folded into users via
// c.GetUsers()) — confirm these omissions are intentional.
func Convert(bp Blueprint) iblueprint.Blueprint {
	// Element-wise direct type conversions for the flat top-level lists.
	var pkgs []iblueprint.Package
	if len(bp.Packages) > 0 {
		pkgs = make([]iblueprint.Package, len(bp.Packages))
		for idx := range bp.Packages {
			pkgs[idx] = iblueprint.Package(bp.Packages[idx])
		}
	}
	var modules []iblueprint.Package
	if len(bp.Modules) > 0 {
		modules = make([]iblueprint.Package, len(bp.Modules))
		for idx := range bp.Modules {
			modules[idx] = iblueprint.Package(bp.Modules[idx])
		}
	}
	var enabledModules []iblueprint.EnabledModule
	if len(bp.EnabledModules) > 0 {
		enabledModules = make([]iblueprint.EnabledModule, len(bp.EnabledModules))
		for idx := range bp.EnabledModules {
			enabledModules[idx] = iblueprint.EnabledModule(bp.EnabledModules[idx])
		}
	}
	var groups []iblueprint.Group
	if len(bp.Groups) > 0 {
		groups = make([]iblueprint.Group, len(bp.Groups))
		for idx := range bp.Groups {
			groups[idx] = iblueprint.Group(bp.Groups[idx])
		}
	}
	var containers []iblueprint.Container
	if len(bp.Containers) > 0 {
		containers = make([]iblueprint.Container, len(bp.Containers))
		for idx := range bp.Containers {
			containers[idx] = iblueprint.Container(bp.Containers[idx])
		}
	}

	// Customizations: each optional sub-customization is converted only
	// when present.
	var customizations *iblueprint.Customizations
	if c := bp.Customizations; c != nil {
		customizations = &iblueprint.Customizations{
			Hostname:           c.Hostname,
			InstallationDevice: c.InstallationDevice,
		}
		if fdo := c.FDO; fdo != nil {
			ifdo := iblueprint.FDOCustomization(*fdo)
			customizations.FDO = &ifdo
		}
		if oscap := c.OpenSCAP; oscap != nil {
			ioscap := iblueprint.OpenSCAPCustomization{
				DataStream: oscap.DataStream,
				ProfileID:  oscap.ProfileID,
			}
			if tailoring := oscap.Tailoring; tailoring != nil {
				itailoring := iblueprint.OpenSCAPTailoringCustomizations(*tailoring)
				ioscap.Tailoring = &itailoring
			}
			customizations.OpenSCAP = &ioscap
		}
		if ign := c.Ignition; ign != nil {
			iign := iblueprint.IgnitionCustomization{}
			if embed := ign.Embedded; embed != nil {
				iembed := iblueprint.EmbeddedIgnitionCustomization(*embed)
				iign.Embedded = &iembed
			}
			if fb := ign.FirstBoot; fb != nil {
				ifb := iblueprint.FirstBootIgnitionCustomization(*fb)
				iign.FirstBoot = &ifb
			}
			customizations.Ignition = &iign
		}
		if dirs := c.Directories; dirs != nil {
			idirs := make([]iblueprint.DirectoryCustomization, len(dirs))
			for idx := range dirs {
				idirs[idx] = iblueprint.DirectoryCustomization(dirs[idx])
			}
			customizations.Directories = idirs
		}
		if files := c.Files; files != nil {
			ifiles := make([]iblueprint.FileCustomization, len(files))
			for idx := range files {
				ifiles[idx] = iblueprint.FileCustomization(files[idx])
			}
			customizations.Files = ifiles
		}
		if repos := c.Repositories; repos != nil {
			irepos := make([]iblueprint.RepositoryCustomization, len(repos))
			for idx := range repos {
				irepos[idx] = iblueprint.RepositoryCustomization(repos[idx])
			}
			customizations.Repositories = irepos
		}
		if kernel := c.Kernel; kernel != nil {
			ikernel := iblueprint.KernelCustomization(*kernel)
			customizations.Kernel = &ikernel
		}
		if users := c.GetUsers(); users != nil { // contains both user customizations and converted sshkey customizations
			iusers := make([]iblueprint.UserCustomization, len(users))
			for idx := range users {
				iusers[idx] = iblueprint.UserCustomization(users[idx])
			}
			customizations.User = iusers
		}
		if groups := c.Group; groups != nil {
			igroups := make([]iblueprint.GroupCustomization, len(groups))
			for idx := range groups {
				igroups[idx] = iblueprint.GroupCustomization(groups[idx])
			}
			customizations.Group = igroups
		}
		if fs := c.Filesystem; fs != nil {
			ifs := make([]iblueprint.FilesystemCustomization, len(fs))
			for idx := range fs {
				ifs[idx] = iblueprint.FilesystemCustomization(fs[idx])
			}
			customizations.Filesystem = ifs
		}
		if disk := c.Disk; disk != nil {
			// Disk has nested per-partition structures (LVM logical volumes
			// and btrfs subvolumes) that need element-wise conversion.
			idisk := &iblueprint.DiskCustomization{
				MinSize:    disk.MinSize,
				Partitions: make([]iblueprint.PartitionCustomization, len(disk.Partitions)),
			}
			for idx, part := range disk.Partitions {
				ipart := iblueprint.PartitionCustomization{
					Type:                         part.Type,
					MinSize:                      part.MinSize,
					BtrfsVolumeCustomization:     iblueprint.BtrfsVolumeCustomization{},
					VGCustomization: iblueprint.VGCustomization{
						Name: part.VGCustomization.Name,
					},
					FilesystemTypedCustomization: iblueprint.FilesystemTypedCustomization(part.FilesystemTypedCustomization),
				}
				if len(part.LogicalVolumes) > 0 {
					ipart.LogicalVolumes = make([]iblueprint.LVCustomization, len(part.LogicalVolumes))
					for lvidx, lv := range part.LogicalVolumes {
						ipart.LogicalVolumes[lvidx] = iblueprint.LVCustomization{
							Name:                         lv.Name,
							MinSize:                      lv.MinSize,
							FilesystemTypedCustomization: iblueprint.FilesystemTypedCustomization(lv.FilesystemTypedCustomization),
						}
					}
				}
				if len(part.Subvolumes) > 0 {
					ipart.Subvolumes = make([]iblueprint.BtrfsSubvolumeCustomization, len(part.Subvolumes))
					for svidx, sv := range part.Subvolumes {
						ipart.Subvolumes[svidx] = iblueprint.BtrfsSubvolumeCustomization(sv)
					}
				}
				idisk.Partitions[idx] = ipart
			}
			customizations.Disk = idisk
		}
		if tz := c.Timezone; tz != nil {
			itz := iblueprint.TimezoneCustomization(*tz)
			customizations.Timezone = &itz
		}
		if locale := c.Locale; locale != nil {
			ilocale := iblueprint.LocaleCustomization(*locale)
			customizations.Locale = &ilocale
		}
		if fw := c.Firewall; fw != nil {
			ifw := iblueprint.FirewallCustomization{
				Ports: fw.Ports,
			}
			if services := fw.Services; services != nil {
				iservices := iblueprint.FirewallServicesCustomization(*services)
				ifw.Services = &iservices
			}
			if zones := fw.Zones; zones != nil {
				izones := make([]iblueprint.FirewallZoneCustomization, len(zones))
				for idx := range zones {
					izones[idx] = iblueprint.FirewallZoneCustomization(zones[idx])
				}
				ifw.Zones = izones
			}
			customizations.Firewall = &ifw
		}
		if services := c.Services; services != nil {
			iservices := iblueprint.ServicesCustomization(*services)
			customizations.Services = &iservices
		}
		if fips := c.FIPS; fips != nil {
			customizations.FIPS = fips
		}
		if installer := c.Installer; installer != nil {
			iinst := iblueprint.InstallerCustomization{
				Unattended:   installer.Unattended,
				SudoNopasswd: installer.SudoNopasswd,
			}
			if installer.Kickstart != nil {
				iinst.Kickstart = &iblueprint.Kickstart{
					Contents: installer.Kickstart.Contents,
				}
			}
			if installer.Modules != nil {
				iinst.Modules = &iblueprint.AnacondaModules{
					Enable:  installer.Modules.Enable,
					Disable: installer.Modules.Disable,
				}
			}
			customizations.Installer = &iinst
		}
		if rpm := c.RPM; rpm != nil && rpm.ImportKeys != nil {
			irpm := iblueprint.RPMCustomization{
				ImportKeys: &iblueprint.RPMImportKeys{
					Files: rpm.ImportKeys.Files,
				},
			}
			customizations.RPM = &irpm
		}
		if rhsm := c.RHSM; rhsm != nil && rhsm.Config != nil {
			// RHSM config is deeply nested; each pointer level is copied
			// (via common.ToPtr) only when present and non-nil.
			irhsm := iblueprint.RHSMCustomization{
				Config: &iblueprint.RHSMConfig{},
			}
			if plugins := rhsm.Config.DNFPlugins; plugins != nil {
				irhsm.Config.DNFPlugins = &iblueprint.SubManDNFPluginsConfig{}
				if plugins.ProductID != nil && plugins.ProductID.Enabled != nil {
					irhsm.Config.DNFPlugins.ProductID = &iblueprint.DNFPluginConfig{
						Enabled: common.ToPtr(*plugins.ProductID.Enabled),
					}
				}
				if plugins.SubscriptionManager != nil && plugins.SubscriptionManager.Enabled != nil {
					irhsm.Config.DNFPlugins.SubscriptionManager = &iblueprint.DNFPluginConfig{
						Enabled: common.ToPtr(*plugins.SubscriptionManager.Enabled),
					}
				}
			}
			if subManConf := rhsm.Config.SubscriptionManager; subManConf != nil {
				irhsm.Config.SubscriptionManager = &iblueprint.SubManConfig{}
				if subManConf.RHSMConfig != nil && subManConf.RHSMConfig.ManageRepos != nil {
					irhsm.Config.SubscriptionManager.RHSMConfig = &iblueprint.SubManRHSMConfig{
						ManageRepos: common.ToPtr(*subManConf.RHSMConfig.ManageRepos),
					}
				}
				if subManConf.RHSMCertdConfig != nil && subManConf.RHSMCertdConfig.AutoRegistration != nil {
					irhsm.Config.SubscriptionManager.RHSMCertdConfig = &iblueprint.SubManRHSMCertdConfig{
						AutoRegistration: common.ToPtr(*subManConf.RHSMCertdConfig.AutoRegistration),
					}
				}
			}
			customizations.RHSM = &irhsm
		}
		if ca := c.CACerts; ca != nil {
			ica := iblueprint.CACustomization{
				PEMCerts: ca.PEMCerts,
			}
			customizations.CACerts = &ica
		}
	}

	ibp := iblueprint.Blueprint{
		Name:           bp.Name,
		Description:    bp.Description,
		Version:        bp.Version,
		Packages:       pkgs,
		Modules:        modules,
		EnabledModules: enabledModules,
		Groups:         groups,
		Containers:     containers,
		Customizations: customizations,
		Distro:         bp.Distro,
	}
	return ibp
}

View file

@ -0,0 +1,508 @@
package blueprint
import (
"fmt"
"reflect"
"slices"
"strings"
"github.com/osbuild/images/pkg/cert"
"github.com/osbuild/images/pkg/customizations/anaconda"
"github.com/osbuild/images/pkg/disk"
)
// Customizations collects all image customizations supported by weldr
// blueprints. Every field is optional; a nil/zero value means the
// customization was not requested.
type Customizations struct {
	Hostname           *string                        `json:"hostname,omitempty" toml:"hostname,omitempty"`
	Kernel             *KernelCustomization           `json:"kernel,omitempty" toml:"kernel,omitempty"`
	SSHKey             []SSHKeyCustomization          `json:"sshkey,omitempty" toml:"sshkey,omitempty"`
	User               []UserCustomization            `json:"user,omitempty" toml:"user,omitempty"`
	Group              []GroupCustomization           `json:"group,omitempty" toml:"group,omitempty"`
	Timezone           *TimezoneCustomization         `json:"timezone,omitempty" toml:"timezone,omitempty"`
	Locale             *LocaleCustomization           `json:"locale,omitempty" toml:"locale,omitempty"`
	Firewall           *FirewallCustomization         `json:"firewall,omitempty" toml:"firewall,omitempty"`
	Services           *ServicesCustomization         `json:"services,omitempty" toml:"services,omitempty"`
	Filesystem         []FilesystemCustomization      `json:"filesystem,omitempty" toml:"filesystem,omitempty"`
	Disk               *DiskCustomization             `json:"disk,omitempty" toml:"disk,omitempty"`
	InstallationDevice string                         `json:"installation_device,omitempty" toml:"installation_device,omitempty"`
	PartitioningMode   string                         `json:"partitioning_mode,omitempty" toml:"partitioning_mode,omitempty"`
	FDO                *FDOCustomization              `json:"fdo,omitempty" toml:"fdo,omitempty"`
	OpenSCAP           *OpenSCAPCustomization         `json:"openscap,omitempty" toml:"openscap,omitempty"`
	Ignition           *IgnitionCustomization         `json:"ignition,omitempty" toml:"ignition,omitempty"`
	Directories        []DirectoryCustomization       `json:"directories,omitempty" toml:"directories,omitempty"`
	Files              []FileCustomization            `json:"files,omitempty" toml:"files,omitempty"`
	Repositories       []RepositoryCustomization      `json:"repositories,omitempty" toml:"repositories,omitempty"`
	FIPS               *bool                          `json:"fips,omitempty" toml:"fips,omitempty"`
	Installer          *InstallerCustomization        `json:"installer,omitempty" toml:"installer,omitempty"`
	RPM                *RPMCustomization              `json:"rpm,omitempty" toml:"rpm,omitempty"`
	RHSM               *RHSMCustomization             `json:"rhsm,omitempty" toml:"rhsm,omitempty"`
	CACerts            *CACustomization               `json:"cacerts,omitempty" toml:"cacerts,omitempty"`
	ContainersStorage  *ContainerStorageCustomization `json:"containers-storage,omitempty" toml:"containers-storage,omitempty"`
}
// IgnitionCustomization selects how an Ignition config is provided:
// embedded directly in the image, or configured at first boot.
type IgnitionCustomization struct {
	Embedded  *EmbeddedIgnitionCustomization  `json:"embedded,omitempty" toml:"embedded,omitempty"`
	FirstBoot *FirstBootIgnitionCustomization `json:"firstboot,omitempty" toml:"firstboot,omitempty"`
}
// EmbeddedIgnitionCustomization holds an inline Ignition config.
// Config is the configuration content (encoding expected by the consumer —
// confirm with callers of Convert).
type EmbeddedIgnitionCustomization struct {
	Config string `json:"config,omitempty" toml:"config,omitempty"`
}
// FirstBootIgnitionCustomization configures Ignition provisioning via a
// URL (serialized as "url"); the name suggests it is fetched at first boot.
type FirstBootIgnitionCustomization struct {
	ProvisioningURL string `json:"url,omitempty" toml:"url,omitempty"`
}
// FDOCustomization holds FDO (device onboarding) settings: the
// manufacturing server URL plus exactly one of the diun_pub_key_* options
// — presumably mutually exclusive; confirm with the consumer.
type FDOCustomization struct {
	ManufacturingServerURL string `json:"manufacturing_server_url,omitempty" toml:"manufacturing_server_url,omitempty"`
	DiunPubKeyInsecure     string `json:"diun_pub_key_insecure,omitempty" toml:"diun_pub_key_insecure,omitempty"`
	// This is the output of:
	// echo "sha256:$(openssl x509 -fingerprint -sha256 -noout -in diun_cert.pem | cut -d"=" -f2 | sed 's/://g')"
	DiunPubKeyHash          string `json:"diun_pub_key_hash,omitempty" toml:"diun_pub_key_hash,omitempty"`
	DiunPubKeyRootCerts     string `json:"diun_pub_key_root_certs,omitempty" toml:"diun_pub_key_root_certs,omitempty"`
	DiMfgStringTypeMacIface string `json:"di_mfg_string_type_mac_iface,omitempty" toml:"di_mfg_string_type_mac_iface,omitempty"`
}
// KernelCustomization selects the kernel package (Name) and extra
// arguments to append (Append — presumably kernel command-line; confirm
// with consumer).
type KernelCustomization struct {
	Name   string `json:"name,omitempty" toml:"name,omitempty"`
	Append string `json:"append" toml:"append"`
}
// SSHKeyCustomization associates an SSH key with a user name.
type SSHKeyCustomization struct {
	User string `json:"user" toml:"user"`
	Key  string `json:"key" toml:"key"`
}
// UserCustomization describes a user account to create or configure.
// All fields except Name are optional.
type UserCustomization struct {
	Name        string   `json:"name" toml:"name"`
	Description *string  `json:"description,omitempty" toml:"description,omitempty"`
	// Password may be plaintext or already crypted; Blueprint.CryptPasswords
	// hashes plaintext values during initialization.
	Password           *string  `json:"password,omitempty" toml:"password,omitempty"`
	Key                *string  `json:"key,omitempty" toml:"key,omitempty"`
	Home               *string  `json:"home,omitempty" toml:"home,omitempty"`
	Shell              *string  `json:"shell,omitempty" toml:"shell,omitempty"`
	Groups             []string `json:"groups,omitempty" toml:"groups,omitempty"`
	UID                *int     `json:"uid,omitempty" toml:"uid,omitempty"`
	GID                *int     `json:"gid,omitempty" toml:"gid,omitempty"`
	ExpireDate         *int     `json:"expiredate,omitempty" toml:"expiredate,omitempty"`
	ForcePasswordReset *bool    `json:"force_password_reset,omitempty" toml:"force_password_reset,omitempty"`
}
type GroupCustomization struct {
Name string `json:"name" toml:"name"`
GID *int `json:"gid,omitempty" toml:"gid,omitempty"`
}
type TimezoneCustomization struct {
Timezone *string `json:"timezone,omitempty" toml:"timezone,omitempty"`
NTPServers []string `json:"ntpservers,omitempty" toml:"ntpservers,omitempty"`
}
type LocaleCustomization struct {
Languages []string `json:"languages,omitempty" toml:"languages,omitempty"`
Keyboard *string `json:"keyboard,omitempty" toml:"keyboard,omitempty"`
}
type FirewallCustomization struct {
Ports []string `json:"ports,omitempty" toml:"ports,omitempty"`
Services *FirewallServicesCustomization `json:"services,omitempty" toml:"services,omitempty"`
Zones []FirewallZoneCustomization `json:"zones,omitempty" toml:"zones,omitempty"`
}
type FirewallZoneCustomization struct {
Name *string `json:"name,omitempty" toml:"name,omitempty"`
Sources []string `json:"sources,omitempty" toml:"sources,omitempty"`
}
type FirewallServicesCustomization struct {
Enabled []string `json:"enabled,omitempty" toml:"enabled,omitempty"`
Disabled []string `json:"disabled,omitempty" toml:"disabled,omitempty"`
}
type ServicesCustomization struct {
Enabled []string `json:"enabled,omitempty" toml:"enabled,omitempty"`
Disabled []string `json:"disabled,omitempty" toml:"disabled,omitempty"`
Masked []string `json:"masked,omitempty" toml:"masked,omitempty"`
}
// OpenSCAPCustomization configures OpenSCAP remediation: a datastream and
// profile, plus optional (XML or JSON) tailoring.
type OpenSCAPCustomization struct {
	DataStream    string                               `json:"datastream,omitempty" toml:"datastream,omitempty"`
	ProfileID     string                               `json:"profile_id,omitempty" toml:"profile_id,omitempty"`
	Tailoring     *OpenSCAPTailoringCustomizations     `json:"tailoring,omitempty" toml:"tailoring,omitempty"`
	JSONTailoring *OpenSCAPJSONTailoringCustomizations `json:"json_tailoring,omitempty" toml:"json_tailoring,omitempty"`
	PolicyID      string                               `json:"policy_id,omitempty" toml:"policy_id,omitempty"`
}

// OpenSCAPTailoringCustomizations selects and unselects rules of the base
// OpenSCAP profile.
type OpenSCAPTailoringCustomizations struct {
	Selected   []string `json:"selected,omitempty" toml:"selected,omitempty"`
	Unselected []string `json:"unselected,omitempty" toml:"unselected,omitempty"`
}

// OpenSCAPJSONTailoringCustomizations points at a JSON tailoring file and
// the profile it defines.
type OpenSCAPJSONTailoringCustomizations struct {
	ProfileID string `json:"profile_id,omitempty" toml:"profile_id,omitempty"`
	Filepath  string `json:"filepath,omitempty" toml:"filepath,omitempty"`
}

// CACustomization holds PEM-encoded CA certificate bundles; they are parsed
// for validity by checkCACerts.
type CACustomization struct {
	PEMCerts []string `json:"pem_certs,omitempty" toml:"pem_certs,omitempty"`
}

// Configure the container storage separately from containers, since we most likely would
// like to use the same storage path for all of the containers.
type ContainerStorageCustomization struct {
	// destination is always `containers-storage`, so we won't expose this
	StoragePath *string `json:"destination-path,omitempty" toml:"destination-path,omitempty"`
}

// CustomizationError is returned by CheckAllowed when a blueprint contains
// a customization that is not permitted.
type CustomizationError struct {
	Message string
}

// Error implements the error interface.
func (e *CustomizationError) Error() string {
	return e.Message
}
// CheckAllowed returns an error of type `CustomizationError` if `c` has any
// non-empty customization field whose Go struct field name (e.g. "Hostname",
// "Kernel") is not listed in `allowed`. A nil receiver passes trivially.
func (c *Customizations) CheckAllowed(allowed ...string) error {
	if c == nil {
		return nil
	}

	allowMap := make(map[string]bool)
	for _, a := range allowed {
		allowMap[a] = true
	}

	t := reflect.TypeOf(*c)
	v := reflect.ValueOf(*c)

	for i := 0; i < t.NumField(); i++ {
		empty := false
		field := v.Field(i)

		// A field counts as "unset" when it is the zero value for its kind:
		// empty string, zero-length slice/array, or nil pointer.
		switch field.Kind() {
		case reflect.String:
			if field.String() == "" {
				empty = true
			}
		case reflect.Array, reflect.Slice:
			if field.Len() == 0 {
				empty = true
			}
		case reflect.Ptr:
			if field.IsNil() {
				empty = true
			}
		default:
			// Report the offending field's kind, not v.Kind(): the latter is
			// always "struct" (the Customizations value itself) and made the
			// panic message useless for diagnosing the unhandled field.
			panic(fmt.Sprintf("unhandled customization field type %s, %s", field.Kind(), t.Field(i).Name))
		}

		if !empty && !allowMap[t.Field(i).Name] {
			return &CustomizationError{fmt.Sprintf("'%s' is not allowed", t.Field(i).Name)}
		}
	}

	return nil
}
// GetHostname returns the configured hostname, or nil when unset.
func (c *Customizations) GetHostname() *string {
	if c != nil {
		return c.Hostname
	}
	return nil
}
// GetPrimaryLocale returns the primary language (the first entry of
// Languages) and the keyboard layout. Either value may be nil when unset.
func (c *Customizations) GetPrimaryLocale() (*string, *string) {
	if c == nil {
		return nil, nil
	}
	if c.Locale == nil {
		return nil, nil
	}
	if len(c.Locale.Languages) == 0 {
		return nil, c.Locale.Keyboard
	}
	return &c.Locale.Languages[0], c.Locale.Keyboard
}

// GetTimezoneSettings returns the timezone (nil when unset) and the list of
// NTP servers.
func (c *Customizations) GetTimezoneSettings() (*string, []string) {
	if c == nil {
		return nil, nil
	}
	if c.Timezone == nil {
		return nil, nil
	}
	return c.Timezone.Timezone, c.Timezone.NTPServers
}
// GetUsers returns the user customizations, converting each legacy sshkey
// customization into a user entry and prepending it to the list. Trailing
// slashes are stripped from home directories.
func (c *Customizations) GetUsers() []UserCustomization {
	if c == nil || (c.User == nil && c.SSHKey == nil) {
		return nil
	}

	var users []UserCustomization

	// prepend sshkey for backwards compat (overridden by users)
	if len(c.SSHKey) > 0 {
		for _, k := range c.SSHKey {
			key := k.Key // copy so each entry points at its own string
			users = append(users, UserCustomization{
				Name: k.User,
				Key:  &key,
			})
		}
	}

	users = append(users, c.User...)

	// sanitize user home directory in blueprint: if it has a trailing slash,
	// it might lead to the directory not getting the correct selinux labels
	for idx := range users {
		u := users[idx]
		if u.Home != nil {
			homedir := strings.TrimRight(*u.Home, "/")
			u.Home = &homedir
			users[idx] = u
		}
	}

	return users
}
// GetGroups returns the group customizations, or nil.
func (c *Customizations) GetGroups() []GroupCustomization {
	if c == nil {
		return nil
	}
	return c.Group
}

// GetKernel returns the kernel customization, defaulting the package name to
// "kernel" when no name is set. It never returns nil.
func (c *Customizations) GetKernel() *KernelCustomization {
	var kernelName, kernelAppend string
	if c != nil && c.Kernel != nil {
		kernelName = c.Kernel.Name
		kernelAppend = c.Kernel.Append
	}
	if kernelName == "" {
		kernelName = "kernel"
	}
	return &KernelCustomization{
		Name:   kernelName,
		Append: kernelAppend,
	}
}

// GetFirewall returns the firewall customization, or nil.
func (c *Customizations) GetFirewall() *FirewallCustomization {
	if c == nil {
		return nil
	}
	return c.Firewall
}

// GetServices returns the services customization, or nil.
func (c *Customizations) GetServices() *ServicesCustomization {
	if c == nil {
		return nil
	}
	return c.Services
}

// GetFilesystems returns the filesystem customizations, or nil.
func (c *Customizations) GetFilesystems() []FilesystemCustomization {
	if c == nil {
		return nil
	}
	return c.Filesystem
}
// GetFilesystemsMinSize returns the sum of the minimum sizes of all
// filesystem customizations, rounded up to the next multiple of 512 bytes.
func (c *Customizations) GetFilesystemsMinSize() uint64 {
	if c == nil {
		return 0
	}
	var agg uint64
	for _, m := range c.Filesystem {
		agg += m.MinSize
	}
	// This ensures that file system customization `size` is a multiple of
	// sector size (512)
	if agg%512 != 0 {
		agg = (agg/512 + 1) * 512
	}
	return agg
}
// GetPartitioning returns the disk customization after validating it.
// Validate handles a nil Disk, so this is safe when no disk is configured.
func (c *Customizations) GetPartitioning() (*DiskCustomization, error) {
	if c == nil {
		return nil, nil
	}
	if err := c.Disk.Validate(); err != nil {
		return nil, err
	}
	return c.Disk, nil
}

// GetPartitioningMode converts the string to a disk.PartitioningMode type
func (c *Customizations) GetPartitioningMode() (disk.PartitioningMode, error) {
	if c == nil {
		return disk.DefaultPartitioningMode, nil
	}
	switch c.PartitioningMode {
	case "raw":
		return disk.RawPartitioningMode, nil
	case "lvm":
		return disk.LVMPartitioningMode, nil
	case "auto-lvm":
		return disk.AutoLVMPartitioningMode, nil
	case "":
		return disk.DefaultPartitioningMode, nil
	default:
		return disk.DefaultPartitioningMode, fmt.Errorf("invalid partitioning mode '%s'", c.PartitioningMode)
	}
}

// GetInstallationDevice returns the installation device path, or "" when
// unset.
func (c *Customizations) GetInstallationDevice() string {
	if c == nil || c.InstallationDevice == "" {
		return ""
	}
	return c.InstallationDevice
}
// GetFDO returns the FDO customization, or nil.
func (c *Customizations) GetFDO() *FDOCustomization {
	if c == nil {
		return nil
	}
	return c.FDO
}

// GetOpenSCAP returns the OpenSCAP customization, or nil.
func (c *Customizations) GetOpenSCAP() *OpenSCAPCustomization {
	if c == nil {
		return nil
	}
	return c.OpenSCAP
}

// GetIgnition returns the Ignition customization, or nil.
func (c *Customizations) GetIgnition() *IgnitionCustomization {
	if c == nil {
		return nil
	}
	return c.Ignition
}

// GetDirectories returns the directory customizations, or nil.
func (c *Customizations) GetDirectories() []DirectoryCustomization {
	if c == nil {
		return nil
	}
	return c.Directories
}

// GetFiles returns the file customizations, or nil.
func (c *Customizations) GetFiles() []FileCustomization {
	if c == nil {
		return nil
	}
	return c.Files
}
// GetRepositories returns the custom repository customizations after
// validating each one, or the first validation error encountered.
func (c *Customizations) GetRepositories() ([]RepositoryCustomization, error) {
	if c == nil {
		return nil, nil
	}
	for _, repo := range c.Repositories {
		err := validateCustomRepository(&repo)
		if err != nil {
			return nil, err
		}
	}
	return c.Repositories, nil
}
// GetFIPS reports whether FIPS mode is enabled. An unset (nil) value is
// treated as disabled.
func (c *Customizations) GetFIPS() bool {
	if c != nil && c.FIPS != nil {
		return *c.FIPS
	}
	return false
}
// GetContainerStorage returns the container storage customization, or nil
// when it is unset or its storage path is missing/empty.
func (c *Customizations) GetContainerStorage() *ContainerStorageCustomization {
	if c == nil || c.ContainersStorage == nil {
		return nil
	}
	// StoragePath is a pointer and may legitimately be nil (e.g. an empty
	// containers-storage table); treat that like an empty path instead of
	// dereferencing a nil pointer and panicking.
	if c.ContainersStorage.StoragePath == nil || *c.ContainersStorage.StoragePath == "" {
		return nil
	}
	return c.ContainersStorage
}
// GetInstaller returns the installer customizations after checking for
// conflicting combinations: unattended mode and sudo-nopasswd are rejected
// alongside custom kickstart contents, and disabling the Anaconda Users
// module is rejected when user or group customizations are present.
func (c *Customizations) GetInstaller() (*InstallerCustomization, error) {
	if c == nil || c.Installer == nil {
		return nil, nil
	}

	// Validate conflicting customizations: Installer options aren't supported
	// when the user adds their own kickstart content
	if c.Installer.Kickstart != nil && len(c.Installer.Kickstart.Contents) > 0 {
		if c.Installer.Unattended {
			return nil, fmt.Errorf("installer.unattended is not supported when adding custom kickstart contents")
		}
		if len(c.Installer.SudoNopasswd) > 0 {
			return nil, fmt.Errorf("installer.sudo-nopasswd is not supported when adding custom kickstart contents")
		}
	}

	// Disabling the user module isn't supported when users or groups are
	// defined
	if c.Installer.Modules != nil &&
		slices.Contains(c.Installer.Modules.Disable, anaconda.ModuleUsers) &&
		len(c.User)+len(c.Group) > 0 {
		return nil, fmt.Errorf("blueprint contains user or group customizations but disables the required Users Anaconda module")
	}

	return c.Installer, nil
}
// GetRPM returns the RPM customization, or nil.
func (c *Customizations) GetRPM() *RPMCustomization {
	if c == nil {
		return nil
	}
	return c.RPM
}

// GetRHSM returns the RHSM customization, or nil.
func (c *Customizations) GetRHSM() *RHSMCustomization {
	if c == nil {
		return nil
	}
	return c.RHSM
}

// checkCACerts parses every PEM bundle in CACerts and returns the first
// parse error, or nil when all bundles parse (or CACerts is unset).
func (c *Customizations) checkCACerts() error {
	if c == nil || c.CACerts == nil {
		return nil
	}
	for _, bundle := range c.CACerts.PEMCerts {
		_, err := cert.ParseCerts(bundle)
		if err != nil {
			return err
		}
	}
	return nil
}

// GetCACerts returns the CA certificate customization after validating that
// all PEM bundles parse.
func (c *Customizations) GetCACerts() (*CACustomization, error) {
	if c == nil {
		return nil, nil
	}
	if err := c.checkCACerts(); err != nil {
		return nil, err
	}
	return c.CACerts, nil
}

View file

@ -0,0 +1,689 @@
package blueprint
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"path/filepath"
"regexp"
"slices"
"strings"
"github.com/google/uuid"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/pathpolicy"
)
// DiskCustomization describes a custom partition table.
type DiskCustomization struct {
	// Type of the partition table: gpt or dos.
	// Optional, the default depends on the distro and image type.
	Type    string `json:"type,omitempty" toml:"type,omitempty"`
	MinSize uint64 `json:"minsize,omitempty" toml:"minsize,omitempty"`
	// Partitions on the disk. The TOML tag was previously a copy-paste of
	// "minsize"; it must be "partitions" to match the JSON tag and the
	// diskCustomizationMarshaler.
	Partitions []PartitionCustomization `json:"partitions,omitempty" toml:"partitions,omitempty"`
}
// diskCustomizationMarshaler mirrors DiskCustomization but uses
// datasizes.Size for MinSize so size values can be decoded from either
// numbers or datasize strings.
type diskCustomizationMarshaler struct {
	Type       string                   `json:"type,omitempty" toml:"type,omitempty"`
	MinSize    datasizes.Size           `json:"minsize,omitempty" toml:"minsize,omitempty"`
	Partitions []PartitionCustomization `json:"partitions,omitempty" toml:"partitions,omitempty"`
}

// UnmarshalJSON decodes through diskCustomizationMarshaler and converts
// MinSize to a plain uint64.
func (dc *DiskCustomization) UnmarshalJSON(data []byte) error {
	var dcm diskCustomizationMarshaler
	if err := json.Unmarshal(data, &dcm); err != nil {
		return err
	}
	dc.Type = dcm.Type
	dc.MinSize = dcm.MinSize.Uint64()
	dc.Partitions = dcm.Partitions
	return nil
}

// UnmarshalTOML reuses the JSON decoding path via unmarshalTOMLviaJSON.
func (dc *DiskCustomization) UnmarshalTOML(data any) error {
	return unmarshalTOMLviaJSON(dc, data)
}
// PartitionCustomization defines a single partition on a disk. The Type
// defines the kind of "payload" for the partition: plain, lvm, or btrfs.
//   - plain: the payload will be a filesystem on a partition (e.g. xfs, ext4).
//     See [FilesystemTypedCustomization] for extra fields.
//   - lvm: the payload will be an LVM volume group. See [VGCustomization] for
//     extra fields
//   - btrfs: the payload will be a btrfs volume. See
//     [BtrfsVolumeCustomization] for extra fields.
type PartitionCustomization struct {
	// The type of payload for the partition (optional, defaults to "plain").
	Type string `json:"type,omitempty" toml:"type,omitempty"`

	// Minimum size of the partition that contains the filesystem (for "plain"
	// filesystem), volume group ("lvm"), or btrfs volume ("btrfs"). The final
	// size of the partition will be larger than the minsize if the sum of the
	// contained volumes (logical volumes or subvolumes) is larger. In
	// addition, certain mountpoints have required minimum sizes. See
	// https://osbuild.org/docs/user-guide/partitioning for more details.
	// (optional, defaults depend on payload and mountpoints).
	MinSize uint64 `json:"minsize" toml:"minsize"`

	// The partition type GUID for GPT partitions. For DOS partitions, this
	// field can be used to set the (2 hex digit) partition type.
	// If not set, the type will be automatically set based on the mountpoint
	// or the payload type.
	PartType string `json:"part_type,omitempty" toml:"part_type,omitempty"`

	// Exactly one of the embedded payloads is used, selected by Type; the
	// custom unmarshalers reject fields from the other payload types.
	BtrfsVolumeCustomization
	VGCustomization
	FilesystemTypedCustomization
}

// A filesystem on a plain partition or LVM logical volume.
// Note the differences from [FilesystemCustomization]:
//   - Adds a label.
//   - Adds a filesystem type (fs_type).
//   - Does not define a size. The size is defined by its container: a
//     partition ([PartitionCustomization]) or LVM logical volume
//     ([LVCustomization]).
//
// Setting the FSType to "swap" creates a swap area (and the Mountpoint must be
// empty).
type FilesystemTypedCustomization struct {
	// Note that it is marked omitempty because the fields of the embedded
	// structs are optional in the scope of the [PartitionCustomization].
	Mountpoint string `json:"mountpoint,omitempty" toml:"mountpoint,omitempty"`

	// Filesystem label
	Label string `json:"label,omitempty" toml:"label,omitempty"`

	// Filesystem type (ext4, xfs, vfat)
	FSType string `json:"fs_type,omitempty" toml:"fs_type,omitempty"`
}

// An LVM volume group with one or more logical volumes.
type VGCustomization struct {
	// Volume group name (optional, default will be automatically generated).
	Name string `json:"name,omitempty" toml:"name,omitempty"`

	// One or more logical volumes for this volume group (required).
	// Note that it is marked omitempty because the fields of the embedded
	// structs are optional in the scope of the [PartitionCustomization].
	LogicalVolumes []LVCustomization `json:"logical_volumes,omitempty" toml:"logical_volumes,omitempty"`
}

// LVCustomization describes a single LVM logical volume and the filesystem
// it carries.
type LVCustomization struct {
	// Logical volume name
	Name string `json:"name,omitempty" toml:"name,omitempty"`

	// Minimum size of the logical volume
	MinSize uint64 `json:"minsize,omitempty" toml:"minsize,omitempty"`

	FilesystemTypedCustomization
}
// Custom JSON unmarshaller for LVCustomization for handling the conversion of
// data sizes (minsize) expressed as strings to uint64. A missing minsize is
// an error.
func (lv *LVCustomization) UnmarshalJSON(data []byte) error {
	// Decode into a shadow struct with an untyped MinSize so it can hold
	// either a number or a size string.
	var lvAnySize struct {
		Name    string `json:"name,omitempty" toml:"name,omitempty"`
		MinSize any    `json:"minsize,omitempty" toml:"minsize,omitempty"`
		FilesystemTypedCustomization
	}
	if err := json.Unmarshal(data, &lvAnySize); err != nil {
		return err
	}
	lv.Name = lvAnySize.Name
	lv.FilesystemTypedCustomization = lvAnySize.FilesystemTypedCustomization

	if lvAnySize.MinSize == nil {
		return fmt.Errorf("minsize is required")
	}
	size, err := decodeSize(lvAnySize.MinSize)
	if err != nil {
		return err
	}
	lv.MinSize = size
	return nil
}
// A btrfs volume consisting of one or more subvolumes.
type BtrfsVolumeCustomization struct {
	Subvolumes []BtrfsSubvolumeCustomization `json:"subvolumes,omitempty" toml:"subvolumes,omitempty"`
}

// BtrfsSubvolumeCustomization describes one subvolume of a btrfs volume.
type BtrfsSubvolumeCustomization struct {
	// The name of the subvolume, which defines the location (path) on the
	// root volume (required).
	// See https://btrfs.readthedocs.io/en/latest/Subvolumes.html
	// Note that it is marked omitempty because the fields of the embedded
	// structs are optional in the scope of the [PartitionCustomization].
	Name string `json:"name,omitempty" toml:"name,omitempty"`

	// Mountpoint for the subvolume.
	// Note that it is marked omitempty because the fields of the embedded
	// structs are optional in the scope of the [PartitionCustomization].
	Mountpoint string `json:"mountpoint,omitempty" toml:"mountpoint,omitempty"`
}
// Custom JSON unmarshaller that first reads the value of the "type" field and
// then deserialises the whole object into a struct that only contains the
// fields valid for that partition type. This ensures that no fields are set
// for the substructure of a different type than the one defined in the "type"
// fields.
func (v *PartitionCustomization) UnmarshalJSON(data []byte) error {
	errPrefix := "JSON unmarshal:"

	// Peek at type/minsize/part_type first; the payload-specific decoding
	// handles the rest.
	var typeSniffer struct {
		Type     string `json:"type"`
		MinSize  any    `json:"minsize"`
		PartType string `json:"part_type"`
	}
	if err := json.Unmarshal(data, &typeSniffer); err != nil {
		return fmt.Errorf("%s %w", errPrefix, err)
	}

	// An omitted type defaults to "plain".
	partType := "plain"
	if typeSniffer.Type != "" {
		partType = typeSniffer.Type
	}

	switch partType {
	case "plain":
		if err := decodePlain(v, data); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	case "btrfs":
		if err := decodeBtrfs(v, data); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	case "lvm":
		if err := decodeLVM(v, data); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	default:
		return fmt.Errorf("%s unknown partition type: %s", errPrefix, partType)
	}

	v.Type = partType
	v.PartType = typeSniffer.PartType

	// minsize is mandatory and may be a number or a size string.
	if typeSniffer.MinSize == nil {
		return fmt.Errorf("minsize is required")
	}
	minsize, err := decodeSize(typeSniffer.MinSize)
	if err != nil {
		return fmt.Errorf("%s error decoding minsize for partition: %w", errPrefix, err)
	}
	v.MinSize = minsize

	return nil
}
// decodePlain decodes the data into a struct that only embeds the
// FilesystemCustomization with DisallowUnknownFields. This ensures that when
// the type is "plain", none of the fields for btrfs or lvm are used.
func decodePlain(v *PartitionCustomization, data []byte) error {
	var plain struct {
		// Type, minsize, and part_type are handled by the caller. These are added here to
		// satisfy "DisallowUnknownFields" when decoding.
		Type     string `json:"type"`
		MinSize  any    `json:"minsize"`
		PartType string `json:"part_type"`
		FilesystemTypedCustomization
	}
	decoder := json.NewDecoder(bytes.NewReader(data))
	decoder.DisallowUnknownFields()
	err := decoder.Decode(&plain)
	if err != nil {
		return fmt.Errorf("error decoding partition with type \"plain\": %w", err)
	}
	v.FilesystemTypedCustomization = plain.FilesystemTypedCustomization
	return nil
}

// decodeBtrfs decodes the data into a struct that only embeds the
// BtrfsVolumeCustomization with DisallowUnknownFields. This ensures that when
// the type is btrfs, none of the fields for plain or lvm are used.
func decodeBtrfs(v *PartitionCustomization, data []byte) error {
	var btrfs struct {
		// Type, minsize, and part_type are handled by the caller. These are added here to
		// satisfy "DisallowUnknownFields" when decoding.
		Type     string `json:"type"`
		MinSize  any    `json:"minsize"`
		PartType string `json:"part_type"`
		BtrfsVolumeCustomization
	}
	decoder := json.NewDecoder(bytes.NewReader(data))
	decoder.DisallowUnknownFields()
	err := decoder.Decode(&btrfs)
	if err != nil {
		return fmt.Errorf("error decoding partition with type \"btrfs\": %w", err)
	}
	v.BtrfsVolumeCustomization = btrfs.BtrfsVolumeCustomization
	return nil
}

// decodeLVM decodes the data into a struct that only embeds the
// VGCustomization with DisallowUnknownFields. This ensures that when the type
// is lvm, none of the fields for plain or btrfs are used.
func decodeLVM(v *PartitionCustomization, data []byte) error {
	var vg struct {
		// Type, minsize, and part_type are handled by the caller. These are added here to
		// satisfy "DisallowUnknownFields" when decoding.
		Type     string `json:"type"`
		MinSize  any    `json:"minsize"`
		PartType string `json:"part_type"`
		VGCustomization
	}
	decoder := json.NewDecoder(bytes.NewReader(data))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(&vg); err != nil {
		return fmt.Errorf("error decoding partition with type \"lvm\": %w", err)
	}
	v.VGCustomization = vg.VGCustomization
	return nil
}
// Custom TOML unmarshaller that first reads the value of the "type" field and
// then deserialises the whole object into a struct that only contains the
// fields valid for that partition type. This ensures that no fields are set
// for the substructure of a different type than the one defined in the "type"
// fields.
func (v *PartitionCustomization) UnmarshalTOML(data any) error {
	errPrefix := "TOML unmarshal:"

	d, ok := data.(map[string]any)
	if !ok {
		return fmt.Errorf("%s customizations.partition is not an object", errPrefix)
	}

	// An omitted type defaults to "plain".
	partType := "plain"
	if typeField, ok := d["type"]; ok {
		typeStr, ok := typeField.(string)
		if !ok {
			return fmt.Errorf("%s type must be a string, got \"%v\" of type %T", errPrefix, typeField, typeField)
		}
		partType = typeStr
	}

	// serialise the data to JSON and reuse the subobject decoders
	dataJSON, err := json.Marshal(data)
	if err != nil {
		return fmt.Errorf("%s error while decoding partition customization: %w", errPrefix, err)
	}

	switch partType {
	case "plain":
		if err := decodePlain(v, dataJSON); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	case "btrfs":
		if err := decodeBtrfs(v, dataJSON); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	case "lvm":
		if err := decodeLVM(v, dataJSON); err != nil {
			return fmt.Errorf("%s %w", errPrefix, err)
		}
	default:
		return fmt.Errorf("%s unknown partition type: %s", errPrefix, partType)
	}

	v.Type = partType

	// minsize is mandatory and may be an integer or a size string.
	minsizeField, ok := d["minsize"]
	if !ok {
		return fmt.Errorf("minsize is required")
	}
	minsize, err := decodeSize(minsizeField)
	if err != nil {
		return fmt.Errorf("%s error decoding minsize for partition: %w", errPrefix, err)
	}
	v.MinSize = minsize

	return nil
}
// Validate checks for customization combinations that are generally not
// supported or can create conflicts, regardless of specific distro or image
// type policies. The validator ensures all of the following properties:
//   - All mountpoints are valid
//   - All mountpoints are unique
//   - All LVM volume group names are unique
//   - All LVM logical volume names are unique within a given volume group
//   - All btrfs subvolume names are unique within a given btrfs volume
//   - All btrfs subvolume names are valid and non-empty
//   - All filesystems are valid for their mountpoints (e.g. xfs or ext4 for /boot)
//   - No LVM logical volume has an invalid mountpoint (/boot or /boot/efi)
//   - Plain filesystem types are valid for the partition type
//   - All non-empty properties are valid for the partition type (e.g.
//     LogicalVolumes is empty when the type is "plain" or "btrfs")
//   - Filesystems with FSType set to "swap" do not specify a mountpoint.
//
// Note that in *addition* consumers should also call
// ValidateLayoutConstraints() to validate that the policy for disk
// customizations is met.
func (p *DiskCustomization) Validate() error {
	if p == nil {
		return nil
	}

	switch p.Type {
	case "gpt", "":
	case "dos":
		// dos/mbr only supports 4 partitions
		// Unfortunately, at this stage it's unknown whether we will need extra
		// partitions (bios boot, root, esp), so this check is just to catch
		// obvious invalid customizations early. The final partition table is
		// checked after it's created.
		if len(p.Partitions) > 4 {
			return fmt.Errorf("invalid partitioning customizations: \"dos\" partition table type only supports up to 4 partitions: got %d", len(p.Partitions))
		}
	default:
		return fmt.Errorf("unknown partition table type: %s (valid: gpt, dos)", p.Type)
	}

	// Shared across all partitions so duplicates are caught globally.
	mountpoints := make(map[string]bool)
	vgnames := make(map[string]bool)
	var errs []error
	for _, part := range p.Partitions {
		if err := part.ValidatePartitionTypeID(p.Type); err != nil {
			errs = append(errs, err)
		}
		switch part.Type {
		case "plain", "":
			errs = append(errs, part.validatePlain(mountpoints))
		case "lvm":
			errs = append(errs, part.validateLVM(mountpoints, vgnames))
		case "btrfs":
			errs = append(errs, part.validateBtrfs(mountpoints))
		default:
			errs = append(errs, fmt.Errorf("unknown partition type: %s", part.Type))
		}
	}

	// will discard all nil errors
	if err := errors.Join(errs...); err != nil {
		return fmt.Errorf("invalid partitioning customizations:\n%w", err)
	}

	return nil
}
// validateMountpoint checks that path is a non-empty, absolute, canonical
// filesystem path suitable for use as a mountpoint.
func validateMountpoint(path string) error {
	switch {
	case path == "":
		return fmt.Errorf("mountpoint is empty")
	case !strings.HasPrefix(path, "/"):
		return fmt.Errorf("mountpoint %q is not an absolute path", path)
	}
	cleaned := filepath.Clean(path)
	if cleaned != path {
		return fmt.Errorf("mountpoint %q is not a canonical path (did you mean %q?)", path, cleaned)
	}
	return nil
}
// ValidateLayoutConstraints checks that at most one LVM Volume Group or btrfs
// volume is defined. Returns an error if both LVM and btrfs are set and if
// either has more than one element.
//
// Note that this is a *policy* validation, in theory the "disk" code
// does support the constraints but we choose not to allow them for
// now. Each consumer of "DiskCustomization" should call this
// *unless* it's very low-level and not end-user-facing.
func (p *DiskCustomization) ValidateLayoutConstraints() error {
	if p == nil {
		return nil
	}

	var btrfsVols, lvmVGs uint
	for _, part := range p.Partitions {
		switch part.Type {
		case "lvm":
			lvmVGs++
		case "btrfs":
			btrfsVols++
		}
		// Mixing the two payload kinds is rejected as soon as both are seen.
		if lvmVGs > 0 && btrfsVols > 0 {
			return fmt.Errorf("btrfs and lvm partitioning cannot be combined")
		}
	}

	if btrfsVols > 1 {
		return fmt.Errorf("multiple btrfs volumes are not yet supported")
	}

	if lvmVGs > 1 {
		return fmt.Errorf("multiple LVM volume groups are not yet supported")
	}

	return nil
}
// validateFilesystemType checks that fstype is allowed for the given
// mountpoint: /boot requires xfs or ext4 and /boot/efi requires vfat.
// All other mountpoints accept any filesystem type.
func validateFilesystemType(path, fstype string) error {
	allowed := true
	switch path {
	case "/boot":
		allowed = fstype == "xfs" || fstype == "ext4"
	case "/boot/efi":
		allowed = fstype == "vfat"
	}
	if !allowed {
		return fmt.Errorf("unsupported filesystem type for %q: %s", path, fstype)
	}
	return nil
}
// These mountpoints must be on a plain partition (i.e. not on LVM or btrfs).
var plainOnlyMountpoints = []string{
	"/boot",
	"/boot/efi", // not allowed by our global policies, but that might change
}

// Filesystem types accepted for plain partitions and LVM logical volumes.
var validPlainFSTypes = []string{
	"ext4",
	"vfat",
	"xfs",
}

// exactly 2 hex digits
var validDosPartitionType = regexp.MustCompile(`^[0-9a-fA-F]{2}$`)
// ValidatePartitionTypeID returns an error if the partition type ID is not
// valid given the partition table type. If the partition table type is an
// empty string, the function returns an error only if the partition type ID is
// invalid for both gpt and dos partition tables.
func (p *PartitionCustomization) ValidatePartitionTypeID(ptType string) error {
	// Empty PartType is fine, it will be selected automatically
	if p.PartType == "" {
		return nil
	}

	// Check both representations up front: a GPT type is a UUID, a DOS type
	// is a 2-digit hex number.
	_, uuidErr := uuid.Parse(p.PartType)
	validDosType := validDosPartitionType.MatchString(p.PartType)

	switch ptType {
	case "gpt":
		if uuidErr != nil {
			return fmt.Errorf("invalid partition part_type %q for partition table type %q (must be a valid UUID): %w", p.PartType, ptType, uuidErr)
		}
	case "dos":
		if !validDosType {
			return fmt.Errorf("invalid partition part_type %q for partition table type %q (must be a 2-digit hex number)", p.PartType, ptType)
		}
	case "":
		// We don't know the partition table type yet, the fallback is controlled
		// by the CustomPartitionTableOptions, so return an error if it fails both.
		if uuidErr != nil && !validDosType {
			return fmt.Errorf("invalid part_type %q: must be a valid UUID for GPT partition tables or a 2-digit hex number for DOS partition tables", p.PartType)
		}
	default:
		// ignore: handled elsewhere
	}
	return nil
}
// validatePlain validates a plain (filesystem) partition customization and
// records its mountpoint in mountpoints. A swap partition must have an empty
// mountpoint and is exempt from the mountpoint and fs_type checks.
func (p *PartitionCustomization) validatePlain(mountpoints map[string]bool) error {
	if p.FSType == "swap" {
		// make sure the mountpoint is empty and return
		if p.Mountpoint != "" {
			return fmt.Errorf("mountpoint for swap partition must be empty (got %q)", p.Mountpoint)
		}
		return nil
	}
	if err := validateMountpoint(p.Mountpoint); err != nil {
		return err
	}
	if mountpoints[p.Mountpoint] {
		return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", p.Mountpoint)
	}
	// TODO: allow empty fstype with default from distro
	if !slices.Contains(validPlainFSTypes, p.FSType) {
		return fmt.Errorf("unknown or invalid filesystem type (fs_type) for mountpoint %q: %s", p.Mountpoint, p.FSType)
	}
	if err := validateFilesystemType(p.Mountpoint, p.FSType); err != nil {
		return err
	}
	mountpoints[p.Mountpoint] = true
	return nil
}
// validateLVM validates an LVM volume group partition customization:
//   - the VG name is unique across the disk (vgnames is updated),
//   - no btrfs-only (subvolumes) or plain-only (label) properties are set,
//   - logical volume names are unique within the group,
//   - each LV is either a swap area (empty mountpoint) or has a valid,
//     unique, non-plain-only mountpoint with a valid filesystem type.
//
// Seen mountpoints are recorded in the shared mountpoints map.
func (p *PartitionCustomization) validateLVM(mountpoints, vgnames map[string]bool) error {
	if p.Name != "" && vgnames[p.Name] { // VGs with no name get autogenerated names
		return fmt.Errorf("duplicate LVM volume group name %q in partitioning customizations", p.Name)
	}
	// check for invalid property usage
	if len(p.Subvolumes) > 0 {
		return fmt.Errorf("subvolumes defined for LVM volume group (partition type \"lvm\")")
	}
	if p.Label != "" {
		return fmt.Errorf("label %q defined for LVM volume group (partition type \"lvm\")", p.Label)
	}
	vgnames[p.Name] = true
	lvnames := make(map[string]bool)
	for _, lv := range p.LogicalVolumes {
		if lv.Name != "" && lvnames[lv.Name] { // LVs with no name get autogenerated names
			return fmt.Errorf("duplicate LVM logical volume name %q in volume group %q in partitioning customizations", lv.Name, p.Name)
		}
		lvnames[lv.Name] = true

		if lv.FSType == "swap" {
			// make sure the mountpoint is empty and move to the next LV.
			// NOTE: this previously returned nil here, which silently skipped
			// validation of all logical volumes after the first swap LV.
			if lv.Mountpoint != "" {
				return fmt.Errorf("mountpoint for swap logical volume with name %q in volume group %q must be empty", lv.Name, p.Name)
			}
			continue
		}

		if err := validateMountpoint(lv.Mountpoint); err != nil {
			return fmt.Errorf("invalid logical volume customization: %w", err)
		}
		if mountpoints[lv.Mountpoint] {
			return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", lv.Mountpoint)
		}
		mountpoints[lv.Mountpoint] = true
		if slices.Contains(plainOnlyMountpoints, lv.Mountpoint) {
			return fmt.Errorf("invalid mountpoint %q for logical volume", lv.Mountpoint)
		}
		// TODO: allow empty fstype with default from distro
		if !slices.Contains(validPlainFSTypes, lv.FSType) {
			return fmt.Errorf("unknown or invalid filesystem type (fs_type) for logical volume with mountpoint %q: %s", lv.Mountpoint, lv.FSType)
		}
	}
	return nil
}
// validateBtrfs validates a btrfs volume partition customization: the volume
// itself has no mountpoint, at least one subvolume is defined, no LVM-only
// properties are set, and each subvolume has a unique non-empty name and a
// valid, unique, non-plain-only mountpoint. Seen mountpoints are recorded in
// the shared mountpoints map.
func (p *PartitionCustomization) validateBtrfs(mountpoints map[string]bool) error {
	if p.Mountpoint != "" {
		return fmt.Errorf(`"mountpoint" is not supported for btrfs volumes (only subvolumes can have mountpoints)`)
	}
	if len(p.Subvolumes) == 0 {
		return fmt.Errorf("btrfs volume requires subvolumes")
	}
	if len(p.LogicalVolumes) > 0 {
		return fmt.Errorf("LVM logical volumes defined for btrfs volume (partition type \"btrfs\")")
	}
	subvolnames := make(map[string]bool)
	for _, subvol := range p.Subvolumes {
		if subvol.Name == "" {
			return fmt.Errorf("btrfs subvolume with empty name in partitioning customizations")
		}
		if subvolnames[subvol.Name] {
			return fmt.Errorf("duplicate btrfs subvolume name %q in partitioning customizations", subvol.Name)
		}
		subvolnames[subvol.Name] = true

		if err := validateMountpoint(subvol.Mountpoint); err != nil {
			return fmt.Errorf("invalid btrfs subvolume customization: %w", err)
		}
		if mountpoints[subvol.Mountpoint] {
			return fmt.Errorf("duplicate mountpoint %q in partitioning customizations", subvol.Mountpoint)
		}
		if slices.Contains(plainOnlyMountpoints, subvol.Mountpoint) {
			return fmt.Errorf("invalid mountpoint %q for btrfs subvolume", subvol.Mountpoint)
		}
		mountpoints[subvol.Mountpoint] = true
	}
	return nil
}
// CheckDiskMountpointsPolicy checks if the mountpoints under a [DiskCustomization] are allowed by the policy.
// All policy violations are collected and returned as a single joined error.
func CheckDiskMountpointsPolicy(partitioning *DiskCustomization, mountpointAllowList *pathpolicy.PathPolicies) error {
	if partitioning == nil {
		return nil
	}

	// collect all mountpoints: plain partitions, LVM logical volumes, and
	// btrfs subvolumes
	var mountpoints []string
	for _, part := range partitioning.Partitions {
		if part.Mountpoint != "" {
			mountpoints = append(mountpoints, part.Mountpoint)
		}
		for _, lv := range part.LogicalVolumes {
			if lv.Mountpoint != "" {
				mountpoints = append(mountpoints, lv.Mountpoint)
			}
		}
		for _, subvol := range part.Subvolumes {
			mountpoints = append(mountpoints, subvol.Mountpoint)
		}
	}

	var errs []error
	for _, mp := range mountpoints {
		if err := mountpointAllowList.Check(mp); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("The following errors occurred while setting up custom mountpoints:\n%w", errors.Join(errs...))
	}

	return nil
}

View file

@ -0,0 +1,159 @@
package blueprint
import (
"encoding/json"
"fmt"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/pathpolicy"
)
// FilesystemCustomization defines a mountpoint and the minimum size of the
// filesystem that backs it.
type FilesystemCustomization struct {
	Mountpoint string `json:"mountpoint,omitempty" toml:"mountpoint,omitempty"`
	MinSize    uint64 `json:"minsize,omitempty" toml:"minsize,omitempty"`
	// Note: The TOML `size` tag has been deprecated in favor of `minsize`.
	// we check for it in the TOML unmarshaler and use it as `minsize`.
	// However due to the TOML marshaler implementation, we can omit adding
	// a field for this tag and get the benefit of not having to export it.
}
// UnmarshalTOML decodes a FilesystemCustomization from TOML. Both `size`
// (deprecated alias) and `minsize` are accepted as integers or data-size
// strings, but exactly one of them must be set and greater than zero.
func (fsc *FilesystemCustomization) UnmarshalTOML(data interface{}) error {
	d, _ := data.(map[string]interface{})

	mp, ok := d["mountpoint"].(string)
	if !ok {
		return fmt.Errorf("TOML unmarshal: mountpoint must be string, got %v of type %T", d["mountpoint"], d["mountpoint"])
	}
	fsc.Mountpoint = mp

	// parseSize converts an int64 or data-size string value under key to
	// bytes; a missing key yields 0.
	parseSize := func(key string) (uint64, error) {
		switch v := d[key].(type) {
		case int64:
			return uint64(v), nil
		case string:
			s, err := datasizes.Parse(v)
			if err != nil {
				return 0, fmt.Errorf("TOML unmarshal: %s is not valid filesystem size (%w)", key, err)
			}
			return s, nil
		case nil:
			return 0, nil
		default:
			return 0, fmt.Errorf("TOML unmarshal: %s must be integer or string, got %v of type %T", key, v, v)
		}
	}

	// `size` is an alias for `minsize`. We check for the `size` keyword
	// for backwards compatibility but discourage its use.
	size, err := parseSize("size")
	if err != nil {
		return err
	}
	minsize, err := parseSize("minsize")
	if err != nil {
		return err
	}

	switch {
	case size == 0 && minsize == 0:
		return fmt.Errorf("TOML unmarshal: minsize must be greater than 0, got %v", minsize)
	case size > 0 && minsize > 0:
		return fmt.Errorf("TOML unmarshal: size and minsize cannot both be set (size is an alias for minsize)")
	case size > 0:
		fsc.MinSize = size
	default:
		fsc.MinSize = minsize
	}
	return nil
}
// UnmarshalJSON decodes a FilesystemCustomization from JSON. minsize may be
// given as a number of bytes or as a data-size string (parsed with
// datasizes.Parse). Unlike the TOML unmarshaler, no `size` alias is accepted.
func (fsc *FilesystemCustomization) UnmarshalJSON(data []byte) error {
	var v interface{}
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	d, _ := v.(map[string]interface{})

	switch d["mountpoint"].(type) {
	case string:
		fsc.Mountpoint = d["mountpoint"].(string)
	default:
		return fmt.Errorf("JSON unmarshal: mountpoint must be string, got %v of type %T", d["mountpoint"], d["mountpoint"])
	}

	// The JSON specification only mentions float64 and Go defaults to it: https://go.dev/blog/json
	switch d["minsize"].(type) {
	case float64:
		// Note that it uses different key than the TOML version
		fsc.MinSize = uint64(d["minsize"].(float64))
	case string:
		size, err := datasizes.Parse(d["minsize"].(string))
		if err != nil {
			// report the key actually being parsed ("minsize", not "size"),
			// consistent with the TOML unmarshaler's message
			return fmt.Errorf("JSON unmarshal: minsize is not valid filesystem size (%w)", err)
		}
		fsc.MinSize = size
	default:
		return fmt.Errorf("JSON unmarshal: minsize must be float64 number or string, got %v of type %T", d["minsize"], d["minsize"])
	}

	return nil
}
// decodeSize takes an integer or string representing a data size (with a data
// suffix) and returns the uint64 representation. Negative numeric values are
// rejected; float values are truncated toward zero.
func decodeSize(size any) (uint64, error) {
	switch v := size.(type) {
	case string:
		return datasizes.Parse(v)
	case int64:
		if v < 0 {
			return 0, fmt.Errorf("cannot be negative")
		}
		return uint64(v), nil
	case float64:
		if v < 0 {
			return 0, fmt.Errorf("cannot be negative")
		}
		// TODO: emit warning of possible truncation?
		return uint64(v), nil
	case uint64:
		return v, nil
	}
	return 0, fmt.Errorf("failed to convert value \"%v\" to number", size)
}
// CheckMountpointsPolicy checks if the mountpoints are allowed by the policy.
// All disallowed mountpoints are collected and reported in a single error.
func CheckMountpointsPolicy(mountpoints []FilesystemCustomization, mountpointAllowList *pathpolicy.PathPolicies) error {
	var offending []string
	for _, m := range mountpoints {
		if mountpointAllowList.Check(m.Mountpoint) != nil {
			offending = append(offending, m.Mountpoint)
		}
	}
	if len(offending) == 0 {
		return nil
	}
	return fmt.Errorf("The following custom mountpoints are not supported %+q", offending)
}

View file

@ -0,0 +1,475 @@
package blueprint
import (
"encoding/json"
"fmt"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
"github.com/osbuild/blueprint/internal/common"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/pathpolicy"
)
// modeStringRE matches a three-digit octal mode with an optional leading
// zero (e.g. "644" or "0644"). Compiled once at package scope so the pattern
// is not re-compiled on every validation call.
var modeStringRE = regexp.MustCompile(`^0?[0-7]{3}$`)

// validateModeString checks that the given string is a valid mode octal number.
// The leading zero is optional. A descriptive error is returned for any other
// input.
func validateModeString(mode string) error {
	if modeStringRE.MatchString(mode) {
		return nil
	}
	return fmt.Errorf("invalid mode %s: must be an octal number", mode)
}
// DirectoryCustomization represents a directory to be created in the image
type DirectoryCustomization struct {
	// Absolute path to the directory
	Path string `json:"path" toml:"path"`
	// Owner of the directory specified as a string (user name), int64 (UID) or nil
	User interface{} `json:"user,omitempty" toml:"user,omitempty"`
	// Group of the directory specified as a string (group name), int64 (GID) or nil
	Group interface{} `json:"group,omitempty" toml:"group,omitempty"`
	// Permissions of the directory specified as an octal number
	Mode string `json:"mode,omitempty" toml:"mode,omitempty"`
	// EnsureParents ensures that all parent directories of the directory exist
	EnsureParents bool `json:"ensure_parents,omitempty" toml:"ensure_parents,omitempty"`
}
// UnmarshalTOML decodes a DirectoryCustomization from TOML data and validates
// the result by converting it to an fsnode.Directory.
func (d *DirectoryCustomization) UnmarshalTOML(data interface{}) error {
	var dir DirectoryCustomization
	dataMap, _ := data.(map[string]interface{})

	p, ok := dataMap["path"].(string)
	if !ok {
		return fmt.Errorf("UnmarshalTOML: path must be a string")
	}
	dir.Path = p

	switch user := dataMap["user"].(type) {
	case string, int64:
		dir.User = user
	case nil:
	default:
		return fmt.Errorf("UnmarshalTOML: user must be a string or an integer, got %T", user)
	}

	switch group := dataMap["group"].(type) {
	case string, int64:
		dir.Group = group
	case nil:
	default:
		return fmt.Errorf("UnmarshalTOML: group must be a string or an integer")
	}

	switch mode := dataMap["mode"].(type) {
	case string:
		dir.Mode = mode
	case nil:
	default:
		return fmt.Errorf("UnmarshalTOML: mode must be a string")
	}

	switch ensure := dataMap["ensure_parents"].(type) {
	case bool:
		dir.EnsureParents = ensure
	case nil:
	default:
		return fmt.Errorf("UnmarshalTOML: ensure_parents must be a bool")
	}

	// converting to fsnode.Directory validates all values together
	if _, err := dir.ToFsNodeDirectory(); err != nil {
		return err
	}
	*d = dir
	return nil
}
// UnmarshalJSON decodes a DirectoryCustomization from JSON, normalizes
// numeric user/group IDs from float64 to int64, and validates the result by
// converting it to an fsnode.Directory.
func (d *DirectoryCustomization) UnmarshalJSON(data []byte) error {
	type directoryCustomization DirectoryCustomization
	var dirPrivate directoryCustomization
	if err := json.Unmarshal(data, &dirPrivate); err != nil {
		return err
	}
	dir := DirectoryCustomization(dirPrivate)

	// JSON numbers decode as float64; convert whole numbers to int64 IDs
	toInt64 := func(val interface{}, kind string) (interface{}, error) {
		num, isFloat := val.(float64)
		if !isFloat {
			return val, nil
		}
		if num != float64(int64(num)) {
			return nil, fmt.Errorf("invalid %s %f: must be an integer", kind, num)
		}
		return int64(num), nil
	}

	var err error
	if dir.User, err = toInt64(dir.User, "user"); err != nil {
		return err
	}
	if dir.Group, err = toInt64(dir.Group, "group"); err != nil {
		return err
	}

	// converting to fsnode.Directory validates all values together
	if _, err := dir.ToFsNodeDirectory(); err != nil {
		return err
	}
	*d = dir
	return nil
}
// ToFsNodeDirectory converts the DirectoryCustomization to an fsnode.Directory
func (d DirectoryCustomization) ToFsNodeDirectory() (*fsnode.Directory, error) {
	var mode *os.FileMode
	if d.Mode != "" {
		if err := validateModeString(d.Mode); err != nil {
			return nil, err
		}
		modeNum, err := strconv.ParseUint(d.Mode, 8, 32)
		if err != nil {
			return nil, fmt.Errorf("invalid mode %s: %v", d.Mode, err)
		}
		// modeNum fits in 32 bits: ParseUint was called with bitSize=32
		/* #nosec G115 */
		mode = common.ToPtr(os.FileMode(modeNum))
	}
	return fsnode.NewDirectory(d.Path, mode, d.User, d.Group, d.EnsureParents)
}
// DirectoryCustomizationsToFsNodeDirectories converts a slice of DirectoryCustomizations
// to a slice of fsnode.Directories. All conversion errors are collected and
// reported together.
func DirectoryCustomizationsToFsNodeDirectories(dirs []DirectoryCustomization) ([]*fsnode.Directory, error) {
	if len(dirs) == 0 {
		return nil, nil
	}

	fsDirs := make([]*fsnode.Directory, 0, len(dirs))
	var errs []error
	for _, dir := range dirs {
		fsDir, err := dir.ToFsNodeDirectory()
		if err != nil {
			errs = append(errs, err)
		}
		fsDirs = append(fsDirs, fsDir)
	}
	if len(errs) > 0 {
		return nil, fmt.Errorf("invalid directory customizations: %v", errs)
	}
	return fsDirs, nil
}
// FileCustomization represents a file to be created in the image
type FileCustomization struct {
	// Absolute path to the file
	Path string `json:"path" toml:"path"`
	// Owner of the file specified as a string (user name), int64 (UID) or nil
	User interface{} `json:"user,omitempty" toml:"user,omitempty"`
	// Group of the file specified as a string (group name), int64 (GID) or nil
	Group interface{} `json:"group,omitempty" toml:"group,omitempty"`
	// Permissions of the file specified as an octal number
	Mode string `json:"mode,omitempty" toml:"mode,omitempty"`
	// Data is the file content in plain text
	Data string `json:"data,omitempty" toml:"data,omitempty"`
}
// UnmarshalTOML decodes a FileCustomization from TOML data and validates the
// result by converting it to an fsnode.File.
func (f *FileCustomization) UnmarshalTOML(data interface{}) error {
	var file FileCustomization
	dataMap, _ := data.(map[string]interface{})

	p, ok := dataMap["path"].(string)
	if !ok {
		return fmt.Errorf("UnmarshalTOML: path must be a string")
	}
	file.Path = p

	switch user := dataMap["user"].(type) {
	case string, int64:
		file.User = user
	case nil:
	default:
		return fmt.Errorf("UnmarshalTOML: user must be a string or an integer")
	}

	switch group := dataMap["group"].(type) {
	case string, int64:
		file.Group = group
	case nil:
	default:
		return fmt.Errorf("UnmarshalTOML: group must be a string or an integer")
	}

	switch mode := dataMap["mode"].(type) {
	case string:
		file.Mode = mode
	case nil:
	default:
		return fmt.Errorf("UnmarshalTOML: mode must be a string")
	}

	switch content := dataMap["data"].(type) {
	case string:
		file.Data = content
	case nil:
	default:
		return fmt.Errorf("UnmarshalTOML: data must be a string")
	}

	// converting to fsnode.File validates all values together
	if _, err := file.ToFsNodeFile(); err != nil {
		return err
	}
	*f = file
	return nil
}
// UnmarshalJSON decodes a FileCustomization from JSON, normalizes numeric
// user/group IDs from float64 to int64, and validates the result by
// converting it to an fsnode.File.
func (f *FileCustomization) UnmarshalJSON(data []byte) error {
	type fileCustomization FileCustomization
	var filePrivate fileCustomization
	if err := json.Unmarshal(data, &filePrivate); err != nil {
		return err
	}
	file := FileCustomization(filePrivate)

	// JSON numbers decode as float64; convert whole numbers to int64 IDs
	toInt64 := func(val interface{}, kind string) (interface{}, error) {
		num, isFloat := val.(float64)
		if !isFloat {
			return val, nil
		}
		if num != float64(int64(num)) {
			return nil, fmt.Errorf("invalid %s %f: must be an integer", kind, num)
		}
		return int64(num), nil
	}

	var err error
	if file.User, err = toInt64(file.User, "user"); err != nil {
		return err
	}
	if file.Group, err = toInt64(file.Group, "group"); err != nil {
		return err
	}

	// converting to fsnode.File validates all values together
	if _, err := file.ToFsNodeFile(); err != nil {
		return err
	}
	*f = file
	return nil
}
// ToFsNodeFile converts the FileCustomization to an fsnode.File
func (f FileCustomization) ToFsNodeFile() (*fsnode.File, error) {
	var data []byte
	if f.Data != "" {
		data = []byte(f.Data)
	}

	var mode *os.FileMode
	if f.Mode != "" {
		if err := validateModeString(f.Mode); err != nil {
			return nil, err
		}
		modeNum, err := strconv.ParseUint(f.Mode, 8, 32)
		if err != nil {
			return nil, fmt.Errorf("invalid mode %s: %v", f.Mode, err)
		}
		// modeNum fits in 32 bits: ParseUint was called with bitSize=32
		/* #nosec G115 */
		mode = common.ToPtr(os.FileMode(modeNum))
	}
	return fsnode.NewFile(f.Path, mode, f.User, f.Group, data)
}
// FileCustomizationsToFsNodeFiles converts a slice of FileCustomization to a slice of *fsnode.File.
// All conversion errors are collected and reported together.
func FileCustomizationsToFsNodeFiles(files []FileCustomization) ([]*fsnode.File, error) {
	if len(files) == 0 {
		return nil, nil
	}

	fsFiles := make([]*fsnode.File, 0, len(files))
	var errs []error
	for _, file := range files {
		fsFile, err := file.ToFsNodeFile()
		if err != nil {
			errs = append(errs, err)
		}
		fsFiles = append(fsFiles, fsFile)
	}
	if len(errs) > 0 {
		return nil, fmt.Errorf("invalid file customizations: %v", errs)
	}
	return fsFiles, nil
}
// ValidateDirFileCustomizations validates the given Directory and File customizations.
// If the customizations are invalid, an error is returned. Otherwise, nil is returned.
//
// It currently ensures that:
// - No file path is a prefix of another file or directory path
// - There are no duplicate file or directory paths in the customizations
func ValidateDirFileCustomizations(dirs []DirectoryCustomization, files []FileCustomization) error {
	// map of every customization keyed by path; used below to look up
	// whether a node's ancestor is a file or a directory
	fsNodesMap := make(map[string]interface{}, len(dirs)+len(files))
	nodesPaths := make([]string, 0, len(dirs)+len(files))

	// First check for duplicate paths
	duplicatePaths := make([]string, 0)
	for _, dir := range dirs {
		if _, ok := fsNodesMap[dir.Path]; ok {
			duplicatePaths = append(duplicatePaths, dir.Path)
		}
		fsNodesMap[dir.Path] = dir
		nodesPaths = append(nodesPaths, dir.Path)
	}
	for _, file := range files {
		if _, ok := fsNodesMap[file.Path]; ok {
			duplicatePaths = append(duplicatePaths, file.Path)
		}
		fsNodesMap[file.Path] = file
		nodesPaths = append(nodesPaths, file.Path)
	}

	// There is no point in continuing if there are duplicate paths,
	// since the fsNodesMap will not be valid.
	if len(duplicatePaths) > 0 {
		return fmt.Errorf("duplicate files / directory customization paths: %v", duplicatePaths)
	}

	invalidFSNodes := make([]string, 0)
	// checkedPaths records paths whose ancestor chain was already verified,
	// so shared ancestors are walked only once
	checkedPaths := make(map[string]bool)

	// Sort the paths so that we always check the longest paths first. This
	// ensures that we don't check a parent path before we check the child
	// path. Reverse sort the slice based on directory depth.
	sort.Slice(nodesPaths, func(i, j int) bool {
		return strings.Count(nodesPaths[i], "/") > strings.Count(nodesPaths[j], "/")
	})

	for _, nodePath := range nodesPaths {
		// Skip paths that we have already checked
		if checkedPaths[nodePath] {
			continue
		}

		// Check all parent paths of the current path. If any of them have
		// already been checked, then we do not need to check them again.
		// This is because we always check the longest paths first. If a parent
		// path exists in the filesystem nodes map and it is a File,
		// then it is an error because it is a parent of a Directory or File.
		// Parent paths can be only Directories.
		parentPath := nodePath
		for {
			parentPath = path.Dir(parentPath)
			// "." is returned only when the path is relative and we reached
			// the root directory. This should never happen because File
			// and Directory customization paths are validated as part of
			// the unmarshalling process from JSON and TOML.
			if parentPath == "." {
				panic("filesystem node has relative path set.")
			}
			if parentPath == "/" {
				break
			}
			if checkedPaths[parentPath] {
				break
			}

			// If the node is not a Directory, then it is an error because
			// it is a parent of a Directory or File.
			if node, ok := fsNodesMap[parentPath]; ok {
				switch node.(type) {
				case DirectoryCustomization:
					// a directory is a valid parent; nothing to do
					break
				case FileCustomization:
					invalidFSNodes = append(invalidFSNodes, nodePath)
				default:
					panic(fmt.Sprintf("unexpected filesystem node customization type: %T", node))
				}
			}
			checkedPaths[parentPath] = true
		}
		checkedPaths[nodePath] = true
	}

	if len(invalidFSNodes) > 0 {
		return fmt.Errorf("the following filesystem nodes are parents of another node and are not directories: %s", invalidFSNodes)
	}
	return nil
}
// CheckFileCustomizationsPolicy checks if the given File customizations are allowed by the path policy.
// If any of the customizations are not allowed by the path policy, an error is returned. Otherwise, nil is returned.
func CheckFileCustomizationsPolicy(files []FileCustomization, pathPolicy *pathpolicy.PathPolicies) error {
	var offending []string
	for _, file := range files {
		if pathPolicy.Check(file.Path) != nil {
			offending = append(offending, file.Path)
		}
	}
	if len(offending) == 0 {
		return nil
	}
	return fmt.Errorf("the following custom files are not allowed: %+q", offending)
}
// CheckDirectoryCustomizationsPolicy checks if the given Directory customizations are allowed by the path policy.
// If any of the customizations are not allowed by the path policy, an error is returned. Otherwise, nil is returned.
func CheckDirectoryCustomizationsPolicy(dirs []DirectoryCustomization, pathPolicy *pathpolicy.PathPolicies) error {
	var offending []string
	for _, dir := range dirs {
		if pathPolicy.Check(dir.Path) != nil {
			offending = append(offending, dir.Path)
		}
	}
	if len(offending) == 0 {
		return nil
	}
	return fmt.Errorf("the following custom directories are not allowed: %+q", offending)
}

View file

@ -0,0 +1,17 @@
package blueprint
// InstallerCustomization holds installer-related customizations. The exact
// semantics of each option are interpreted by the image type that consumes
// them.
type InstallerCustomization struct {
	Unattended   bool             `json:"unattended,omitempty" toml:"unattended,omitempty"`
	SudoNopasswd []string         `json:"sudo-nopasswd,omitempty" toml:"sudo-nopasswd,omitempty"`
	Kickstart    *Kickstart       `json:"kickstart,omitempty" toml:"kickstart,omitempty"`
	Modules      *AnacondaModules `json:"modules,omitempty" toml:"modules,omitempty"`
}

// Kickstart carries raw kickstart contents as a string.
type Kickstart struct {
	Contents string `json:"contents" toml:"contents"`
}

// AnacondaModules lists module names to enable or disable.
// NOTE(review): presumably Anaconda module identifiers — confirm with consumers.
type AnacondaModules struct {
	Enable  []string `json:"enable,omitempty" toml:"enable,omitempty"`
	Disable []string `json:"disable,omitempty" toml:"disable,omitempty"`
}

View file

@ -0,0 +1,154 @@
package blueprint
import (
"fmt"
"net/url"
"regexp"
"strings"
"github.com/osbuild/blueprint/internal/common"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/rpmmd"
)
// RepositoryCustomization defines a custom repository to configure in the
// image. See validateCustomRepository for the validity rules and
// customRepoToRepoConfig for the conversion to rpmmd.RepoConfig.
type RepositoryCustomization struct {
	// Id is required and is used to derive the default filename.
	Id string `json:"id" toml:"id"`
	// At least one of BaseURLs, Metalink or Mirrorlist must be set.
	BaseURLs []string `json:"baseurls,omitempty" toml:"baseurls,omitempty"`
	// GPGKeys may contain inline PGP key blocks or URLs pointing to keys.
	GPGKeys        []string `json:"gpgkeys,omitempty" toml:"gpgkeys,omitempty"`
	Metalink       string   `json:"metalink,omitempty" toml:"metalink,omitempty"`
	Mirrorlist     string   `json:"mirrorlist,omitempty" toml:"mirrorlist,omitempty"`
	Name           string   `json:"name,omitempty" toml:"name,omitempty"`
	Priority       *int     `json:"priority,omitempty" toml:"priority,omitempty"`
	Enabled        *bool    `json:"enabled,omitempty" toml:"enabled,omitempty"`
	GPGCheck       *bool    `json:"gpgcheck,omitempty" toml:"gpgcheck,omitempty"`
	RepoGPGCheck   *bool    `json:"repo_gpgcheck,omitempty" toml:"repo_gpgcheck,omitempty"`
	SSLVerify      *bool    `json:"sslverify,omitempty" toml:"sslverify,omitempty"`
	ModuleHotfixes *bool    `json:"module_hotfixes,omitempty" toml:"module_hotfixes,omitempty"`
	// Filename must match repoFilenameRegex; when empty, "<Id>.repo" is used.
	Filename string `json:"filename,omitempty" toml:"filename,omitempty"`

	// When set the repository will be used during the depsolve of
	// payload repositories to install packages from it.
	InstallFrom bool `json:"install_from" toml:"install_from"`
}
// repoFilenameRegex is the pattern a custom repo file name must match:
// 1-250 word, dot, or dash characters followed by the ".repo" suffix.
const repoFilenameRegex = "^[\\w.-]{1,250}\\.repo$"

// repoFilenameRE is compiled once at package scope so repository validation
// does not recompile the pattern on every call.
var repoFilenameRE = regexp.MustCompile(repoFilenameRegex)

// validateCustomRepository checks that a custom repository has an ID, a valid
// filename, at least one source (base URL, mirrorlist, or metalink), and
// consistent GPG settings: keys must be present when gpgcheck is enabled and
// each key must be either an inline PGP key block or a URL.
func validateCustomRepository(repo *RepositoryCustomization) error {
	if repo.Id == "" {
		return fmt.Errorf("Repository ID is required")
	}

	if !repoFilenameRE.MatchString(repo.getFilename()) {
		return fmt.Errorf("Repository filename %q is invalid", repo.getFilename())
	}

	if len(repo.BaseURLs) == 0 && repo.Mirrorlist == "" && repo.Metalink == "" {
		return fmt.Errorf("Repository base URL, mirrorlist or metalink is required")
	}

	if repo.GPGCheck != nil && *repo.GPGCheck && len(repo.GPGKeys) == 0 {
		return fmt.Errorf("Repository gpg check is set to true but no gpg keys are provided")
	}

	for _, key := range repo.GPGKeys {
		// check for a valid GPG key prefix & contains GPG suffix
		keyIsGPGKey := strings.HasPrefix(key, "-----BEGIN PGP PUBLIC KEY BLOCK-----") && strings.Contains(key, "-----END PGP PUBLIC KEY BLOCK-----")

		// check for a valid URL
		keyIsURL := false
		_, err := url.ParseRequestURI(key)
		if err == nil {
			keyIsURL = true
		}

		if !keyIsGPGKey && !keyIsURL {
			return fmt.Errorf("Repository gpg key is not a valid URL or a valid gpg key")
		}
	}

	return nil
}
// getFilename returns the repo file name for this customization: "<Id>.repo"
// when Filename is unset, otherwise Filename with a ".repo" suffix appended
// if it is missing.
func (rc *RepositoryCustomization) getFilename() string {
	switch {
	case rc.Filename == "":
		return rc.Id + ".repo"
	case !strings.HasSuffix(rc.Filename, ".repo"):
		return rc.Filename + ".repo"
	default:
		return rc.Filename
	}
}
// RepoCustomizationsInstallFromOnly converts only those repositories that
// have InstallFrom set to rpmmd.RepoConfig entries.
func RepoCustomizationsInstallFromOnly(repos []RepositoryCustomization) []rpmmd.RepoConfig {
	var res []rpmmd.RepoConfig
	for _, repo := range repos {
		if repo.InstallFrom {
			res = append(res, repo.customRepoToRepoConfig())
		}
	}
	return res
}
// RepoCustomizationsToRepoConfigAndGPGKeyFiles converts repository
// customizations to a map of repo configs keyed by repo file name, plus the
// key files for any inline GPG keys. Inline keys are written under
// /etc/pki/rpm-gpg and their entries replaced with file:// URLs.
func RepoCustomizationsToRepoConfigAndGPGKeyFiles(repos []RepositoryCustomization) (map[string][]rpmmd.RepoConfig, []*fsnode.File, error) {
	if len(repos) == 0 {
		return nil, nil, nil
	}

	repoMap := make(map[string][]rpmmd.RepoConfig, len(repos))
	var gpgKeyFiles []*fsnode.File
	for _, repo := range repos {
		convertedRepo := repo.customRepoToRepoConfig()

		for idx, gpgkey := range repo.GPGKeys {
			if _, err := url.ParseRequestURI(gpgkey); err == nil {
				// the key is already a URL; keep it as-is
				continue
			}
			// inline key: point the config at a file and emit that file
			keyPath := fmt.Sprintf("/etc/pki/rpm-gpg/RPM-GPG-KEY-%s-%d", repo.Id, idx)
			convertedRepo.GPGKeys[idx] = fmt.Sprintf("file://%s", keyPath)
			keyFile, err := fsnode.NewFile(keyPath, nil, nil, nil, []byte(gpgkey))
			if err != nil {
				return nil, nil, err
			}
			gpgKeyFiles = append(gpgKeyFiles, keyFile)
		}

		filename := repo.getFilename()
		repoMap[filename] = append(repoMap[filename], convertedRepo)
	}

	return repoMap, gpgKeyFiles, nil
}
// customRepoToRepoConfig converts the customization to an rpmmd.RepoConfig.
// BaseURLs and GPGKeys are copied so the config does not alias the
// customization's backing arrays; SSLVerify is inverted into IgnoreSSL.
func (repo RepositoryCustomization) customRepoToRepoConfig() rpmmd.RepoConfig {
	urls := make([]string, len(repo.BaseURLs))
	copy(urls, repo.BaseURLs)
	keys := make([]string, len(repo.GPGKeys))
	copy(keys, repo.GPGKeys)

	cfg := rpmmd.RepoConfig{
		Id:             repo.Id,
		BaseURLs:       urls,
		GPGKeys:        keys,
		Name:           repo.Name,
		Metalink:       repo.Metalink,
		MirrorList:     repo.Mirrorlist,
		CheckGPG:       repo.GPGCheck,
		CheckRepoGPG:   repo.RepoGPGCheck,
		Priority:       repo.Priority,
		ModuleHotfixes: repo.ModuleHotfixes,
		Enabled:        repo.Enabled,
	}
	if repo.SSLVerify != nil {
		cfg.IgnoreSSL = common.ToPtr(!*repo.SSLVerify)
	}
	return cfg
}

View file

@ -0,0 +1,36 @@
package blueprint
// SubManRHSMConfig maps to the Subscription Manager [rhsm] section of
// rhsm.conf.
type SubManRHSMConfig struct {
	ManageRepos          *bool `json:"manage_repos,omitempty" toml:"manage_repos,omitempty"`
	AutoEnableYumPlugins *bool `json:"auto_enable_yum_plugins,omitempty" toml:"auto_enable_yum_plugins,omitempty"`
}

// SubManRHSMCertdConfig maps to the Subscription Manager [rhsmcertd] section
// of rhsm.conf.
type SubManRHSMCertdConfig struct {
	AutoRegistration *bool `json:"auto_registration,omitempty" toml:"auto_registration,omitempty"`
}

// SubManConfig is the Subscription Manager 'rhsm.conf' configuration,
// grouping the [rhsm] and [rhsmcertd] sections.
type SubManConfig struct {
	RHSMConfig      *SubManRHSMConfig      `json:"rhsm,omitempty" toml:"rhsm,omitempty"`
	RHSMCertdConfig *SubManRHSMCertdConfig `json:"rhsmcertd,omitempty" toml:"rhsmcertd,omitempty"`
}

// DNFPluginConfig toggles a single DNF plugin.
type DNFPluginConfig struct {
	Enabled *bool `json:"enabled,omitempty" toml:"enabled,omitempty"`
}

// SubManDNFPluginsConfig configures the subscription-related DNF plugins.
type SubManDNFPluginsConfig struct {
	ProductID           *DNFPluginConfig `json:"product_id,omitempty" toml:"product_id,omitempty"`
	SubscriptionManager *DNFPluginConfig `json:"subscription_manager,omitempty" toml:"subscription_manager,omitempty"`
}

// RHSMConfig combines DNF plugin and Subscription Manager configuration.
type RHSMConfig struct {
	DNFPlugins          *SubManDNFPluginsConfig `json:"dnf_plugins,omitempty" toml:"dnf_plugins,omitempty"`
	SubscriptionManager *SubManConfig           `json:"subscription_manager,omitempty" toml:"subscription_manager,omitempty"`
}

// RHSMCustomization is the top-level RHSM customization entry.
type RHSMCustomization struct {
	Config *RHSMConfig `json:"config,omitempty" toml:"config,omitempty"`
}

View file

@ -0,0 +1,10 @@
package blueprint
// RPMImportKeys configures GPG keys to import into the RPM database.
type RPMImportKeys struct {
	// File paths in the image to import keys from
	Files []string `json:"files,omitempty" toml:"files,omitempty"`
}

// RPMCustomization collects RPM-related customizations.
type RPMCustomization struct {
	ImportKeys *RPMImportKeys `json:"import_keys,omitempty" toml:"import_keys,omitempty"`
}

View file

@ -0,0 +1,24 @@
package blueprint
import (
"encoding/json"
"fmt"
)
// XXX: move to internal/common ?

// unmarshalTOMLviaJSON funnels TOML-decoded data through a type's JSON
// unmarshaler. The TOML library hands us a pre-processed "any" value rather
// than raw bytes, so the only way to reuse the JSON unmarshaling code is to
// re-encode that value as JSON and feed it to UnmarshalJSON. This leaks
// JSON-flavored errors, which is a known wart (TOML support will go away
// long term anyway).
func unmarshalTOMLviaJSON(u json.Unmarshaler, data any) error {
	blob, err := json.Marshal(data)
	if err != nil {
		return fmt.Errorf("error unmarshaling TOML data %v: %w", data, err)
	}
	if err = u.UnmarshalJSON(blob); err != nil {
		return fmt.Errorf("error decoding TOML %v: %w", data, err)
	}
	return nil
}

View file

@ -15,6 +15,7 @@ type ImageOptions struct {
BaseUrl string `json:"base_url"`
Insights bool `json:"insights"`
Rhc bool `json:"rhc"`
Proxy string `json:"proxy"`
}
type RHSMStatus string

View file

@ -11,10 +11,10 @@ import (
const DefaultBtrfsCompression = "zstd:1"
type Btrfs struct {
UUID string
Label string
Mountpoint string
Subvolumes []BtrfsSubvolume
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
Label string `json:"label,omitempty" yaml:"label,omitempty"`
Mountpoint string `json:"mountpoint,omitempty" yaml:"mountpoint,omitempty"`
Subvolumes []BtrfsSubvolume `json:"subvolumes,omitempty" yaml:"subvolumes,omitempty"`
}
func init() {
@ -107,15 +107,15 @@ func (b *Btrfs) minSize(size uint64) uint64 {
}
type BtrfsSubvolume struct {
Name string
Size uint64
Mountpoint string
GroupID uint64
Compress string
ReadOnly bool
Name string `json:"name" yaml:"name"`
Size uint64 `json:"size" yaml:"size"`
Mountpoint string `json:"mountpoint,omitempty" yaml:"mountpoint,omitempty"`
GroupID uint64 `json:"group_id,omitempty" yaml:"group_id,omitempty"`
Compress string `json:"compress,omitempty" yaml:"compress,omitempty"`
ReadOnly bool `json:"read_only,omitempty" yaml:"read_only,omitempty"`
// UUID of the parent volume
UUID string
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
}
func (bs *BtrfsSubvolume) Clone() Entity {

View file

@ -24,11 +24,11 @@ import (
"io"
"math/rand"
"reflect"
"slices"
"strings"
"slices"
"github.com/google/uuid"
"github.com/osbuild/images/pkg/arch"
)
@ -248,6 +248,10 @@ func (t PartitionTableType) MarshalJSON() ([]byte, error) {
return json.Marshal(t.String())
}
// MarshalYAML serializes the partition table type as its string name,
// mirroring MarshalJSON.
func (t PartitionTableType) MarshalYAML() (interface{}, error) {
	return t.String(), nil
}
func (t *PartitionTableType) UnmarshalJSON(data []byte) error {
var s string
if err := json.Unmarshal(data, &s); err != nil {

View file

@ -9,19 +9,19 @@ import (
// Filesystem related functions
type Filesystem struct {
Type string `json:"type"`
Type string `json:"type" yaml:"type"`
// ID of the filesystem, vfat doesn't use traditional UUIDs, therefore this
// is just a string.
UUID string `json:"uuid,omitempty"`
Label string `json:"label,omitempty"`
Mountpoint string `json:"mountpoint,omitempty"`
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
Label string `json:"label,omitempty" yaml:"label,omitempty"`
Mountpoint string `json:"mountpoint,omitempty" yaml:"mountpoint,omitempty"`
// The fourth field of fstab(5); fs_mntops
FSTabOptions string `json:"fstab_options,omitempty"`
FSTabOptions string `json:"fstab_options,omitempty" yaml:"fstab_options,omitempty"`
// The fifth field of fstab(5); fs_freq
FSTabFreq uint64 `json:"fstab_freq,omitempty"`
FSTabFreq uint64 `json:"fstab_freq,omitempty" yaml:"fstab_freq,omitempty"`
// The sixth field of fstab(5); fs_passno
FSTabPassNo uint64 `json:"fstab_passno,omitempty"`
FSTabPassNo uint64 `json:"fstab_passno,omitempty" yaml:"fstab_passno,omitempty"`
}
func init() {

View file

@ -1,6 +1,7 @@
package disk
import (
"encoding/json"
"fmt"
"math/rand"
"reflect"
@ -13,41 +14,41 @@ import (
// Argon2id defines parameters for the key derivation function for LUKS.
type Argon2id struct {
// Number of iterations to perform.
Iterations uint
Iterations uint `json:"iterations,omitempty" yaml:"iterations,omitempty"`
// Amount of memory to use (in KiB).
Memory uint
Memory uint `json:"memory,omitempty" yaml:"memory,omitempty"`
// Degree of parallelism (i.e. number of threads).
Parallelism uint
Parallelism uint `json:"parallelism,omitempty" yaml:"parallelism,omitempty"`
}
// ClevisBind defines parameters for binding a LUKS device with a given policy.
type ClevisBind struct {
Pin string
Policy string
Pin string `json:"pin,omitempty" yaml:"pin,omitempty"`
Policy string `json:"policy,omitempty" yaml:"policy,omitempty"`
// If enabled, the passphrase will be removed from the LUKS device at the
// end of the build (using the org.osbuild.luks2.remove-key stage).
RemovePassphrase bool
RemovePassphrase bool `json:"remove_passphrase,omitempty" yaml:"remove_passphrase,omitempty"`
}
// LUKSContainer represents a LUKS encrypted volume.
type LUKSContainer struct {
Passphrase string
UUID string
Cipher string
Label string
Subsystem string
SectorSize uint64
Passphrase string `json:"passphrase,omitempty" yaml:"passphrase,omitempty"`
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
Cipher string `json:"cipher,omitempty" yaml:"cipher,omitempty"`
Label string `json:"label,omitempty" yaml:"label,omitempty"`
Subsystem string `json:"subsystem,omitempty" yaml:"subsystem,omitempty"`
SectorSize uint64 `json:"sector_size,omitempty" yaml:"sector_size,omitempty"`
// The password-based key derivation function's parameters.
PBKDF Argon2id
PBKDF Argon2id `json:"pbkdf,omitempty" yaml:"pbkdf,omitempty"`
// Parameters for binding the LUKS device.
Clevis *ClevisBind
Clevis *ClevisBind `json:"clevis,omitempty" yaml:"clevis,omitempty"`
Payload Entity
Payload Entity `json:"payload,omitempty" yaml:"payload,omitempty"`
}
func init() {
@ -131,3 +132,24 @@ func (lc *LUKSContainer) minSize(size uint64) uint64 {
}
return minSize
}
// UnmarshalJSON decodes a LUKSContainer from JSON. The polymorphic Payload is
// excluded from the strict decode of the plain fields (captured here as raw
// JSON together with its "payload_type" discriminator) and reconstructed
// separately via unmarshalJSONPayload.
func (lc *LUKSContainer) UnmarshalJSON(data []byte) (err error) {
	// keep in sync with lvm.go,partition.go,luks.go
	type alias LUKSContainer
	var withoutPayload struct {
		alias
		Payload     json.RawMessage `json:"payload" yaml:"payload"`
		PayloadType string          `json:"payload_type" yaml:"payload_type"`
	}
	if err := jsonUnmarshalStrict(data, &withoutPayload); err != nil {
		return fmt.Errorf("cannot unmarshal %q: %w", data, err)
	}
	*lc = LUKSContainer(withoutPayload.alias)
	lc.Payload, err = unmarshalJSONPayload(data)
	return err
}

// UnmarshalYAML decodes YAML by converting it to JSON and reusing
// UnmarshalJSON (see unmarshalYAMLviaJSON).
func (lc *LUKSContainer) UnmarshalYAML(unmarshal func(any) error) error {
	return unmarshalYAMLviaJSON(lc, unmarshal)
}

View file

@ -1,6 +1,7 @@
package disk
import (
"encoding/json"
"fmt"
"reflect"
"strings"
@ -13,10 +14,10 @@ import (
const LVMDefaultExtentSize = 4 * datasizes.MebiByte
type LVMVolumeGroup struct {
Name string
Description string
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Description string `json:"description,omitempty" yaml:"description,omitempty"`
LogicalVolumes []LVMLogicalVolume
LogicalVolumes []LVMLogicalVolume `json:"logical_volumes,omitempty" yaml:"logical_volumes,omitempty"`
}
func init() {
@ -174,10 +175,20 @@ func (vg *LVMVolumeGroup) minSize(size uint64) uint64 {
return vg.AlignUp(size)
}
// UnmarshalJSON decodes an LVMVolumeGroup from JSON. A local alias type is
// used so that decoding does not recurse back into this method.
func (vg *LVMVolumeGroup) UnmarshalJSON(data []byte) error {
	type alias LVMVolumeGroup
	var tmp alias
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	*vg = LVMVolumeGroup(tmp)
	return nil
}
type LVMLogicalVolume struct {
Name string
Size uint64
Payload Entity
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Size uint64 `json:"size,omitempty" yaml:"size,omitempty"`
Payload Entity `json:"payload,omitempty" yaml:"payload,omitempty"`
}
func (lv *LVMLogicalVolume) Clone() Entity {
@ -232,3 +243,24 @@ func lvname(path string) string {
path = strings.TrimLeft(path, "/")
return strings.ReplaceAll(path, "/", "_") + "lv"
}
// UnmarshalJSON decodes an LVMLogicalVolume whose "payload" member is
// polymorphic: the concrete Go type of the payload is selected via the
// sibling "payload_type" field (see unmarshalJSONPayload).
func (lv *LVMLogicalVolume) UnmarshalJSON(data []byte) (err error) {
	// keep in sync with lvm.go,partition.go,luks.go
	// the alias type sheds this UnmarshalJSON method so the strict
	// decoder below does not recurse into it
	type alias LVMLogicalVolume
	var withoutPayload struct {
		alias
		Payload     json.RawMessage `json:"payload" yaml:"payload"`
		PayloadType string          `json:"payload_type" yaml:"payload_type"`
	}
	// strict decode: unknown fields in data are an error
	if err := jsonUnmarshalStrict(data, &withoutPayload); err != nil {
		return fmt.Errorf("cannot unmarshal %q: %w", data, err)
	}
	*lv = LVMLogicalVolume(withoutPayload.alias)
	// decode the payload separately, dispatching on "payload_type"
	lv.Payload, err = unmarshalJSONPayload(data)
	return err
}
// UnmarshalYAML decodes via the JSON unmarshaler so the payload-dispatch
// logic in UnmarshalJSON is not duplicated for YAML.
func (lv *LVMLogicalVolume) UnmarshalYAML(unmarshal func(any) error) error {
	return unmarshalYAMLviaJSON(lv, unmarshal)
}

View file

@ -1,28 +1,26 @@
package disk
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
)
type Partition struct {
// Start of the partition in bytes
Start uint64 `json:"start"`
Start uint64 `json:"start,omitempty" yaml:"start,omitempty"`
// Size of the partition in bytes
Size uint64 `json:"size"`
Size uint64 `json:"size" yaml:"size"`
// Partition type, e.g. 0x83 for MBR or a UUID for gpt
Type string `json:"type,omitempty"`
Type string `json:"type,omitempty" yaml:"type,omitempty"`
// `Legacy BIOS bootable` (GPT) or `active` (DOS) flag
Bootable bool `json:"bootable,omitempty"`
Bootable bool `json:"bootable,omitempty" yaml:"bootable,omitempty"`
// ID of the partition, dos doesn't use traditional UUIDs, therefore this
// is just a string.
UUID string `json:"uuid,omitempty"`
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
// If nil, the partition is raw; It doesn't contain a payload.
Payload PayloadEntity `json:"payload,omitempty"`
Payload PayloadEntity `json:"payload,omitempty" yaml:"payload,omitempty"`
}
func (p *Partition) Clone() Entity {
@ -105,14 +103,14 @@ func (p *Partition) IsPReP() bool {
func (p *Partition) MarshalJSON() ([]byte, error) {
type partAlias Partition
entityName := "no-payload"
var entityName string
if p.Payload != nil {
entityName = p.Payload.EntityName()
}
partWithPayloadType := struct {
partAlias
PayloadType string `json:"payload_type,omitempty"`
PayloadType string `json:"payload_type,omitempty" yaml:"payload_type,omitempty"`
}{
partAlias(*p),
entityName,
@ -121,36 +119,23 @@ func (p *Partition) MarshalJSON() ([]byte, error) {
return json.Marshal(partWithPayloadType)
}
func (p *Partition) UnmarshalJSON(data []byte) error {
type partAlias Partition
var partWithoutPayload struct {
partAlias
Payload json.RawMessage `json:"payload"`
PayloadType string `json:"payload_type,omitempty"`
func (p *Partition) UnmarshalJSON(data []byte) (err error) {
// keep in sync with lvm.go,partition.go,luks.go
type alias Partition
var withoutPayload struct {
alias
Payload json.RawMessage `json:"payload" yaml:"payload"`
PayloadType string `json:"payload_type" yaml:"payload_type"`
}
if err := jsonUnmarshalStrict(data, &withoutPayload); err != nil {
return fmt.Errorf("cannot unmarshal %q: %w", data, err)
}
*p = Partition(withoutPayload.alias)
dec := json.NewDecoder(bytes.NewBuffer(data))
if err := dec.Decode(&partWithoutPayload); err != nil {
return fmt.Errorf("cannot build partition from %q: %w", data, err)
}
*p = Partition(partWithoutPayload.partAlias)
// no payload, e.g. bios partition
if partWithoutPayload.PayloadType == "no-payload" {
return nil
}
entType := payloadEntityMap[partWithoutPayload.PayloadType]
if entType == nil {
return fmt.Errorf("cannot build partition from %q: unknown payload %q", data, partWithoutPayload.PayloadType)
}
entValP := reflect.New(entType).Elem().Addr()
ent := entValP.Interface()
if err := json.Unmarshal(partWithoutPayload.Payload, &ent); err != nil {
return err
}
p.Payload = ent.(PayloadEntity)
return nil
p.Payload, err = unmarshalJSONPayload(data)
return err
}
// UnmarshalYAML decodes via the JSON unmarshaler so the payload-dispatch
// logic in UnmarshalJSON is not duplicated for YAML.
// NOTE: receiver renamed from "t" to "p" for consistency with every
// other *Partition method in this file.
func (p *Partition) UnmarshalYAML(unmarshal func(any) error) error {
	return unmarshalYAMLviaJSON(p, unmarshal)
}

View file

@ -15,19 +15,19 @@ import (
type PartitionTable struct {
// Size of the disk (in bytes).
Size uint64 `json:"size"`
Size uint64 `json:"size,omitempty" yaml:"size,omitempty"`
// Unique identifier of the partition table (GPT only).
UUID string `json:"uuid,omitempty"`
UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
// Partition table type, e.g. dos, gpt.
Type PartitionTableType `json:"type"`
Partitions []Partition `json:"partitions"`
Type PartitionTableType `json:"type" yaml:"type"`
Partitions []Partition `json:"partitions" yaml:"partitions"`
// Sector size in bytes
SectorSize uint64 `json:"sector_size,omitempty"`
SectorSize uint64 `json:"sector_size,omitempty" yaml:"sector_size,omitempty"`
// Extra space at the end of the partition table (sectors)
ExtraPadding uint64 `json:"extra_padding,omitempty"`
ExtraPadding uint64 `json:"extra_padding,omitempty" yaml:"extra_padding,omitempty"`
// Starting offset of the first partition in the table (Mb)
StartOffset uint64 `json:"start_offset,omitempty"`
StartOffset uint64 `json:"start_offset,omitempty" yaml:"start_offset,omitempty"`
}
type PartitioningMode string

View file

@ -1,8 +1,10 @@
package disk
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
)
// unmarshalYAMLviaJSON unmarshals via the JSON interface, this avoids code
@ -22,3 +24,35 @@ func unmarshalYAMLviaJSON(u json.Unmarshaler, unmarshal func(any) error) error {
}
return nil
}
// unmarshalJSONPayload peeks at the "payload_type" discriminator in data
// and strictly decodes the raw "payload" member into the matching
// PayloadEntity registered in payloadEntityMap. It returns (nil, nil)
// when the input carries no payload at all.
func unmarshalJSONPayload(data []byte) (PayloadEntity, error) {
	var payload struct {
		Payload     json.RawMessage `json:"payload"`
		PayloadType string          `json:"payload_type,omitempty"`
	}
	if err := json.Unmarshal(data, &payload); err != nil {
		return nil, fmt.Errorf("cannot peek payload: %w", err)
	}
	if payload.PayloadType == "" {
		// a payload without a type discriminator is an error; no
		// payload at all is fine (e.g. raw/bios partitions)
		if len(payload.Payload) > 0 {
			return nil, fmt.Errorf("cannot build payload: empty payload type but payload is: %q", payload.Payload)
		}
		return nil, nil
	}
	entType := payloadEntityMap[payload.PayloadType]
	if entType == nil {
		return nil, fmt.Errorf("cannot build payload from %q: unknown payload type %q", data, payload.PayloadType)
	}
	// reflect.New already yields an addressable *T value; the previous
	// .Elem().Addr() round-trip was redundant
	ent := reflect.New(entType).Interface()
	if err := jsonUnmarshalStrict(payload.Payload, &ent); err != nil {
		return nil, fmt.Errorf("cannot decode payload for %q: %w", data, err)
	}
	return ent.(PayloadEntity), nil
}
// jsonUnmarshalStrict works like json.Unmarshal but rejects any field in
// data that has no counterpart in the target value v.
func jsonUnmarshalStrict(data []byte, v any) error {
	decoder := json.NewDecoder(bytes.NewReader(data))
	decoder.DisallowUnknownFields()
	return decoder.Decode(&v)
}

View file

@ -12,23 +12,288 @@
- "geolite2-country"
- "plymouth"
partitioning:
ids:
- &prep_partition_dosid "41"
- &filesystem_linux_dosid "83"
- &fat16_bdosid "06"
guids:
- &bios_boot_partition_guid "21686148-6449-6E6F-744E-656564454649"
- &efi_system_partition_guid "C12A7328-F81F-11D2-BA4B-00A0C93EC93B"
- &filesystem_data_guid "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
- &xboot_ldr_partition_guid "BC13C2FF-59E6-4262-A352-B275FD6F7172"
# static UUIDs for partitions and filesystems
# NOTE(akoutsou): These are unnecessary and have stuck around since the
# beginning where (I believe) the goal was to have predictable,
# reproducible partition tables. They might be removed soon in favour of
# proper, random UUIDs, with reproducibility being controlled by fixing
# rng seeds.
uuids:
- &bios_boot_partition_uuid "FAC7F1FB-3E8D-4137-A512-961DE09A5549"
- &root_partition_uuid "6264D520-3FB9-423F-8AB8-7A0A8E3D3562"
- &data_partition_uuid "CB07C243-BC44-4717-853E-28852021225B"
- &efi_system_partition_uuid "68B2905B-DF3E-4FB3-80FA-49D1E773AA33"
- &efi_filesystem_uuid "7B77-95E7"
default_partition_tables: &default_partition_tables
x86_64:
uuid: "D209C89E-EA5E-4FBD-B161-B461CCE297E0"
type: "gpt"
partitions:
- size: 1_048_576 # 1 MiB
bootable: true
type: *bios_boot_partition_guid
uuid: *bios_boot_partition_uuid
- &default_partition_table_part_efi
size: 209_715_200 # 200 MiB
type: *efi_system_partition_guid
uuid: *efi_system_partition_uuid
payload_type: "filesystem"
payload:
type: vfat
uuid: *efi_filesystem_uuid
mountpoint: "/boot/efi"
label: "EFI-SYSTEM"
fstab_options: "defaults,uid=0,gid=0,umask=077,shortname=winnt"
fstab_freq: 0
fstab_passno: 2
- &default_partition_table_part_boot
size: 524_288_000 # 500 * MiB
type: *filesystem_data_guid
uuid: *data_partition_uuid
payload_type: "filesystem"
payload:
type: "ext4"
mountpoint: "/boot"
label: "boot"
fstab_options: "defaults"
fstab_freq: 0
fstab_passno: 0
- &default_partition_table_part_root
size: 2_147_483_648 # 2 * datasizes.GibiByte,
type: *filesystem_data_guid
uuid: *root_partition_uuid
payload_type: "filesystem"
payload:
type: "ext4"
label: "root"
mountpoint: "/"
fstab_options: "defaults"
fstab_freq: 0
fstab_passno: 0
aarch64: &default_partition_table_aarch64
uuid: "D209C89E-EA5E-4FBD-B161-B461CCE297E0"
type: "gpt"
partitions:
- *default_partition_table_part_efi
- *default_partition_table_part_boot
- *default_partition_table_part_root
ppc64le:
uuid: "0x14fc63d2"
type: "dos"
partitions:
- size: 4_194_304 # 4 MiB
bootable: true
type: *prep_partition_dosid
- &default_partition_table_part_boot_ppc64le
size: 524_288_000 # 500 * MiB
payload_type: "filesystem"
payload:
type: "ext4"
mountpoint: "/boot"
label: "boot"
fstab_options: "defaults"
fstab_freq: 0
fstab_passno: 0
- &default_partition_table_part_root_ppc64le
size: 2_147_483_648 # 2 * datasizes.GibiByte,
payload_type: "filesystem"
payload:
type: "ext4"
mountpoint: "/"
fstab_options: "defaults"
fstab_freq: 0
fstab_passno: 0
s390x:
uuid: "0x14fc63d2"
type: "dos"
partitions:
- *default_partition_table_part_boot_ppc64le
- <<: *default_partition_table_part_root_ppc64le
bootable: true
riscv64: *default_partition_table_aarch64
minimal_raw_partition_tables: &minimal_raw_partition_tables
x86_64:
uuid: "D209C89E-EA5E-4FBD-B161-B461CCE297E0"
type: "gpt"
start_offset: 8_388_608 # 8 * datasizes.MebiByte
partitions:
- *default_partition_table_part_efi
- &minimal_raw_partition_table_part_boot
<<: *default_partition_table_part_boot
size: 1_073_741_824 # 1 * datasizes.GibiByte,
type: *xboot_ldr_partition_guid
- &minimal_raw_partition_table_part_root
<<: *default_partition_table_part_root
aarch64: &minimal_raw_partition_table_aarch64
uuid: "0xc1748067"
type: "dos"
start_offset: 8_388_608 # 8 * datasizes.MebiByte
partitions:
- <<: *default_partition_table_part_efi
bootable: true
type: *fat16_bdosid
uuid: ""
- <<: *minimal_raw_partition_table_part_boot
type: *filesystem_linux_dosid
uuid: ""
- <<: *default_partition_table_part_root
type: *filesystem_linux_dosid
uuid: ""
riscv64: *minimal_raw_partition_table_aarch64
iot_base_partition_tables: &iot_base_partition_tables
x86_64:
uuid: "D209C89E-EA5E-4FBD-B161-B461CCE297E0"
type: "gpt"
start_offset: 8_388_608 # 8 * datasizes.MebiByte
partitions:
- &iot_base_partition_table_part_efi
size: 525_336_576 # 501 * datasizes.MebiByte
type: *efi_system_partition_guid
uuid: *efi_system_partition_uuid
payload_type: "filesystem"
payload:
type: vfat
uuid: *efi_filesystem_uuid
mountpoint: "/boot/efi"
label: "EFI-SYSTEM"
fstab_options: "umask=0077,shortname=winnt"
fstab_freq: 0
fstab_passno: 2
- &iot_base_partition_table_part_boot
size: 1_073_741_824 # 1 * datasizes.GibiByte,
type: *filesystem_data_guid
uuid: *data_partition_uuid
payload_type: "filesystem"
payload:
type: "ext4"
label: "boot"
mountpoint: "/boot"
fstab_options: "defaults"
fstab_freq: 1
fstab_passno: 2
- &iot_base_partition_table_part_root
size: 2_693_791_744 # 2569 * datasizes.MebiByte,
type: *filesystem_data_guid
uuid: *root_partition_uuid
payload_type: "filesystem"
payload:
type: "ext4"
label: "root"
mountpoint: "/"
fstab_options: "defaults,ro"
fstab_freq: 1
fstab_passno: 1
aarch64: &iot_base_partition_table_aarch64
uuid: "0xc1748067"
type: "dos"
start_offset: 8_388_608 # 8 * datasizes.MebiByte
partitions:
- <<: *iot_base_partition_table_part_efi
bootable: true
type: *fat16_bdosid
uuid: ""
- <<: *iot_base_partition_table_part_boot
type: *filesystem_linux_dosid
uuid: ""
- <<: *iot_base_partition_table_part_root
type: *filesystem_linux_dosid
uuid: ""
iot_simplified_installer_partition_tables: &iot_simplified_installer_partition_tables
x86_64: &iot_simplified_installer_partition_tables_x86
uuid: "D209C89E-EA5E-4FBD-B161-B461CCE297E0"
type: "gpt"
partitions:
- *iot_base_partition_table_part_efi
- size: 1_073_741_824 # 1 * datasizes.GibiByte,
type: *xboot_ldr_partition_guid
uuid: *data_partition_uuid
payload_type: "filesystem"
payload:
type: "ext4"
label: "boot"
mountpoint: "/boot"
fstab_options: "defaults"
fstab_freq: 1
fstab_passno: 1
- type: *filesystem_data_guid
uuid: *root_partition_uuid
payload_type: "luks"
payload:
label: "crypt_root"
cipher: "cipher_null"
passphrase: "osbuild"
pbkdf:
memory: 32
iterations: 4
parallelism: 1
clevis:
pin: "null"
policy: "{}"
remove_passphrase: true
payload_type: "lvm"
payload:
name: "rootvg"
description: "built with lvm2 and osbuild"
logical_volumes:
- size: 8_589_934_592 # 8 * datasizes.GibiByte,
name: "rootlv"
payload_type: "filesystem"
payload:
type: "ext4"
label: "root"
mountpoint: "/"
fstab_options: "defaults"
fstab_freq: 0
fstab_passno: 0
aarch64:
<<: *iot_simplified_installer_partition_tables_x86
image_config:
default:
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-fedora-ds.xml"
hostname: "localhost.localdomain"
install_weak_deps: true
locale: "C.UTF-8"
machine_id_uninitialized: true
timezone: "UTC"
image_types:
qcow2: &qcow2
partition_table:
<<: *default_partition_tables
package_sets:
- *cloud_base_pkgset
- include:
- "qemu-guest-agent"
- *cloud_base_pkgset
- include:
- "qemu-guest-agent"
ami: *qcow2
oci: *qcow2
openstack: *qcow2
vhd:
partition_table:
<<: *default_partition_tables
package_sets:
- *cloud_base_pkgset
- include:
- "WALinuxAgent"
vmdk: &vmdk
partition_table:
<<: *default_partition_tables
package_sets:
- include:
- "@Fedora Cloud Server"
@ -172,6 +437,20 @@ image_types:
iot_container: *iot_commit
iot_raw_image:
partition_table:
<<: *iot_base_partition_tables
partition_table_override:
condition:
version_greater_or_equal:
"42":
- partition_index: 2
fstab_options: "defaults,ro"
iot_qcow2_image:
partition_table:
<<: *iot_base_partition_tables
iot_bootable_container:
package_sets:
- include:
@ -573,6 +852,8 @@ image_types:
- "fuse-libs"
minimal_raw: &minimal_raw
partition_table:
<<: *minimal_raw_partition_tables
package_sets:
- include:
- "@core"
@ -599,6 +880,8 @@ image_types:
minimal_raw_zst: *minimal_raw
iot_simplified_installer:
partition_table:
<<: *iot_simplified_installer_partition_tables
package_sets:
- *installer_pkgset
- include:

View file

@ -3,6 +3,7 @@ package defs
import (
"embed"
"errors"
"fmt"
"io/fs"
"os"
@ -15,38 +16,140 @@ import (
"gopkg.in/yaml.v3"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/experimentalflags"
"github.com/osbuild/images/pkg/rpmmd"
)
var (
ErrImageTypeNotFound = errors.New("image type not found")
ErrNoPartitionTableForImgType = errors.New("no partition table for image type")
ErrNoPartitionTableForArch = errors.New("no partition table for arch")
)
//go:embed */*.yaml
var data embed.FS
var DataFS fs.FS = data
type toplevelYAML struct {
ImageTypes map[string]imageType `yaml:"image_types"`
Common map[string]any `yaml:".common,omitempty"`
ImageConfig imageConfig `yaml:"image_config,omitempty"`
ImageTypes map[string]imageType `yaml:"image_types"`
Common map[string]any `yaml:".common,omitempty"`
}
// imageConfig is the YAML representation of the distro-wide image
// configuration: a default config plus optional conditional overrides.
type imageConfig struct {
	Default   *distro.ImageConfig    `yaml:"default"`
	Condition *imageConfigConditions `yaml:"condition,omitempty"`
}

// imageConfigConditions holds conditional image configs, currently only
// keyed by distro name (e.g. "centos" overriding the rhel defaults).
type imageConfigConditions struct {
	DistroName map[string]*distro.ImageConfig `yaml:"distro_name,omitempty"`
}
// imageType is the YAML representation of a single image type: its
// package sets plus optional per-arch partition tables and overrides.
type imageType struct {
	PackageSets []packageSet `yaml:"package_sets"`
	// archStr->partitionTable
	PartitionTables map[string]*disk.PartitionTable `yaml:"partition_table"`
	// override specific aspects of the partition table
	PartitionTablesOverrides *partitionTablesOverrides `yaml:"partition_table_override"`
}
type packageSet struct {
Include []string `yaml:"include"`
Exclude []string `yaml:"exclude"`
Condition *conditions `yaml:"condition,omitempty"`
Include []string `yaml:"include"`
Exclude []string `yaml:"exclude"`
Condition *pkgSetConditions `yaml:"condition,omitempty"`
}
type conditions struct {
type pkgSetConditions struct {
Architecture map[string]packageSet `yaml:"architecture,omitempty"`
VersionLessThan map[string]packageSet `yaml:"version_less_than,omitempty"`
VersionGreaterOrEqual map[string]packageSet `yaml:"version_greater_or_equal,omitempty"`
DistroName map[string]packageSet `yaml:"distro_name,omitempty"`
}
// partitionTablesOverrides describes conditional tweaks that get applied
// on top of an image type's base partition table.
type partitionTablesOverrides struct {
	Conditional *partitionTablesOverwriteConditional `yaml:"condition"`
}

// Apply runs every override operation whose version condition matches
// the distro of the given image type, mutating pt in place. Version keys
// found in replacements are substituted before comparing.
func (po *partitionTablesOverrides) Apply(it distro.ImageType, pt *disk.PartitionTable, replacements map[string]string) error {
	// guard both a missing override block and an override block that
	// carries no condition (previously a nil-pointer panic)
	if po == nil || po.Conditional == nil {
		return nil
	}
	cond := po.Conditional
	_, distroVersion := splitDistroNameVer(it.Arch().Distro().Name())
	for gteqVer, geOverrides := range cond.VersionGreaterOrEqual {
		if r, ok := replacements[gteqVer]; ok {
			gteqVer = r
		}
		if common.VersionGreaterThanOrEqual(distroVersion, gteqVer) {
			for _, overrideOp := range geOverrides {
				if err := overrideOp.Apply(pt); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// partitionTablesOverwriteConditional maps a minimum distro version to
// the override operations that apply from that version onwards.
type partitionTablesOverwriteConditional struct {
	VersionGreaterOrEqual map[string][]partitionTablesOverrideOp `yaml:"version_greater_or_equal,omitempty"`
}

// partitionTablesOverrideOp selects a partition by index and carries the
// values (size, fstab options) to override on it. Zero values mean
// "leave unchanged".
type partitionTablesOverrideOp struct {
	PartitionIndex int    `yaml:"partition_index"`
	Size           uint64 `yaml:"size"`
	FSTabOptions   string `yaml:"fstab_options"`
}
// Apply mutates pt according to the override operation: a non-zero Size
// replaces the selected partition's size and a non-empty FSTabOptions
// replaces the fstab options of its filesystem payload.
func (op *partitionTablesOverrideOp) Apply(pt *disk.PartitionTable) error {
	selectPart := op.PartitionIndex
	// the index must be strictly below len(): the previous check used
	// ">" which let an index equal to the length through and panicked;
	// negative indexes are rejected too
	if selectPart < 0 || selectPart >= len(pt.Partitions) {
		// %+v instead of %q: these are struct values, not strings
		return fmt.Errorf("override %+v part %v outside of partitionTable %+v", op, selectPart, pt)
	}
	if op.Size > 0 {
		pt.Partitions[selectPart].Size = op.Size
	}
	if op.FSTabOptions != "" {
		part := pt.Partitions[selectPart]
		fs, ok := part.Payload.(*disk.Filesystem)
		if !ok {
			return fmt.Errorf("override %+v part %v for fstab_options expecting filesystem got %T", op, selectPart, part)
		}
		fs.FSTabOptions = op.FSTabOptions
	}
	return nil
}
// DistroImageConfig returns the distro wide ImageConfig.
//
// Each ImageType gets this as their default ImageConfig.
func DistroImageConfig(distroNameVer string) (*distro.ImageConfig, error) {
	toplevel, err := load(distroNameVer)
	if err != nil {
		return nil, err
	}
	imgConfig := toplevel.ImageConfig.Default
	cond := toplevel.ImageConfig.Condition
	if cond != nil {
		distroName, _ := splitDistroNameVer(distroNameVer)
		// XXX: we should probably use a similar pattern like
		// for the partition table overrides (via
		// findElementIndexByJSONTag) but this is fine for now
		if distroNameCnf, ok := cond.DistroName[distroName]; ok {
			// per-distro-name config is layered on top of the default
			imgConfig = distroNameCnf.InheritFrom(imgConfig)
		}
	}
	return imgConfig, nil
}
// PackageSet loads the PackageSet from the yaml source file discovered via the
// imagetype. By default the imagetype name is used to load the packageset
// but with "overrideTypeName" this can be overridden (useful for e.g.
@ -62,61 +165,18 @@ func PackageSet(it distro.ImageType, overrideTypeName string, replacements map[s
archName := arch.Name()
distribution := arch.Distro()
distroNameVer := distribution.Name()
// we need to split from the right for "centos-stream-10" like
// distro names, sadly go has no rsplit() so we do it manually
// XXX: we cannot use distroidparser here because of import cycles
distroName := distroNameVer[:strings.LastIndex(distroNameVer, "-")]
distroVersion := strings.SplitN(distroNameVer, "-", 2)[1]
distroNameMajorVer := strings.SplitN(distroNameVer, ".", 2)[0]
// XXX: this is a short term measure, pass a set of
// searchPaths down the stack instead
var dataFS fs.FS = DataFS
if overrideDir := experimentalflags.String("yamldir"); overrideDir != "" {
logrus.Warnf("using experimental override dir %q", overrideDir)
dataFS = os.DirFS(overrideDir)
}
// XXX: this is only needed temporary until we have a "distros.yaml"
// that describes some high-level properties of each distro
// (like their yaml dirs)
var baseDir string
switch distroName {
case "rhel":
// rhel yaml files are under ./rhel-$majorVer
baseDir = distroNameMajorVer
case "centos":
// centos yaml is just rhel but we have (sadly) no symlinks
// in "go:embed" so we have to have this slightly ugly
// workaround
baseDir = fmt.Sprintf("rhel-%s", distroVersion)
case "fedora", "test-distro":
// our other distros just have a single yaml dir per distro
// and use condition.version_gt etc
baseDir = distroName
default:
return rpmmd.PackageSet{}, fmt.Errorf("unsupported distro in loader %q (add to loader.go)", distroName)
}
f, err := dataFS.Open(filepath.Join(baseDir, "distro.yaml"))
if err != nil {
return rpmmd.PackageSet{}, err
}
defer f.Close()
decoder := yaml.NewDecoder(f)
decoder.KnownFields(true)
distroName, distroVersion := splitDistroNameVer(distroNameVer)
// each imagetype can have multiple package sets, so that we can
// use yaml aliases/anchors to de-duplicate them
var toplevel toplevelYAML
if err := decoder.Decode(&toplevel); err != nil {
toplevel, err := load(distroNameVer)
if err != nil {
return rpmmd.PackageSet{}, err
}
imgType, ok := toplevel.ImageTypes[typeName]
if !ok {
return rpmmd.PackageSet{}, fmt.Errorf("unknown image type name %q", typeName)
return rpmmd.PackageSet{}, fmt.Errorf("%w: %q", ErrImageTypeNotFound, typeName)
}
var rpmmdPkgSet rpmmd.PackageSet
@ -172,3 +232,98 @@ func PackageSet(it distro.ImageType, overrideTypeName string, replacements map[s
return rpmmdPkgSet, nil
}
// PartitionTable returns the partition table for the given distro/imgType.
func PartitionTable(it distro.ImageType, replacements map[string]string) (*disk.PartitionTable, error) {
	arch := it.Arch()
	// image type names use "-" but YAML keys use "_"
	typeName := strings.ReplaceAll(it.Name(), "-", "_")

	toplevel, err := load(arch.Distro().Name())
	if err != nil {
		return nil, err
	}
	imgType, ok := toplevel.ImageTypes[typeName]
	if !ok {
		return nil, fmt.Errorf("%w: %q", ErrImageTypeNotFound, typeName)
	}
	if imgType.PartitionTables == nil {
		return nil, fmt.Errorf("%w: %q", ErrNoPartitionTableForImgType, typeName)
	}

	archName := arch.Name()
	pt, found := imgType.PartitionTables[archName]
	if !found {
		return nil, fmt.Errorf("%w (%q): %q", ErrNoPartitionTableForArch, typeName, archName)
	}
	if err := imgType.PartitionTablesOverrides.Apply(it, pt, replacements); err != nil {
		return nil, err
	}
	return pt, nil
}
// splitDistroNameVer splits a distro identifier like "centos-stream-10"
// into its name ("centos-stream") and version ("10") parts.
//
// we need to split from the right for "centos-stream-10" like
// distro names, sadly go has no rsplit() so we do it manually
// XXX: we cannot use distroidparser here because of import cycles
func splitDistroNameVer(distroNameVer string) (string, string) {
	idx := strings.LastIndex(distroNameVer, "-")
	if idx < 0 {
		// no separator at all: treat the whole string as the name
		// instead of panicking on an out-of-range slice index
		return distroNameVer, ""
	}
	return distroNameVer[:idx], distroNameVer[idx+1:]
}
// load reads and strictly parses the distro.yaml for the given
// distro/version identifier (e.g. "fedora-42") from the embedded DataFS,
// or from an override directory when the experimental "yamldir" flag is
// set.
func load(distroNameVer string) (*toplevelYAML, error) {
	// split into name and version from the right (see splitDistroNameVer)
	// XXX: we cannot use distroidparser here because of import cycles
	distroName, distroVersion := splitDistroNameVer(distroNameVer)
	// name plus major version, e.g. "rhel-8" for "rhel-8.4"
	distroNameMajorVer := strings.SplitN(distroNameVer, ".", 2)[0]

	// XXX: this is a short term measure, pass a set of
	// searchPaths down the stack instead
	var dataFS fs.FS = DataFS
	if overrideDir := experimentalflags.String("yamldir"); overrideDir != "" {
		logrus.Warnf("using experimental override dir %q", overrideDir)
		dataFS = os.DirFS(overrideDir)
	}

	// XXX: this is only needed temporarily until we have a "distros.yaml"
	// that describes some high-level properties of each distro
	// (like their yaml dirs)
	var baseDir string
	switch distroName {
	case "rhel":
		// rhel yaml files are under ./rhel-$majorVer
		baseDir = distroNameMajorVer
	case "centos":
		// centos yaml is just rhel but we have (sadly) no symlinks
		// in "go:embed" so we have to have this slightly ugly
		// workaround
		baseDir = fmt.Sprintf("rhel-%s", distroVersion)
	case "fedora", "test-distro":
		// our other distros just have a single yaml dir per distro
		// and use condition.version_gt etc
		baseDir = distroName
	default:
		return nil, fmt.Errorf("unsupported distro in loader %q (add to loader.go)", distroName)
	}

	f, err := dataFS.Open(filepath.Join(baseDir, "distro.yaml"))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	decoder := yaml.NewDecoder(f)
	// reject unknown YAML keys so typos in the definitions fail loudly
	decoder.KnownFields(true)

	// each imagetype can have multiple package sets, so that we can
	// use yaml aliases/anchors to de-duplicate them
	var toplevel toplevelYAML
	if err := decoder.Decode(&toplevel); err != nil {
		return nil, err
	}
	return &toplevel, nil
}

View file

@ -152,6 +152,23 @@
- "grub2-efi-aa64"
- "shim-aa64"
image_config:
default:
default_kernel: "kernel"
# XXX: this needs to be conditional for centos and rhel
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-rhel10-ds.xml"
install_weak_deps: true
locale: "C.UTF-8"
sysconfig:
networking: true
no_zero_conf: true
timezone: "UTC"
update_default_kernel: true
condition:
distro_name:
centos:
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-cs10-ds.xml"
image_types:
# XXX: not a real pkgset but the "os" pipeline pkgset for image-installer
# find a nicer way to represent this

View file

@ -44,6 +44,23 @@
include:
- "insights-client"
image_config:
default:
timezone: "America/New_York"
locale: "en_US.UTF-8"
gpgkey_files:
- "/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release"
sysconfig:
networking: true
no_zero_conf: true
create_default_network_scripts: true
default_kernel: "kernel"
update_default_kernel: true
kernel_options_bootloader: true
# RHEL 7 grub does not support BLS
no_bls: true
install_weak_deps: true
image_types:
azure_rhui:
package_sets:

View file

@ -524,6 +524,23 @@
- "insights-client"
- "subscription-manager-cockpit"
image_config:
default:
default_kernel: "kernel"
# XXX: this needs to be conditional for centos and rhel
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-rhel8-ds.xml"
install_weak_deps: true
kernel_options_bootloader: true
locale: "en_US.UTF-8"
sysconfig:
networking: true
no_zero_conf: true
timezone: "America/New_York"
update_default_kernel: true
condition:
distro_name:
centos:
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-centos8-ds.xml"
image_types:
# XXX: not a real pkgset but the "os" pipeline pkgset for image-installer

View file

@ -362,6 +362,22 @@
include:
- "dmidecode"
image_config:
default:
default_kernel: "kernel"
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-rhel9-ds.xml"
install_weak_deps: true
locale: "C.UTF-8"
sysconfig:
networking: true
no_zero_conf: true
timezone: "America/New_York"
update_default_kernel: true
condition:
distro_name:
centos:
default_oscap_datastream: "/usr/share/xml/scap/ssg/content/ssg-cs9-ds.xml"
image_types:
# XXX: not a real pkgset but the "os" pipeline pkgset for image-installer
# find a nicer way to represent this

View file

@ -102,6 +102,10 @@ type ImageType interface {
// has no partition table. Only support for RHEL 8.5+
PartitionType() disk.PartitionTableType
// Return the base partition tabe for the given image type, will
// return `nil` if there is none
BasePartitionTable() (*disk.PartitionTable, error)
// Returns the corresponding boot mode ("legacy", "uefi", "hybrid") or "none"
BootMode() platform.BootMode

View file

@ -242,7 +242,6 @@ func mkIotSimplifiedInstallerImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"ostree-deployment", "image", "xz", "coi-tree", "efiboot-tree", "bootiso-tree", "bootiso"},
exports: []string{"bootiso"},
basePartitionTables: iotSimplifiedInstallerPartitionTables,
kernelOptions: ostreeDeploymentKernelOptions(),
requiredPartitionSizes: requiredDirectorySizes,
}
@ -265,15 +264,14 @@ func mkIotRawImgType(d distribution) imageType {
LockRootUser: common.ToPtr(true),
IgnitionPlatform: common.ToPtr("metal"),
},
defaultSize: 4 * datasizes.GibiByte,
rpmOstree: true,
bootable: true,
image: iotImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"ostree-deployment", "image", "xz"},
exports: []string{"xz"},
basePartitionTables: iotBasePartitionTables,
kernelOptions: ostreeDeploymentKernelOptions(),
defaultSize: 4 * datasizes.GibiByte,
rpmOstree: true,
bootable: true,
image: iotImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"ostree-deployment", "image", "xz"},
exports: []string{"xz"},
kernelOptions: ostreeDeploymentKernelOptions(),
// Passing an empty map into the required partition sizes disables the
// default partition sizes normally set so our `basePartitionTables` can
@ -304,7 +302,6 @@ func mkIotQcow2ImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"ostree-deployment", "image", "qcow2"},
exports: []string{"qcow2"},
basePartitionTables: iotBasePartitionTables,
kernelOptions: ostreeDeploymentKernelOptions(),
requiredPartitionSizes: requiredDirectorySizes,
}
@ -329,7 +326,6 @@ func mkQcow2ImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "qcow2"},
exports: []string{"qcow2"},
basePartitionTables: defaultBasePartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
}
@ -362,7 +358,6 @@ func mkVmdkImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk"},
exports: []string{"vmdk"},
basePartitionTables: defaultBasePartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
}
@ -383,7 +378,6 @@ func mkOvaImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vmdk", "ovf", "archive"},
exports: []string{"archive"},
basePartitionTables: defaultBasePartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
}
@ -438,10 +432,8 @@ func mkWslImgType(d distribution) imageType {
ExcludeDocs: common.ToPtr(true),
Locale: common.ToPtr("C.UTF-8"),
Timezone: common.ToPtr("Etc/UTC"),
WSLConfig: &osbuild.WSLConfStageOptions{
Boot: osbuild.WSLConfBootOptions{
Systemd: true,
},
WSLConfig: &distro.WSLConfig{
BootSystemd: true,
},
},
image: containerImage,
@ -481,7 +473,6 @@ func mkMinimalRawImgType(d distribution) imageType {
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
basePartitionTables: minimalrawPartitionTables,
requiredPartitionSizes: requiredDirectorySizes,
}
if common.VersionGreaterThanOrEqual(d.osVersion, "43") {
@ -508,16 +499,6 @@ type distribution struct {
defaultImageConfig *distro.ImageConfig
}
// Fedora based OS image configuration defaults
var defaultDistroImageConfig = &distro.ImageConfig{
Hostname: common.ToPtr("localhost.localdomain"),
Timezone: common.ToPtr("UTC"),
Locale: common.ToPtr("C.UTF-8"),
DefaultOSCAPDatastream: common.ToPtr(oscap.DefaultFedoraDatastream()),
InstallWeakDeps: common.ToPtr(true),
MachineIdUninitialized: common.ToPtr(true),
}
func defaultDistroInstallerConfig(d *distribution) *distro.InstallerConfig {
config := distro.InstallerConfig{}
// In Fedora 42 the ifcfg module was replaced by net-lib.
@ -543,15 +524,16 @@ func getDistro(version int) distribution {
if version < 0 {
panic("Invalid Fedora version (must be positive)")
}
nameVer := fmt.Sprintf("fedora-%d", version)
return distribution{
name: fmt.Sprintf("fedora-%d", version),
name: nameVer,
product: "Fedora",
osVersion: strconv.Itoa(version),
releaseVersion: strconv.Itoa(version),
modulePlatformID: fmt.Sprintf("platform:f%d", version),
ostreeRefTmpl: fmt.Sprintf("fedora/%d/%%s/iot", version),
runner: &runner.Fedora{Version: uint64(version)},
defaultImageConfig: defaultDistroImageConfig,
defaultImageConfig: common.Must(defs.DistroImageConfig(nameVer)),
}
}

View file

@ -210,7 +210,7 @@ func osCustomizations(
osc.ShellInit = imageConfig.ShellInit
osc.Grub2Config = imageConfig.Grub2Config
osc.Sysconfig = imageConfig.Sysconfig
osc.Sysconfig = imageConfig.SysconfigStageOptions()
osc.SystemdLogind = imageConfig.SystemdLogind
osc.CloudInit = imageConfig.CloudInit
osc.Modprobe = imageConfig.Modprobe
@ -226,7 +226,7 @@ func osCustomizations(
osc.SshdConfig = imageConfig.SshdConfig
osc.AuthConfig = imageConfig.Authconfig
osc.PwQuality = imageConfig.PwQuality
osc.WSLConfig = imageConfig.WSLConfig
osc.WSLConfig = imageConfig.WSLConfStageOptions()
osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...)

View file

@ -1,6 +1,7 @@
package fedora
import (
"errors"
"fmt"
"math/rand"
"strings"
@ -16,6 +17,7 @@ import (
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/defs"
"github.com/osbuild/images/pkg/experimentalflags"
"github.com/osbuild/images/pkg/image"
"github.com/osbuild/images/pkg/manifest"
@ -56,9 +58,7 @@ type imageType struct {
// rpmOstree: iot/ostree
rpmOstree bool
// bootable image
bootable bool
// List of valid arches for the image type
basePartitionTables distro.BasePartitionTableMap
bootable bool
requiredPartitionSizes map[string]uint64
}
@ -139,14 +139,18 @@ func (t *imageType) BootMode() platform.BootMode {
return platform.BOOT_NONE
}
func (t *imageType) BasePartitionTable() (*disk.PartitionTable, error) {
return defs.PartitionTable(t, VersionReplacements())
}
func (t *imageType) getPartitionTable(
customizations *blueprint.Customizations,
options distro.ImageOptions,
rng *rand.Rand,
) (*disk.PartitionTable, error) {
basePartitionTable, exists := t.basePartitionTables[t.arch.Name()]
if !exists {
return nil, fmt.Errorf("unknown arch for partition table: %s", t.arch.Name())
basePartitionTable, err := t.BasePartitionTable()
if err != nil {
return nil, err
}
imageSize := t.Size(options.Size)
@ -185,7 +189,7 @@ func (t *imageType) getPartitionTable(
}
mountpoints := customizations.GetFilesystems()
return disk.NewPartitionTable(&basePartitionTable, mountpoints, imageSize, partitioningMode, t.platform.GetArch(), t.requiredPartitionSizes, rng)
return disk.NewPartitionTable(basePartitionTable, mountpoints, imageSize, partitioningMode, t.platform.GetArch(), t.requiredPartitionSizes, rng)
}
func (t *imageType) getDefaultImageConfig() *distro.ImageConfig {
@ -207,10 +211,13 @@ func (t *imageType) getDefaultInstallerConfig() (*distro.InstallerConfig, error)
}
func (t *imageType) PartitionType() disk.PartitionTableType {
basePartitionTable, exists := t.basePartitionTables[t.arch.Name()]
if !exists {
basePartitionTable, err := t.BasePartitionTable()
if errors.Is(err, defs.ErrNoPartitionTableForImgType) {
return disk.PT_NONE
}
if err != nil {
panic(err)
}
return basePartitionTable.Type
}

View file

@ -1,594 +0,0 @@
package fedora
import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/datasizes"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
)
var defaultBasePartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
Partitions: []disk.Partition{
{
Size: 1 * datasizes.MebiByte,
Bootable: true,
Type: disk.BIOSBootPartitionGUID,
UUID: disk.BIOSBootPartitionUUID,
},
{
Size: 200 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * datasizes.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * datasizes.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_PPC64LE.String(): disk.PartitionTable{
UUID: "0x14fc63d2",
Type: disk.PT_DOS,
Partitions: []disk.Partition{
{
Size: 4 * datasizes.MebiByte,
Type: disk.PRepPartitionDOSID,
Bootable: true,
},
{
Size: 500 * datasizes.MebiByte,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_S390X.String(): disk.PartitionTable{
UUID: "0x14fc63d2",
Type: disk.PT_DOS,
Partitions: []disk.Partition{
{
Size: 500 * datasizes.MebiByte,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Bootable: true,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_RISCV64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * datasizes.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
}
var minimalrawPartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
StartOffset: 8 * datasizes.MebiByte,
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "0xc1748067",
Type: disk.PT_DOS,
StartOffset: 8 * datasizes.MebiByte,
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.FAT16BDOSID,
Bootable: true,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
arch.ARCH_RISCV64.String(): disk.PartitionTable{
UUID: "0xc1748067",
Type: disk.PT_DOS,
StartOffset: 8 * datasizes.MebiByte,
Partitions: []disk.Partition{
{
Size: 200 * datasizes.MebiByte,
Type: disk.FAT16BDOSID,
Bootable: true,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * datasizes.GibiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
}
var iotBasePartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
StartOffset: 8 * datasizes.MebiByte,
Partitions: []disk.Partition{
{
Size: 501 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "umask=0077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 1,
FSTabPassNo: 2,
},
},
{
Size: 2569 * datasizes.MebiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults,ro",
FSTabFreq: 1,
FSTabPassNo: 1,
},
},
},
},
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "0xc1748067",
Type: disk.PT_DOS,
StartOffset: 8 * datasizes.MebiByte,
Partitions: []disk.Partition{
{
Size: 501 * datasizes.MebiByte,
Type: disk.FAT16BDOSID,
Bootable: true,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "umask=0077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 1,
FSTabPassNo: 2,
},
},
{
Size: 2569 * datasizes.MebiByte,
Type: disk.FilesystemLinuxDOSID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults,ro",
FSTabFreq: 1,
FSTabPassNo: 1,
},
},
},
},
}
var iotSimplifiedInstallerPartitionTables = distro.BasePartitionTableMap{
arch.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
Partitions: []disk.Partition{
{
Size: 501 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "umask=0077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 1,
FSTabPassNo: 1,
},
},
{
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.LUKSContainer{
Label: "crypt_root",
Cipher: "cipher_null",
Passphrase: "osbuild",
PBKDF: disk.Argon2id{
Memory: 32,
Iterations: 4,
Parallelism: 1,
},
Clevis: &disk.ClevisBind{
Pin: "null",
Policy: "{}",
RemovePassphrase: true,
},
Payload: &disk.LVMVolumeGroup{
Name: "rootvg",
Description: "built with lvm2 and osbuild",
LogicalVolumes: []disk.LVMLogicalVolume{
{
Size: 8 * datasizes.GibiByte,
Name: "rootlv",
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
},
},
},
},
arch.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: disk.PT_GPT,
Partitions: []disk.Partition{
{
Size: 501 * datasizes.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "umask=0077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 1 * datasizes.GibiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.DataPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 1,
FSTabPassNo: 1,
},
},
{
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.LUKSContainer{
Label: "crypt_root",
Cipher: "cipher_null",
Passphrase: "osbuild",
PBKDF: disk.Argon2id{
Memory: 32,
Iterations: 4,
Parallelism: 1,
},
Clevis: &disk.ClevisBind{
Pin: "null",
Policy: "{}",
RemovePassphrase: true,
},
Payload: &disk.LVMVolumeGroup{
Name: "rootvg",
Description: "built with lvm2 and osbuild",
LogicalVolumes: []disk.LVMLogicalVolume{
{
Size: 8 * datasizes.GibiByte,
Name: "rootlv",
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
},
},
},
},
}

View file

@ -4,6 +4,7 @@ import (
"fmt"
"reflect"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/customizations/fsnode"
"github.com/osbuild/images/pkg/customizations/shell"
"github.com/osbuild/images/pkg/customizations/subscription"
@ -12,19 +13,22 @@ import (
// ImageConfig represents a (default) configuration applied to the image payload.
type ImageConfig struct {
Hostname *string
Timezone *string
Hostname *string `yaml:"hostname,omitempty"`
Timezone *string `yaml:"timezone,omitempty"`
TimeSynchronization *osbuild.ChronyStageOptions
Locale *string
Locale *string `yaml:"locale,omitempty"`
Keyboard *osbuild.KeymapStageOptions
EnabledServices []string
DisabledServices []string
MaskedServices []string
DefaultTarget *string
Sysconfig []*osbuild.SysconfigStageOptions
Sysconfig *Sysconfig `yaml:"sysconfig,omitempty"`
DefaultKernel *string `yaml:"default_kernel,omitempty"`
UpdateDefaultKernel *bool `yaml:"update_default_kernel,omitempty"`
// List of files from which to import GPG keys into the RPM database
GPGKeyFiles []string
GPGKeyFiles []string `yaml:"gpgkey_files,omitempty"`
// Disable SELinux labelling
NoSElinux *bool
@ -64,7 +68,8 @@ type ImageConfig struct {
Firewall *osbuild.FirewallStageOptions
UdevRules *osbuild.UdevRulesStageOptions
GCPGuestAgentConfig *osbuild.GcpGuestAgentConfigOptions
WSLConfig *osbuild.WSLConfStageOptions
WSLConfig *WSLConfig
Files []*fsnode.File
Directories []*fsnode.Directory
@ -75,15 +80,15 @@ type ImageConfig struct {
//
// This should only be used for old distros that use grub and it is
// applied on all architectures, except for s390x.
KernelOptionsBootloader *bool
KernelOptionsBootloader *bool `yaml:"kernel_options_bootloader,omitempty"`
// The default OSCAP datastream to use for the image as a fallback,
// if no datastream value is provided by the user.
DefaultOSCAPDatastream *string
DefaultOSCAPDatastream *string `yaml:"default_oscap_datastream,omitempty"`
// NoBLS configures the image bootloader with traditional menu entries
// instead of BLS. Required for legacy systems like RHEL 7.
NoBLS *bool
NoBLS *bool `yaml:"no_bls,omitempty"`
// OSTree specific configuration
@ -98,18 +103,22 @@ type ImageConfig struct {
// InstallWeakDeps enables installation of weak dependencies for packages
// that are statically defined for the pipeline.
InstallWeakDeps *bool
InstallWeakDeps *bool `yaml:"install_weak_deps,omitempty"`
// How to handle the /etc/machine-id file, when set to true it causes the
// machine id to be set to 'uninitialized' which causes ConditionFirstboot
// to be triggered in systemd
MachineIdUninitialized *bool
MachineIdUninitialized *bool `yaml:"machine_id_uninitialized,omitempty"`
// MountUnits creates systemd .mount units to describe the filesystem
// instead of writing to /etc/fstab
MountUnits *bool
}
type WSLConfig struct {
BootSystemd bool
}
// InheritFrom inherits unset values from the provided parent configuration and
// returns a new structure instance, which is a result of the inheritance.
func (c *ImageConfig) InheritFrom(parentConfig *ImageConfig) *ImageConfig {
@ -134,3 +143,76 @@ func (c *ImageConfig) InheritFrom(parentConfig *ImageConfig) *ImageConfig {
}
return &finalConfig
}
func (c *ImageConfig) WSLConfStageOptions() *osbuild.WSLConfStageOptions {
if c.WSLConfig == nil {
return nil
}
return &osbuild.WSLConfStageOptions{
Boot: osbuild.WSLConfBootOptions{
Systemd: c.WSLConfig.BootSystemd,
},
}
}
type Sysconfig struct {
Networking bool `yaml:"networking,omitempty"`
NoZeroConf bool `yaml:"no_zero_conf,omitempty"`
CreateDefaultNetworkScripts bool `yaml:"create_default_network_scripts,omitempty"`
}
func (c *ImageConfig) SysconfigStageOptions() []*osbuild.SysconfigStageOptions {
var opts *osbuild.SysconfigStageOptions
if c.DefaultKernel != nil {
if opts == nil {
opts = &osbuild.SysconfigStageOptions{}
}
if opts.Kernel == nil {
opts.Kernel = &osbuild.SysconfigKernelOptions{}
}
opts.Kernel.DefaultKernel = *c.DefaultKernel
}
if c.UpdateDefaultKernel != nil {
if opts == nil {
opts = &osbuild.SysconfigStageOptions{}
}
if opts.Kernel == nil {
opts.Kernel = &osbuild.SysconfigKernelOptions{}
}
opts.Kernel.UpdateDefault = *c.UpdateDefaultKernel
}
if c.Sysconfig != nil {
if c.Sysconfig.Networking {
if opts == nil {
opts = &osbuild.SysconfigStageOptions{}
}
if opts.Network == nil {
opts.Network = &osbuild.SysconfigNetworkOptions{}
}
opts.Network.Networking = c.Sysconfig.Networking
opts.Network.NoZeroConf = c.Sysconfig.NoZeroConf
if c.Sysconfig.CreateDefaultNetworkScripts {
opts.NetworkScripts = &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
}
}
}
}
if opts == nil {
return nil
}
return []*osbuild.SysconfigStageOptions{opts}
}

View file

@ -228,6 +228,9 @@ func osCustomizations(
var subscriptionStatus subscription.RHSMStatus
if options.Subscription != nil {
subscriptionStatus = subscription.RHSMConfigWithSubscription
if options.Subscription.Proxy != "" {
osc.InsightsClientConfig = &osbuild.InsightsClientConfigStageOptions{Proxy: options.Subscription.Proxy}
}
} else {
subscriptionStatus = subscription.RHSMConfigNoSubscription
}
@ -241,7 +244,7 @@ func osCustomizations(
osc.ShellInit = imageConfig.ShellInit
osc.Grub2Config = imageConfig.Grub2Config
osc.Sysconfig = imageConfig.Sysconfig
osc.Sysconfig = imageConfig.SysconfigStageOptions()
osc.SystemdLogind = imageConfig.SystemdLogind
osc.CloudInit = imageConfig.CloudInit
osc.Modprobe = imageConfig.Modprobe
@ -263,7 +266,7 @@ func osCustomizations(
osc.WAAgentConfig = imageConfig.WAAgentConfig
osc.UdevRules = imageConfig.UdevRules
osc.GCPGuestAgentConfig = imageConfig.GCPGuestAgentConfig
osc.WSLConfig = imageConfig.WSLConfig
osc.WSLConfig = imageConfig.WSLConfStageOptions()
osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...)

View file

@ -187,6 +187,21 @@ func (t *ImageType) BootMode() platform.BootMode {
return platform.BOOT_NONE
}
func (t *ImageType) BasePartitionTable() (*disk.PartitionTable, error) {
// XXX: simplify once https://github.com/osbuild/images/pull/1372
// (or something similar) went in, see pkg/distro/fedora, once
// the yaml based loading is in we can drop from ImageType
// "BasePartitionTables BasePartitionTableFunc"
if t.BasePartitionTables == nil {
return nil, nil
}
basePartitionTable, exists := t.BasePartitionTables(t)
if !exists {
return nil, nil
}
return &basePartitionTable, nil
}
func (t *ImageType) GetPartitionTable(
customizations *blueprint.Customizations,
options distro.ImageOptions,

View file

@ -203,18 +203,12 @@ func defaultEc2ImageConfig() *distro.ImageConfig {
"reboot.target",
"tuned",
},
DefaultTarget: common.ToPtr("multi-user.target"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
DefaultTarget: common.ToPtr("multi-user.target"),
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
SystemdLogind: []*osbuild.SystemdLogindStageOptions{
{

View file

@ -321,17 +321,11 @@ func defaultAzureImageConfig(rd *rhel.Distribution) *distro.ImageConfig {
Layouts: []string{"us"},
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel-core",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel-core"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
EnabledServices: []string{
"firewalld",

View file

@ -8,8 +8,8 @@ import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/customizations/oscap"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/defs"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
@ -50,24 +50,7 @@ func distroISOLabelFunc(t *rhel.ImageType) string {
}
func defaultDistroImageConfig(d *rhel.Distribution) *distro.ImageConfig {
return &distro.ImageConfig{
Timezone: common.ToPtr("UTC"),
Locale: common.ToPtr("C.UTF-8"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
},
DefaultOSCAPDatastream: common.ToPtr(oscap.DefaultRHEL10Datastream(d.IsRHEL())),
InstallWeakDeps: common.ToPtr(true),
}
return common.Must(defs.DistroImageConfig(d.Name()))
}
func newDistro(name string, major, minor int) *rhel.Distribution {

View file

@ -108,14 +108,14 @@ func baseGCEImageConfig() *distro.ImageConfig {
PermitRootLogin: osbuild.PermitRootLoginValueNo,
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
DefaultKernel: "kernel-core",
UpdateDefault: true,
},
},
},
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel-core"),
// XXX: ensure the "old" behavior is preserved (that is
// likely a bug) where for GCE the sysconfig network
// options are not set because the merge of imageConfig
// is shallow and the previous setup was changing the
// kernel without also changing the network options.
Sysconfig: &distro.Sysconfig{},
Modprobe: []*osbuild.ModprobeStageOptions{
{
Filename: "blacklist-floppy.conf",

View file

@ -37,10 +37,8 @@ func mkWSLImgType() *rhel.ImageType {
},
},
NoSElinux: common.ToPtr(true),
WSLConfig: &osbuild.WSLConfStageOptions{
Boot: osbuild.WSLConfBootOptions{
Systemd: true,
},
WSLConfig: &distro.WSLConfig{
BootSystemd: true,
},
}

View file

@ -108,31 +108,13 @@ func ec2ImageConfig() *distro.ImageConfig {
"sshd",
"rsyslog",
},
DefaultTarget: common.ToPtr("multi-user.target"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
NetworkScripts: &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
},
},
DefaultTarget: common.ToPtr("multi-user.target"),
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
CreateDefaultNetworkScripts: true,
},
SystemdLogind: []*osbuild.SystemdLogindStageOptions{
{

View file

@ -49,17 +49,12 @@ var azureDefaultImgConfig = &distro.ImageConfig{
},
SELinuxForceRelabel: common.ToPtr(true),
Authconfig: &osbuild.AuthconfigStageOptions{},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel-core",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel-core"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
EnabledServices: []string{
"cloud-config",

View file

@ -6,35 +6,14 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/defs"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
// RHEL-based OS image configuration defaults
func defaultDistroImageConfig(d *rhel.Distribution) *distro.ImageConfig {
return &distro.ImageConfig{
Timezone: common.ToPtr("America/New_York"),
Locale: common.ToPtr("en_US.UTF-8"),
GPGKeyFiles: []string{
"/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release",
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
},
KernelOptionsBootloader: common.ToPtr(true),
NoBLS: common.ToPtr(true), // RHEL 7 grub does not support BLS
InstallWeakDeps: common.ToPtr(true),
}
return common.Must(defs.DistroImageConfig(d.Name()))
}
func newDistro(name string, minor int) *rhel.Distribution {

View file

@ -38,30 +38,12 @@ func mkQcow2ImgType() *rhel.ImageType {
var qcow2DefaultImgConfig = &distro.ImageConfig{
DefaultTarget: common.ToPtr("multi-user.target"),
SELinuxForceRelabel: common.ToPtr(true),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
NetworkScripts: &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
},
},
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
CreateDefaultNetworkScripts: true,
},
RHSMConfig: map[subscription.RHSMStatus]*subscription.RHSMConfig{
subscription.RHSMConfigNoSubscription: {

View file

@ -197,31 +197,13 @@ func baseEc2ImageConfig() *distro.ImageConfig {
"cloud-final",
"reboot.target",
},
DefaultTarget: common.ToPtr("multi-user.target"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
NetworkScripts: &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
},
},
DefaultTarget: common.ToPtr("multi-user.target"),
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
CreateDefaultNetworkScripts: true,
},
SystemdLogind: []*osbuild.SystemdLogindStageOptions{
{

View file

@ -370,17 +370,11 @@ var defaultAzureImageConfig = &distro.ImageConfig{
Layouts: []string{"us"},
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel-core",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
DefaultKernel: common.ToPtr("kernel-core"),
UpdateDefaultKernel: common.ToPtr(true),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
EnabledServices: []string{
"nm-cloud-setup.service",

View file

@ -8,8 +8,8 @@ import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/customizations/oscap"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/defs"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
@ -37,25 +37,7 @@ var (
// RHEL-based OS image configuration defaults
func defaultDistroImageConfig(d *rhel.Distribution) *distro.ImageConfig {
return &distro.ImageConfig{
Timezone: common.ToPtr("America/New_York"),
Locale: common.ToPtr("en_US.UTF-8"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
},
KernelOptionsBootloader: common.ToPtr(true),
DefaultOSCAPDatastream: common.ToPtr(oscap.DefaultRHEL8Datastream(d.IsRHEL())),
InstallWeakDeps: common.ToPtr(true),
}
return common.Must(defs.DistroImageConfig(d.Name()))
}
func distroISOLabelFunc(t *rhel.ImageType) string {

View file

@ -129,14 +129,14 @@ func defaultGceByosImageConfig(rd distro.Distro) *distro.ImageConfig {
PermitRootLogin: osbuild.PermitRootLoginValueNo,
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
DefaultKernel: "kernel-core",
UpdateDefault: true,
},
},
},
DefaultKernel: common.ToPtr("kernel-core"),
UpdateDefaultKernel: common.ToPtr(true),
// XXX: ensure the "old" behavior is preserved (that is
// likely a bug) where for GCE the sysconfig network
// options are not set because the merge of imageConfig
// is shallow and the previous setup was changing the
// kernel without also changing the network options.
Sysconfig: &distro.Sysconfig{},
Modprobe: []*osbuild.ModprobeStageOptions{
{
Filename: "blacklist-floppy.conf",

View file

@ -4,7 +4,6 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
)
func mkWslImgType() *rhel.ImageType {
@ -24,10 +23,8 @@ func mkWslImgType() *rhel.ImageType {
it.DefaultImageConfig = &distro.ImageConfig{
Locale: common.ToPtr("en_US.UTF-8"),
NoSElinux: common.ToPtr(true),
WSLConfig: &osbuild.WSLConfStageOptions{
Boot: osbuild.WSLConfBootOptions{
Systemd: true,
},
WSLConfig: &distro.WSLConfig{
BootSystemd: true,
},
}

View file

@ -50,31 +50,14 @@ func defaultEc2ImageConfig() *distro.ImageConfig {
"reboot.target",
"tuned",
},
DefaultTarget: common.ToPtr("multi-user.target"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
NetworkScripts: &osbuild.NetworkScriptsOptions{
IfcfgFiles: map[string]osbuild.IfcfgFile{
"eth0": {
Device: "eth0",
Bootproto: osbuild.IfcfgBootprotoDHCP,
OnBoot: common.ToPtr(true),
Type: osbuild.IfcfgTypeEthernet,
UserCtl: common.ToPtr(true),
PeerDNS: common.ToPtr(true),
IPv6Init: common.ToPtr(false),
},
},
},
},
DefaultTarget: common.ToPtr("multi-user.target"),
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
CreateDefaultNetworkScripts: true,
},
SystemdLogind: []*osbuild.SystemdLogindStageOptions{
{

View file

@ -334,17 +334,11 @@ func defaultAzureImageConfig(rd *rhel.Distribution) *distro.ImageConfig {
Layouts: []string{"us"},
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel-core",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel-core"),
Sysconfig: &distro.Sysconfig{
Networking: true,
NoZeroConf: true,
},
EnabledServices: []string{
"firewalld",

View file

@ -8,8 +8,8 @@ import (
"github.com/osbuild/images/pkg/arch"
"github.com/osbuild/images/pkg/customizations/oscap"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distro/defs"
"github.com/osbuild/images/pkg/distro/rhel"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/platform"
)
@ -53,24 +53,7 @@ func distroISOLabelFunc(t *rhel.ImageType) string {
}
func defaultDistroImageConfig(d *rhel.Distribution) *distro.ImageConfig {
return &distro.ImageConfig{
Timezone: common.ToPtr("America/New_York"),
Locale: common.ToPtr("C.UTF-8"),
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
UpdateDefault: true,
DefaultKernel: "kernel",
},
Network: &osbuild.SysconfigNetworkOptions{
Networking: true,
NoZeroConf: true,
},
},
},
DefaultOSCAPDatastream: common.ToPtr(oscap.DefaultRHEL9Datastream(d.IsRHEL())),
InstallWeakDeps: common.ToPtr(true),
}
return common.Must(defs.DistroImageConfig(d.Name()))
}
func newDistro(name string, major, minor int) *rhel.Distribution {

View file

@ -105,14 +105,14 @@ func baseGCEImageConfig() *distro.ImageConfig {
PermitRootLogin: osbuild.PermitRootLoginValueNo,
},
},
Sysconfig: []*osbuild.SysconfigStageOptions{
{
Kernel: &osbuild.SysconfigKernelOptions{
DefaultKernel: "kernel-core",
UpdateDefault: true,
},
},
},
UpdateDefaultKernel: common.ToPtr(true),
DefaultKernel: common.ToPtr("kernel-core"),
// XXX: ensure the "old" behavior is preserved (that is
// likely a bug) where for GCE the sysconfig network
// options are not set because the merge of imageConfig
// is shallow and the previous setup was changing the
// kernel without also changing the network options.
Sysconfig: &distro.Sysconfig{},
Modprobe: []*osbuild.ModprobeStageOptions{
{
Filename: "blacklist-floppy.conf",

View file

@ -38,10 +38,8 @@ func mkWSLImgType() *rhel.ImageType {
},
Locale: common.ToPtr("en_US.UTF-8"),
NoSElinux: common.ToPtr(true),
WSLConfig: &osbuild.WSLConfStageOptions{
Boot: osbuild.WSLConfBootOptions{
Systemd: true,
},
WSLConfig: &distro.WSLConfig{
BootSystemd: true,
},
}

View file

@ -212,6 +212,10 @@ func (t *TestImageType) PartitionType() disk.PartitionTableType {
return disk.PT_NONE
}
func (t *TestImageType) BasePartitionTable() (*disk.PartitionTable, error) {
return nil, nil
}
func (t *TestImageType) BootMode() platform.BootMode {
return platform.BOOT_HYBRID
}

View file

@ -99,35 +99,36 @@ type OSCustomizations struct {
ShellInit []shell.InitFile
// TODO: drop osbuild types from the API
Firewall *osbuild.FirewallStageOptions
Grub2Config *osbuild.GRUB2Config
Sysconfig []*osbuild.SysconfigStageOptions
SystemdLogind []*osbuild.SystemdLogindStageOptions
CloudInit []*osbuild.CloudInitStageOptions
Modprobe []*osbuild.ModprobeStageOptions
DracutConf []*osbuild.DracutConfStageOptions
SystemdUnit []*osbuild.SystemdUnitStageOptions
Authselect *osbuild.AuthselectStageOptions
SELinuxConfig *osbuild.SELinuxConfigStageOptions
Tuned *osbuild.TunedStageOptions
Tmpfilesd []*osbuild.TmpfilesdStageOptions
PamLimitsConf []*osbuild.PamLimitsConfStageOptions
Sysctld []*osbuild.SysctldStageOptions
DNFConfig []*osbuild.DNFConfigStageOptions
DNFAutomaticConfig *osbuild.DNFAutomaticConfigStageOptions
YUMConfig *osbuild.YumConfigStageOptions
YUMRepos []*osbuild.YumReposStageOptions
SshdConfig *osbuild.SshdConfigStageOptions
GCPGuestAgentConfig *osbuild.GcpGuestAgentConfigOptions
AuthConfig *osbuild.AuthconfigStageOptions
PwQuality *osbuild.PwqualityConfStageOptions
NTPServers []osbuild.ChronyConfigServer
WAAgentConfig *osbuild.WAAgentConfStageOptions
UdevRules *osbuild.UdevRulesStageOptions
WSLConfig *osbuild.WSLConfStageOptions
LeapSecTZ *string
Presets []osbuild.Preset
ContainersStorage *string
Firewall *osbuild.FirewallStageOptions
Grub2Config *osbuild.GRUB2Config
Sysconfig []*osbuild.SysconfigStageOptions
SystemdLogind []*osbuild.SystemdLogindStageOptions
CloudInit []*osbuild.CloudInitStageOptions
Modprobe []*osbuild.ModprobeStageOptions
DracutConf []*osbuild.DracutConfStageOptions
SystemdUnit []*osbuild.SystemdUnitStageOptions
Authselect *osbuild.AuthselectStageOptions
SELinuxConfig *osbuild.SELinuxConfigStageOptions
Tuned *osbuild.TunedStageOptions
Tmpfilesd []*osbuild.TmpfilesdStageOptions
PamLimitsConf []*osbuild.PamLimitsConfStageOptions
Sysctld []*osbuild.SysctldStageOptions
DNFConfig []*osbuild.DNFConfigStageOptions
DNFAutomaticConfig *osbuild.DNFAutomaticConfigStageOptions
YUMConfig *osbuild.YumConfigStageOptions
YUMRepos []*osbuild.YumReposStageOptions
SshdConfig *osbuild.SshdConfigStageOptions
GCPGuestAgentConfig *osbuild.GcpGuestAgentConfigOptions
AuthConfig *osbuild.AuthconfigStageOptions
PwQuality *osbuild.PwqualityConfStageOptions
NTPServers []osbuild.ChronyConfigServer
WAAgentConfig *osbuild.WAAgentConfStageOptions
UdevRules *osbuild.UdevRulesStageOptions
WSLConfig *osbuild.WSLConfStageOptions
InsightsClientConfig *osbuild.InsightsClientConfigStageOptions
LeapSecTZ *string
Presets []osbuild.Preset
ContainersStorage *string
// OpenSCAP config
OpenSCAPRemediationConfig *oscap.RemediationConfig
@ -634,7 +635,11 @@ func (p *OS) serialize() osbuild.Pipeline {
}
if p.SshdConfig != nil {
pipeline.AddStage((osbuild.NewSshdConfigStage(p.SshdConfig)))
pipeline.AddStage(osbuild.NewSshdConfigStage(p.SshdConfig))
}
if p.InsightsClientConfig != nil {
pipeline.AddStage(osbuild.NewInsightsClientConfigStage(p.InsightsClientConfig))
}
if p.AuthConfig != nil {

View file

@ -0,0 +1,15 @@
package osbuild
// InsightsClientConfigStageOptions holds the options for the
// org.osbuild.insights-client.config stage, which configures the
// Red Hat Insights client inside the image.
type InsightsClientConfigStageOptions struct {
	// Proxy is written only when non-empty (omitempty).
	// NOTE(review): presumably a proxy URL for insights-client
	// network access — confirm against the osbuild stage schema.
	Proxy string `json:"proxy,omitempty"`
	// Path is written only when non-empty (omitempty).
	// NOTE(review): semantics not evident from this file (likely a
	// config-file path) — confirm against the osbuild stage schema.
	Path string `json:"path,omitempty"`
}

// isStageOptions marks this type as valid osbuild stage options
// (marker-interface implementation; intentionally empty).
func (InsightsClientConfigStageOptions) isStageOptions() {}

// NewInsightsClientConfigStage returns an org.osbuild.insights-client.config
// stage carrying the given options.
func NewInsightsClientConfigStage(options *InsightsClientConfigStageOptions) *Stage {
	return &Stage{
		Type:    "org.osbuild.insights-client.config",
		Options: options,
	}
}

View file

@ -1,11 +1,11 @@
package osbuild
type SysconfigStageOptions struct {
Kernel *SysconfigKernelOptions `json:"kernel,omitempty"`
Network *SysconfigNetworkOptions `json:"network,omitempty"`
NetworkScripts *NetworkScriptsOptions `json:"network-scripts,omitempty"`
Desktop *SysconfigDesktopOptions `json:"desktop,omitempty"`
LiveSys *SysconfigLivesysOptions `json:"livesys,omitempty"`
Kernel *SysconfigKernelOptions `json:"kernel,omitempty" yaml:"kernel,omitempty"`
Network *SysconfigNetworkOptions `json:"network,omitempty" yaml:"network,omitempty"`
NetworkScripts *NetworkScriptsOptions `json:"network-scripts,omitempty" yaml:"network-scripts,omitempty"`
Desktop *SysconfigDesktopOptions `json:"desktop,omitempty" yaml:"desktop,omitempty"`
	LiveSys *SysconfigLivesysOptions `json:"livesys,omitempty" yaml:"livesys,omitempty"`
}
func (SysconfigStageOptions) isStageOptions() {}
@ -19,12 +19,14 @@ func NewSysconfigStage(options *SysconfigStageOptions) *Stage {
type SysconfigNetworkOptions struct {
Networking bool `json:"networking,omitempty"`
NoZeroConf bool `json:"no_zero_conf,omitempty"`
	// XXX: ideally this would be "no_zeroconf" (because zeroconf
	// is the program name) but we need to keep it for compatibility
NoZeroConf bool `json:"no_zero_conf,omitempty" yaml:"no_zero_conf,omitempty"`
}
type SysconfigKernelOptions struct {
UpdateDefault bool `json:"update_default,omitempty"`
DefaultKernel string `json:"default_kernel,omitempty"`
UpdateDefault bool `json:"update_default,omitempty" yaml:"update_default,omitempty"`
DefaultKernel string `json:"default_kernel,omitempty" yaml:"default_kernel,omitempty"`
}
type SysconfigDesktopOptions struct {

View file

@ -244,6 +244,7 @@ func GenSystemdMountStages(pt *disk.PartitionTable) ([]*Stage, error) {
}
options := &SystemdUnitCreateStageOptions{
UnitPath: EtcUnitPath, // create all mount units in /etc/systemd/
Config: SystemdUnit{
Unit: &UnitSection{
// Adds the following dependencies for mount units (systemd.mount(5)):

8
vendor/modules.txt vendored
View file

@ -173,7 +173,7 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version
github.com/AzureAD/microsoft-authentication-library-for-go/apps/public
# github.com/BurntSushi/toml v1.4.0
# github.com/BurntSushi/toml v1.5.1-0.20250403130103-3d3abc24416a
## explicit; go 1.18
github.com/BurntSushi/toml
github.com/BurntSushi/toml/internal
@ -1045,7 +1045,11 @@ github.com/oracle/oci-go-sdk/v54/identity
github.com/oracle/oci-go-sdk/v54/objectstorage
github.com/oracle/oci-go-sdk/v54/objectstorage/transfer
github.com/oracle/oci-go-sdk/v54/workrequests
# github.com/osbuild/images v0.128.0
# github.com/osbuild/blueprint v1.6.0
## explicit; go 1.22.8
github.com/osbuild/blueprint/internal/common
github.com/osbuild/blueprint/pkg/blueprint
# github.com/osbuild/images v0.131.0
## explicit; go 1.22.8
github.com/osbuild/images/data/dependencies
github.com/osbuild/images/data/repositories