vendor: Update osbuild/images to commit dd48a38be218

This is needed for the test_distro.NewTestDistro change.
Brian C. Lane 2023-09-15 08:18:06 -07:00 committed by Achilleas Koutsou
parent eab16830aa
commit 1b65f15449
345 changed files with 276130 additions and 14546 deletions

@ -1,325 +0,0 @@
package dnfjson
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"sort"
"sync"
"time"
"github.com/osbuild/images/pkg/rpmmd"
"github.com/gobwas/glob"
)
// CleanupOldCacheDirs will remove cache directories for unsupported distros,
// e.g. once support for a Fedora release stops and it is removed, this will
// delete its directory under root.
//
// A happy side effect of this is that it will delete old cache directories
// and files from before the switch to per-distro cache directories.
//
// NOTE: This does not return any errors. This is because the most common one
// will be a nonexistent directory which will be created later, during initial
// cache creation. Any other errors like permission issues will be caught by
// later use of the cache, e.g. touchRepo.
func CleanupOldCacheDirs(root string, distros []string) {
dirs, _ := os.ReadDir(root)
for _, e := range dirs {
if strSliceContains(distros, e.Name()) {
// known distro
continue
}
if e.IsDir() {
// Remove the directory and everything under it
_ = os.RemoveAll(filepath.Join(root, e.Name()))
} else {
_ = os.Remove(filepath.Join(root, e.Name()))
}
}
}
// strSliceContains returns true if the elem string is in the slc slice
func strSliceContains(slc []string, elem string) bool {
for _, s := range slc {
if elem == s {
return true
}
}
return false
}
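Since Go 1.21 the standard library offers the same membership check, so a helper like this reduces to slices.Contains. A standalone sketch for comparison, not part of this commit:

package main

import (
	"fmt"
	"slices"
)

// strSliceContains re-expressed with the Go 1.21+ standard library;
// behaviour-equivalent to the helper above.
func strSliceContains(slc []string, elem string) bool {
	return slices.Contains(slc, elem)
}

func main() {
	fmt.Println(strSliceContains([]string{"fedora-38", "fedora-39"}, "fedora-38")) // true
}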
// global cache locker
var cacheLocks sync.Map
// A collection of directory paths, their total size, and their most recent
// modification time.
type pathInfo struct {
paths []string
size uint64
mtime time.Time
}
type rpmCache struct {
// root path for the cache
root string
// individual repository cache data
repoElements map[string]pathInfo
// list of known repository IDs, sorted by mtime
repoRecency []string
// total cache size
size uint64
// max cache size
maxSize uint64
// locker for this cache directory
locker *sync.RWMutex
}
func newRPMCache(path string, maxSize uint64) *rpmCache {
absPath, err := filepath.Abs(path) // convert to abs if it's not already
if err != nil {
panic(err) // can only happen if the CWD does not exist and the path isn't already absolute
}
path = absPath
locker := new(sync.RWMutex)
if l, loaded := cacheLocks.LoadOrStore(path, locker); loaded {
// value existed and was loaded
locker = l.(*sync.RWMutex)
}
r := &rpmCache{
root: path,
repoElements: make(map[string]pathInfo),
size: 0,
maxSize: maxSize,
locker: locker,
}
// collect existing cache paths and timestamps
r.updateInfo()
return r
}
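The LoadOrStore call above is what makes the per-directory locking safe: every rpmCache created for the same absolute path ends up sharing one *sync.RWMutex, whichever caller stored it first. A standalone sketch of the idiom (names hypothetical):

package main

import (
	"fmt"
	"sync"
)

var locks sync.Map // absolute path -> *sync.RWMutex

// lockFor returns the single lock shared by all callers for a path:
// LoadOrStore either publishes our new lock or hands back the winner's.
func lockFor(path string) *sync.RWMutex {
	l := new(sync.RWMutex)
	if existing, loaded := locks.LoadOrStore(path, l); loaded {
		return existing.(*sync.RWMutex)
	}
	return l
}

func main() {
	a := lockFor("/var/cache/osbuild/rpmmd")
	b := lockFor("/var/cache/osbuild/rpmmd")
	fmt.Println(a == b) // true: one lock per cache directory
}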
// updateInfo updates the repoPaths and repoRecency fields of the rpmCache.
//
// NOTE: This does not return any errors. This is because the most common one
// will be a nonexistent directory which will be created later, during initial
// cache creation. Any other errors like permission issues will be caught by
// later use of the cache, e.g. touchRepo.
func (r *rpmCache) updateInfo() {
dirs, _ := os.ReadDir(r.root)
for _, d := range dirs {
r.updateCacheDirInfo(filepath.Join(r.root, d.Name()))
}
}
func (r *rpmCache) updateCacheDirInfo(path string) {
// See updateInfo NOTE on error handling
cacheEntries, _ := os.ReadDir(path)
// each repository has multiple cache entries (3 on average), so using the
// number of cacheEntries to allocate the map and ID slice is a high upper
// bound, but guarantees we won't need to grow and reallocate either.
repos := make(map[string]pathInfo, len(cacheEntries))
repoIDs := make([]string, 0, len(cacheEntries))
var totalSize uint64
// Collect the paths grouped by their repo ID
// We assume the first 64 characters of a file or directory name are the
// repository ID because we use a sha256 sum of the repository config to
// create the ID (64 hex chars)
for _, entry := range cacheEntries {
eInfo, err := entry.Info()
if err != nil {
// skip it
continue
}
fname := entry.Name()
if len(fname) < 64 {
// unknown file in cache; ignore
continue
}
repoID := fname[:64]
repo, ok := repos[repoID]
if !ok {
// new repo ID
repoIDs = append(repoIDs, repoID)
}
mtime := eInfo.ModTime()
ePath := filepath.Join(path, entry.Name())
// calculate and add entry size
size, err := dirSize(ePath)
if err != nil {
// skip it
continue
}
repo.size += size
totalSize += size
// add path
repo.paths = append(repo.paths, ePath)
// if for some reason the mtimes of the various entries of a single
// repository are out of sync, use the most recent one
if repo.mtime.Before(mtime) {
repo.mtime = mtime
}
// update the collection
repos[repoID] = repo
}
sortFunc := func(idx, jdx int) bool {
ir := repos[repoIDs[idx]]
jr := repos[repoIDs[jdx]]
return ir.mtime.Before(jr.mtime)
}
// sort IDs by mtime (oldest first)
sort.Slice(repoIDs, sortFunc)
r.size = totalSize
r.repoElements = repos
r.repoRecency = repoIDs
}
func (r *rpmCache) shrink() error {
r.locker.Lock()
defer r.locker.Unlock()
// start deleting until we drop below r.maxSize
nDeleted := 0
for idx := 0; idx < len(r.repoRecency) && r.size >= r.maxSize; idx++ {
repoID := r.repoRecency[idx]
nDeleted++
repo, ok := r.repoElements[repoID]
if !ok {
// cache inconsistency?
// ignore and let the ID be removed from the recency list
continue
}
for _, gPath := range repo.paths {
if err := os.RemoveAll(gPath); err != nil {
return err
}
}
r.size -= repo.size
delete(r.repoElements, repoID)
}
// update recency list
r.repoRecency = r.repoRecency[nDeleted:]
return nil
}
// Update file atime and mtime on the filesystem to time t for all files in the
// root of the cache that match the repo ID. This should be called whenever a
// repository is used.
// This function does not update the internal cache info. A call to
// updateInfo() should be made after touching one or more repositories.
func (r *rpmCache) touchRepo(repoID string, t time.Time) error {
repoGlob, err := glob.Compile(fmt.Sprintf("%s*", repoID))
if err != nil {
return err
}
distroDirs, err := os.ReadDir(r.root)
if err != nil {
return err
}
for _, d := range distroDirs {
// we only touch the top-level directories and files of the cache
cacheEntries, err := os.ReadDir(filepath.Join(r.root, d.Name()))
if err != nil {
return err
}
for _, cacheEntry := range cacheEntries {
if repoGlob.Match(cacheEntry.Name()) {
path := filepath.Join(r.root, d.Name(), cacheEntry.Name())
if err := os.Chtimes(path, t, t); err != nil {
return err
}
}
}
}
return nil
}
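touchRepo uses gobwas/glob only for a prefix match: the compiled pattern is the 64-character repo ID followed by '*'. A minimal standalone demonstration (IDs shortened for readability):

package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// The real pattern is fmt.Sprintf("%s*", repoID) with a 64-char sha256 ID.
	g, err := glob.Compile("ab12cd*")
	if err != nil {
		panic(err)
	}
	fmt.Println(g.Match("ab12cd-primary.xml.gz")) // true: shares the ID prefix
	fmt.Println(g.Match("ff99ee-primary.xml.gz")) // false
}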
func dirSize(path string) (uint64, error) {
var size uint64
sizer := func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
size += uint64(info.Size())
return nil
}
err := filepath.Walk(path, sizer)
return size, err
}
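On Go 1.16+ the same traversal can be written with filepath.WalkDir, which defers the os.FileInfo lookup until it is needed. A behaviour-equivalent sketch of dirSize, shown for comparison only:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

// dirSize re-expressed with filepath.WalkDir; like the original it sums
// the sizes of every entry it visits, directories included.
func dirSize(path string) (uint64, error) {
	var size uint64
	err := filepath.WalkDir(path, func(_ string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		size += uint64(info.Size())
		return nil
	})
	return size, err
}

func main() {
	n, err := dirSize(".")
	fmt.Println(n, err)
}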
// dnfResults holds the results of a dnfjson request
// expire is the time the request was made, used to expire the entry
type dnfResults struct {
expire time.Time
pkgs rpmmd.PackageList
}
// dnfCache is a cache of results from dnf-json requests
type dnfCache struct {
results map[string]dnfResults
timeout time.Duration
*sync.RWMutex
}
// NewDNFCache returns a pointer to an initialized dnfCache struct
func NewDNFCache(timeout time.Duration) *dnfCache {
return &dnfCache{
results: make(map[string]dnfResults),
timeout: timeout,
RWMutex: new(sync.RWMutex),
}
}
// CleanCache deletes unused cache entries
// This prevents the cache from growing for longer than the timeout interval
func (d *dnfCache) CleanCache() {
d.Lock()
defer d.Unlock()
// Delete expired resultCache entries
for k := range d.results {
if time.Since(d.results[k].expire) > d.timeout {
delete(d.results, k)
}
}
}
// Get returns the package list and true if cached
// or an empty list and false if not cached or if cache is timed out
func (d *dnfCache) Get(hash string) (rpmmd.PackageList, bool) {
d.RLock()
defer d.RUnlock()
result, ok := d.results[hash]
if !ok || time.Since(result.expire) >= d.timeout {
return rpmmd.PackageList{}, false
}
return result.pkgs, true
}
// Store saves the package list in the cache
func (d *dnfCache) Store(hash string, pkgs rpmmd.PackageList) {
d.Lock()
defer d.Unlock()
d.results[hash] = dnfResults{expire: time.Now(), pkgs: pkgs}
}
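Note that dnfResults.expire actually records when the entry was stored; both Get and CleanCache treat an entry as expired once time.Since(expire) reaches the timeout. A standalone sketch of that TTL behaviour:

package main

import (
	"fmt"
	"time"
)

// entry mirrors dnfResults: the timestamp records creation time and the
// entry counts as expired once it is older than the timeout.
type entry struct {
	created time.Time
	pkgs    []string
}

func main() {
	timeout := 50 * time.Millisecond
	cache := map[string]entry{
		"reqhash": {created: time.Now(), pkgs: []string{"chrony"}},
	}

	get := func(k string) ([]string, bool) {
		e, ok := cache[k]
		if !ok || time.Since(e.created) >= timeout {
			return nil, false
		}
		return e.pkgs, true
	}

	_, fresh := get("reqhash")
	time.Sleep(timeout)
	_, stale := get("reqhash")
	fmt.Println(fresh, stale) // true false
}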

@ -1,655 +0,0 @@
// Package dnfjson is an interface to the dnf-json Python script that is
// packaged with the osbuild-composer project. The core component of this
// package is the Solver type. The Solver can be configured with
// distribution-specific values (platform ID, architecture, and version
// information) and provides methods for dependency resolution (Depsolve) and
// retrieving a full list of repository package metadata (FetchMetadata).
//
// Alternatively, a BaseSolver can be created which represents an un-configured
// Solver. This type can't be used for depsolving, but can be used to create
// configured Solver instances sharing the same cache directory.
//
// This package relies on the types defined in rpmmd to describe RPM package
// metadata.
package dnfjson
import (
"bytes"
"crypto/sha256"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"time"
"github.com/osbuild/images/pkg/rhsm"
"github.com/osbuild/images/pkg/rpmmd"
)
// BaseSolver defines the basic solver configuration without platform
// information. It can be used to create configured Solver instances with the
// NewWithConfig() method. A BaseSolver maintains the global repository cache
// directory.
type BaseSolver struct {
// Cache information
cache *rpmCache
// Path to the dnf-json binary and optional args (default: "/usr/libexec/osbuild-composer/dnf-json")
dnfJsonCmd []string
resultCache *dnfCache
}
// Create a new unconfigured BaseSolver (without platform information). It can
// be used to create configured Solver instances with the NewWithConfig()
// method.
func NewBaseSolver(cacheDir string) *BaseSolver {
return &BaseSolver{
cache: newRPMCache(cacheDir, 1024*1024*1024), // 1 GiB
dnfJsonCmd: []string{"/usr/libexec/osbuild-composer/dnf-json"},
resultCache: NewDNFCache(60 * time.Second),
}
}
// SetMaxCacheSize sets the maximum size for the global repository metadata
// cache. This is the maximum size of the cache after a CleanCache()
// call. Cache cleanup is never performed automatically.
func (s *BaseSolver) SetMaxCacheSize(size uint64) {
s.cache.maxSize = size
}
// SetDNFJSONPath sets the path to the dnf-json binary and optionally any command line arguments.
func (s *BaseSolver) SetDNFJSONPath(cmd string, args ...string) {
s.dnfJsonCmd = append([]string{cmd}, args...)
}
// NewWithConfig initialises a Solver with the platform information and the
// BaseSolver's cache directory and dnf-json path. It also loads system
// subscription information.
func (bs *BaseSolver) NewWithConfig(modulePlatformID, releaseVer, arch, distro string) *Solver {
s := new(Solver)
s.BaseSolver = *bs
s.modulePlatformID = modulePlatformID
s.arch = arch
s.releaseVer = releaseVer
s.distro = distro
subs, _ := rhsm.LoadSystemSubscriptions()
s.subscriptions = subs
return s
}
// CleanCache deletes the least recently used repository metadata caches until
// the total size of the cache falls below the configured maximum size (see
// SetMaxCacheSize()).
func (bs *BaseSolver) CleanCache() error {
bs.resultCache.CleanCache()
return bs.cache.shrink()
}
// Solver is configured with system information in order to resolve
// dependencies for RPM packages using DNF.
type Solver struct {
BaseSolver
// Platform ID, e.g., "platform:el8"
modulePlatformID string
// System architecture
arch string
// Release version of the distro. This is used in repo files on the host
// system and required for subscription support.
releaseVer string
// Full distribution string, eg. fedora-38, used to create separate dnf cache directories
// for each distribution.
distro string
subscriptions *rhsm.Subscriptions
}
// Create a new Solver with the given configuration. Initialising a Solver also loads system subscription information.
func NewSolver(modulePlatformID, releaseVer, arch, distro, cacheDir string) *Solver {
s := NewBaseSolver(cacheDir)
return s.NewWithConfig(modulePlatformID, releaseVer, arch, distro)
}
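A hypothetical end-to-end use of the API the package comment describes, assuming repository configs are already loaded; all argument values are illustrative, and since dnfjson lives under internal/ the caller must sit inside the osbuild/images module:

package example

import (
	"github.com/osbuild/images/internal/dnfjson"
	"github.com/osbuild/images/pkg/rpmmd"
)

// depsolveExample sketches a typical caller; it is not part of this commit.
func depsolveExample(repos []rpmmd.RepoConfig) ([]rpmmd.PackageSpec, error) {
	solver := dnfjson.NewSolver(
		"platform:f38",             // module platform ID (illustrative)
		"38",                       // release version
		"x86_64",                   // architecture
		"fedora-38",                // distro name, also the cache subdirectory
		"/var/cache/osbuild/rpmmd", // cache root
	)
	return solver.Depsolve([]rpmmd.PackageSet{{
		Include:      []string{"@core", "chrony"},
		Exclude:      []string{"dracut-config-rescue"},
		Repositories: repos,
	}})
}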
// GetCacheDir returns a distro-specific RPM cache directory.
// It ensures that the distro name is below the root cache directory, and if there is
// a problem it returns the root cache instead of an error.
func (s *Solver) GetCacheDir() string {
b := filepath.Base(s.distro)
if b == "." || b == "/" {
return s.cache.root
}
return filepath.Join(s.cache.root, b)
}
// Depsolve the list of required package sets with explicit excludes using
// their associated repositories. Each package set is depsolved as a separate
// transaction in a chain. It returns a list of all packages (with solved
// dependencies) that will be installed into the system.
func (s *Solver) Depsolve(pkgSets []rpmmd.PackageSet) ([]rpmmd.PackageSpec, error) {
req, repoMap, err := s.makeDepsolveRequest(pkgSets)
if err != nil {
return nil, err
}
// get non-exclusive read lock
s.cache.locker.RLock()
defer s.cache.locker.RUnlock()
output, err := run(s.dnfJsonCmd, req)
if err != nil {
return nil, err
}
// touch repos to now
now := time.Now().Local()
for _, r := range repoMap {
// ignore errors
_ = s.cache.touchRepo(r.Hash(), now)
}
s.cache.updateInfo()
var result packageSpecs
if err := json.Unmarshal(output, &result); err != nil {
return nil, err
}
return result.toRPMMD(repoMap), nil
}
// FetchMetadata returns the list of all the available packages in repos and
// their info.
func (s *Solver) FetchMetadata(repos []rpmmd.RepoConfig) (rpmmd.PackageList, error) {
req, err := s.makeDumpRequest(repos)
if err != nil {
return nil, err
}
// get non-exclusive read lock
s.cache.locker.RLock()
defer s.cache.locker.RUnlock()
// Is this cached?
if pkgs, ok := s.resultCache.Get(req.Hash()); ok {
return pkgs, nil
}
result, err := run(s.dnfJsonCmd, req)
if err != nil {
return nil, err
}
// touch repos to now
now := time.Now().Local()
for _, r := range repos {
// ignore errors
_ = s.cache.touchRepo(r.Hash(), now)
}
s.cache.updateInfo()
var pkgs rpmmd.PackageList
if err := json.Unmarshal(result, &pkgs); err != nil {
return nil, err
}
sortID := func(pkg rpmmd.Package) string {
return fmt.Sprintf("%s-%s-%s", pkg.Name, pkg.Version, pkg.Release)
}
sort.Slice(pkgs, func(i, j int) bool {
return sortID(pkgs[i]) < sortID(pkgs[j])
})
// Cache the results
s.resultCache.Store(req.Hash(), pkgs)
return pkgs, nil
}
// SearchMetadata searches for packages and returns a list of the info for matches.
func (s *Solver) SearchMetadata(repos []rpmmd.RepoConfig, packages []string) (rpmmd.PackageList, error) {
req, err := s.makeSearchRequest(repos, packages)
if err != nil {
return nil, err
}
// get non-exclusive read lock
s.cache.locker.RLock()
defer s.cache.locker.RUnlock()
// Is this cached?
if pkgs, ok := s.resultCache.Get(req.Hash()); ok {
return pkgs, nil
}
result, err := run(s.dnfJsonCmd, req)
if err != nil {
return nil, err
}
// touch repos to now
now := time.Now().Local()
for _, r := range repos {
// ignore errors
_ = s.cache.touchRepo(r.Hash(), now)
}
s.cache.updateInfo()
var pkgs rpmmd.PackageList
if err := json.Unmarshal(result, &pkgs); err != nil {
return nil, err
}
sortID := func(pkg rpmmd.Package) string {
return fmt.Sprintf("%s-%s-%s", pkg.Name, pkg.Version, pkg.Release)
}
sort.Slice(pkgs, func(i, j int) bool {
return sortID(pkgs[i]) < sortID(pkgs[j])
})
// Cache the results
s.resultCache.Store(req.Hash(), pkgs)
return pkgs, nil
}
func (s *Solver) reposFromRPMMD(rpmRepos []rpmmd.RepoConfig) ([]repoConfig, error) {
dnfRepos := make([]repoConfig, len(rpmRepos))
for idx, rr := range rpmRepos {
dr := repoConfig{
ID: rr.Hash(),
Name: rr.Name,
BaseURLs: rr.BaseURLs,
Metalink: rr.Metalink,
MirrorList: rr.MirrorList,
GPGKeys: rr.GPGKeys,
MetadataExpire: rr.MetadataExpire,
repoHash: rr.Hash(),
}
if rr.CheckGPG != nil {
dr.CheckGPG = *rr.CheckGPG
}
if rr.CheckRepoGPG != nil {
dr.CheckRepoGPG = *rr.CheckRepoGPG
}
if rr.IgnoreSSL != nil {
dr.IgnoreSSL = *rr.IgnoreSSL
}
if rr.RHSM {
if s.subscriptions == nil {
return nil, fmt.Errorf("This system does not have any valid subscriptions. Subscribe it before specifying rhsm: true in sources.")
}
secrets, err := s.subscriptions.GetSecretsForBaseurl(rr.BaseURLs, s.arch, s.releaseVer)
if err != nil {
return nil, fmt.Errorf("RHSM secrets not found on the host for this baseurl: %s", rr.BaseURLs)
}
dr.SSLCACert = secrets.SSLCACert
dr.SSLClientKey = secrets.SSLClientKey
dr.SSLClientCert = secrets.SSLClientCert
}
dnfRepos[idx] = dr
}
return dnfRepos, nil
}
// Repository configuration for resolving dependencies for a set of packages. A
// Solver needs at least one RPM repository configured to be able to depsolve.
type repoConfig struct {
ID string `json:"id"`
Name string `json:"name,omitempty"`
BaseURLs []string `json:"baseurl,omitempty"`
Metalink string `json:"metalink,omitempty"`
MirrorList string `json:"mirrorlist,omitempty"`
GPGKeys []string `json:"gpgkeys,omitempty"`
CheckGPG bool `json:"gpgcheck"`
CheckRepoGPG bool `json:"check_repogpg"`
IgnoreSSL bool `json:"ignoressl"`
SSLCACert string `json:"sslcacert,omitempty"`
SSLClientKey string `json:"sslclientkey,omitempty"`
SSLClientCert string `json:"sslclientcert,omitempty"`
MetadataExpire string `json:"metadata_expire,omitempty"`
// set the repo hash from the `rpmmd.RepoConfig.Hash()` function
// rather than re-calculating it
repoHash string
}
// use the hash calculated by the `rpmmd.RepoConfig.Hash()`
// function rather than re-implementing the same code
func (r *repoConfig) Hash() string {
return r.repoHash
}
// Helper function for creating a depsolve request payload.
// The request defines a sequence of transactions, each depsolving one of the
// elements of `pkgSets` in the order they appear. The `repoConfigs` are used
// as the base repositories for all transactions. The extra repository configs
// in `pkgsetsRepos` are used for each of the `pkgSets` with matching index.
// The length of `pkgsetsRepos` must match the length of `pkgSets` or be empty
// (nil or empty slice).
//
// NOTE: Due to implementation limitations of DNF and dnf-json, each package set
// in the chain must use all of the repositories used by its predecessor.
// An error is returned if this requirement is not met.
func (s *Solver) makeDepsolveRequest(pkgSets []rpmmd.PackageSet) (*Request, map[string]rpmmd.RepoConfig, error) {
// dedupe repository configurations but maintain order
// the order in which repositories are added to the request affects the
// order of the dependencies in the result
repos := make([]rpmmd.RepoConfig, 0)
rpmRepoMap := make(map[string]rpmmd.RepoConfig)
for _, ps := range pkgSets {
for _, repo := range ps.Repositories {
id := repo.Hash()
if _, ok := rpmRepoMap[id]; !ok {
rpmRepoMap[id] = repo
repos = append(repos, repo)
}
}
}
transactions := make([]transactionArgs, len(pkgSets))
for dsIdx, pkgSet := range pkgSets {
transactions[dsIdx] = transactionArgs{
PackageSpecs: pkgSet.Include,
ExcludeSpecs: pkgSet.Exclude,
InstallWeakDeps: pkgSet.InstallWeakDeps,
}
for _, jobRepo := range pkgSet.Repositories {
transactions[dsIdx].RepoIDs = append(transactions[dsIdx].RepoIDs, jobRepo.Hash())
}
// If more than one transaction, ensure that the transaction uses
// all of the repos from its predecessor
if dsIdx > 0 {
prevRepoIDs := transactions[dsIdx-1].RepoIDs
if len(transactions[dsIdx].RepoIDs) < len(prevRepoIDs) {
return nil, nil, fmt.Errorf("chained packageSet %d does not use all of the repos used by its predecessor", dsIdx)
}
for idx, repoID := range prevRepoIDs {
if repoID != transactions[dsIdx].RepoIDs[idx] {
return nil, nil, fmt.Errorf("chained packageSet %d does not use all of the repos used by its predecessor", dsIdx)
}
}
}
}
dnfRepoMap, err := s.reposFromRPMMD(repos)
if err != nil {
return nil, nil, err
}
args := arguments{
Repos: dnfRepoMap,
Transactions: transactions,
}
req := Request{
Command: "depsolve",
ModulePlatformID: s.modulePlatformID,
Arch: s.arch,
CacheDir: s.GetCacheDir(),
Arguments: args,
}
return &req, rpmRepoMap, nil
}
// Helper function for creating a dump request payload
func (s *Solver) makeDumpRequest(repos []rpmmd.RepoConfig) (*Request, error) {
dnfRepos, err := s.reposFromRPMMD(repos)
if err != nil {
return nil, err
}
req := Request{
Command: "dump",
ModulePlatformID: s.modulePlatformID,
Arch: s.arch,
CacheDir: s.GetCacheDir(),
Arguments: arguments{
Repos: dnfRepos,
},
}
return &req, nil
}
// Helper function for creating a search request payload
func (s *Solver) makeSearchRequest(repos []rpmmd.RepoConfig, packages []string) (*Request, error) {
dnfRepos, err := s.reposFromRPMMD(repos)
if err != nil {
return nil, err
}
req := Request{
Command: "search",
ModulePlatformID: s.modulePlatformID,
Arch: s.arch,
CacheDir: s.GetCacheDir(),
Arguments: arguments{
Repos: dnfRepos,
Search: searchArgs{
Packages: packages,
},
},
}
return &req, nil
}
// convert an internal list of PackageSpecs to the rpmmd equivalent and attach
// key and subscription information based on the repository configs.
func (pkgs packageSpecs) toRPMMD(repos map[string]rpmmd.RepoConfig) []rpmmd.PackageSpec {
rpmDependencies := make([]rpmmd.PackageSpec, len(pkgs))
for i, dep := range pkgs {
repo, ok := repos[dep.RepoID]
if !ok {
panic("dependency repo ID not found in repositories")
}
rpmDependencies[i].Name = dep.Name
rpmDependencies[i].Epoch = dep.Epoch
rpmDependencies[i].Version = dep.Version
rpmDependencies[i].Release = dep.Release
rpmDependencies[i].Arch = dep.Arch
rpmDependencies[i].RemoteLocation = dep.RemoteLocation
rpmDependencies[i].Checksum = dep.Checksum
if repo.CheckGPG != nil {
rpmDependencies[i].CheckGPG = *repo.CheckGPG
}
if repo.IgnoreSSL != nil {
rpmDependencies[i].IgnoreSSL = *repo.IgnoreSSL
}
if repo.RHSM {
rpmDependencies[i].Secrets = "org.osbuild.rhsm"
}
}
return rpmDependencies
}
// Request command and arguments for dnf-json
type Request struct {
// Command should be "depsolve", "dump", or "search"
Command string `json:"command"`
// Platform ID, e.g., "platform:el8"
ModulePlatformID string `json:"module_platform_id"`
// System architecture
Arch string `json:"arch"`
// Cache directory for the DNF metadata
CacheDir string `json:"cachedir"`
// Arguments for the action defined by Command
Arguments arguments `json:"arguments"`
}
// Hash returns a hash of the unique aspects of the Request
//
//nolint:errcheck
func (r *Request) Hash() string {
h := sha256.New()
h.Write([]byte(r.Command))
h.Write([]byte(r.ModulePlatformID))
h.Write([]byte(r.Arch))
for _, repo := range r.Arguments.Repos {
h.Write([]byte(repo.Hash()))
}
h.Write([]byte(fmt.Sprintf("%T", r.Arguments.Search.Latest)))
h.Write([]byte(strings.Join(r.Arguments.Search.Packages, "")))
return fmt.Sprintf("%x", h.Sum(nil))
}
// arguments for a dnf-json request
type arguments struct {
// Repositories to use for depsolving
Repos []repoConfig `json:"repos"`
// Search terms to use with search command
Search searchArgs `json:"search"`
// Depsolve package sets and repository mappings for this request
Transactions []transactionArgs `json:"transactions"`
}
type searchArgs struct {
// Only include latest NEVRA when true
Latest bool `json:"latest"`
// List of package name globs to search for
// If a name contains '*' it is passed to dnf glob matching, if it is of the form
// *name* it is passed to substring matching, and if neither, an exact match is expected.
Packages []string `json:"packages"`
}
type transactionArgs struct {
// Packages to depsolve
PackageSpecs []string `json:"package-specs"`
// Packages to exclude from results
ExcludeSpecs []string `json:"exclude-specs"`
// IDs of repositories to use for this depsolve
RepoIDs []string `json:"repo-ids"`
// If we want weak deps for this depsolve
InstallWeakDeps bool `json:"install_weak_deps"`
}
type packageSpecs []PackageSpec
// Package specification
type PackageSpec struct {
Name string `json:"name"`
Epoch uint `json:"epoch"`
Version string `json:"version,omitempty"`
Release string `json:"release,omitempty"`
Arch string `json:"arch,omitempty"`
RepoID string `json:"repo_id,omitempty"`
Path string `json:"path,omitempty"`
RemoteLocation string `json:"remote_location,omitempty"`
Checksum string `json:"checksum,omitempty"`
Secrets string `json:"secrets,omitempty"`
}
// dnf-json error structure
type Error struct {
Kind string `json:"kind"`
Reason string `json:"reason"`
}
func (err Error) Error() string {
return fmt.Sprintf("DNF error occurred: %s: %s", err.Kind, err.Reason)
}
// parseError parses the response from dnf-json into the Error type and appends
// the name and URL of a repository to all detected repository IDs in the
// message.
func parseError(data []byte, repos []repoConfig) Error {
var e Error
if err := json.Unmarshal(data, &e); err != nil {
// dumping the error into the Reason can get noisy, but it's good for troubleshooting
return Error{
Kind: "InternalError",
Reason: fmt.Sprintf("Failed to unmarshal dnf-json error output %q: %s", string(data), err.Error()),
}
}
// append to any instance of a repository ID the URL (or metalink, mirrorlist, etc)
for _, repo := range repos {
idstr := fmt.Sprintf("'%s'", repo.ID)
var nameURL string
if len(repo.BaseURLs) > 0 {
nameURL = strings.Join(repo.BaseURLs, ",")
} else if len(repo.Metalink) > 0 {
nameURL = repo.Metalink
} else if len(repo.MirrorList) > 0 {
nameURL = repo.MirrorList
}
if len(repo.Name) > 0 {
nameURL = fmt.Sprintf("%s: %s", repo.Name, nameURL)
}
e.Reason = strings.Replace(e.Reason, idstr, fmt.Sprintf("%s [%s]", idstr, nameURL), -1)
}
return e
}
func ParseError(data []byte) Error {
var e Error
if err := json.Unmarshal(data, &e); err != nil {
// dumping the error into the Reason can get noisy, but it's good for troubleshooting
return Error{
Kind: "InternalError",
Reason: fmt.Sprintf("Failed to unmarshal dnf-json error output %q: %s", string(data), err.Error()),
}
}
return e
}
func run(dnfJsonCmd []string, req *Request) ([]byte, error) {
if len(dnfJsonCmd) == 0 {
return nil, fmt.Errorf("dnf-json command undefined")
}
ex := dnfJsonCmd[0]
args := dnfJsonCmd[1:]
cmd := exec.Command(ex, args...)
stdin, err := cmd.StdinPipe()
if err != nil {
return nil, err
}
cmd.Stderr = os.Stderr
stdout := new(bytes.Buffer)
cmd.Stdout = stdout
err = cmd.Start()
if err != nil {
return nil, err
}
err = json.NewEncoder(stdin).Encode(req)
if err != nil {
return nil, err
}
stdin.Close()
err = cmd.Wait()
output := stdout.Bytes()
if runError, ok := err.(*exec.ExitError); ok && runError.ExitCode() != 0 {
return nil, parseError(output, req.Arguments.Repos)
}
return output, nil
}

@ -1,225 +0,0 @@
// dnfjson_mock provides data and methods for testing the dnfjson package.
package dnfjson_mock
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"github.com/osbuild/images/internal/dnfjson"
"github.com/osbuild/images/pkg/rpmmd"
)
func generatePackageList() rpmmd.PackageList {
baseTime, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z")
if err != nil {
panic(err)
}
var packageList rpmmd.PackageList
for i := 0; i < 22; i++ {
basePackage := rpmmd.Package{
Name: fmt.Sprintf("package%d", i),
Summary: fmt.Sprintf("pkg%d sum", i),
Description: fmt.Sprintf("pkg%d desc", i),
URL: fmt.Sprintf("https://pkg%d.example.com", i),
Epoch: 0,
Version: fmt.Sprintf("%d.0", i),
Release: fmt.Sprintf("%d.fc30", i),
Arch: "x86_64",
BuildTime: baseTime.AddDate(0, i, 0),
License: "MIT",
}
secondBuild := basePackage
secondBuild.Version = fmt.Sprintf("%d.1", i)
secondBuild.BuildTime = basePackage.BuildTime.AddDate(0, 0, 1)
packageList = append(packageList, basePackage, secondBuild)
}
return packageList
}
// generateSearchResults creates results for use with the dnfjson search command
// which is used for listing a subset of modules and projects.
//
// The map key is a comma-separated list of the packages requested
// If no packages are included it returns all 22 packages, same as the mock dump
//
// nonexistingpkg returns an empty list
// badpackage1 returns a fetch error, same as when the package name is unknown
// baddepsolve returns package1; the test then tries to depsolve package1 using BadDepsolve(),
// which will return a depsolve error.
func generateSearchResults() map[string]interface{} {
allPackages := generatePackageList()
// This includes package16, package2, package20, and package21
var wildcardResults rpmmd.PackageList
wildcardResults = append(wildcardResults, allPackages[32], allPackages[33])
wildcardResults = append(wildcardResults, allPackages[4], allPackages[5])
for i := 40; i < 44; i++ {
wildcardResults = append(wildcardResults, allPackages[i])
}
fetchError := dnfjson.Error{
Kind: "FetchError",
Reason: "There was a problem when fetching packages.",
}
return map[string]interface{}{
"": allPackages,
"*": allPackages,
"nonexistingpkg": rpmmd.PackageList{},
"package1": rpmmd.PackageList{allPackages[2], allPackages[3]},
"package1,package2": rpmmd.PackageList{allPackages[2], allPackages[3], allPackages[4], allPackages[5]},
"package2*,package16": wildcardResults,
"package16": rpmmd.PackageList{allPackages[32], allPackages[33]},
"badpackage1": fetchError,
"baddepsolve": rpmmd.PackageList{allPackages[2], allPackages[3]},
}
}
func createBaseDepsolveFixture() []dnfjson.PackageSpec {
return []dnfjson.PackageSpec{
{
Name: "dep-package3",
Epoch: 7,
Version: "3.0.3",
Release: "1.fc30",
Arch: "x86_64",
RepoID: "REPOID", // added by mock-dnf-json
},
{
Name: "dep-package1",
Epoch: 0,
Version: "1.33",
Release: "2.fc30",
Arch: "x86_64",
RepoID: "REPOID", // added by mock-dnf-json
},
{
Name: "dep-package2",
Epoch: 0,
Version: "2.9",
Release: "1.fc30",
Arch: "x86_64",
RepoID: "REPOID", // added by mock-dnf-json
},
}
}
// BaseDeps is the expected list of dependencies (as rpmmd.PackageSpec) from
// the Base ResponseGenerator
func BaseDeps() []rpmmd.PackageSpec {
return []rpmmd.PackageSpec{
{
Name: "dep-package3",
Epoch: 7,
Version: "3.0.3",
Release: "1.fc30",
Arch: "x86_64",
CheckGPG: true,
},
{
Name: "dep-package1",
Epoch: 0,
Version: "1.33",
Release: "2.fc30",
Arch: "x86_64",
CheckGPG: true,
},
{
Name: "dep-package2",
Epoch: 0,
Version: "2.9",
Release: "1.fc30",
Arch: "x86_64",
CheckGPG: true,
},
}
}
type ResponseGenerator func(string) string
func Base(tmpdir string) string {
data := map[string]interface{}{
"depsolve": createBaseDepsolveFixture(),
"dump": generatePackageList(),
"search": generateSearchResults(),
}
path := filepath.Join(tmpdir, "base.json")
write(data, path)
return path
}
func NonExistingPackage(tmpdir string) string {
deps := dnfjson.Error{
Kind: "MarkingErrors",
Reason: "Error occurred when marking packages for installation: Problems in request:\nmissing packages: fash",
}
data := map[string]interface{}{
"depsolve": deps,
}
path := filepath.Join(tmpdir, "notexist.json")
write(data, path)
return path
}
func BadDepsolve(tmpdir string) string {
deps := dnfjson.Error{
Kind: "DepsolveError",
Reason: "There was a problem depsolving ['go2rpm']: \n Problem: conflicting requests\n - nothing provides askalono-cli needed by go2rpm-1-4.fc31.noarch",
}
data := map[string]interface{}{
"depsolve": deps,
"dump": generatePackageList(),
"search": generateSearchResults(),
}
path := filepath.Join(tmpdir, "baddepsolve.json")
write(data, path)
return path
}
func BadFetch(tmpdir string) string {
deps := dnfjson.Error{
Kind: "DepsolveError",
Reason: "There was a problem depsolving ['go2rpm']: \n Problem: conflicting requests\n - nothing provides askalono-cli needed by go2rpm-1-4.fc31.noarch",
}
pkgs := dnfjson.Error{
Kind: "FetchError",
Reason: "There was a problem when fetching packages.",
}
data := map[string]interface{}{
"depsolve": deps,
"dump": pkgs,
"search": generateSearchResults(),
}
path := filepath.Join(tmpdir, "badfetch.json")
write(data, path)
return path
}
func marshal(data interface{}) []byte {
jdata, err := json.Marshal(data)
if err != nil {
panic(err)
}
return jdata
}
func write(data interface{}, path string) {
fp, err := os.Create(path)
if err != nil {
panic(err)
}
if _, err := fp.Write(marshal(data)); err != nil {
panic(err)
}
}
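Each generator above writes a JSON fixture and returns its path; tests then point a Solver at a replaying helper via SetDNFJSONPath. A hypothetical wiring sketch — the mock binary name and its argument convention are assumptions, not shown in this commit:

package example

import (
	"github.com/osbuild/images/internal/dnfjson"
	dnfjson_mock "github.com/osbuild/images/internal/mocks/dnfjson"
)

// newMockedSolver sketches test setup; it is not part of this commit.
func newMockedSolver(tmpdir string) *dnfjson.Solver {
	fixture := dnfjson_mock.Base(tmpdir) // writes base.json, returns its path
	solver := dnfjson.NewSolver("platform:f38", "38", "x86_64", "fedora-38", tmpdir)
	// Assumed convention: the mock replays responses from the fixture given in argv.
	solver.SetDNFJSONPath("mock-dnf-json", fixture)
	return solver
}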

@ -211,10 +211,11 @@ func (c *Customizations) GetUsers() []UserCustomization {
// prepend sshkey for backwards compat (overridden by users)
if len(c.SSHKey) > 0 {
for _, c := range c.SSHKey {
for idx := range c.SSHKey {
keyc := c.SSHKey[idx]
users = append(users, UserCustomization{
Name: c.User,
Key: &c.Key,
Name: keyc.User,
Key: &keyc.Key,
})
}
}
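This hunk, like the repository-loading fix at the end of this commit, swaps a range-variable capture for an explicit per-iteration copy: before Go 1.22 a for-range loop reused a single variable, so a pointer into it (&c.Key above) aliased every iteration. A standalone illustration:

package main

import "fmt"

func main() {
	vals := []int{1, 2, 3}
	var aliased, copied []*int
	for _, v := range vals {
		// Pre-Go 1.22: one shared loop variable, so all three pointers match.
		aliased = append(aliased, &v)
	}
	for idx := range vals {
		v := vals[idx] // fresh copy per iteration, as in the fix above
		copied = append(copied, &v)
	}
	fmt.Println(*aliased[0], *aliased[1]) // 3 3 under pre-1.22 semantics
	fmt.Println(*copied[0], *copied[1])   // always 1 2
}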

@ -17,6 +17,7 @@ type PartitionTable struct {
SectorSize uint64 // Sector size in bytes
ExtraPadding uint64 // Extra space at the end of the partition table (sectors)
StartOffset uint64 // Starting offset of the first partition in the table (bytes)
}
func NewPartitionTable(basePT *PartitionTable, mountpoints []blueprint.FilesystemCustomization, imageSize uint64, lvmify bool, requiredSizes map[string]uint64, rng *rand.Rand) (*PartitionTable, error) {
@ -77,6 +78,7 @@ func (pt *PartitionTable) Clone() Entity {
Partitions: make([]Partition, len(pt.Partitions)),
SectorSize: pt.SectorSize,
ExtraPadding: pt.ExtraPadding,
StartOffset: pt.StartOffset,
}
for idx, partition := range pt.Partitions {
@ -364,6 +366,7 @@ func (pt *PartitionTable) relayout(size uint64) uint64 {
}
start := pt.AlignUp(header)
start += pt.StartOffset
size = pt.AlignUp(size)
var rootIdx = -1
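The effect of the new field: relayout first aligns the partition-table header, then shifts the first partition by StartOffset. With the 8 MiB offset the new minimal-raw tables set, and assuming the default 1 MiB alignment grain, the first partition lands at 9 MiB. A quick check of that arithmetic:

package main

import "fmt"

func main() {
	const (
		mib         = uint64(1) << 20
		grain       = 1 * mib  // assumed default alignment grain
		header      = 34 * 512 // protective MBR + GPT header + entry array
		startOffset = 8 * mib  // StartOffset from the minimal-raw tables
	)
	alignUp := func(n uint64) uint64 { return ((n + grain - 1) / grain) * grain }
	start := alignUp(header) + startOffset
	fmt.Println(start/mib, "MiB") // 9 MiB
}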

@ -8,6 +8,7 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/environment"
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/internal/oscap"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
@ -31,8 +32,11 @@ const (
// blueprint package set name
blueprintPkgsKey = "blueprint"
//Kernel options for ami, qcow2, openstack, vhd and vmdk types
defaultKernelOptions = "ro no_timer_check console=ttyS0,115200n8 biosdevname=0 net.ifnames=0"
//Default kernel command line
defaultKernelOptions = "ro"
// Added kernel command line options for ami, qcow2, openstack, vhd and vmdk types
cloudKernelOptions = "ro no_timer_check console=ttyS0,115200n8 biosdevname=0 net.ifnames=0"
)
var (
@ -61,6 +65,13 @@ var (
"dbus-parsec",
}
minimalRawServices = []string{
"NetworkManager.service",
"firewalld.service",
"initial-setup.service",
"sshd.service",
}
// Image Definitions
imageInstallerImgType = imageType{
name: "image-installer",
@ -237,7 +248,7 @@ var (
"cloud-init-local.service",
},
},
kernelOptions: defaultKernelOptions,
kernelOptions: cloudKernelOptions,
bootable: true,
defaultSize: 5 * common.GibiByte,
image: diskImage,
@ -247,35 +258,6 @@ var (
basePartitionTables: defaultBasePartitionTables,
}
vhdImgType = imageType{
name: "vhd",
filename: "disk.vhd",
mimeType: "application/x-vhd",
packageSets: map[string]packageSetFunc{
osPkgsKey: vhdCommonPackageSet,
},
defaultImageConfig: &distro.ImageConfig{
Locale: common.ToPtr("en_US.UTF-8"),
EnabledServices: []string{
"sshd",
},
DefaultTarget: common.ToPtr("multi-user.target"),
DisabledServices: []string{
"proc-sys-fs-binfmt_misc.mount",
"loadmodules.service",
},
},
kernelOptions: defaultKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "vpc"},
exports: []string{"vpc"},
basePartitionTables: defaultBasePartitionTables,
environment: &environment.Azure{},
}
vmdkDefaultImageConfig = &distro.ImageConfig{
Locale: common.ToPtr("en_US.UTF-8"),
EnabledServices: []string{
@ -294,7 +276,7 @@ var (
osPkgsKey: vmdkCommonPackageSet,
},
defaultImageConfig: vmdkDefaultImageConfig,
kernelOptions: defaultKernelOptions,
kernelOptions: cloudKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: diskImage,
@ -312,7 +294,7 @@ var (
osPkgsKey: vmdkCommonPackageSet,
},
defaultImageConfig: vmdkDefaultImageConfig,
kernelOptions: defaultKernelOptions,
kernelOptions: cloudKernelOptions,
bootable: true,
defaultSize: 2 * common.GibiByte,
image: diskImage,
@ -375,6 +357,12 @@ var (
packageSets: map[string]packageSetFunc{
osPkgsKey: minimalrpmPackageSet,
},
defaultImageConfig: &distro.ImageConfig{
EnabledServices: minimalRawServices,
// NOTE: temporary workaround for a bug in initial-setup that
// requires a kickstart file in the root directory.
Files: []*fsnode.File{initialSetupKickstart()},
},
rpmOstree: false,
kernelOptions: defaultKernelOptions,
bootable: true,
@ -383,7 +371,7 @@ var (
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
basePartitionTables: defaultBasePartitionTables,
basePartitionTables: minimalrawPartitionTables,
}
)
@ -572,6 +560,25 @@ func newDistro(version int) distro.Distro {
openstackImgType := qcow2ImgType
openstackImgType.name = "openstack"
vhdImgType := qcow2ImgType
vhdImgType.name = "vhd"
vhdImgType.filename = "disk.vhd"
vhdImgType.mimeType = "application/x-vhd"
vhdImgType.payloadPipelines = []string{"os", "image", "vpc"}
vhdImgType.exports = []string{"vpc"}
vhdImgType.environment = &environment.Azure{}
vhdImgType.packageSets = map[string]packageSetFunc{
osPkgsKey: vhdCommonPackageSet,
}
vhdConfig := distro.ImageConfig{
SshdConfig: &osbuild.SshdConfigStageOptions{
Config: osbuild.SshdConfigConfig{
ClientAliveInterval: common.ToPtr(120),
},
},
}
vhdImgType.defaultImageConfig = vhdConfig.InheritFrom(qcow2ImgType.defaultImageConfig)
x86_64.addImageTypes(
&platform.X86{
BIOS: true,
@ -716,10 +723,12 @@ func newDistro(version int) distro.Distro {
&platform.Aarch64{
BasePlatform: platform.BasePlatform{
FirmwarePackages: []string{
"arm-image-installer", // ??
"arm-image-installer",
"bcm283x-firmware",
"iwl7260-firmware",
"uboot-images-armv8", // ??
"brcmfmac-firmware",
"iwlwifi-mvm-firmware",
"realtek-firmware",
"uboot-images-armv8",
},
},
UEFIVendor: "fedora",
@ -731,7 +740,7 @@ func newDistro(version int) distro.Distro {
liveInstallerImgType,
)
aarch64.addImageTypes(
&platform.Aarch64_IoT{
&platform.Aarch64_Fedora{
BasePlatform: platform.BasePlatform{
ImageFormat: platform.FORMAT_RAW,
},
@ -781,10 +790,18 @@ func newDistro(version int) distro.Distro {
minimalrawImgType,
)
aarch64.addImageTypes(
&platform.Aarch64{
&platform.Aarch64_Fedora{
UEFIVendor: "fedora",
BasePlatform: platform.BasePlatform{
ImageFormat: platform.FORMAT_RAW,
FirmwarePackages: []string{
"arm-image-installer",
"bcm283x-firmware",
"uboot-images-armv8",
},
},
BootFiles: [][2]string{
{"/usr/share/uboot/rpi_arm64/u-boot.bin", "/boot/efi/rpi-u-boot.bin"},
},
},
minimalrawImgType,
@ -803,8 +820,10 @@ func newDistro(version int) distro.Distro {
"grub2-tools",
"grub2-tools-extra",
"grub2-tools-minimal",
"brcmfmac-firmware",
"iwlwifi-dvm-firmware",
"iwlwifi-mvm-firmware",
"realtek-firmware",
"microcode_ctl",
"syslinux",
"syslinux-nonlinux",
@ -826,6 +845,10 @@ func newDistro(version int) distro.Distro {
"grub2-tools",
"grub2-tools-extra",
"grub2-tools-minimal",
"brcmfmac-firmware",
"iwlwifi-dvm-firmware",
"iwlwifi-mvm-firmware",
"realtek-firmware",
"uboot-images-armv8",
},
},

@ -6,6 +6,7 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/fdo"
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/internal/ignition"
"github.com/osbuild/images/internal/oscap"
"github.com/osbuild/images/internal/users"
@ -224,6 +225,9 @@ func osCustomizations(
osc.PwQuality = imageConfig.PwQuality
osc.WSLConfig = imageConfig.WSLConfig
osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...)
return osc
}
@ -684,3 +688,13 @@ func makeOSTreePayloadCommit(options *ostree.ImageOptions, defaultRef string) (o
RHSM: options.RHSM,
}, nil
}
// initialSetupKickstart returns the File configuration for a kickstart file
// that's required to enable initial-setup to run on first boot.
func initialSetupKickstart() *fsnode.File {
file, err := fsnode.NewFile("/root/anaconda-ks.cfg", nil, "root", "root", []byte("# Run initial-setup on first boot\n# Created by osbuild\nfirstboot --reconfig\n"))
if err != nil {
panic(err)
}
return file
}

@ -11,13 +11,12 @@ import (
"github.com/osbuild/images/pkg/rpmmd"
)
func qcow2CommonPackageSet(t *imageType) rpmmd.PackageSet {
func cloudBaseSet(t *imageType) rpmmd.PackageSet {
return rpmmd.PackageSet{
Include: []string{
"@Fedora Cloud Server",
"chrony", // not mentioned in the kickstart, anaconda pulls it when setting the timezone
"langpacks-en",
"qemu-guest-agent",
},
Exclude: []string{
"dracut-config-rescue",
@ -29,25 +28,22 @@ func qcow2CommonPackageSet(t *imageType) rpmmd.PackageSet {
}
}
func qcow2CommonPackageSet(t *imageType) rpmmd.PackageSet {
return cloudBaseSet(t).Append(
rpmmd.PackageSet{
Include: []string{
"qemu-guest-agent",
},
})
}
func vhdCommonPackageSet(t *imageType) rpmmd.PackageSet {
return rpmmd.PackageSet{
Include: []string{
"@core",
"chrony",
"langpacks-en",
"net-tools",
"ntfsprogs",
"libxcrypt-compat",
"initscripts",
"glibc-all-langpacks",
},
Exclude: []string{
"dracut-config-rescue",
"geolite2-city",
"geolite2-country",
"zram-generator-defaults",
},
}
return cloudBaseSet(t).Append(
rpmmd.PackageSet{
Include: []string{
"WALinuxAgent",
},
})
}
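The refactor in this file reduces each cloud image type to a shared base plus a small Append delta, as seen above. A sketch of the composition pattern, assuming rpmmd.PackageSet's Append merges the Include/Exclude lists into a new set (values illustrative):

package example

import "github.com/osbuild/images/pkg/rpmmd"

// cloudSets sketches the base-plus-delta composition; not part of this commit.
func cloudSets() (qcow2, vhd rpmmd.PackageSet) {
	base := rpmmd.PackageSet{
		Include: []string{"@Fedora Cloud Server", "chrony", "langpacks-en"},
		Exclude: []string{"dracut-config-rescue"},
	}
	qcow2 = base.Append(rpmmd.PackageSet{Include: []string{"qemu-guest-agent"}})
	vhd = base.Append(rpmmd.PackageSet{Include: []string{"WALinuxAgent"}})
	return qcow2, vhd
}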
func vmdkCommonPackageSet(t *imageType) rpmmd.PackageSet {
@ -539,6 +535,12 @@ func minimalrpmPackageSet(t *imageType) rpmmd.PackageSet {
return rpmmd.PackageSet{
Include: []string{
"@core",
"initial-setup",
"libxkbcommon",
"NetworkManager-wifi",
"brcmfmac-firmware",
"realtek-firmware",
"iwlwifi-mvm-firmware",
},
}
}

@ -108,6 +108,101 @@ var defaultBasePartitionTables = distro.BasePartitionTableMap{
},
}
var minimalrawPartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
StartOffset: 8 * common.MebiByte,
Partitions: []disk.Partition{
{
Size: 200 * common.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * common.MebiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.FilesystemDataUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * common.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "0xc1748067",
Type: "dos",
StartOffset: 8 * common.MebiByte,
Partitions: []disk.Partition{
{
Size: 200 * common.MebiByte,
Type: "06",
Bootable: true,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * common.MebiByte,
Type: "83",
Payload: &disk.Filesystem{
Type: "ext4",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * common.GibiByte,
Type: "83",
Payload: &disk.Filesystem{
Type: "ext4",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
}
var iotBasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",

@ -4,6 +4,7 @@ import (
"fmt"
"reflect"
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/internal/shell"
"github.com/osbuild/images/pkg/osbuild"
"github.com/osbuild/images/pkg/subscription"
@ -62,6 +63,9 @@ type ImageConfig struct {
UdevRules *osbuild.UdevRulesStageOptions
GCPGuestAgentConfig *osbuild.GcpGuestAgentConfigOptions
WSLConfig *osbuild.WSLConfStageOptions
Files []*fsnode.File
Directories []*fsnode.Directory
}
// InheritFrom inherits unset values from the provided parent configuration and

@ -213,6 +213,9 @@ func osCustomizations(
osc.UdevRules = imageConfig.UdevRules
osc.GCPGuestAgentConfig = imageConfig.GCPGuestAgentConfig
osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...)
return osc
}

@ -4,6 +4,7 @@ import (
"fmt"
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/platform"
"github.com/osbuild/images/pkg/rpmmd"
@ -148,8 +149,14 @@ func minimalRawImgType(rd distribution) imageType {
packageSets: map[string]packageSetFunc{
osPkgsKey: minimalrpmPackageSet,
},
defaultImageConfig: &distro.ImageConfig{
EnabledServices: minimalrawServices(rd),
// NOTE: temporary workaround for a bug in initial-setup that
// requires a kickstart file in the root directory.
Files: []*fsnode.File{initialSetupKickstart()},
},
rpmOstree: false,
kernelOptions: "ro no_timer_check console=ttyS0,115200n8 biosdevname=0 net.ifnames=0",
kernelOptions: "ro",
bootable: true,
defaultSize: 2 * common.GibiByte,
image: diskImage,
@ -401,3 +408,10 @@ func edgeServices(rd distribution) []string {
return edgeServices
}
func minimalrawServices(rd distribution) []string {
// Common Services
var minimalrawServices = []string{"NetworkManager.service", "firewalld.service", "sshd.service", "initial-setup.service"}
return minimalrawServices
}

@ -5,6 +5,7 @@ import (
"math/rand"
"github.com/osbuild/images/internal/fdo"
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/internal/ignition"
"github.com/osbuild/images/internal/oscap"
"github.com/osbuild/images/internal/users"
@ -251,6 +252,9 @@ func osCustomizations(
osc.GCPGuestAgentConfig = imageConfig.GCPGuestAgentConfig
osc.WSLConfig = imageConfig.WSLConfig
osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...)
return osc
}
@ -599,3 +603,13 @@ func makeOSTreePayloadCommit(options *ostree.ImageOptions, defaultRef string) (o
RHSM: options.RHSM,
}, nil
}
// initialSetupKickstart returns the File configuration for a kickstart file
// that's required to enable initial-setup to run on first boot.
func initialSetupKickstart() *fsnode.File {
file, err := fsnode.NewFile("/root/anaconda-ks.cfg", nil, "root", "root", []byte("# Run initial-setup on first boot\n# Created by osbuild\nfirstboot --reconfig\nlang en_US.UTF-8\n"))
if err != nil {
panic(err)
}
return file
}

@ -78,6 +78,11 @@ func minimalrpmPackageSet(t *imageType) rpmmd.PackageSet {
return rpmmd.PackageSet{
Include: []string{
"@core",
"initial-setup",
"libxkbcommon",
"NetworkManager-wifi",
"iwl7260-firmware",
"iwl3160-firmware",
},
}
}

@ -5,6 +5,7 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/environment"
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/pkg/disk"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/osbuild"
@ -183,15 +184,22 @@ var (
packageSets: map[string]packageSetFunc{
osPkgsKey: minimalrpmPackageSet,
},
defaultImageConfig: &distro.ImageConfig{
EnabledServices: minimalrawServices,
SystemdUnit: systemdUnits,
// NOTE: temporary workaround for a bug in initial-setup that
// requires a kickstart file in the root directory.
Files: []*fsnode.File{initialSetupKickstart()},
},
rpmOstree: false,
kernelOptions: "ro no_timer_check console=ttyS0,115200n8 biosdevname=0 net.ifnames=0",
kernelOptions: "ro",
bootable: true,
defaultSize: 2 * common.GibiByte,
image: diskImage,
buildPipelines: []string{"build"},
payloadPipelines: []string{"os", "image", "xz"},
exports: []string{"xz"},
basePartitionTables: defaultBasePartitionTables,
basePartitionTables: minimalrawPartitionTables,
}
// Shared Services
@ -199,6 +207,9 @@ var (
// TODO(runcom): move fdo-client-linuxapp.service to presets?
"NetworkManager.service", "firewalld.service", "sshd.service", "fdo-client-linuxapp.service",
}
minimalrawServices = []string{
"NetworkManager.service", "firewalld.service", "sshd.service", "initial-setup.service",
}
//dropin to disable grub-boot-success.timer if greenboot present
systemdUnits = []*osbuild.SystemdUnitStageOptions{
{
@ -213,6 +224,102 @@ var (
},
}
// Partition tables
minimalrawPartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
StartOffset: 8 * common.MebiByte,
Partitions: []disk.Partition{
{
Size: 200 * common.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * common.MebiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.FilesystemDataUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * common.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
platform.ARCH_AARCH64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
Type: "gpt",
StartOffset: 8 * common.MebiByte,
Partitions: []disk.Partition{
{
Size: 200 * common.MebiByte,
Type: disk.EFISystemPartitionGUID,
UUID: disk.EFISystemPartitionUUID,
Payload: &disk.Filesystem{
Type: "vfat",
UUID: disk.EFIFilesystemUUID,
Mountpoint: "/boot/efi",
Label: "EFI-SYSTEM",
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
FSTabFreq: 0,
FSTabPassNo: 2,
},
},
{
Size: 500 * common.MebiByte,
Type: disk.XBootLDRPartitionGUID,
UUID: disk.FilesystemDataUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Mountpoint: "/boot",
Label: "boot",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
{
Size: 2 * common.GibiByte,
Type: disk.FilesystemDataGUID,
UUID: disk.RootPartitionUUID,
Payload: &disk.Filesystem{
Type: "xfs",
Label: "root",
Mountpoint: "/",
FSTabOptions: "defaults",
FSTabFreq: 0,
FSTabPassNo: 0,
},
},
},
},
}
edgeBasePartitionTables = distro.BasePartitionTableMap{
platform.ARCH_X86_64.String(): disk.PartitionTable{
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",

@ -6,6 +6,7 @@ import (
"github.com/osbuild/images/internal/common"
"github.com/osbuild/images/internal/fdo"
"github.com/osbuild/images/internal/fsnode"
"github.com/osbuild/images/internal/ignition"
"github.com/osbuild/images/internal/oscap"
"github.com/osbuild/images/internal/users"
@ -248,6 +249,9 @@ func osCustomizations(
osc.GCPGuestAgentConfig = imageConfig.GCPGuestAgentConfig
osc.WSLConfig = imageConfig.WSLConfig
osc.Files = append(osc.Files, imageConfig.Files...)
osc.Directories = append(osc.Directories, imageConfig.Directories...)
return osc
}
@ -647,3 +651,13 @@ func makeOSTreePayloadCommit(options *ostree.ImageOptions, defaultRef string) (o
RHSM: options.RHSM,
}, nil
}
// initialSetupKickstart returns the File configuration for a kickstart file
// that's required to enable initial-setup to run on first boot.
func initialSetupKickstart() *fsnode.File {
file, err := fsnode.NewFile("/root/anaconda-ks.cfg", nil, "root", "root", []byte("# Run initial-setup on first boot\n# Created by osbuild\nfirstboot --reconfig\nlang en_US.UTF-8\n"))
if err != nil {
panic(err)
}
return file
}

@ -250,6 +250,11 @@ func minimalrpmPackageSet(t *imageType) rpmmd.PackageSet {
return rpmmd.PackageSet{
Include: []string{
"@core",
"initial-setup",
"libxkbcommon",
"NetworkManager-wifi",
"iwl7260-firmware",
"iwl3160-firmware",
},
}
}

@ -1,14 +1,11 @@
package test_distro
import (
"crypto/sha256"
"errors"
"fmt"
"sort"
dnfjson_mock "github.com/osbuild/images/internal/mocks/dnfjson"
"github.com/osbuild/images/pkg/blueprint"
"github.com/osbuild/images/pkg/container"
"github.com/osbuild/images/pkg/distro"
"github.com/osbuild/images/pkg/distroregistry"
"github.com/osbuild/images/pkg/manifest"
@ -297,13 +294,13 @@ func (t *TestImageType) Manifest(b *blueprint.Blueprint, options distro.ImageOpt
return m, nil, nil
}
// newTestDistro returns a new instance of TestDistro with the
// NewTestDistro returns a new instance of TestDistro with the
// given name and modulePlatformID.
//
// It contains two architectures "test_arch" and "test_arch2".
// "test_arch" contains one image type "test_type".
// "test_arch2" contains two image types "test_type" and "test_type2".
func newTestDistro(name, modulePlatformID, releasever string) *TestDistro {
func NewTestDistro(name, modulePlatformID, releasever string) *TestDistro {
td := TestDistro{
name: name,
releasever: releasever,
@ -378,7 +375,7 @@ func newTestDistro(name, modulePlatformID, releasever string) *TestDistro {
// New returns a new instance of TestDistro named "test-distro".
func New() *TestDistro {
return newTestDistro(TestDistroName, TestDistroModulePlatformID, TestDistroReleasever)
return NewTestDistro(TestDistroName, TestDistroModulePlatformID, TestDistroReleasever)
}
func NewRegistry() *distroregistry.Registry {
@ -392,47 +389,3 @@ func NewRegistry() *distroregistry.Registry {
registry.SetHostArchName(TestArchName)
return registry
}
// New2 returns a new instance of TestDistro named "test-distro-2".
func New2() *TestDistro {
return newTestDistro(TestDistro2Name, TestDistro2ModulePlatformID, TestDistro2Releasever)
}
// ResolveContent transforms content source specs into resolved specs for serialization.
// For packages, it uses the dnfjson_mock.BaseDeps() every time, but retains
// the map keys from the input.
// For ostree commits it hashes the URL+Ref to create a checksum.
func ResolveContent(pkgs map[string][]rpmmd.PackageSet, containers map[string][]container.SourceSpec, commits map[string][]ostree.SourceSpec) (map[string][]rpmmd.PackageSpec, map[string][]container.Spec, map[string][]ostree.CommitSpec) {
pkgSpecs := make(map[string][]rpmmd.PackageSpec, len(pkgs))
for name := range pkgs {
pkgSpecs[name] = dnfjson_mock.BaseDeps()
}
containerSpecs := make(map[string][]container.Spec, len(containers))
for name := range containers {
containerSpecs[name] = make([]container.Spec, len(containers[name]))
for idx := range containers[name] {
containerSpecs[name][idx] = container.Spec{
Source: containers[name][idx].Source,
TLSVerify: containers[name][idx].TLSVerify,
LocalName: containers[name][idx].Name,
}
}
}
commitSpecs := make(map[string][]ostree.CommitSpec, len(commits))
for name := range commits {
commitSpecs[name] = make([]ostree.CommitSpec, len(commits[name]))
for idx := range commits[name] {
commitSpecs[name][idx] = ostree.CommitSpec{
Ref: commits[name][idx].Ref,
URL: commits[name][idx].URL,
Checksum: fmt.Sprintf("%x", sha256.Sum256([]byte(commits[name][idx].URL+commits[name][idx].Ref))),
}
fmt.Printf("Test distro spec: %+v\n", commitSpecs[name][idx])
}
}
return pkgSpecs, containerSpecs, commitSpecs
}

@ -149,11 +149,16 @@ func (m Manifest) Serialize(packageSets map[string][]rpmmd.PackageSpec, containe
pipeline.serializeEnd()
}
sources, err := osbuild.GenSources(packages, commits, inline, containers)
if err != nil {
return nil, err
}
return json.Marshal(
osbuild.Manifest{
Version: "2",
Pipelines: pipelines,
Sources: osbuild.GenSources(packages, commits, inline, containers),
Sources: sources,
},
)
}

@ -3,8 +3,14 @@ package osbuild
import (
"bytes"
"encoding/json"
"fmt"
"regexp"
"github.com/osbuild/images/pkg/rpmmd"
)
var curlDigestPattern = regexp.MustCompile(`(md5|sha1|sha256|sha384|sha512):[0-9a-f]{32,128}`)
type CurlSource struct {
Items map[string]CurlSourceItem `json:"items"`
}
@ -17,6 +23,38 @@ type CurlSourceItem interface {
isCurlSourceItem()
}
func NewCurlSource() *CurlSource {
return &CurlSource{
Items: make(map[string]CurlSourceItem),
}
}
func NewCurlPackageItem(pkg rpmmd.PackageSpec) (CurlSourceItem, error) {
if !curlDigestPattern.MatchString(pkg.Checksum) {
return nil, fmt.Errorf("curl package source item with name %q has invalid digest %q", pkg.Name, pkg.Checksum)
}
item := new(CurlSourceOptions)
item.URL = pkg.RemoteLocation
if pkg.Secrets == "org.osbuild.rhsm" {
item.Secrets = &URLSecrets{
Name: "org.osbuild.rhsm",
}
}
item.Insecure = pkg.IgnoreSSL
return item, nil
}
// AddPackage adds a pkg to the curl source to download. Will return an error
// if any of the supplied options are invalid or missing.
func (source *CurlSource) AddPackage(pkg rpmmd.PackageSpec) error {
item, err := NewCurlPackageItem(pkg)
if err != nil {
return err
}
source.Items[pkg.Checksum] = item
return nil
}
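AddPackage centralizes the checksum validation: the checksum doubles as the source-item key and must match curlDigestPattern (an algorithm prefix plus 32-128 hex digits). A hypothetical call with made-up spec values:

package main

import (
	"fmt"
	"strings"

	"github.com/osbuild/images/pkg/osbuild"
	"github.com/osbuild/images/pkg/rpmmd"
)

func main() {
	src := osbuild.NewCurlSource()
	err := src.AddPackage(rpmmd.PackageSpec{
		Name:           "chrony",
		RemoteLocation: "https://example.com/repo/chrony.rpm",
		// Must look like "<algo>:<hex>"; 64 hex characters for sha256.
		Checksum: "sha256:" + strings.Repeat("ab", 32),
	})
	fmt.Println(err) // <nil>: the item is stored under its checksum
}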
type URL string
func (URL) isCurlSourceItem() {}

@ -7,6 +7,7 @@ import (
"io"
"os"
"os/exec"
"strings"
)
// Run an instance of osbuild, returning a parsed osbuild.Result.
@ -84,3 +85,18 @@ func RunOSBuild(manifest []byte, store, outputDirectory string, exports, checkpo
return &res, nil
}
// OSBuildVersion returns the version of osbuild.
func OSBuildVersion() (string, error) {
var stdoutBuffer bytes.Buffer
cmd := exec.Command("osbuild", "--version")
cmd.Stdout = &stdoutBuffer
err := cmd.Run()
if err != nil {
return "", fmt.Errorf("running osbuild failed: %v", err)
}
// osbuild --version prints the version in the form of "osbuild VERSION". Extract the version.
return strings.TrimSpace(strings.TrimPrefix(stdoutBuffer.String(), "osbuild ")), nil
}

@ -1,5 +1,7 @@
package osbuild
import "github.com/osbuild/images/pkg/ostree"
// The commits to fetch, indexed by their checksum
type OSTreeSource struct {
Items map[string]OSTreeSourceItem `json:"items"`
@ -23,3 +25,26 @@ type OSTreeSourceRemote struct {
type OSTreeSourceRemoteSecrets struct {
Name string `json:"name"`
}
func NewOSTreeSource() *OSTreeSource {
return &OSTreeSource{
Items: make(map[string]OSTreeSourceItem),
}
}
func NewOSTreeSourceItem(commit ostree.CommitSpec) *OSTreeSourceItem {
item := new(OSTreeSourceItem)
item.Remote.URL = commit.URL
item.Remote.ContentURL = commit.ContentURL
if commit.Secrets == "org.osbuild.rhsm.consumer" {
item.Remote.Secrets = &OSTreeSourceRemoteSecrets{
Name: "org.osbuild.rhsm.consumer",
}
}
return item
}
func (source *OSTreeSource) AddItem(commit ostree.CommitSpec) {
item := NewOSTreeSourceItem(commit)
source.Items[commit.Checksum] = *item
}

@ -32,7 +32,9 @@ func NewSkopeoSourceItem(name, digest string, tlsVerify *bool) SkopeoSourceItem
TLSVerify: tlsVerify,
},
}
if err := item.validate(); err != nil {
panic(err)
}
return item
}
@ -60,14 +62,8 @@ func NewSkopeoSource() *SkopeoSource {
// if any of the supplied options are invalid or missing
func (source *SkopeoSource) AddItem(name, digest, image string, tlsVerify *bool) {
item := NewSkopeoSourceItem(name, digest, tlsVerify)
if err := item.validate(); err != nil {
panic(err)
}
if !skopeoDigestPattern.MatchString(image) {
panic("item has invalid image id")
}
source.Items[image] = item
}

@ -54,44 +54,33 @@ func (sources *Sources) UnmarshalJSON(data []byte) error {
return nil
}
func GenSources(packages []rpmmd.PackageSpec, ostreeCommits []ostree.CommitSpec, inlineData []string, containers []container.Spec) Sources {
func GenSources(packages []rpmmd.PackageSpec, ostreeCommits []ostree.CommitSpec, inlineData []string, containers []container.Spec) (Sources, error) {
sources := Sources{}
curl := &CurlSource{
Items: make(map[string]CurlSourceItem),
}
for _, pkg := range packages {
item := new(CurlSourceOptions)
item.URL = pkg.RemoteLocation
if pkg.Secrets == "org.osbuild.rhsm" {
item.Secrets = &URLSecrets{
Name: "org.osbuild.rhsm",
// collect rpm package sources
if len(packages) > 0 {
curl := NewCurlSource()
for _, pkg := range packages {
err := curl.AddPackage(pkg)
if err != nil {
return nil, err
}
}
item.Insecure = pkg.IgnoreSSL
curl.Items[pkg.Checksum] = item
}
if len(curl.Items) > 0 {
sources["org.osbuild.curl"] = curl
}
ostree := &OSTreeSource{
Items: make(map[string]OSTreeSourceItem),
}
for _, commit := range ostreeCommits {
item := new(OSTreeSourceItem)
item.Remote.URL = commit.URL
item.Remote.ContentURL = commit.ContentURL
if commit.Secrets == "org.osbuild.rhsm.consumer" {
item.Remote.Secrets = &OSTreeSourceRemoteSecrets{
Name: "org.osbuild.rhsm.consumer",
}
// collect ostree commit sources
if len(ostreeCommits) > 0 {
ostree := NewOSTreeSource()
for _, commit := range ostreeCommits {
ostree.AddItem(commit)
}
if len(ostree.Items) > 0 {
sources["org.osbuild.ostree"] = ostree
}
ostree.Items[commit.Checksum] = *item
}
if len(ostree.Items) > 0 {
sources["org.osbuild.ostree"] = ostree
}
// collect inline data sources
if len(inlineData) > 0 {
ils := NewInlineSource()
for _, data := range inlineData {
@ -101,23 +90,25 @@ func GenSources(packages []rpmmd.PackageSpec, ostreeCommits []ostree.CommitSpec,
sources["org.osbuild.inline"] = ils
}
skopeo := NewSkopeoSource()
skopeoIndex := NewSkopeoIndexSource()
for _, c := range containers {
skopeo.AddItem(c.Source, c.Digest, c.ImageID, c.TLSVerify)
// collect skopeo container sources
if len(containers) > 0 {
skopeo := NewSkopeoSource()
skopeoIndex := NewSkopeoIndexSource()
for _, c := range containers {
skopeo.AddItem(c.Source, c.Digest, c.ImageID, c.TLSVerify)
// if we have a list digest, add a skopeo-index source as well
if c.ListDigest != "" {
skopeoIndex.AddItem(c.Source, c.ListDigest, c.TLSVerify)
// if we have a list digest, add a skopeo-index source as well
if c.ListDigest != "" {
skopeoIndex.AddItem(c.Source, c.ListDigest, c.TLSVerify)
}
}
if len(skopeo.Items) > 0 {
sources["org.osbuild.skopeo"] = skopeo
}
if len(skopeoIndex.Items) > 0 {
sources["org.osbuild.skopeo-index"] = skopeoIndex
}
}
if len(skopeo.Items) > 0 {
sources["org.osbuild.skopeo"] = skopeo
}
if len(skopeoIndex.Items) > 0 {
sources["org.osbuild.skopeo-index"] = skopeoIndex
}
return sources
return sources, nil
}

@ -28,21 +28,21 @@ func (p *Aarch64) GetPackages() []string {
return packages
}
type Aarch64_IoT struct {
type Aarch64_Fedora struct {
BasePlatform
UEFIVendor string
BootFiles [][2]string
}
func (p *Aarch64_IoT) GetArch() Arch {
func (p *Aarch64_Fedora) GetArch() Arch {
return ARCH_AARCH64
}
func (p *Aarch64_IoT) GetUEFIVendor() string {
func (p *Aarch64_Fedora) GetUEFIVendor() string {
return p.UEFIVendor
}
func (p *Aarch64_IoT) GetPackages() []string {
func (p *Aarch64_Fedora) GetPackages() []string {
packages := p.BasePlatform.FirmwarePackages
if p.UEFIVendor != "" {
@ -57,6 +57,6 @@ func (p *Aarch64_IoT) GetPackages() []string {
return packages
}
func (p *Aarch64_IoT) GetBootFiles() [][2]string {
func (p *Aarch64_Fedora) GetBootFiles() [][2]string {
return p.BootFiles
}

@ -245,7 +245,8 @@ func loadRepositoriesFromFile(filename string) (map[string][]RepoConfig, error)
}
for arch, repos := range reposMap {
for _, repo := range repos {
for idx := range repos {
repo := repos[idx]
var urls []string
if repo.BaseURL != "" {
urls = []string{repo.BaseURL}