The problem: osbuild-composer used to have rather incomplete logic for selecting client certificates and keys when fetching data from repositories that use the "subscription model". In this scenario, every repository requires the use of a client-side TLS certificate. The problem is that every repository can use its own CA and require a different certificate/key pair. This case wasn't handled in composer at all. Furthermore, osbuild-composer can use remote workers, which complicates things even more.

Assumptions: The problem outlined above is hard to solve in the general case, but Red Hat Subscription Manager places certain limitations on how subscriptions may be used. For example, a subscription must be tied to a host system, so there is no way to use such a repository in osbuild-composer without it also being available on the host system. Also, if a user wishes to use a certain repository in osbuild-composer, it must be available on both hosts: the composer and the worker. It will come with a different client certificate/key pair, but otherwise its configuration remains the same.

The solution: Expect all the subscriptions to be registered in the /etc/yum.repos.d/redhat.repo file. Read the mapping of URLs to certificates and keys from there and use it. Don't change the manifest format and let osbuild guess the appropriate subscription to use.
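To make the approach concrete, the stand-alone sketch below shows the idea of scanning /etc/yum.repos.d/redhat.repo and building a baseurl-to-certificate mapping. It is an illustration only, not the code from the internal/rhsm package; the type and function names in it are made up.

// Illustrative sketch: build a baseurl -> entitlement secrets map from redhat.repo.
// All names here are hypothetical; the real implementation lives in internal/rhsm.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

type repoSecrets struct {
	SSLCACert     string
	SSLClientKey  string
	SSLClientCert string
}

func loadRedHatRepoSecrets(path string) (map[string]repoSecrets, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	secrets := map[string]repoSecrets{}
	var baseurl string
	var current repoSecrets

	// Remember the secrets for the repo section that just ended.
	flush := func() {
		if baseurl != "" {
			secrets[baseurl] = current
		}
		baseurl = ""
		current = repoSecrets{}
	}

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if strings.HasPrefix(line, "[") {
			// A new "[repo-id]" section starts.
			flush()
			continue
		}
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 {
			continue
		}
		key := strings.TrimSpace(parts[0])
		value := strings.TrimSpace(parts[1])
		switch key {
		case "baseurl":
			baseurl = value
		case "sslcacert":
			current.SSLCACert = value
		case "sslclientkey":
			current.SSLClientKey = value
		case "sslclientcert":
			current.SSLClientCert = value
		}
	}
	flush()

	return secrets, scanner.Err()
}

func main() {
	secrets, err := loadRedHatRepoSecrets("/etc/yum.repos.d/redhat.repo")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for url, s := range secrets {
		fmt.Printf("%s -> cert %s, key %s\n", url, s.SSLClientCert, s.SSLClientKey)
	}
}

In composer itself, this lookup happens through rhsm.Subscriptions.GetSecretsForBaseurl(), which toDNFRepoConfig() in the code below uses to fill the sslcacert, sslclientkey, and sslclientcert fields passed to dnf-json.
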
package rpmmd

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/gobwas/glob"
	"github.com/osbuild/osbuild-composer/internal/rhsm"
)

type repository struct {
	Name           string   `json:"name"`
	BaseURL        string   `json:"baseurl,omitempty"`
	Metalink       string   `json:"metalink,omitempty"`
	MirrorList     string   `json:"mirrorlist,omitempty"`
	GPGKey         string   `json:"gpgkey,omitempty"`
	CheckGPG       bool     `json:"check_gpg,omitempty"`
	RHSM           bool     `json:"rhsm,omitempty"`
	MetadataExpire string   `json:"metadata_expire,omitempty"`
	ImageTypeTags  []string `json:"image_type_tags,omitempty"`
}

type dnfRepoConfig struct {
	ID             string `json:"id"`
	BaseURL        string `json:"baseurl,omitempty"`
	Metalink       string `json:"metalink,omitempty"`
	MirrorList     string `json:"mirrorlist,omitempty"`
	GPGKey         string `json:"gpgkey,omitempty"`
	IgnoreSSL      bool   `json:"ignoressl"`
	SSLCACert      string `json:"sslcacert,omitempty"`
	SSLClientKey   string `json:"sslclientkey,omitempty"`
	SSLClientCert  string `json:"sslclientcert,omitempty"`
	MetadataExpire string `json:"metadata_expire,omitempty"`
}

type RepoConfig struct {
	Name           string
	BaseURL        string
	Metalink       string
	MirrorList     string
	GPGKey         string
	CheckGPG       bool
	IgnoreSSL      bool
	MetadataExpire string
	RHSM           bool
	ImageTypeTags  []string
}

type DistrosRepoConfigs map[string]map[string][]RepoConfig

type PackageList []Package

type Package struct {
	Name        string
	Summary     string
	Description string
	URL         string
	Epoch       uint
	Version     string
	Release     string
	Arch        string
	BuildTime   time.Time
	License     string
}

func (pkg Package) ToPackageBuild() PackageBuild {
	// Convert the time to the API time format
	return PackageBuild{
		Arch:           pkg.Arch,
		BuildTime:      pkg.BuildTime.Format("2006-01-02T15:04:05"),
		Epoch:          pkg.Epoch,
		Release:        pkg.Release,
		Changelog:      "CHANGELOG_NEEDED",  // the same value as lorax-composer puts here
		BuildConfigRef: "BUILD_CONFIG_REF",  // the same value as lorax-composer puts here
		BuildEnvRef:    "BUILD_ENV_REF",     // the same value as lorax-composer puts here
		Source: PackageSource{
			License:   pkg.License,
			Version:   pkg.Version,
			SourceRef: "SOURCE_REF", // the same value as lorax-composer puts here
		},
	}
}

func (pkg Package) ToPackageInfo() PackageInfo {
	return PackageInfo{
		Name:         pkg.Name,
		Summary:      pkg.Summary,
		Description:  pkg.Description,
		Homepage:     pkg.URL,
		UpstreamVCS:  "UPSTREAM_VCS", // the same value as lorax-composer puts here
		Builds:       []PackageBuild{pkg.ToPackageBuild()},
		Dependencies: nil,
	}
}

// The inputs to depsolve, a set of packages to include and a set of
// packages to exclude.
type PackageSet struct {
	Include []string
	Exclude []string
}

// Append the Include and Exclude package list from another PackageSet and
// return the result.
func (ps PackageSet) Append(other PackageSet) PackageSet {
	ps.Include = append(ps.Include, other.Include...)
	ps.Exclude = append(ps.Exclude, other.Exclude...)
	return ps
}
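
// For illustration, appending two sets concatenates both lists; the package
// names below are hypothetical examples:
//
//	PackageSet{Include: []string{"vim-minimal"}}.Append(PackageSet{Include: []string{"git"}, Exclude: []string{"nano"}})
//	// -> PackageSet{Include: []string{"vim-minimal", "git"}, Exclude: []string{"nano"}}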

// TODO: the public API of this package should not be reused for serialization.
type PackageSpec struct {
	Name           string `json:"name"`
	Epoch          uint   `json:"epoch"`
	Version        string `json:"version,omitempty"`
	Release        string `json:"release,omitempty"`
	Arch           string `json:"arch,omitempty"`
	RemoteLocation string `json:"remote_location,omitempty"`
	Checksum       string `json:"checksum,omitempty"`
	Secrets        string `json:"secrets,omitempty"`
	CheckGPG       bool   `json:"check_gpg,omitempty"`
}

type dnfPackageSpec struct {
	Name           string `json:"name"`
	Epoch          uint   `json:"epoch"`
	Version        string `json:"version,omitempty"`
	Release        string `json:"release,omitempty"`
	Arch           string `json:"arch,omitempty"`
	RepoID         string `json:"repo_id,omitempty"`
	Path           string `json:"path,omitempty"`
	RemoteLocation string `json:"remote_location,omitempty"`
	Checksum       string `json:"checksum,omitempty"`
	Secrets        string `json:"secrets,omitempty"`
}

type PackageSource struct {
	License   string   `json:"license"`
	Version   string   `json:"version"`
	SourceRef string   `json:"source_ref"`
	Metadata  struct{} `json:"metadata"` // it's just an empty struct in lorax-composer
}

type PackageBuild struct {
	Arch           string        `json:"arch"`
	BuildTime      string        `json:"build_time"`
	Epoch          uint          `json:"epoch"`
	Release        string        `json:"release"`
	Source         PackageSource `json:"source"`
	Changelog      string        `json:"changelog"`
	BuildConfigRef string        `json:"build_config_ref"`
	BuildEnvRef    string        `json:"build_env_ref"`
	Metadata       struct{}      `json:"metadata"` // it's just an empty struct in lorax-composer
}

type PackageInfo struct {
	Name         string         `json:"name"`
	Summary      string         `json:"summary"`
	Description  string         `json:"description"`
	Homepage     string         `json:"homepage"`
	UpstreamVCS  string         `json:"upstream_vcs"`
	Builds       []PackageBuild `json:"builds"`
	Dependencies []PackageSpec  `json:"dependencies,omitempty"`
}

type RPMMD interface {
	// FetchMetadata returns all metadata about the repositories we use in the code. Specifically, it is a
	// list of packages and a dictionary of checksums of the repositories.
	FetchMetadata(repos []RepoConfig, modulePlatformID, arch, releasever string) (PackageList, map[string]string, error)

	// Depsolve takes a list of required content (specs), explicitly unwanted content (excludeSpecs), a list
	// of repositories, and a platform ID for modularity. It returns a list of all packages (with solved
	// dependencies) that will be installed into the system.
	Depsolve(packageSet PackageSet, repos []RepoConfig, modulePlatformID, arch, releasever string) ([]PackageSpec, map[string]string, error)
}
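
// A hedged usage sketch of this interface; the cache directory, dnf-json path,
// repository list, and package names below are hypothetical examples, not
// values mandated by composer:
//
//	rpmmd := NewRPMMD("/var/cache/osbuild-composer/rpmmd", "/usr/libexec/osbuild-composer/dnf-json")
//	pkgs, checksums, err := rpmmd.FetchMetadata(repos, "platform:f33", "x86_64", "33")
//	deps, checksums, err := rpmmd.Depsolve(
//		PackageSet{Include: []string{"@core", "vim-minimal"}, Exclude: []string{"dracut-config-rescue"}},
//		repos, "platform:f33", "x86_64", "33")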

type DNFError struct {
	Kind   string `json:"kind"`
	Reason string `json:"reason"`
}

func (err *DNFError) Error() string {
	return fmt.Sprintf("DNF error occurred: %s: %s", err.Kind, err.Reason)
}

type RepositoryError struct {
	msg string
}

func (re *RepositoryError) Error() string {
	return re.msg
}

func loadRepositoriesFromFile(filename string) (map[string][]RepoConfig, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var reposMap map[string][]repository
	repoConfigs := make(map[string][]RepoConfig)

	err = json.NewDecoder(f).Decode(&reposMap)
	if err != nil {
		return nil, err
	}

	for arch, repos := range reposMap {
		for _, repo := range repos {
			config := RepoConfig{
				Name:           repo.Name,
				BaseURL:        repo.BaseURL,
				Metalink:       repo.Metalink,
				MirrorList:     repo.MirrorList,
				GPGKey:         repo.GPGKey,
				CheckGPG:       repo.CheckGPG,
				RHSM:           repo.RHSM,
				MetadataExpire: repo.MetadataExpire,
				ImageTypeTags:  repo.ImageTypeTags,
			}

			repoConfigs[arch] = append(repoConfigs[arch], config)
		}
	}

	return repoConfigs, nil
}

// LoadAllRepositories loads all repositories for the given distros from the given list of paths.
// Behavior is the same as with the LoadRepositories() method.
func LoadAllRepositories(confPaths []string) (DistrosRepoConfigs, error) {
	distrosRepoConfigs := DistrosRepoConfigs{}

	for _, confPath := range confPaths {
		reposPath := filepath.Join(confPath, "repositories")

		fileEntries, err := ioutil.ReadDir(reposPath)
		if os.IsNotExist(err) {
			continue
		} else if err != nil {
			return nil, err
		}

		for _, fileEntry := range fileEntries {
			// Skip all directories
			if fileEntry.IsDir() {
				continue
			}

			// distro repositories definition is expected to be named "<distro_name>.json"
			if strings.HasSuffix(fileEntry.Name(), ".json") {
				distro := strings.TrimSuffix(fileEntry.Name(), ".json")

				// skip the distro repos definition if it has already been read
				_, ok := distrosRepoConfigs[distro]
				if ok {
					continue
				}

				distroRepos, err := loadRepositoriesFromFile(filepath.Join(reposPath, fileEntry.Name()))
				if err != nil {
					return nil, err
				}

				distrosRepoConfigs[distro] = distroRepos
			}
		}
	}

	return distrosRepoConfigs, nil
}

// LoadRepositories loads distribution repositories from the given list of paths.
// If duplicate distro repository definitions are found in multiple paths, the first
// one encountered is preferred. For this reason, the order of paths in the passed list
// should reflect the desired preference.
func LoadRepositories(confPaths []string, distro string) (map[string][]RepoConfig, error) {
	var repoConfigs map[string][]RepoConfig
	path := "/repositories/" + distro + ".json"

	for _, confPath := range confPaths {
		var err error
		repoConfigs, err = loadRepositoriesFromFile(confPath + path)
		if os.IsNotExist(err) {
			continue
		} else if err != nil {
			return nil, err
		}

		// Found the distro repository configs in the current path
		if repoConfigs != nil {
			break
		}
	}

	if repoConfigs == nil {
		return nil, &RepositoryError{"LoadRepositories failed: none of the provided paths contain distro configuration"}
	}

	return repoConfigs, nil
}
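
// Usage sketch (the configuration paths and distro name are illustrative examples):
//
//	repos, err := LoadRepositories(
//		[]string{"/etc/osbuild-composer", "/usr/share/osbuild-composer"},
//		"fedora-33")
//	// repos maps an architecture name (e.g. "x86_64") to its []RepoConfig.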

func runDNF(dnfJsonPath string, command string, arguments interface{}, result interface{}) error {
	var call = struct {
		Command   string      `json:"command"`
		Arguments interface{} `json:"arguments,omitempty"`
	}{
		command,
		arguments,
	}

	cmd := exec.Command(dnfJsonPath)

	stdin, err := cmd.StdinPipe()
	if err != nil {
		return err
	}

	cmd.Stderr = os.Stderr
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}

	err = cmd.Start()
	if err != nil {
		return err
	}

	err = json.NewEncoder(stdin).Encode(call)
	if err != nil {
		return err
	}
	stdin.Close()

	output, err := ioutil.ReadAll(stdout)
	if err != nil {
		return err
	}

	err = cmd.Wait()

	const DnfErrorExitCode = 10
	if runError, ok := err.(*exec.ExitError); ok && runError.ExitCode() == DnfErrorExitCode {
		var dnfError DNFError
		err = json.Unmarshal(output, &dnfError)
		if err != nil {
			return err
		}

		return &dnfError
	}

	err = json.Unmarshal(output, result)
	if err != nil {
		return err
	}

	return nil
}

type rpmmdImpl struct {
	CacheDir      string
	subscriptions *rhsm.Subscriptions
	dnfJsonPath   string
}

func NewRPMMD(cacheDir, dnfJsonPath string) RPMMD {
	subscriptions, err := rhsm.LoadSystemSubscriptions()
	if err != nil {
		log.Println("Failed to load subscriptions. osbuild-composer will fail to build images if the "+
			"configured repositories require them:", err)
	} else if err == nil && subscriptions == nil {
		log.Println("This host is not subscribed to any RPM repositories. This is fine as long as " +
			"the configured sources don't enable \"rhsm\".")
	}
	return &rpmmdImpl{
		CacheDir:      cacheDir,
		subscriptions: subscriptions,
		dnfJsonPath:   dnfJsonPath,
	}
}

func (repo RepoConfig) toDNFRepoConfig(rpmmd *rpmmdImpl, i int, arch, releasever string) (dnfRepoConfig, error) {
	id := strconv.Itoa(i)
	dnfRepo := dnfRepoConfig{
		ID:             id,
		BaseURL:        repo.BaseURL,
		Metalink:       repo.Metalink,
		MirrorList:     repo.MirrorList,
		GPGKey:         repo.GPGKey,
		IgnoreSSL:      repo.IgnoreSSL,
		MetadataExpire: repo.MetadataExpire,
	}
	if repo.RHSM {
		secrets, err := rpmmd.subscriptions.GetSecretsForBaseurl(repo.BaseURL, arch, releasever)
		if err != nil {
			return dnfRepoConfig{}, fmt.Errorf("RHSM secrets not found on the host for this baseurl: %s", repo.BaseURL)
		}
		dnfRepo.SSLCACert = secrets.SSLCACert
		dnfRepo.SSLClientKey = secrets.SSLClientKey
		dnfRepo.SSLClientCert = secrets.SSLClientCert
	}
	return dnfRepo, nil
}

func (r *rpmmdImpl) FetchMetadata(repos []RepoConfig, modulePlatformID, arch, releasever string) (PackageList, map[string]string, error) {
	var dnfRepoConfigs []dnfRepoConfig
	for i, repo := range repos {
		dnfRepo, err := repo.toDNFRepoConfig(r, i, arch, releasever)
		if err != nil {
			return nil, nil, err
		}
		dnfRepoConfigs = append(dnfRepoConfigs, dnfRepo)
	}

	var arguments = struct {
		Repos            []dnfRepoConfig `json:"repos"`
		CacheDir         string          `json:"cachedir"`
		ModulePlatformID string          `json:"module_platform_id"`
		Arch             string          `json:"arch"`
	}{dnfRepoConfigs, r.CacheDir, modulePlatformID, arch}
	var reply struct {
		Checksums map[string]string `json:"checksums"`
		Packages  PackageList       `json:"packages"`
	}

	err := runDNF(r.dnfJsonPath, "dump", arguments, &reply)

	sort.Slice(reply.Packages, func(i, j int) bool {
		return reply.Packages[i].Name < reply.Packages[j].Name
	})
	checksums := make(map[string]string)
	for i, repo := range repos {
		checksums[repo.Name] = reply.Checksums[strconv.Itoa(i)]
	}
	return reply.Packages, checksums, err
}

func (r *rpmmdImpl) Depsolve(packageSet PackageSet, repos []RepoConfig, modulePlatformID, arch, releasever string) ([]PackageSpec, map[string]string, error) {
	var dnfRepoConfigs []dnfRepoConfig

	for i, repo := range repos {
		dnfRepo, err := repo.toDNFRepoConfig(r, i, arch, releasever)
		if err != nil {
			return nil, nil, err
		}
		dnfRepoConfigs = append(dnfRepoConfigs, dnfRepo)
	}

	var arguments = struct {
		PackageSpecs     []string        `json:"package-specs"`
		ExcludeSpecs     []string        `json:"exclude-specs"`
		Repos            []dnfRepoConfig `json:"repos"`
		CacheDir         string          `json:"cachedir"`
		ModulePlatformID string          `json:"module_platform_id"`
		Arch             string          `json:"arch"`
	}{packageSet.Include, packageSet.Exclude, dnfRepoConfigs, r.CacheDir, modulePlatformID, arch}
	var reply struct {
		Checksums    map[string]string `json:"checksums"`
		Dependencies []dnfPackageSpec  `json:"dependencies"`
	}
	err := runDNF(r.dnfJsonPath, "depsolve", arguments, &reply)

	dependencies := make([]PackageSpec, len(reply.Dependencies))
	for i, pack := range reply.Dependencies {
		id, err := strconv.Atoi(pack.RepoID)
		if err != nil {
			panic(err)
		}
		repo := repos[id]
		dep := reply.Dependencies[i]
		dependencies[i].Name = dep.Name
		dependencies[i].Epoch = dep.Epoch
		dependencies[i].Version = dep.Version
		dependencies[i].Release = dep.Release
		dependencies[i].Arch = dep.Arch
		dependencies[i].RemoteLocation = dep.RemoteLocation
		dependencies[i].Checksum = dep.Checksum
		dependencies[i].CheckGPG = repo.CheckGPG
		if repo.RHSM {
			dependencies[i].Secrets = "org.osbuild.rhsm"
		}
	}

	return dependencies, reply.Checksums, err
}

func (packages PackageList) Search(globPatterns ...string) (PackageList, error) {
	var globs []glob.Glob

	for _, globPattern := range globPatterns {
		g, err := glob.Compile(globPattern)
		if err != nil {
			return nil, err
		}

		globs = append(globs, g)
	}

	var foundPackages PackageList

	for _, pkg := range packages {
		for _, g := range globs {
			if g.Match(pkg.Name) {
				foundPackages = append(foundPackages, pkg)
				break
			}
		}
	}

	sort.Slice(packages, func(i, j int) bool {
		return packages[i].Name < packages[j].Name
	})

	return foundPackages, nil
}

func (packages PackageList) ToPackageInfos() []PackageInfo {
	resultsNames := make(map[string]int)
	var results []PackageInfo

	for _, pkg := range packages {
		if index, ok := resultsNames[pkg.Name]; ok {
			foundPkg := &results[index]

			foundPkg.Builds = append(foundPkg.Builds, pkg.ToPackageBuild())
		} else {
			newIndex := len(results)
			resultsNames[pkg.Name] = newIndex

			packageInfo := pkg.ToPackageInfo()

			results = append(results, packageInfo)
		}
	}

	return results
}

func (pkg *PackageInfo) FillDependencies(rpmmd RPMMD, repos []RepoConfig, modulePlatformID, arch, releasever string) (err error) {
	pkg.Dependencies, _, err = rpmmd.Depsolve(PackageSet{Include: []string{pkg.Name}}, repos, modulePlatformID, arch, releasever)
	return
}