split: replace internal packages with images library
Remove all the internal packages that now live in the github.com/osbuild/images module and vendor that module instead. A new function in internal/blueprint/ converts an osbuild-composer blueprint into an images blueprint; this is needed because the blueprint implementation is, for now, kept in both packages. In the future, the images package will change its blueprint (and most likely rename it), while the current blueprint will remain part of the osbuild-composer internals and interface only. The Convert() function will be responsible for converting the blueprint into the new configuration object.
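A rough sketch of the call pattern this introduces (the surrounding function, its name, and its parameters are illustrative only, not code from either repository): callers convert the composer blueprint first and then hand the result to the image type's Manifest() method, which now expects the images blueprint type.

package example

import (
    "github.com/osbuild/images/pkg/distro"
    "github.com/osbuild/images/pkg/rpmmd"

    "github.com/osbuild/osbuild-composer/internal/blueprint"
)

// renderManifest mirrors the pattern used by the call sites changed in this
// commit; the function name and signature are illustrative.
func renderManifest(t distro.ImageType, bp blueprint.Blueprint, options distro.ImageOptions, repos []rpmmd.RepoConfig, seed int64) error {
    // Convert the osbuild-composer blueprint into the images blueprint.
    ibp := blueprint.Convert(bp)
    // Manifest now takes a pointer to the images blueprint.
    manifestSource, _, err := t.Manifest(&ibp, options, repos, seed)
    if err != nil {
        return err
    }
    _ = manifestSource // callers go on to depsolve and serialize this
    return nil
}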
parent d59199670f
commit 0e4a9e586f
446 changed files with 5690 additions and 13312 deletions
@@ -17,15 +17,15 @@ import (
     "path/filepath"
     "strings"

-    "github.com/osbuild/osbuild-composer/internal/blueprint"
-    "github.com/osbuild/osbuild-composer/internal/container"
-    "github.com/osbuild/osbuild-composer/internal/distro"
-    "github.com/osbuild/osbuild-composer/internal/distroregistry"
+    "github.com/osbuild/images/pkg/blueprint"
+    "github.com/osbuild/images/pkg/container"
+    "github.com/osbuild/images/pkg/distro"
+    "github.com/osbuild/images/pkg/distroregistry"
+    "github.com/osbuild/images/pkg/manifest"
+    "github.com/osbuild/images/pkg/ostree"
+    "github.com/osbuild/images/pkg/rhsm/facts"
+    "github.com/osbuild/images/pkg/rpmmd"
     "github.com/osbuild/osbuild-composer/internal/dnfjson"
-    "github.com/osbuild/osbuild-composer/internal/manifest"
-    "github.com/osbuild/osbuild-composer/internal/ostree"
-    "github.com/osbuild/osbuild-composer/internal/rhsm/facts"
-    "github.com/osbuild/osbuild-composer/internal/rpmmd"
 )

 type multiValue []string
@@ -5,7 +5,7 @@ import (
     "fmt"
     "os"

-    "github.com/osbuild/osbuild-composer/internal/distroregistry"
+    "github.com/osbuild/images/pkg/distroregistry"
 )

 func main() {
@@ -10,8 +10,8 @@ import (
     "strings"
     "testing"

-    "github.com/osbuild/osbuild-composer/internal/distro/distro_test_common"
-    "github.com/osbuild/osbuild-composer/internal/distroregistry"
+    "github.com/osbuild/images/pkg/distro/distro_test_common"
+    "github.com/osbuild/images/pkg/distroregistry"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
@ -21,10 +21,10 @@ import (
|
|||
"github.com/osbuild/osbuild-composer/pkg/jobqueue"
|
||||
"github.com/osbuild/osbuild-composer/pkg/jobqueue/dbjobqueue"
|
||||
|
||||
"github.com/osbuild/images/pkg/distroregistry"
|
||||
"github.com/osbuild/osbuild-composer/internal/auth"
|
||||
"github.com/osbuild/osbuild-composer/internal/cloudapi"
|
||||
v2 "github.com/osbuild/osbuild-composer/internal/cloudapi/v2"
|
||||
"github.com/osbuild/osbuild-composer/internal/distroregistry"
|
||||
"github.com/osbuild/osbuild-composer/internal/dnfjson"
|
||||
"github.com/osbuild/osbuild-composer/internal/jobqueue/fsjobqueue"
|
||||
"github.com/osbuild/osbuild-composer/internal/weldr"
|
||||
|
|
|
|||
|
|
@ -10,13 +10,13 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/blueprint"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro"
|
||||
rhel "github.com/osbuild/osbuild-composer/internal/distro/rhel8"
|
||||
"github.com/osbuild/images/pkg/blueprint"
|
||||
"github.com/osbuild/images/pkg/distro"
|
||||
rhel "github.com/osbuild/images/pkg/distro/rhel8"
|
||||
"github.com/osbuild/images/pkg/ostree"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/dnfjson"
|
||||
"github.com/osbuild/osbuild-composer/internal/ostree"
|
||||
"github.com/osbuild/osbuild-composer/internal/platform"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
)
|
||||
|
||||
// This test loads all the repositories available in /repositories directory
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/upload/koji"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/upload/koji"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
|
|
|||
|
|
@@ -8,10 +8,9 @@ import (
     "fmt"
     "os"

-    "github.com/osbuild/osbuild-composer/internal/blueprint"
-    "github.com/osbuild/osbuild-composer/internal/distro"
-    "github.com/osbuild/osbuild-composer/internal/distroregistry"
-    "github.com/osbuild/osbuild-composer/internal/ostree"
+    "github.com/osbuild/images/pkg/distro"
+    "github.com/osbuild/images/pkg/distroregistry"
+    "github.com/osbuild/images/pkg/ostree"
 )

 func main() {
@@ -53,7 +52,7 @@ func main() {
             URL: "https://example.com", // required by some image types
         },
     }
-    manifest, _, err := image.Manifest(&blueprint.Blueprint{}, options, nil, 0)
+    manifest, _, err := image.Manifest(nil, options, nil, 0)
     if err != nil {
         panic(err)
     }
@@ -8,15 +8,15 @@ import (
     "os"
     "path"

+    "github.com/osbuild/images/pkg/container"
+    "github.com/osbuild/images/pkg/distro"
+    "github.com/osbuild/images/pkg/distroregistry"
+    "github.com/osbuild/images/pkg/ostree"
     "github.com/osbuild/osbuild-composer/internal/common"
-    "github.com/osbuild/osbuild-composer/internal/container"
-    "github.com/osbuild/osbuild-composer/internal/distro"
-    "github.com/osbuild/osbuild-composer/internal/distroregistry"
     "github.com/osbuild/osbuild-composer/internal/dnfjson"
-    "github.com/osbuild/osbuild-composer/internal/ostree"

+    "github.com/osbuild/images/pkg/rpmmd"
     "github.com/osbuild/osbuild-composer/internal/blueprint"
-    "github.com/osbuild/osbuild-composer/internal/rpmmd"
 )

 type repository struct {
@@ -195,7 +195,8 @@ func main() {
     // let the cache grow to fit much more repository metadata than we usually allow
     solver.SetMaxCacheSize(3 * 1024 * 1024 * 1024)

-    manifest, _, err := imageType.Manifest(&composeRequest.Blueprint, options, repos, seedArg)
+    ibp := blueprint.Convert(composeRequest.Blueprint)
+    manifest, _, err := imageType.Manifest(&ibp, options, repos, seedArg)
     if err != nil {
         panic(err.Error())
     }
@ -8,11 +8,11 @@ import (
|
|||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/osbuild/images/pkg/distro"
|
||||
"github.com/osbuild/images/pkg/distroregistry"
|
||||
"github.com/osbuild/images/pkg/image"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/distroregistry"
|
||||
"github.com/osbuild/osbuild-composer/internal/image"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
)
|
||||
|
||||
var ImageTypes = make(map[string]image.ImageKind)
|
||||
|
|
|
|||
|
|
@ -3,11 +3,11 @@ package main
|
|||
import (
|
||||
"math/rand"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/artifact"
|
||||
"github.com/osbuild/osbuild-composer/internal/manifest"
|
||||
"github.com/osbuild/osbuild-composer/internal/platform"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/runner"
|
||||
"github.com/osbuild/images/pkg/artifact"
|
||||
"github.com/osbuild/images/pkg/manifest"
|
||||
"github.com/osbuild/images/pkg/platform"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/images/pkg/runner"
|
||||
)
|
||||
|
||||
// MyContainer contains the arguments passed in as a JSON blob.
|
||||
|
|
|
|||
|
|
@ -3,12 +3,12 @@ package main
|
|||
import (
|
||||
"math/rand"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/artifact"
|
||||
"github.com/osbuild/osbuild-composer/internal/disk"
|
||||
"github.com/osbuild/osbuild-composer/internal/manifest"
|
||||
"github.com/osbuild/osbuild-composer/internal/platform"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/runner"
|
||||
"github.com/osbuild/images/pkg/artifact"
|
||||
"github.com/osbuild/images/pkg/disk"
|
||||
"github.com/osbuild/images/pkg/manifest"
|
||||
"github.com/osbuild/images/pkg/platform"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/images/pkg/runner"
|
||||
)
|
||||
|
||||
type MyImage struct {
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
package main
|
||||
|
||||
import "github.com/osbuild/osbuild-composer/internal/disk"
|
||||
import "github.com/osbuild/images/pkg/disk"
|
||||
|
||||
var basePT = disk.PartitionTable{
|
||||
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
|
||||
|
|
|
|||
|
|
@ -6,12 +6,12 @@ import (
|
|||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/distro"
|
||||
"github.com/osbuild/images/pkg/distro"
|
||||
"github.com/osbuild/images/pkg/image"
|
||||
"github.com/osbuild/images/pkg/manifest"
|
||||
"github.com/osbuild/images/pkg/osbuild"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/dnfjson"
|
||||
"github.com/osbuild/osbuild-composer/internal/image"
|
||||
"github.com/osbuild/osbuild-composer/internal/manifest"
|
||||
"github.com/osbuild/osbuild-composer/internal/osbuild"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/runner"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@@ -9,20 +9,21 @@ import (

     "github.com/google/uuid"

+    "github.com/osbuild/images/pkg/distro"
+    "github.com/osbuild/images/pkg/distro/fedora"
+    "github.com/osbuild/images/pkg/distroregistry"
+    "github.com/osbuild/images/pkg/manifest"
+    "github.com/osbuild/images/pkg/rpmmd"
     "github.com/osbuild/osbuild-composer/internal/blueprint"
-    "github.com/osbuild/osbuild-composer/internal/distro"
-    "github.com/osbuild/osbuild-composer/internal/distro/fedora"
-    "github.com/osbuild/osbuild-composer/internal/distroregistry"
     "github.com/osbuild/osbuild-composer/internal/dnfjson"
-    "github.com/osbuild/osbuild-composer/internal/manifest"
     "github.com/osbuild/osbuild-composer/internal/platform"
-    "github.com/osbuild/osbuild-composer/internal/rpmmd"
     "github.com/osbuild/osbuild-composer/internal/store"
     "github.com/osbuild/osbuild-composer/internal/target"
 )

 func getManifest(bp blueprint.Blueprint, t distro.ImageType, a distro.Arch, d distro.Distro, cacheDir string, repos []rpmmd.RepoConfig) (manifest.OSBuildManifest, []rpmmd.PackageSpec) {
-    manifest, _, err := t.Manifest(&bp, distro.ImageOptions{}, repos, 0)
+    ibp := blueprint.Convert(bp)
+    manifest, _, err := t.Manifest(&ibp, distro.ImageOptions{}, repos, 0)
     if err != nil {
         panic(err)
     }
@ -8,7 +8,7 @@ import (
|
|||
"os/user"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/container"
|
||||
"github.com/osbuild/images/pkg/container"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ import (
|
|||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/container"
|
||||
"github.com/osbuild/images/pkg/container"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -5,8 +5,8 @@ import (
|
|||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/dnfjson"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -7,8 +7,8 @@ import (
|
|||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/osbuild"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/images/pkg/osbuild"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/target"
|
||||
"github.com/osbuild/osbuild-composer/internal/upload/koji"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
|
|
|
|||
|
|
@ -12,8 +12,8 @@ import (
|
|||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/container"
|
||||
"github.com/osbuild/osbuild-composer/internal/osbuild"
|
||||
"github.com/osbuild/images/pkg/container"
|
||||
"github.com/osbuild/images/pkg/osbuild"
|
||||
"github.com/osbuild/osbuild-composer/internal/upload/oci"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ import (
|
|||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/ostree"
|
||||
"github.com/osbuild/images/pkg/ostree"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
|
||||
)
|
||||
|
|
|
|||
go.mod

@@ -13,8 +13,6 @@ require (
     github.com/Azure/go-autorest/autorest/azure/auth v0.5.12
     github.com/BurntSushi/toml v1.2.1
     github.com/aws/aws-sdk-go v1.44.230
-    github.com/containers/common v0.49.1
-    github.com/containers/image/v5 v5.22.0
     github.com/coreos/go-semver v0.3.1
     github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
     github.com/deepmap/oapi-codegen v1.8.2
@@ -31,10 +29,9 @@ require (
     github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
     github.com/labstack/echo/v4 v4.10.2
     github.com/labstack/gommon v0.4.0
-    github.com/opencontainers/go-digest v1.0.0
-    github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
     github.com/openshift-online/ocm-sdk-go v0.1.315
     github.com/oracle/oci-go-sdk/v54 v54.0.0
+    github.com/osbuild/images v0.0.0-20230710155525-7045e8251769
     github.com/prometheus/client_golang v1.13.0
     github.com/segmentio/ksuid v1.0.4
     github.com/sirupsen/logrus v1.9.0
@@ -47,7 +44,6 @@ require (
     golang.org/x/sync v0.2.0
     golang.org/x/sys v0.8.0
     google.golang.org/api v0.126.0
-    gopkg.in/ini.v1 v1.67.0
 )

 require (
@@ -71,6 +67,8 @@ require (
     github.com/beorn7/perks v1.0.1 // indirect
     github.com/cenkalti/backoff/v4 v4.1.3 // indirect
     github.com/cespare/xxhash/v2 v2.2.0 // indirect
+    github.com/containers/common v0.49.1 // indirect
+    github.com/containers/image/v5 v5.22.0 // indirect
     github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
     github.com/containers/ocicrypt v1.1.5 // indirect
     github.com/containers/storage v1.42.0 // indirect
@@ -125,6 +123,8 @@ require (
     github.com/moby/sys/mountinfo v0.6.2 // indirect
     github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
     github.com/modern-go/reflect2 v1.0.2 // indirect
+    github.com/opencontainers/go-digest v1.0.0 // indirect
+    github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
     github.com/opencontainers/runc v1.1.5 // indirect
     github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
     github.com/pkg/errors v0.9.1 // indirect
@@ -163,6 +163,7 @@ require (
     google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
     google.golang.org/grpc v1.55.0 // indirect
     google.golang.org/protobuf v1.30.0 // indirect
+    gopkg.in/ini.v1 v1.67.0 // indirect
     gopkg.in/square/go-jose.v2 v2.6.0 // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect
go.sum

@@ -1090,6 +1090,8 @@ github.com/openshift-online/ocm-sdk-go v0.1.315/go.mod h1:KYOw8kAKAHyPrJcQoVR82C
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
 github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
+github.com/osbuild/images v0.0.0-20230710155525-7045e8251769 h1:+Q98coEA32epRH7CqbU/1OlP4UeI40o/lqj7ivZd8us=
+github.com/osbuild/images v0.0.0-20230710155525-7045e8251769/go.mod h1:HT4eobIaYIL7CXyyg3Ol8m0Ao7yw5CVNh3BG7q0xLtk=
 github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
 github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
 github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
@@ -5,9 +5,10 @@ import (
     "encoding/json"
     "fmt"

-    "github.com/osbuild/osbuild-composer/internal/crypt"
+    "github.com/osbuild/images/pkg/crypt"

     "github.com/coreos/go-semver/semver"
+    iblueprint "github.com/osbuild/images/pkg/blueprint"
 )

 // A Blueprint is a high-level description of an image.
@@ -182,3 +183,150 @@ func (b *Blueprint) CryptPasswords() error {

     return nil
 }
+
+func Convert(bp Blueprint) iblueprint.Blueprint {
+    pkgs := make([]iblueprint.Package, len(bp.Packages))
+    for idx := range bp.Packages {
+        pkgs[idx] = iblueprint.Package(bp.Packages[idx])
+    }
+
+    modules := make([]iblueprint.Package, len(bp.Modules))
+    for idx := range bp.Modules {
+        modules[idx] = iblueprint.Package(bp.Modules[idx])
+    }
+
+    groups := make([]iblueprint.Group, len(bp.Groups))
+    for idx := range bp.Groups {
+        groups[idx] = iblueprint.Group(bp.Groups[idx])
+    }
+
+    containers := make([]iblueprint.Container, len(bp.Containers))
+    for idx := range bp.Containers {
+        containers[idx] = iblueprint.Container(bp.Containers[idx])
+    }
+
+    customizations := iblueprint.Customizations{}
+    if c := bp.Customizations; c != nil {
+        customizations = iblueprint.Customizations{
+            Hostname:           c.Hostname,
+            InstallationDevice: c.InstallationDevice,
+        }
+
+        if fdo := c.FDO; fdo != nil {
+            ifdo := iblueprint.FDOCustomization(*fdo)
+            customizations.FDO = &ifdo
+        }
+        if oscap := c.OpenSCAP; oscap != nil {
+            ioscap := iblueprint.OpenSCAPCustomization(*oscap)
+            customizations.OpenSCAP = &ioscap
+        }
+        if ign := c.Ignition; ign != nil {
+            iign := iblueprint.IgnitionCustomization{}
+            if embed := ign.Embedded; embed != nil {
+                iembed := iblueprint.EmbeddedIgnitionCustomization(*embed)
+                iign.Embedded = &iembed
+            }
+            if fb := ign.FirstBoot; fb != nil {
+                ifb := iblueprint.FirstBootIgnitionCustomization(*fb)
+                iign.FirstBoot = &ifb
+            }
+            customizations.Ignition = &iign
+        }
+        if dirs := c.Directories; dirs != nil {
+            idirs := make([]iblueprint.DirectoryCustomization, len(dirs))
+            for idx := range dirs {
+                idirs[idx] = iblueprint.DirectoryCustomization(dirs[idx])
+            }
+            customizations.Directories = idirs
+        }
+        if files := c.Files; files != nil {
+            ifiles := make([]iblueprint.FileCustomization, len(files))
+            for idx := range files {
+                ifiles[idx] = iblueprint.FileCustomization(files[idx])
+            }
+            customizations.Files = ifiles
+        }
+        if repos := c.Repositories; repos != nil {
+            irepos := make([]iblueprint.RepositoryCustomization, len(repos))
+            for idx := range repos {
+                irepos[idx] = iblueprint.RepositoryCustomization(repos[idx])
+            }
+            customizations.Repositories = irepos
+        }
+        if kernel := c.Kernel; kernel != nil {
+            ikernel := iblueprint.KernelCustomization(*kernel)
+            customizations.Kernel = &ikernel
+        }
+        if sshkeys := c.SSHKey; sshkeys != nil {
+            isshkeys := make([]iblueprint.SSHKeyCustomization, len(sshkeys))
+            for idx := range sshkeys {
+                isshkeys[idx] = iblueprint.SSHKeyCustomization(sshkeys[idx])
+            }
+            customizations.SSHKey = isshkeys
+        }
+        if users := c.User; users != nil {
+            iusers := make([]iblueprint.UserCustomization, len(users))
+            for idx := range users {
+                iusers[idx] = iblueprint.UserCustomization(users[idx])
+            }
+            customizations.User = iusers
+        }
+        if groups := c.Group; groups != nil {
+            igroups := make([]iblueprint.GroupCustomization, len(groups))
+            for idx := range groups {
+                igroups[idx] = iblueprint.GroupCustomization(groups[idx])
+            }
+            customizations.Group = igroups
+        }
+        if fs := c.Filesystem; fs != nil {
+            ifs := make([]iblueprint.FilesystemCustomization, len(fs))
+            for idx := range fs {
+                ifs[idx] = iblueprint.FilesystemCustomization(fs[idx])
+            }
+            customizations.Filesystem = ifs
+        }
+        if tz := c.Timezone; tz != nil {
+            itz := iblueprint.TimezoneCustomization(*tz)
+            customizations.Timezone = &itz
+        }
+        if locale := c.Locale; locale != nil {
+            ilocale := iblueprint.LocaleCustomization(*locale)
+            customizations.Locale = &ilocale
+        }
+        if fw := c.Firewall; fw != nil {
+            ifw := iblueprint.FirewallCustomization{
+                Ports: fw.Ports,
+            }
+            if services := fw.Services; services != nil {
+                iservices := iblueprint.FirewallServicesCustomization(*services)
+                ifw.Services = &iservices
+            }
+            if zones := fw.Zones; zones != nil {
+                izones := make([]iblueprint.FirewallZoneCustomization, len(zones))
+                for idx := range zones {
+                    izones[idx] = iblueprint.FirewallZoneCustomization(zones[idx])
+                }
+                ifw.Zones = izones
+            }
+            customizations.Firewall = &ifw
+        }
+        if services := c.Services; services != nil {
+            iservices := iblueprint.ServicesCustomization(*services)
+            customizations.Services = &iservices
+        }
+    }
+
+    ibp := iblueprint.Blueprint{
+        Name:           bp.Name,
+        Description:    bp.Description,
+        Version:        bp.Version,
+        Packages:       pkgs,
+        Modules:        modules,
+        Groups:         groups,
+        Containers:     containers,
+        Customizations: &customizations,
+        Distro:         bp.Distro,
+    }
+
+    return ibp
+}
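A note on why Convert() can lean on plain type conversions such as iblueprint.Package(bp.Packages[idx]): Go only allows converting between two struct types when their underlying field sets are identical (struct tags aside), so the conversions above compile exactly as long as the composer and images blueprint definitions stay in sync. A minimal illustration with hypothetical types, not taken from either package:

package example

// Two structs with identical fields convert directly; adding a field to one
// side would turn the conversion below into a compile error.
type composerUser struct{ Name, Key string }
type imagesUser struct{ Name, Key string }

func convertUser(u composerUser) imagesUser {
    return imagesUser(u) // legal because the underlying types are identical
}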
@ -6,9 +6,9 @@ import (
|
|||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
"github.com/osbuild/osbuild-composer/internal/fsnode"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
)
|
||||
|
||||
type RepositoryCustomization struct {
|
||||
|
|
|
|||
|
|
@ -4,9 +4,9 @@ import (
|
|||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
"github.com/osbuild/osbuild-composer/internal/fsnode"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ import (
|
|||
"net/http"
|
||||
// "strings"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/weldr"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ import (
|
|||
"net/http"
|
||||
// "strings"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/weldr"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -16,13 +16,13 @@ import (
|
|||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/distro/test_distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/distroregistry"
|
||||
"github.com/osbuild/images/pkg/distro/test_distro"
|
||||
"github.com/osbuild/images/pkg/distroregistry"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/dnfjson"
|
||||
dnfjson_mock "github.com/osbuild/osbuild-composer/internal/mocks/dnfjson"
|
||||
rpmmd_mock "github.com/osbuild/osbuild-composer/internal/mocks/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/reporegistry"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/weldr"
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ package cloudapi
|
|||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/distroregistry"
|
||||
"github.com/osbuild/images/pkg/distroregistry"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
|
||||
v2 "github.com/osbuild/osbuild-composer/internal/cloudapi/v2"
|
||||
|
|
|
|||
|
|
@ -15,15 +15,15 @@ import (
|
|||
"github.com/google/uuid"
|
||||
"github.com/labstack/echo/v4"
|
||||
|
||||
"github.com/osbuild/images/pkg/distro"
|
||||
"github.com/osbuild/images/pkg/manifest"
|
||||
"github.com/osbuild/images/pkg/osbuild"
|
||||
"github.com/osbuild/images/pkg/ostree"
|
||||
"github.com/osbuild/images/pkg/rhsm/facts"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/images/pkg/subscription"
|
||||
"github.com/osbuild/osbuild-composer/internal/blueprint"
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/manifest"
|
||||
"github.com/osbuild/osbuild-composer/internal/osbuild"
|
||||
"github.com/osbuild/osbuild-composer/internal/ostree"
|
||||
"github.com/osbuild/osbuild-composer/internal/rhsm/facts"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/subscription"
|
||||
"github.com/osbuild/osbuild-composer/internal/target"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
|
||||
|
|
|
|||
|
|
@@ -19,14 +19,14 @@ import (

     "github.com/osbuild/osbuild-composer/pkg/jobqueue"

+    "github.com/osbuild/images/pkg/container"
+    "github.com/osbuild/images/pkg/distro"
+    "github.com/osbuild/images/pkg/distroregistry"
+    "github.com/osbuild/images/pkg/manifest"
+    "github.com/osbuild/images/pkg/ostree"
     "github.com/osbuild/osbuild-composer/internal/auth"
     "github.com/osbuild/osbuild-composer/internal/blueprint"
     "github.com/osbuild/osbuild-composer/internal/common"
-    "github.com/osbuild/osbuild-composer/internal/container"
-    "github.com/osbuild/osbuild-composer/internal/distro"
-    "github.com/osbuild/osbuild-composer/internal/distroregistry"
-    "github.com/osbuild/osbuild-composer/internal/manifest"
-    "github.com/osbuild/osbuild-composer/internal/ostree"
     "github.com/osbuild/osbuild-composer/internal/prometheus"
     "github.com/osbuild/osbuild-composer/internal/target"
     "github.com/osbuild/osbuild-composer/internal/worker"
@@ -117,7 +117,8 @@ func (s *Server) enqueueCompose(distribution distro.Distro, bp blueprint.Bluepri
     }
     ir := irs[0]

-    manifestSource, _, err := ir.imageType.Manifest(&bp, ir.imageOptions, ir.repositories, manifestSeed)
+    ibp := blueprint.Convert(bp)
+    manifestSource, _, err := ir.imageType.Manifest(&ibp, ir.imageOptions, ir.repositories, manifestSeed)
     if err != nil {
         return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
     }
@@ -237,7 +238,8 @@ func (s *Server) enqueueKojiCompose(taskID uint64, server, name, version, releas
     var kojiFilenames []string
     var buildIDs []uuid.UUID
     for _, ir := range irs {
-        manifestSource, _, err := ir.imageType.Manifest(&bp, ir.imageOptions, ir.repositories, manifestSeed)
+        ibp := blueprint.Convert(bp)
+        manifestSource, _, err := ir.imageType.Manifest(&ibp, ir.imageOptions, ir.repositories, manifestSeed)
         if err != nil {
             return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
         }
internal/cloudapi/v2/server.go.orig (new file, 564 lines)

@@ -0,0 +1,564 @@
+package v2
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "strings"
+    "sync"
+    "time"
+
+    "github.com/getkin/kin-openapi/openapi3"
+    "github.com/getkin/kin-openapi/routers"
+    legacyrouter "github.com/getkin/kin-openapi/routers/legacy"
+    "github.com/google/uuid"
+    "github.com/labstack/echo/v4"
+    "github.com/labstack/echo/v4/middleware"
+    "github.com/sirupsen/logrus"
+
+    "github.com/osbuild/osbuild-composer/pkg/jobqueue"
+
+<<<<<<< HEAD
+    "github.com/osbuild/osbuild-composer/internal/auth"
+=======
+    "github.com/osbuild/images/pkg/container"
+    "github.com/osbuild/images/pkg/distro"
+    "github.com/osbuild/images/pkg/distroregistry"
+    "github.com/osbuild/images/pkg/manifest"
+    "github.com/osbuild/images/pkg/ostree"
+>>>>>>> 294a8e564 (split: wip)
+    "github.com/osbuild/osbuild-composer/internal/blueprint"
+    "github.com/osbuild/osbuild-composer/internal/common"
+    "github.com/osbuild/osbuild-composer/internal/prometheus"
+    "github.com/osbuild/osbuild-composer/internal/target"
+    "github.com/osbuild/osbuild-composer/internal/worker"
+    "github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
+)
|
||||
|
||||
// Server represents the state of the cloud Server
|
||||
type Server struct {
|
||||
workers *worker.Server
|
||||
distros *distroregistry.Registry
|
||||
config ServerConfig
|
||||
router routers.Router
|
||||
|
||||
goroutinesCtx context.Context
|
||||
goroutinesCtxCancel context.CancelFunc
|
||||
goroutinesGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
type ServerConfig struct {
|
||||
TenantProviderFields []string
|
||||
JWTEnabled bool
|
||||
}
|
||||
|
||||
func NewServer(workers *worker.Server, distros *distroregistry.Registry, config ServerConfig) *Server {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
spec, err := GetSwagger()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
loader := openapi3.NewLoader()
|
||||
if err := spec.Validate(loader.Context); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
router, err := legacyrouter.NewRouter(spec)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
server := &Server{
|
||||
workers: workers,
|
||||
distros: distros,
|
||||
config: config,
|
||||
router: router,
|
||||
|
||||
goroutinesCtx: ctx,
|
||||
goroutinesCtxCancel: cancel,
|
||||
}
|
||||
return server
|
||||
}
|
||||
|
||||
func (s *Server) Handler(path string) http.Handler {
|
||||
e := echo.New()
|
||||
e.Binder = binder{}
|
||||
e.HTTPErrorHandler = s.HTTPErrorHandler
|
||||
e.Pre(common.OperationIDMiddleware)
|
||||
e.Use(middleware.Recover())
|
||||
e.Logger = common.Logger()
|
||||
|
||||
handler := apiHandlers{
|
||||
server: s,
|
||||
}
|
||||
|
||||
mws := []echo.MiddlewareFunc{
|
||||
prometheus.StatusMiddleware(prometheus.ComposerSubsystem),
|
||||
}
|
||||
if s.config.JWTEnabled {
|
||||
mws = append(mws, auth.TenantChannelMiddleware(s.config.TenantProviderFields, HTTPError(ErrorTenantNotFound)))
|
||||
}
|
||||
mws = append(mws,
|
||||
prometheus.HTTPDurationMiddleware(prometheus.ComposerSubsystem),
|
||||
prometheus.MetricsMiddleware, s.ValidateRequest)
|
||||
RegisterHandlers(e.Group(path, mws...), &handler)
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
func (s *Server) Shutdown() {
|
||||
s.goroutinesCtxCancel()
|
||||
s.goroutinesGroup.Wait()
|
||||
}
|
||||
|
||||
func (s *Server) enqueueCompose(distribution distro.Distro, bp blueprint.Blueprint, manifestSeed int64, irs []imageRequest, channel string) (uuid.UUID, error) {
|
||||
var id uuid.UUID
|
||||
if len(irs) != 1 {
|
||||
return id, HTTPError(ErrorInvalidNumberOfImageBuilds)
|
||||
}
|
||||
ir := irs[0]
|
||||
|
||||
ibp := blueprint.Convert(bp)
|
||||
manifestSource, _, err := ir.imageType.Manifest(&ibp, ir.imageOptions, ir.repositories, manifestSeed)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
|
||||
depsolveJobID, err := s.workers.EnqueueDepsolve(&worker.DepsolveJob{
|
||||
PackageSets: manifestSource.GetPackageSetChains(),
|
||||
ModulePlatformID: distribution.ModulePlatformID(),
|
||||
Arch: ir.arch.Name(),
|
||||
Releasever: distribution.Releasever(),
|
||||
}, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
dependencies := []uuid.UUID{depsolveJobID}
|
||||
|
||||
var containerResolveJobID uuid.UUID
|
||||
containerSources := manifestSource.GetContainerSourceSpecs()
|
||||
if len(containerSources) > 1 {
|
||||
// only one pipeline can embed containers
|
||||
pipelines := make([]string, 0, len(containerSources))
|
||||
for name := range containerSources {
|
||||
pipelines = append(pipelines, name)
|
||||
}
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, fmt.Errorf("manifest returned %d pipelines with containers (at most 1 is supported): %s", len(containerSources), strings.Join(pipelines, ", ")))
|
||||
}
|
||||
|
||||
for _, sources := range containerSources {
|
||||
workerResolveSpecs := make([]worker.ContainerSpec, len(sources))
|
||||
for idx, source := range sources {
|
||||
workerResolveSpecs[idx] = worker.ContainerSpec{
|
||||
Source: source.Source,
|
||||
Name: source.Name,
|
||||
TLSVerify: source.TLSVerify,
|
||||
}
|
||||
}
|
||||
|
||||
job := worker.ContainerResolveJob{
|
||||
Arch: ir.arch.Name(),
|
||||
Specs: workerResolveSpecs,
|
||||
}
|
||||
|
||||
jobId, err := s.workers.EnqueueContainerResolveJob(&job, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
|
||||
containerResolveJobID = jobId
|
||||
dependencies = append(dependencies, containerResolveJobID)
|
||||
break // there can be only one
|
||||
}
|
||||
|
||||
var ostreeResolveJobID uuid.UUID
|
||||
commitSources := manifestSource.GetOSTreeSourceSpecs()
|
||||
if len(commitSources) > 1 {
|
||||
// only one pipeline can specify an ostree commit for content
|
||||
pipelines := make([]string, 0, len(commitSources))
|
||||
for name := range commitSources {
|
||||
pipelines = append(pipelines, name)
|
||||
}
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, fmt.Errorf("manifest returned %d pipelines with ostree commits (at most 1 is supported): %s", len(commitSources), strings.Join(pipelines, ", ")))
|
||||
}
|
||||
for _, sources := range commitSources {
|
||||
workerResolveSpecs := make([]worker.OSTreeResolveSpec, len(sources))
|
||||
for idx, source := range sources {
|
||||
// ostree.SourceSpec is directly convertible to worker.OSTreeResolveSpec
|
||||
workerResolveSpecs[idx] = worker.OSTreeResolveSpec(source)
|
||||
}
|
||||
jobID, err := s.workers.EnqueueOSTreeResolveJob(&worker.OSTreeResolveJob{Specs: workerResolveSpecs}, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
|
||||
ostreeResolveJobID = jobID
|
||||
dependencies = append(dependencies, ostreeResolveJobID)
|
||||
break // there can be only one
|
||||
}
|
||||
|
||||
manifestJobID, err := s.workers.EnqueueManifestJobByID(&worker.ManifestJobByID{}, dependencies, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
|
||||
id, err = s.workers.EnqueueOSBuildAsDependency(ir.arch.Name(), &worker.OSBuildJob{
|
||||
Targets: []*target.Target{ir.target},
|
||||
PipelineNames: &worker.PipelineNames{
|
||||
Build: ir.imageType.BuildPipelines(),
|
||||
Payload: ir.imageType.PayloadPipelines(),
|
||||
},
|
||||
}, []uuid.UUID{manifestJobID}, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
|
||||
s.goroutinesGroup.Add(1)
|
||||
go func() {
|
||||
serializeManifest(s.goroutinesCtx, manifestSource, s.workers, depsolveJobID, containerResolveJobID, ostreeResolveJobID, manifestJobID, manifestSeed)
|
||||
defer s.goroutinesGroup.Done()
|
||||
}()
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (s *Server) enqueueKojiCompose(taskID uint64, server, name, version, release string, distribution distro.Distro, bp blueprint.Blueprint, manifestSeed int64, irs []imageRequest, channel string) (uuid.UUID, error) {
|
||||
var id uuid.UUID
|
||||
kojiDirectory := "osbuild-cg/osbuild-composer-koji-" + uuid.New().String()
|
||||
|
||||
initID, err := s.workers.EnqueueKojiInit(&worker.KojiInitJob{
|
||||
Server: server,
|
||||
Name: name,
|
||||
Version: version,
|
||||
Release: release,
|
||||
}, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
|
||||
var kojiFilenames []string
|
||||
var buildIDs []uuid.UUID
|
||||
for _, ir := range irs {
|
||||
ibp := blueprint.Convert(bp)
|
||||
manifestSource, _, err := ir.imageType.Manifest(&ibp, ir.imageOptions, ir.repositories, manifestSeed)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
|
||||
depsolveJobID, err := s.workers.EnqueueDepsolve(&worker.DepsolveJob{
|
||||
PackageSets: manifestSource.GetPackageSetChains(),
|
||||
ModulePlatformID: distribution.ModulePlatformID(),
|
||||
Arch: ir.arch.Name(),
|
||||
Releasever: distribution.Releasever(),
|
||||
}, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
dependencies := []uuid.UUID{depsolveJobID}
|
||||
|
||||
var containerResolveJobID uuid.UUID
|
||||
containerSources := manifestSource.GetContainerSourceSpecs()
|
||||
if len(containerSources) > 1 {
|
||||
// only one pipeline can embed containers
|
||||
pipelines := make([]string, 0, len(containerSources))
|
||||
for name := range containerSources {
|
||||
pipelines = append(pipelines, name)
|
||||
}
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, fmt.Errorf("manifest returned %d pipelines with containers (at most 1 is supported): %s", len(containerSources), strings.Join(pipelines, ", ")))
|
||||
}
|
||||
|
||||
for _, sources := range containerSources {
|
||||
workerResolveSpecs := make([]worker.ContainerSpec, len(sources))
|
||||
for idx, source := range sources {
|
||||
workerResolveSpecs[idx] = worker.ContainerSpec{
|
||||
Source: source.Source,
|
||||
Name: source.Name,
|
||||
TLSVerify: source.TLSVerify,
|
||||
}
|
||||
}
|
||||
|
||||
job := worker.ContainerResolveJob{
|
||||
Arch: ir.arch.Name(),
|
||||
Specs: make([]worker.ContainerSpec, len(bp.Containers)),
|
||||
}
|
||||
|
||||
jobId, err := s.workers.EnqueueContainerResolveJob(&job, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
|
||||
containerResolveJobID = jobId
|
||||
dependencies = append(dependencies, containerResolveJobID)
|
||||
break // there can be only one
|
||||
}
|
||||
|
||||
var ostreeResolveJobID uuid.UUID
|
||||
commitSources := manifestSource.GetOSTreeSourceSpecs()
|
||||
if len(commitSources) > 1 {
|
||||
// only one pipeline can specify an ostree commit for content
|
||||
pipelines := make([]string, 0, len(commitSources))
|
||||
for name := range commitSources {
|
||||
pipelines = append(pipelines, name)
|
||||
}
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, fmt.Errorf("manifest returned %d pipelines with ostree commits (at most 1 is supported): %s", len(commitSources), strings.Join(pipelines, ", ")))
|
||||
}
|
||||
for _, sources := range commitSources {
|
||||
workerResolveSpecs := make([]worker.OSTreeResolveSpec, len(sources))
|
||||
for idx, source := range sources {
|
||||
// ostree.SourceSpec is directly convertible to worker.OSTreeResolveSpec
|
||||
workerResolveSpecs[idx] = worker.OSTreeResolveSpec(source)
|
||||
}
|
||||
jobID, err := s.workers.EnqueueOSTreeResolveJob(&worker.OSTreeResolveJob{Specs: workerResolveSpecs}, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
|
||||
ostreeResolveJobID = jobID
|
||||
dependencies = append(dependencies, ostreeResolveJobID)
|
||||
break // there can be only one
|
||||
}
|
||||
|
||||
manifestJobID, err := s.workers.EnqueueManifestJobByID(&worker.ManifestJobByID{}, dependencies, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
kojiFilename := fmt.Sprintf(
|
||||
"%s-%s-%s.%s%s",
|
||||
name,
|
||||
version,
|
||||
release,
|
||||
ir.arch.Name(),
|
||||
splitExtension(ir.imageType.Filename()),
|
||||
)
|
||||
|
||||
kojiTarget := target.NewKojiTarget(&target.KojiTargetOptions{
|
||||
Server: server,
|
||||
UploadDirectory: kojiDirectory,
|
||||
})
|
||||
kojiTarget.OsbuildArtifact.ExportFilename = ir.imageType.Filename()
|
||||
kojiTarget.OsbuildArtifact.ExportName = ir.imageType.Exports()[0]
|
||||
kojiTarget.ImageName = kojiFilename
|
||||
|
||||
targets := []*target.Target{kojiTarget}
|
||||
// add any cloud upload target if defined
|
||||
if ir.target != nil {
|
||||
targets = append(targets, ir.target)
|
||||
}
|
||||
|
||||
buildID, err := s.workers.EnqueueOSBuildAsDependency(ir.arch.Name(), &worker.OSBuildJob{
|
||||
PipelineNames: &worker.PipelineNames{
|
||||
Build: ir.imageType.BuildPipelines(),
|
||||
Payload: ir.imageType.PayloadPipelines(),
|
||||
},
|
||||
Targets: targets,
|
||||
ManifestDynArgsIdx: common.ToPtr(1),
|
||||
}, []uuid.UUID{initID, manifestJobID}, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
kojiFilenames = append(kojiFilenames, kojiFilename)
|
||||
buildIDs = append(buildIDs, buildID)
|
||||
|
||||
// copy the image request while passing it into the goroutine to prevent data races
|
||||
s.goroutinesGroup.Add(1)
|
||||
go func(ir imageRequest) {
|
||||
serializeManifest(s.goroutinesCtx, manifestSource, s.workers, depsolveJobID, containerResolveJobID, ostreeResolveJobID, manifestJobID, manifestSeed)
|
||||
defer s.goroutinesGroup.Done()
|
||||
}(ir)
|
||||
}
|
||||
id, err = s.workers.EnqueueKojiFinalize(&worker.KojiFinalizeJob{
|
||||
Server: server,
|
||||
Name: name,
|
||||
Version: version,
|
||||
Release: release,
|
||||
KojiFilenames: kojiFilenames,
|
||||
KojiDirectory: kojiDirectory,
|
||||
TaskID: taskID,
|
||||
StartTime: uint64(time.Now().Unix()),
|
||||
}, initID, buildIDs, channel)
|
||||
if err != nil {
|
||||
return id, HTTPErrorWithInternal(ErrorEnqueueingJob, err)
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func serializeManifest(ctx context.Context, manifestSource *manifest.Manifest, workers *worker.Server, depsolveJobID, containerResolveJobID, ostreeResolveJobID, manifestJobID uuid.UUID, seed int64) {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Minute*5)
|
||||
defer cancel()
|
||||
|
||||
// wait until job is in a pending state
|
||||
var token uuid.UUID
|
||||
var dynArgs []json.RawMessage
|
||||
var err error
|
||||
logWithId := logrus.WithField("jobId", manifestJobID)
|
||||
for {
|
||||
_, token, _, _, dynArgs, err = workers.RequestJobById(ctx, "", manifestJobID)
|
||||
if err == jobqueue.ErrNotPending {
|
||||
logWithId.Debug("Manifest job not pending, waiting for depsolve job to finish")
|
||||
time.Sleep(time.Millisecond * 50)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logWithId.Warning("Manifest job dependencies took longer than 5 minutes to finish, or the server is shutting down, returning to avoid dangling routines")
|
||||
break
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
logWithId.Errorf("Error requesting manifest job: %v", err)
|
||||
return
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
jobResult := &worker.ManifestJobByIDResult{
|
||||
Manifest: nil,
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if jobResult.JobError != nil {
|
||||
logWithId.Errorf("Error in manifest job %v: %v", jobResult.JobError.Reason, err)
|
||||
}
|
||||
|
||||
result, err := json.Marshal(jobResult)
|
||||
if err != nil {
|
||||
logWithId.Errorf("Error marshalling manifest job results: %v", err)
|
||||
}
|
||||
|
||||
err = workers.FinishJob(token, result)
|
||||
if err != nil {
|
||||
logWithId.Errorf("Error finishing manifest job: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if len(dynArgs) == 0 {
|
||||
reason := "No dynamic arguments"
|
||||
jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorNoDynamicArgs, reason, nil)
|
||||
return
|
||||
}
|
||||
|
||||
var depsolveResults worker.DepsolveJobResult
|
||||
err = json.Unmarshal(dynArgs[0], &depsolveResults)
|
||||
if err != nil {
|
||||
reason := "Error parsing dynamic arguments"
|
||||
jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorParsingDynamicArgs, reason, nil)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = workers.DepsolveJobInfo(depsolveJobID, &depsolveResults)
|
||||
if err != nil {
|
||||
reason := "Error reading depsolve status"
|
||||
jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorReadingJobStatus, reason, nil)
|
||||
return
|
||||
}
|
||||
|
||||
if jobErr := depsolveResults.JobError; jobErr != nil {
|
||||
if jobErr.ID == clienterrors.ErrorDNFDepsolveError || jobErr.ID == clienterrors.ErrorDNFMarkingErrors {
|
||||
jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorDepsolveDependency, "Error in depsolve job dependency input, bad package set requested", nil)
|
||||
return
|
||||
}
|
||||
jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorDepsolveDependency, "Error in depsolve job dependency", nil)
|
||||
return
|
||||
}
|
||||
|
||||
if len(depsolveResults.PackageSpecs) == 0 {
|
||||
jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorEmptyPackageSpecs, "Received empty package specs", nil)
|
||||
return
|
||||
}
|
||||
|
||||
var containerSpecs map[string][]container.Spec
|
||||
if containerResolveJobID != uuid.Nil {
|
||||
// Container resolve job
|
||||
var result worker.ContainerResolveJobResult
|
||||
_, err := workers.ContainerResolveJobInfo(containerResolveJobID, &result)
|
||||
|
||||
if err != nil {
|
||||
reason := "Error reading container resolve job status"
|
||||
jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorReadingJobStatus, reason, nil)
|
||||
return
|
||||
}
|
||||
|
||||
if jobErr := result.JobError; jobErr != nil {
|
||||
jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorContainerDependency, "Error in container resolve job dependency", nil)
|
||||
return
|
||||
}
|
||||
|
||||
// NOTE: The container resolve job doesn't hold the pipeline name for
|
||||
// the container embedding, so we need to get it from the manifest
|
||||
// content field. There should be only one.
|
||||
var containerEmbedPipeline string
|
||||
for name := range manifestSource.GetContainerSourceSpecs() {
|
||||
containerEmbedPipeline = name
|
||||
break
|
||||
}
|
||||
|
||||
pipelineSpecs := make([]container.Spec, len(result.Specs))
|
||||
for idx, resultSpec := range result.Specs {
|
||||
pipelineSpecs[idx] = container.Spec{
|
||||
Source: resultSpec.Source,
|
||||
Digest: resultSpec.Digest,
|
||||
LocalName: resultSpec.Name,
|
||||
TLSVerify: resultSpec.TLSVerify,
|
||||
ImageID: resultSpec.ImageID,
|
||||
ListDigest: resultSpec.ListDigest,
|
||||
}
|
||||
|
||||
}
|
||||
containerSpecs = map[string][]container.Spec{
|
||||
containerEmbedPipeline: pipelineSpecs,
|
||||
}
|
||||
}
|
||||
|
||||
var ostreeCommitSpecs map[string][]ostree.CommitSpec
|
||||
if ostreeResolveJobID != uuid.Nil {
|
||||
var result worker.OSTreeResolveJobResult
|
||||
_, err := workers.OSTreeResolveJobInfo(ostreeResolveJobID, &result)
|
||||
|
||||
if err != nil {
|
||||
reason := "Error reading ostree resolve job status"
|
||||
logrus.Errorf("%s: %v", reason, err)
|
||||
jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorReadingJobStatus, reason, nil)
|
||||
return
|
||||
}
|
||||
|
||||
if jobErr := result.JobError; jobErr != nil {
|
||||
jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorOSTreeDependency, "Error in ostree resolve job dependency", nil)
|
||||
return
|
||||
}
|
||||
|
||||
// NOTE: The ostree resolve job doesn't hold the pipeline name for the
|
||||
// ostree commits, so we need to get it from the manifest content
|
||||
// field. There should be only one.
|
||||
var ostreeCommitPipeline string
|
||||
for name := range manifestSource.GetOSTreeSourceSpecs() {
|
||||
ostreeCommitPipeline = name
|
||||
break
|
||||
}
|
||||
|
||||
commitSpecs := make([]ostree.CommitSpec, len(result.Specs))
|
||||
for idx, resultSpec := range result.Specs {
|
||||
commitSpecs[idx] = ostree.CommitSpec{
|
||||
Ref: resultSpec.Ref,
|
||||
URL: resultSpec.URL,
|
||||
Checksum: resultSpec.Checksum,
|
||||
}
|
||||
if resultSpec.RHSM {
|
||||
// NOTE: Older workers don't set the Secrets string in the result
|
||||
// spec so let's add it here for backwards compatibility. This
|
||||
// should be removed after a few versions when all workers have
|
||||
// been updated.
|
||||
resultSpec.Secrets = "org.osbuild.rhsm.consumer"
|
||||
}
|
||||
}
|
||||
ostreeCommitSpecs = map[string][]ostree.CommitSpec{
|
||||
ostreeCommitPipeline: commitSpecs,
|
||||
}
|
||||
}
|
||||
|
||||
ms, err := manifestSource.Serialize(depsolveResults.PackageSpecs, containerSpecs, ostreeCommitSpecs)
|
||||
|
||||
jobResult.Manifest = ms
|
||||
}
|
||||
|
|
@ -6,8 +6,8 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
)
|
||||
|
||||
func TestSplitExtension(t *testing.T) {
|
||||
|
|
|
|||
|
|
@ -11,9 +11,9 @@ import (
|
|||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/osbuild/images/pkg/distro/test_distro"
|
||||
"github.com/osbuild/images/pkg/osbuild"
|
||||
v2 "github.com/osbuild/osbuild-composer/internal/cloudapi/v2"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro/test_distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/osbuild"
|
||||
"github.com/osbuild/osbuild-composer/internal/target"
|
||||
"github.com/osbuild/osbuild-composer/internal/test"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
|
|
|
|||
|
|
@ -15,8 +15,8 @@ import (
|
|||
|
||||
"github.com/osbuild/osbuild-composer/pkg/jobqueue"
|
||||
|
||||
"github.com/osbuild/images/pkg/distro/test_distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/cloudapi/v2"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro/test_distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/test"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -12,13 +12,13 @@ import (
|
|||
|
||||
"github.com/osbuild/osbuild-composer/pkg/jobqueue"
|
||||
|
||||
"github.com/osbuild/images/pkg/distro/test_distro"
|
||||
"github.com/osbuild/images/pkg/osbuild"
|
||||
"github.com/osbuild/images/pkg/ostree/mock_ostree_repo"
|
||||
"github.com/osbuild/images/pkg/rpmmd"
|
||||
v2 "github.com/osbuild/osbuild-composer/internal/cloudapi/v2"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro/test_distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/jobqueue/fsjobqueue"
|
||||
distro_mock "github.com/osbuild/osbuild-composer/internal/mocks/distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/osbuild"
|
||||
"github.com/osbuild/osbuild-composer/internal/ostree/mock_ostree_repo"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/target"
|
||||
"github.com/osbuild/osbuild-composer/internal/test"
|
||||
"github.com/osbuild/osbuild-composer/internal/worker"
|
||||
|
|
|
|||
|
|
@ -1,103 +0,0 @@
|
|||
package container_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/container"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
//
|
||||
|
||||
func TestClientResolve(t *testing.T) {
|
||||
|
||||
registry := NewTestRegistry()
|
||||
defer registry.Close()
|
||||
|
||||
repo := registry.AddRepo("library/osbuild")
|
||||
listDigest := repo.AddImage(
|
||||
[]Blob{NewDataBlobFromBase64(rootLayer)},
|
||||
[]string{"amd64", "ppc64le"},
|
||||
"cool container",
|
||||
time.Time{})
|
||||
|
||||
ref := registry.GetRef("library/osbuild")
|
||||
client, err := container.NewClient(ref)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, client)
|
||||
|
||||
client.SkipTLSVerify()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
client.SetArchitectureChoice("amd64")
|
||||
spec, err := client.Resolve(ctx, "")
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, container.Spec{
|
||||
Source: ref,
|
||||
Digest: "sha256:f29b6cd42a94a574583439addcd6694e6224f0e4b32044c9e3aee4c4856c2a50",
|
||||
ImageID: "sha256:c2ecf25cf190e76b12b07436ad5140d4ba53d8a136d498705e57a006837a720f",
|
||||
TLSVerify: client.GetTLSVerify(),
|
||||
LocalName: client.Target.String(),
|
||||
ListDigest: listDigest,
|
||||
}, spec)
|
||||
|
||||
client.SetArchitectureChoice("ppc64le")
|
||||
spec, err = client.Resolve(ctx, "")
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, container.Spec{
|
||||
Source: ref,
|
||||
Digest: "sha256:d49eebefb6c7ce5505594bef652bd4adc36f413861bd44209d9b9486310b1264",
|
||||
ImageID: "sha256:d2ab8fea7f08a22f03b30c13c6ea443121f25e87202a7496e93736efa6fe345a",
|
||||
TLSVerify: client.GetTLSVerify(),
|
||||
LocalName: client.Target.String(),
|
||||
ListDigest: listDigest,
|
||||
}, spec)
|
||||
|
||||
// don't have that architecture
|
||||
client.SetArchitectureChoice("s390x")
|
||||
_, err = client.Resolve(ctx, "")
|
||||
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestClientAuthFilePath(t *testing.T) {
|
||||
|
||||
client, err := container.NewClient("quay.io/osbuild/osbuild")
|
||||
assert.NoError(t, err)
|
||||
|
||||
authFilePath := client.GetAuthFilePath()
|
||||
assert.NotEmpty(t, authFilePath)
|
||||
assert.Equal(t, authFilePath, container.GetDefaultAuthFile())
|
||||
|
||||
// make sure the file is accessible
|
||||
_, err = os.ReadFile(authFilePath)
|
||||
assert.True(t, err == nil || os.IsNotExist(err))
|
||||
|
||||
t.Run("XDG_RUNTIME_DIR", func(t *testing.T) {
|
||||
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
|
||||
|
||||
if runtimeDir == "" {
|
||||
t.Skip("XDG_RUNTIME_DIR not set, skipping test")
|
||||
return
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
os.Setenv("XDG_RUNTIME_DIR", runtimeDir)
|
||||
})
|
||||
|
||||
os.Unsetenv("XDG_RUNTIME_DIR")
|
||||
|
||||
authFilePath := container.GetDefaultAuthFile()
|
||||
assert.NotEmpty(t, authFilePath)
|
||||
_, err = os.ReadFile(authFilePath)
|
||||
assert.True(t, err == nil || os.IsNotExist(err))
|
||||
})
|
||||
|
||||
}
|
||||
|
|
@ -1,385 +0,0 @@
package container_test

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"
	"time"

	"github.com/opencontainers/go-digest"

	"github.com/osbuild/osbuild-composer/internal/common"
	"github.com/osbuild/osbuild-composer/internal/container"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/manifest"
)

const rootLayer = `H4sIAAAJbogA/+SWUYqDMBCG53lP4V5g9x8dzRX2Bvtc0VIhEIhKe/wSKxgU6ktjC/O9hMzAQDL8
/8yltdb9DLeB0gEGKhHCg/UJsBAL54zKFBAC54ZzyrCUSMfYDydPgHfu6R/s5VePilOfzF/of/bv
vG2+lqhyFNGPddP53yjyegCBKcuNROZ77AmBoP+CmbIyqpEM5fqf+3/ubJtsCuz7P1b+L1Du/4f5
v+vrsVPu/Vq9P3ANk//d+x/MZv8TKNf/Qfqf9v9v5fLXK3/lKEc5ypm4AwAA//8DAE6E6nIAEgAA
`

// The following code implements a toy container registry to test with

// Blob interface
type Blob interface {
	GetSize() int64
	GetMediaType() string
	GetDigest() digest.Digest

	Reader() io.Reader
}

// dataBlob //
type dataBlob struct {
	Data      []byte
	MediaType string
}

func NewDataBlobFromBase64(text string) dataBlob {
	data, err := base64.StdEncoding.DecodeString(text)

	if err != nil {
		panic("decoding of text failed")
	}

	return dataBlob{
		Data: data,
	}
}

// Blob interface implementation
func (b dataBlob) GetSize() int64 {
	return int64(len(b.Data))
}

func (b dataBlob) GetMediaType() string {
	if b.MediaType != "" {
		return b.MediaType
	}

	return manifest.DockerV2Schema2LayerMediaType
}

func (b dataBlob) GetDigest() digest.Digest {
	return digest.FromBytes(b.Data)
}

func (b dataBlob) Reader() io.Reader {
	return bytes.NewReader(b.Data)
}

func MakeDescriptorForBlob(b Blob) manifest.Schema2Descriptor {
	return manifest.Schema2Descriptor{
		MediaType: b.GetMediaType(),
		Size:      b.GetSize(),
		Digest:    b.GetDigest(),
	}
}

// Repo //
type Repo struct {
	blobs     map[string]Blob
	manifests map[string]*manifest.Schema2
	images    map[string]*manifest.Schema2List
	tags      map[string]string
}

func NewRepo() *Repo {
	return &Repo{
		blobs:     make(map[string]Blob),
		manifests: make(map[string]*manifest.Schema2),
		tags:      make(map[string]string),
		images:    make(map[string]*manifest.Schema2List),
	}
}

func (r *Repo) AddBlob(b Blob) manifest.Schema2Descriptor {
	desc := MakeDescriptorForBlob(b)
	r.blobs[desc.Digest.String()] = b
	return desc
}

func (r *Repo) AddObject(v interface{}, mediaType string) manifest.Schema2Descriptor {
	data, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		panic("could not marshal image object")
	}

	blob := dataBlob{
		Data:      data,
		MediaType: mediaType,
	}

	return r.AddBlob(blob)
}

func (r *Repo) AddManifest(mf *manifest.Schema2) manifest.Schema2Descriptor {
	desc := r.AddObject(mf, mf.MediaType)

	r.manifests[desc.Digest.String()] = mf

	return desc
}

func (r *Repo) AddImage(layers []Blob, arches []string, comment string, ctime time.Time) string {

	blobs := make([]manifest.Schema2Descriptor, len(layers))

	for i, layer := range layers {
		blobs[i] = r.AddBlob(layer)
	}

	manifests := make([]manifest.Schema2ManifestDescriptor, len(arches))

	for i, arch := range arches {
		img := manifest.Schema2V1Image{
			Architecture: arch,
			OS:           "linux",
			Author:       "osbuild",
			Comment:      comment,
			Created:      ctime,
		}

		// Add the config object
		config := r.AddObject(img, manifest.DockerV2Schema2ConfigMediaType)

		// make and add the manifest object
		schema := manifest.Schema2FromComponents(config, blobs)
		mf := r.AddManifest(schema)

		desc := manifest.Schema2ManifestDescriptor{
			Schema2Descriptor: mf,
			Platform: manifest.Schema2PlatformSpec{
				Architecture: arch,
				OS:           "linux",
			},
		}

		manifests[i] = desc
	}

	list := manifest.Schema2ListFromComponents(manifests)
	desc := r.AddObject(list, list.MediaType)
	checksum := desc.Digest.String()

	r.images[checksum] = list
	r.tags["latest"] = checksum

	return checksum
}

func (r *Repo) AddTag(checksum, tag string) {

	if _, ok := r.images[checksum]; !ok {
		panic("cannot tag: image not found: " + checksum)
	}

	r.tags[tag] = checksum
}

func WriteBlob(blob Blob, w http.ResponseWriter) {
	w.Header().Add("Content-Type", blob.GetMediaType())
	w.Header().Add("Content-Length", fmt.Sprintf("%d", blob.GetSize()))
	w.Header().Add("Docker-Content-Digest", blob.GetDigest().String())
	w.WriteHeader(http.StatusOK)

	reader := blob.Reader()

	_, err := io.Copy(w, reader)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error writing blob: %v", err)
	}
}

func BlobIsManifest(blob Blob) bool {
	mt := blob.GetMediaType()
	return mt == manifest.DockerV2Schema2MediaType || mt == manifest.DockerV2ListMediaType
}

func (r *Repo) ServeManifest(ref string, w http.ResponseWriter, req *http.Request) {
	if checksum, ok := r.tags[ref]; ok {
		ref = checksum
	}

	blob, ok := r.blobs[ref]
	if !ok || !BlobIsManifest(blob) {
		fmt.Fprintf(os.Stderr, "manifest %s not found", ref)
		http.NotFound(w, req)
		return
	}

	WriteBlob(blob, w)
}

func (r *Repo) ServeBlob(ref string, w http.ResponseWriter, req *http.Request) {

	blob, ok := r.blobs[ref]

	if !ok {
		fmt.Fprintf(os.Stderr, "blob %s not found", ref)
		http.NotFound(w, req)
		return
	}

	WriteBlob(blob, w)
}

// Registry //

type Registry struct {
	server *httptest.Server
	repos  map[string]*Repo
}

func (reg *Registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {

	parts := strings.SplitN(req.URL.Path, "?", 1)
	paths := strings.Split(strings.Trim(parts[0], "/"), "/")

	// Possible routes
	// [1] version-check: /v2/
	// [2] blobs: /v2/<repo_name>/blobs/<digest>
	// [3] manifest: /v2/<repo_name>/manifests/<ref>
	//
	// we need at least 4 path components and path has to start with "/v2"

	if len(paths) < 1 || paths[0] != "v2" {
		http.NotFound(w, req)
		return
	}

	// [1] version check
	if len(paths) == 1 {
		w.WriteHeader(200)
		return
	} else if len(paths) < 4 {
		http.NotFound(w, req)
		return
	}

	// we asserted that we have at least 4 path components
	ref := paths[len(paths)-1]
	cmd := paths[len(paths)-2]

	repoName := strings.Join(paths[1:len(paths)-2], "/")

	repo, ok := reg.repos[repoName]
	if !ok {
		fmt.Fprintf(os.Stderr, "repo %s not found", repoName)
		http.NotFound(w, req)
		return
	}

	if cmd == "manifests" {
		repo.ServeManifest(ref, w, req)
	} else if cmd == "blobs" {
		repo.ServeBlob(ref, w, req)
	} else {
		http.NotFound(w, req)
	}
}
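
The route comments above describe the small slice of the registry v2 HTTP API that this toy server answers. A short sketch of how those routes can be exercised directly against the embedded httptest server (pingRegistry is a hypothetical helper for this package, not part of the original file; it assumes the imports already present here):

// pingRegistry hits the version-check and manifest routes served by ServeHTTP
// above, using the httptest server's own client so its self-signed TLS
// certificate is trusted. Hypothetical helper, for illustration only.
func pingRegistry(reg *Registry, repo, ref string) error {
	httpClient := reg.server.Client()
	for _, path := range []string{
		"/v2/",                              // [1] version check
		"/v2/" + repo + "/manifests/" + ref, // [3] manifest by tag or digest
	} {
		resp, err := httpClient.Get(reg.server.URL + path)
		if err != nil {
			return err
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("%s: unexpected status %d", path, resp.StatusCode)
		}
	}
	return nil
}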

func NewTestRegistry() *Registry {

	reg := &Registry{
		repos: make(map[string]*Repo),
	}
	reg.server = httptest.NewTLSServer(reg)

	return reg
}

func (reg *Registry) AddRepo(name string) *Repo {
	repo := NewRepo()
	reg.repos[name] = repo
	return repo
}

func (reg *Registry) GetRef(repo string) string {
	return fmt.Sprintf("%s/%s", reg.server.Listener.Addr().String(), repo)
}

func (reg *Registry) Resolve(target, arch string) (container.Spec, error) {

	ref, err := reference.ParseNormalizedNamed(target)
	if err != nil {
		return container.Spec{}, fmt.Errorf("failed to parse '%s': %w", target, err)
	}

	domain := reference.Domain(ref)

	tag := "latest"
	var checksum string

	if tagged, ok := ref.(reference.NamedTagged); ok {
		tag = tagged.Tag()
	}

	if digested, ok := ref.(reference.Digested); ok {
		checksum = string(digested.Digest())
	}

	if domain != reg.server.Listener.Addr().String() {
		return container.Spec{}, fmt.Errorf("unknown domain")
	}

	ref = reference.TrimNamed(ref)
	path := reference.Path(ref)

	repo, ok := reg.repos[path]
	if !ok {
		return container.Spec{}, fmt.Errorf("unknown repo")
	}

	if checksum == "" {
		checksum, ok = repo.tags[tag]
		if !ok {
			return container.Spec{}, fmt.Errorf("unknown tag")
		}
	}

	lst, ok := repo.images[checksum]
	listDigest := checksum

	if ok {
		checksum = ""

		for _, m := range lst.Manifests {
			if m.Platform.Architecture == arch {
				checksum = m.Digest.String()
				break
			}
		}

		if checksum == "" {
			return container.Spec{}, fmt.Errorf("unsupported architecture")
		}
	}

	mf, ok := repo.manifests[checksum]
	if !ok {
		return container.Spec{}, fmt.Errorf("unknown digest")
	}

	return container.Spec{
		Source:     ref.String(),
		Digest:     checksum,
		ImageID:    mf.ConfigDescriptor.Digest.String(),
		LocalName:  target,
		TLSVerify:  common.ToPtr(false),
		ListDigest: listDigest,
	}, nil
}

func (reg *Registry) Close() {
	reg.server.Close()
}
@ -1,85 +0,0 @@
package container_test

import (
	"fmt"
	"sort"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/osbuild/osbuild-composer/internal/common"
	"github.com/osbuild/osbuild-composer/internal/container"
)

type lessCompare func(i, j int) bool

func makeSpecSorter(specs []container.Spec) lessCompare {
	return func(i, j int) bool {
		return specs[i].Digest < specs[j].Digest
	}
}

func TestResolver(t *testing.T) {

	registry := NewTestRegistry()
	defer registry.Close()

	repo := registry.AddRepo("library/osbuild")
	ref := registry.GetRef("library/osbuild")

	refs := make([]string, 10)
	for i := 0; i < len(refs); i++ {
		checksum := repo.AddImage(
			[]Blob{NewDataBlobFromBase64(rootLayer)},
			[]string{"amd64", "ppc64le"},
			fmt.Sprintf("image %d", i),
			time.Time{})

		tag := fmt.Sprintf("%d", i)
		repo.AddTag(checksum, tag)
		refs[i] = fmt.Sprintf("%s:%s", ref, tag)
	}

	resolver := container.NewResolver("amd64")

	for _, r := range refs {
		resolver.Add(container.SourceSpec{r, "", common.ToPtr(false)})
	}

	have, err := resolver.Finish()
	assert.NoError(t, err)
	assert.NotNil(t, have)

	assert.Len(t, have, len(refs))

	want := make([]container.Spec, len(refs))
	for i, r := range refs {
		spec, err := registry.Resolve(r, "amd64")
		assert.NoError(t, err)
		want[i] = spec
	}

	sort.Slice(have, makeSpecSorter(have))
	sort.Slice(want, makeSpecSorter(want))

	assert.ElementsMatch(t, have, want)
}

func TestResolverFail(t *testing.T) {
	resolver := container.NewResolver("amd64")

	resolver.Add(container.SourceSpec{"invalid-reference@${IMAGE_DIGEST}", "", common.ToPtr(false)})

	specs, err := resolver.Finish()
	assert.Error(t, err)
	assert.Len(t, specs, 0)

	registry := NewTestRegistry()
	defer registry.Close()

	resolver.Add(container.SourceSpec{registry.GetRef("repo"), "", common.ToPtr(false)})
	specs, err = resolver.Finish()
	assert.Error(t, err)
	assert.Len(t, specs, 0)
}
@ -1,64 +0,0 @@
//go:build !darwin

package crypt

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func Test_crypt_PasswordIsCrypted(t *testing.T) {

	tests := []struct {
		name     string
		password string
		want     bool
	}{
		{
			name:     "bcrypt",
			password: "$2b$04$123465789012345678901uac5A8egfBuZVHMrDZsQzR96IqNBivCy",
			want:     true,
		}, {
			name:     "sha256",
			password: "$5$1234567890123456$v.2bOKKLlpmUSKn0rxJmgnh.e3wOKivAVNZmNrOsoA3",
			want:     true,
		}, {
			name:     "sha512",
			password: "$6$1234567890123456$d.pgKQFaiD8bRiExg5NesbGR/3u51YvxeYaQXPzx4C6oSYREw8VoReiuYZjx0V9OhGVTZFqhc6emAxT1RC5BV.",
			want:     true,
		}, {
			name:     "scrypt",
			password: "$7$123456789012345", //not actual hash output from scrypt
			want:     false,
		}, {
			name:     "plain",
			password: "password",
			want:     false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			if got := PasswordIsCrypted(test.password); got != test.want {
				t.Errorf("PasswordIsCrypted() =%v, want %v", got, test.want)
			}
		})
	}
}

func TestCryptSHA512(t *testing.T) {
	retPassFirst, err := CryptSHA512("testPass")
	assert.NoError(t, err)
	retPassSecond, _ := CryptSHA512("testPass")
	expectedPassStart := "$6$"
	assert.Equal(t, expectedPassStart, retPassFirst[0:3])
	assert.NotEqual(t, retPassFirst, retPassSecond)
}

func TestGenSalt(t *testing.T) {
	length := 10
	retSaltFirst, err := genSalt(length)
	assert.NoError(t, err)
	retSaltSecond, _ := genSalt(length)
	assert.NotEqual(t, retSaltFirst, retSaltSecond)
}
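
The table in Test_crypt_PasswordIsCrypted documents which crypt(3)-style prefixes count as already-hashed passwords. For illustration only, a restatement of that prefix check (isCryptedSketch is not the package's real function):

package crypt

import "strings"

// isCryptedSketch mirrors what the test table above exercises: bcrypt ($2b$),
// sha256-crypt ($5$) and sha512-crypt ($6$) are treated as crypted, while the
// $7$ scrypt-style string and plain text are not. Illustrative sketch only.
func isCryptedSketch(password string) bool {
	for _, prefix := range []string{"$2b$", "$5$", "$6$"} {
		if strings.HasPrefix(password, prefix) {
			return true
		}
	}
	return false
}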
File diff suppressed because it is too large
@ -1,40 +0,0 @@
package disk

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestLVMVCreateMountpoint(t *testing.T) {

	assert := assert.New(t)

	vg := &LVMVolumeGroup{
		Name:        "root",
		Description: "root volume group",
	}

	entity, err := vg.CreateMountpoint("/", 0)
	assert.NoError(err)
	rootlv := entity.(*LVMLogicalVolume)
	assert.Equal("rootlv", rootlv.Name)

	_, err = vg.CreateMountpoint("/home_test", 0)
	assert.NoError(err)

	entity, err = vg.CreateMountpoint("/home/test", 0)
	assert.NoError(err)

	dedup := entity.(*LVMLogicalVolume)
	assert.Equal("home_testlv00", dedup.Name)

	// Let's collide it
	for i := 0; i < 98; i++ {
		_, err = vg.CreateMountpoint("/home/test", 0)
		assert.NoError(err)
	}

	_, err = vg.CreateMountpoint("/home/test", 0)
	assert.Error(err)
}
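
The assertions above pin down how a mountpoint is flattened into a logical volume name and how collisions are disambiguated with a two-digit counter until the namespace is exhausted. A rough sketch of such a scheme (lvNameSketch is hypothetical; the bound of 99 numbered variants is inferred from the test arithmetic, and the real package may differ):

package disk

import (
	"fmt"
	"strings"
)

// lvNameSketch derives an LV name from a mountpoint: slashes become
// underscores, an "lv" suffix is appended, and numbered variants are tried on
// collision. Hypothetical illustration of the behaviour the test checks.
func lvNameSketch(mountpoint string, taken map[string]bool) (string, error) {
	base := strings.ReplaceAll(strings.Trim(mountpoint, "/"), "/", "_")
	if base == "" {
		base = "root" // "/" maps to "rootlv" in the test above
	}
	name := base + "lv"
	if !taken[name] {
		taken[name] = true
		return name, nil
	}
	for i := 0; i < 99; i++ { // bound chosen to match the test arithmetic
		candidate := fmt.Sprintf("%s%02d", name, i)
		if !taken[candidate] {
			taken[candidate] = true
			return candidate, nil
		}
	}
	return "", fmt.Errorf("no free logical volume name for mountpoint %q", mountpoint)
}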
@ -1,566 +0,0 @@
|
|||
package distro_test
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/blueprint"
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
"github.com/osbuild/osbuild-composer/internal/container"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro/distro_test_common"
|
||||
"github.com/osbuild/osbuild-composer/internal/distroregistry"
|
||||
"github.com/osbuild/osbuild-composer/internal/ostree"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDistro_Manifest(t *testing.T) {
|
||||
|
||||
distro_test_common.TestDistro_Manifest(
|
||||
t,
|
||||
"../../test/data/manifests/",
|
||||
"*",
|
||||
distroregistry.NewDefault(),
|
||||
false, // This test case does not check for changes in the imageType package sets!
|
||||
"",
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
// Ensure that all package sets defined in the package set chains are defined for the image type
|
||||
func TestImageType_PackageSetsChains(t *testing.T) {
|
||||
distros := distroregistry.NewDefault()
|
||||
for _, distroName := range distros.List() {
|
||||
d := distros.GetDistro(distroName)
|
||||
for _, archName := range d.ListArches() {
|
||||
arch, err := d.GetArch(archName)
|
||||
require.Nil(t, err)
|
||||
for _, imageTypeName := range arch.ListImageTypes() {
|
||||
t.Run(fmt.Sprintf("%s/%s/%s", distroName, archName, imageTypeName), func(t *testing.T) {
|
||||
imageType, err := arch.GetImageType(imageTypeName)
|
||||
require.Nil(t, err)
|
||||
|
||||
// set up bare minimum args for image type
|
||||
var customizations *blueprint.Customizations
|
||||
if imageType.Name() == "edge-simplified-installer" {
|
||||
customizations = &blueprint.Customizations{
|
||||
InstallationDevice: "/dev/null",
|
||||
}
|
||||
}
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: customizations,
|
||||
}
|
||||
options := distro.ImageOptions{
|
||||
OSTree: &ostree.ImageOptions{
|
||||
URL: "https://example.com", // required by some image types
|
||||
},
|
||||
}
|
||||
manifest, _, err := imageType.Manifest(&bp, options, nil, 0)
|
||||
require.NoError(t, err)
|
||||
imagePkgSets := manifest.GetPackageSetChains()
|
||||
for packageSetName := range imageType.PackageSetsChains() {
|
||||
_, ok := imagePkgSets[packageSetName]
|
||||
if !ok {
|
||||
// in the new pipeline generation logic the name of the package
|
||||
// set chains are taken from the pipelines and do not match the
|
||||
// package set names.
|
||||
// TODO: redefine package set chains to make this unnecessary
|
||||
switch packageSetName {
|
||||
case "packages":
|
||||
_, ok = imagePkgSets["os"]
|
||||
if !ok {
|
||||
_, ok = imagePkgSets["ostree-tree"]
|
||||
}
|
||||
}
|
||||
}
|
||||
assert.Truef(t, ok, "package set %q defined in a package set chain is not present in the image package sets", packageSetName)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure all image types report the correct names for their pipelines.
|
||||
// Each image type contains a list of build and payload pipelines. They are
|
||||
// needed for knowing the names of pipelines from the static object without
|
||||
// having access to a manifest, which we need when parsing metadata from build
|
||||
// results.
|
||||
func TestImageTypePipelineNames(t *testing.T) {
|
||||
// types for parsing the opaque manifest with just the fields we care about
|
||||
type rpmStageOptions struct {
|
||||
GPGKeys []string `json:"gpgkeys"`
|
||||
}
|
||||
type stage struct {
|
||||
Type string `json:"type"`
|
||||
Options rpmStageOptions `json:"options"`
|
||||
}
|
||||
type pipeline struct {
|
||||
Name string `json:"name"`
|
||||
Stages []stage `json:"stages"`
|
||||
}
|
||||
type manifest struct {
|
||||
Pipelines []pipeline `json:"pipelines"`
|
||||
}
|
||||
|
||||
assert := assert.New(t)
|
||||
distros := distroregistry.NewDefault()
|
||||
for _, distroName := range distros.List() {
|
||||
d := distros.GetDistro(distroName)
|
||||
for _, archName := range d.ListArches() {
|
||||
arch, err := d.GetArch(archName)
|
||||
assert.Nil(err)
|
||||
for _, imageTypeName := range arch.ListImageTypes() {
|
||||
t.Run(fmt.Sprintf("%s/%s/%s", distroName, archName, imageTypeName), func(t *testing.T) {
|
||||
imageType, err := arch.GetImageType(imageTypeName)
|
||||
assert.Nil(err)
|
||||
|
||||
// set up bare minimum args for image type
|
||||
var customizations *blueprint.Customizations
|
||||
if imageType.Name() == "edge-simplified-installer" {
|
||||
customizations = &blueprint.Customizations{
|
||||
InstallationDevice: "/dev/null",
|
||||
}
|
||||
}
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: customizations,
|
||||
}
|
||||
options := distro.ImageOptions{}
|
||||
// this repo's gpg keys should get included in the os
|
||||
// pipeline's rpm stage
|
||||
repos := []rpmmd.RepoConfig{
|
||||
{
|
||||
Name: "payload",
|
||||
BaseURLs: []string{"http://payload.example.com"},
|
||||
PackageSets: imageType.PayloadPackageSets(),
|
||||
GPGKeys: []string{"payload-gpg-key"},
|
||||
CheckGPG: common.ToPtr(true),
|
||||
},
|
||||
}
|
||||
seed := int64(0)
|
||||
|
||||
// Add ostree options for image types that require them
|
||||
options.OSTree = &ostree.ImageOptions{
|
||||
URL: "https://example.com",
|
||||
}
|
||||
|
||||
// Pipelines that require package sets will fail if none
|
||||
// are defined. OS pipelines require a kernel.
|
||||
// Add kernel and filesystem to every pipeline so that the
|
||||
// manifest creation doesn't fail.
|
||||
allPipelines := append(imageType.BuildPipelines(), imageType.PayloadPipelines()...)
|
||||
minimalPackageSet := []rpmmd.PackageSpec{
|
||||
{Name: "kernel", Checksum: "sha256:a0c936696eb7d5ee3192bf53b9d281cecbb40ca9db520de72cb95817ad92ac72"},
|
||||
{Name: "filesystem", Checksum: "sha256:6b4bf18ba28ccbdd49f2716c9f33c9211155ff703fa6c195c78a07bd160da0eb"},
|
||||
}
|
||||
|
||||
packageSets := make(map[string][]rpmmd.PackageSpec, len(allPipelines))
|
||||
for _, plName := range allPipelines {
|
||||
packageSets[plName] = minimalPackageSet
|
||||
}
|
||||
|
||||
m, _, err := imageType.Manifest(&bp, options, repos, seed)
|
||||
assert.NoError(err)
|
||||
|
||||
containers := make(map[string][]container.Spec, 0)
|
||||
|
||||
ostreeSources := m.GetOSTreeSourceSpecs()
|
||||
commits := make(map[string][]ostree.CommitSpec, len(ostreeSources))
|
||||
for name, commitSources := range ostreeSources {
|
||||
commitSpecs := make([]ostree.CommitSpec, len(commitSources))
|
||||
for idx, commitSource := range commitSources {
|
||||
commitSpecs[idx] = ostree.CommitSpec{
|
||||
Ref: commitSource.Ref,
|
||||
URL: commitSource.URL,
|
||||
Checksum: fmt.Sprintf("%x", sha256.Sum256([]byte(commitSource.URL+commitSource.Ref))),
|
||||
}
|
||||
}
|
||||
commits[name] = commitSpecs
|
||||
}
|
||||
mf, err := m.Serialize(packageSets, containers, commits)
|
||||
assert.NoError(err)
|
||||
pm := new(manifest)
|
||||
err = json.Unmarshal(mf, pm)
|
||||
assert.NoError(err)
|
||||
|
||||
assert.Equal(len(allPipelines), len(pm.Pipelines))
|
||||
for idx := range pm.Pipelines {
|
||||
// manifest pipeline names should be identical to the ones
|
||||
// defined in the image type and in the same order
|
||||
assert.Equal(allPipelines[idx], pm.Pipelines[idx].Name)
|
||||
|
||||
if pm.Pipelines[idx].Name == "os" {
|
||||
rpmStagePresent := false
|
||||
for _, s := range pm.Pipelines[idx].Stages {
|
||||
if s.Type == "org.osbuild.rpm" {
|
||||
rpmStagePresent = true
|
||||
if imageTypeName != "azure-eap7-rhui" {
|
||||
// NOTE (akoutsou): Ideally, at some point we will
|
||||
// have a good way of reading what's supported by
|
||||
// each image type and we can skip or adapt tests
|
||||
// based on this information. For image types with
|
||||
// a preset workload, payload packages are ignored
|
||||
// and dropped and so are the payload
|
||||
// repo gpg keys.
|
||||
assert.Equal(repos[0].GPGKeys, s.Options.GPGKeys)
|
||||
}
|
||||
}
|
||||
}
|
||||
// make sure the gpg keys check was reached
|
||||
assert.True(rpmStagePresent)
|
||||
}
|
||||
}
|
||||
|
||||
// The last pipeline should match the export pipeline.
|
||||
// This might change in the future, but for now, let's make
|
||||
// sure they match.
|
||||
assert.Equal(imageType.Exports()[0], pm.Pipelines[len(pm.Pipelines)-1].Name)
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
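
The comment block before TestImageTypePipelineNames explains that every image type statically declares its build and payload pipeline names so they can be known without generating a manifest. A small standalone sketch (a separate main package, using the import path exactly as it appears in this removed test file) that dumps that static information:

package main

import (
	"fmt"

	"github.com/osbuild/osbuild-composer/internal/distroregistry"
)

// Walk the default distro registry and print the statically declared pipeline
// names and export pipeline for every image type. Illustrative sketch only.
func main() {
	distros := distroregistry.NewDefault()
	for _, distroName := range distros.List() {
		d := distros.GetDistro(distroName)
		for _, archName := range d.ListArches() {
			arch, err := d.GetArch(archName)
			if err != nil {
				continue
			}
			for _, imageTypeName := range arch.ListImageTypes() {
				imageType, err := arch.GetImageType(imageTypeName)
				if err != nil {
					continue
				}
				pipelines := append(imageType.BuildPipelines(), imageType.PayloadPipelines()...)
				fmt.Printf("%s/%s/%s: %v (export: %s)\n",
					distroName, archName, imageTypeName, pipelines, imageType.Exports()[0])
			}
		}
	}
}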
|
||||
|
||||
// Ensure repositories are assigned to package sets properly.
|
||||
//
|
||||
// Each package set should include all the global repositories as well as any
|
||||
// pipeline/package-set specific repositories.
|
||||
func TestPipelineRepositories(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
type testCase struct {
|
||||
// Repo configs for pipeline generator
|
||||
repos []rpmmd.RepoConfig
|
||||
|
||||
// Expected result: map of pipelines to repo names (we only check names for the test).
|
||||
// Use the pipeline name * for global repos.
|
||||
result map[string][]stringSet
|
||||
}
|
||||
|
||||
testCases := map[string]testCase{
|
||||
"globalonly": { // only global repos: most common scenario
|
||||
repos: []rpmmd.RepoConfig{
|
||||
{
|
||||
Name: "global-1",
|
||||
BaseURLs: []string{"http://global-1.example.com"},
|
||||
},
|
||||
{
|
||||
Name: "global-2",
|
||||
BaseURLs: []string{"http://global-2.example.com"},
|
||||
},
|
||||
},
|
||||
result: map[string][]stringSet{
|
||||
"*": {newStringSet([]string{"global-1", "global-2"})},
|
||||
},
|
||||
},
|
||||
"global+build": { // global repos with build-specific repos: secondary common scenario
|
||||
repos: []rpmmd.RepoConfig{
|
||||
{
|
||||
Name: "global-11",
|
||||
BaseURLs: []string{"http://global-11.example.com"},
|
||||
},
|
||||
{
|
||||
Name: "global-12",
|
||||
BaseURLs: []string{"http://global-12.example.com"},
|
||||
},
|
||||
{
|
||||
Name: "build-1",
|
||||
BaseURLs: []string{"http://build-1.example.com"},
|
||||
PackageSets: []string{"build"},
|
||||
},
|
||||
{
|
||||
Name: "build-2",
|
||||
BaseURLs: []string{"http://build-2.example.com"},
|
||||
PackageSets: []string{"build"},
|
||||
},
|
||||
},
|
||||
result: map[string][]stringSet{
|
||||
"*": {newStringSet([]string{"global-11", "global-12"})},
|
||||
"build": {newStringSet([]string{"build-1", "build-2"})},
|
||||
},
|
||||
},
|
||||
"global+os": { // global repos with os-specific repos
|
||||
repos: []rpmmd.RepoConfig{
|
||||
{
|
||||
Name: "global-21",
|
||||
BaseURLs: []string{"http://global-11.example.com"},
|
||||
},
|
||||
{
|
||||
Name: "global-22",
|
||||
BaseURLs: []string{"http://global-12.example.com"},
|
||||
},
|
||||
{
|
||||
Name: "os-1",
|
||||
BaseURLs: []string{"http://os-1.example.com"},
|
||||
PackageSets: []string{"os"},
|
||||
},
|
||||
{
|
||||
Name: "os-2",
|
||||
BaseURLs: []string{"http://os-2.example.com"},
|
||||
PackageSets: []string{"os"},
|
||||
},
|
||||
},
|
||||
result: map[string][]stringSet{
|
||||
"*": {newStringSet([]string{"global-21", "global-22"})},
|
||||
"os": {newStringSet([]string{"os-1", "os-2"}), newStringSet([]string{"os-1", "os-2"})},
|
||||
},
|
||||
},
|
||||
"global+os+payload": { // global repos with os-specific repos and (user-defined) payload repositories
|
||||
repos: []rpmmd.RepoConfig{
|
||||
{
|
||||
Name: "global-21",
|
||||
BaseURLs: []string{"http://global-11.example.com"},
|
||||
},
|
||||
{
|
||||
Name: "global-22",
|
||||
BaseURLs: []string{"http://global-12.example.com"},
|
||||
},
|
||||
{
|
||||
Name: "os-1",
|
||||
BaseURLs: []string{"http://os-1.example.com"},
|
||||
PackageSets: []string{"os"},
|
||||
},
|
||||
{
|
||||
Name: "os-2",
|
||||
BaseURLs: []string{"http://os-2.example.com"},
|
||||
PackageSets: []string{"os"},
|
||||
},
|
||||
{
|
||||
Name: "payload",
|
||||
BaseURLs: []string{"http://payload.example.com"},
|
||||
// User-defined payload repositories automatically get the "blueprint" key.
|
||||
// This is handled by the APIs.
|
||||
PackageSets: []string{"blueprint"},
|
||||
},
|
||||
},
|
||||
result: map[string][]stringSet{
|
||||
"*": {newStringSet([]string{"global-21", "global-22"})},
|
||||
"os": {
|
||||
// chain with payload repo only in the second set for the blueprint package depsolve
|
||||
newStringSet([]string{"os-1", "os-2"}),
|
||||
newStringSet([]string{"os-1", "os-2", "payload"})},
|
||||
},
|
||||
},
|
||||
"noglobal": { // no global repositories; only pipeline restricted ones (unrealistic but technically valid)
|
||||
repos: []rpmmd.RepoConfig{
|
||||
{
|
||||
Name: "build-1",
|
||||
BaseURLs: []string{"http://build-1.example.com"},
|
||||
PackageSets: []string{"build"},
|
||||
},
|
||||
{
|
||||
Name: "build-2",
|
||||
BaseURLs: []string{"http://build-2.example.com"},
|
||||
PackageSets: []string{"build"},
|
||||
},
|
||||
{
|
||||
Name: "os-1",
|
||||
BaseURLs: []string{"http://os-1.example.com"},
|
||||
PackageSets: []string{"os"},
|
||||
},
|
||||
{
|
||||
Name: "os-2",
|
||||
BaseURLs: []string{"http://os-2.example.com"},
|
||||
PackageSets: []string{"os"},
|
||||
},
|
||||
{
|
||||
Name: "anaconda-1",
|
||||
BaseURLs: []string{"http://anaconda-1.example.com"},
|
||||
PackageSets: []string{"anaconda-tree"},
|
||||
},
|
||||
{
|
||||
Name: "container-1",
|
||||
BaseURLs: []string{"http://container-1.example.com"},
|
||||
PackageSets: []string{"container-tree"},
|
||||
},
|
||||
{
|
||||
Name: "coi-1",
|
||||
BaseURLs: []string{"http://coi-1.example.com"},
|
||||
PackageSets: []string{"coi-tree"},
|
||||
},
|
||||
},
|
||||
result: map[string][]stringSet{
|
||||
"*": nil,
|
||||
"build": {newStringSet([]string{"build-1", "build-2"})},
|
||||
"os": {newStringSet([]string{"os-1", "os-2"}), newStringSet([]string{"os-1", "os-2"})},
|
||||
"anaconda-tree": {newStringSet([]string{"anaconda-1"})},
|
||||
"container-tree": {newStringSet([]string{"container-1"})},
|
||||
"coi-tree": {newStringSet([]string{"coi-1"})},
|
||||
},
|
||||
},
|
||||
"global+unknown": { // package set names that don't match a pipeline are ignored
|
||||
repos: []rpmmd.RepoConfig{
|
||||
{
|
||||
Name: "global-1",
|
||||
BaseURLs: []string{"http://global-1.example.com"},
|
||||
},
|
||||
{
|
||||
Name: "global-2",
|
||||
BaseURLs: []string{"http://global-2.example.com"},
|
||||
},
|
||||
{
|
||||
Name: "custom-1",
|
||||
BaseURLs: []string{"http://custom.example.com"},
|
||||
PackageSets: []string{"notapipeline"},
|
||||
},
|
||||
},
|
||||
result: map[string][]stringSet{
|
||||
"*": {newStringSet([]string{"global-1", "global-2"})},
|
||||
},
|
||||
},
|
||||
"none": { // empty
|
||||
repos: []rpmmd.RepoConfig{},
|
||||
result: map[string][]stringSet{},
|
||||
},
|
||||
}
|
||||
|
||||
distros := distroregistry.NewDefault()
|
||||
for tName, tCase := range testCases {
|
||||
t.Run(tName, func(t *testing.T) {
|
||||
for _, distroName := range distros.List() {
|
||||
d := distros.GetDistro(distroName)
|
||||
for _, archName := range d.ListArches() {
|
||||
arch, err := d.GetArch(archName)
|
||||
require.Nil(err)
|
||||
for _, imageTypeName := range arch.ListImageTypes() {
|
||||
if imageTypeName == "azure-eap7-rhui" {
|
||||
// NOTE (akoutsou): Ideally, at some point we will
|
||||
// have a good way of reading what's supported by
|
||||
// each image type and we can skip or adapt tests
|
||||
// based on this information. For image types with
|
||||
// a preset workload, payload packages are ignored
|
||||
// and dropped.
|
||||
continue
|
||||
}
|
||||
t.Run(fmt.Sprintf("%s/%s/%s", distroName, archName, imageTypeName), func(t *testing.T) {
|
||||
imageType, err := arch.GetImageType(imageTypeName)
|
||||
require.Nil(err)
|
||||
|
||||
// set up bare minimum args for image type
|
||||
var customizations *blueprint.Customizations
|
||||
if imageType.Name() == "edge-simplified-installer" {
|
||||
customizations = &blueprint.Customizations{
|
||||
InstallationDevice: "/dev/null",
|
||||
}
|
||||
}
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: customizations,
|
||||
Packages: []blueprint.Package{
|
||||
{Name: "filesystem"},
|
||||
},
|
||||
}
|
||||
options := distro.ImageOptions{}
|
||||
|
||||
// Add ostree options for image types that require them
|
||||
options.OSTree = &ostree.ImageOptions{
|
||||
URL: "https://example.com",
|
||||
}
|
||||
|
||||
repos := tCase.repos
|
||||
manifest, _, err := imageType.Manifest(&bp, options, repos, 0)
|
||||
require.NoError(err)
|
||||
packageSets := manifest.GetPackageSetChains()
|
||||
|
||||
var globals stringSet
|
||||
if len(tCase.result["*"]) > 0 {
|
||||
globals = tCase.result["*"][0]
|
||||
}
|
||||
for psName, psChain := range packageSets {
|
||||
|
||||
expChain := tCase.result[psName]
|
||||
if len(expChain) > 0 {
|
||||
// if we specified an expected chain it should match the returned.
|
||||
if len(expChain) != len(psChain) {
|
||||
t.Fatalf("expected %d package sets in the %q chain; got %d", len(expChain), psName, len(psChain))
|
||||
}
|
||||
} else {
|
||||
// if we didn't, initialise to empty before merging globals
|
||||
expChain = make([]stringSet, len(psChain))
|
||||
}
|
||||
|
||||
for idx := range expChain {
|
||||
// merge the globals into each expected set
|
||||
expChain[idx] = expChain[idx].Merge(globals)
|
||||
}
|
||||
|
||||
for setIdx, set := range psChain {
|
||||
// collect repositories in the package set
|
||||
repoNamesSet := newStringSet(nil)
|
||||
for _, repo := range set.Repositories {
|
||||
repoNamesSet.Add(repo.Name)
|
||||
}
|
||||
|
||||
// expected set for current package set should be merged with globals
|
||||
expected := expChain[setIdx]
|
||||
if !repoNamesSet.Equals(expected) {
|
||||
t.Errorf("repos for package set %q [idx: %d] %s (distro %q image type %q) do not match expected %s", psName, setIdx, repoNamesSet, d.Name(), imageType.Name(), expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
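
The comments at the top of TestPipelineRepositories summarize the convention the test cases encode: a repository without a PackageSets key is global and ends up in every package set, while one that names pipelines is merged only into the matching sets, and user payload repositories get the "blueprint" key. A minimal sketch of such a configuration, with hypothetical repository names:

package distro_test

import "github.com/osbuild/osbuild-composer/internal/rpmmd"

// exampleRepos is illustrative and not part of the original test file.
var exampleRepos = []rpmmd.RepoConfig{
	{
		Name:     "baseos", // global: no PackageSets, applies to every set
		BaseURLs: []string{"http://baseos.example.com"},
	},
	{
		Name:        "build-tools", // restricted to the build pipeline's set
		BaseURLs:    []string{"http://build-tools.example.com"},
		PackageSets: []string{"build"},
	},
	{
		Name:        "user-payload", // user repos are tagged "blueprint" by the APIs
		BaseURLs:    []string{"http://payload.example.com"},
		PackageSets: []string{"blueprint"},
	},
}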
|
||||
|
||||
// a very basic implementation of a Set of strings
|
||||
type stringSet struct {
|
||||
elems map[string]bool
|
||||
}
|
||||
|
||||
func newStringSet(init []string) stringSet {
|
||||
s := stringSet{elems: make(map[string]bool)}
|
||||
for _, elem := range init {
|
||||
s.Add(elem)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s stringSet) String() string {
|
||||
elemSlice := make([]string, 0, len(s.elems))
|
||||
for elem := range s.elems {
|
||||
elemSlice = append(elemSlice, elem)
|
||||
}
|
||||
return "{" + strings.Join(elemSlice, ", ") + "}"
|
||||
}
|
||||
|
||||
func (s stringSet) Add(elem string) {
|
||||
s.elems[elem] = true
|
||||
}
|
||||
|
||||
func (s stringSet) Contains(elem string) bool {
|
||||
return s.elems[elem]
|
||||
}
|
||||
|
||||
func (s stringSet) Equals(other stringSet) bool {
|
||||
if len(s.elems) != len(other.elems) {
|
||||
return false
|
||||
}
|
||||
|
||||
for elem := range s.elems {
|
||||
if !other.Contains(elem) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (s stringSet) Merge(other stringSet) stringSet {
|
||||
merged := newStringSet(nil)
|
||||
for elem := range s.elems {
|
||||
merged.Add(elem)
|
||||
}
|
||||
for elem := range other.elems {
|
||||
merged.Add(elem)
|
||||
}
|
||||
return merged
|
||||
}
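
For completeness, a tiny usage sketch of the set helper defined above, showing how global and pipeline-specific repository names get combined in the test (stringSetUsageSketch is not part of the original file):

// stringSetUsageSketch merges a global repo-name set into a pipeline-specific
// one, the same way the test merges globals into each expected chain entry.
func stringSetUsageSketch() bool {
	globals := newStringSet([]string{"global-1", "global-2"})
	osRepos := newStringSet([]string{"os-1"})
	merged := osRepos.Merge(globals)
	return merged.Contains("global-1") && !merged.Equals(globals)
}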
|
||||
|
|
@ -1,815 +0,0 @@
|
|||
package fedora_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/blueprint"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro/distro_test_common"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro/fedora"
|
||||
)
|
||||
|
||||
type fedoraFamilyDistro struct {
|
||||
name string
|
||||
distro distro.Distro
|
||||
}
|
||||
|
||||
var fedoraFamilyDistros = []fedoraFamilyDistro{
|
||||
{
|
||||
name: "fedora",
|
||||
distro: fedora.NewF37(),
|
||||
},
|
||||
{
|
||||
name: "fedora",
|
||||
distro: fedora.NewF38(),
|
||||
},
|
||||
{
|
||||
name: "fedora",
|
||||
distro: fedora.NewF39(),
|
||||
},
|
||||
}
|
||||
|
||||
func TestFilenameFromType(t *testing.T) {
|
||||
type args struct {
|
||||
outputFormat string
|
||||
}
|
||||
type wantResult struct {
|
||||
filename string
|
||||
mimeType string
|
||||
wantErr bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want wantResult
|
||||
}{
|
||||
{
|
||||
name: "ami",
|
||||
args: args{"ami"},
|
||||
want: wantResult{
|
||||
filename: "image.raw",
|
||||
mimeType: "application/octet-stream",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "qcow2",
|
||||
args: args{"qcow2"},
|
||||
want: wantResult{
|
||||
filename: "disk.qcow2",
|
||||
mimeType: "application/x-qemu-disk",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "openstack",
|
||||
args: args{"openstack"},
|
||||
want: wantResult{
|
||||
filename: "disk.qcow2",
|
||||
mimeType: "application/x-qemu-disk",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "vhd",
|
||||
args: args{"vhd"},
|
||||
want: wantResult{
|
||||
filename: "disk.vhd",
|
||||
mimeType: "application/x-vhd",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "vmdk",
|
||||
args: args{"vmdk"},
|
||||
want: wantResult{
|
||||
filename: "disk.vmdk",
|
||||
mimeType: "application/x-vmdk",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ova",
|
||||
args: args{"ova"},
|
||||
want: wantResult{
|
||||
filename: "image.ova",
|
||||
mimeType: "application/ovf",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "container",
|
||||
args: args{"container"},
|
||||
want: wantResult{
|
||||
filename: "container.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "iot-commit",
|
||||
args: args{"iot-commit"},
|
||||
want: wantResult{
|
||||
filename: "commit.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{ // Alias
|
||||
name: "fedora-iot-commit",
|
||||
args: args{"fedora-iot-commit"},
|
||||
want: wantResult{
|
||||
filename: "commit.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "iot-container",
|
||||
args: args{"iot-container"},
|
||||
want: wantResult{
|
||||
filename: "container.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{ // Alias
|
||||
name: "fedora-iot-container",
|
||||
args: args{"fedora-iot-container"},
|
||||
want: wantResult{
|
||||
filename: "container.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "iot-installer",
|
||||
args: args{"iot-installer"},
|
||||
want: wantResult{
|
||||
filename: "installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
{ // Alias
|
||||
name: "fedora-iot-installer",
|
||||
args: args{"fedora-iot-installer"},
|
||||
want: wantResult{
|
||||
filename: "installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "live-installer",
|
||||
args: args{"live-installer"},
|
||||
want: wantResult{
|
||||
filename: "live-installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "image-installer",
|
||||
args: args{"image-installer"},
|
||||
want: wantResult{
|
||||
filename: "installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
{ // Alias
|
||||
name: "fedora-image-installer",
|
||||
args: args{"fedora-image-installer"},
|
||||
want: wantResult{
|
||||
filename: "installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-output-type",
|
||||
args: args{"foobar"},
|
||||
want: wantResult{wantErr: true},
|
||||
},
|
||||
{
|
||||
name: "minimal-raw",
|
||||
args: args{"minimal-raw"},
|
||||
want: wantResult{
|
||||
filename: "raw.img",
|
||||
mimeType: "application/disk",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, dist := range fedoraFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dist := dist.distro
|
||||
arch, _ := dist.GetArch("x86_64")
|
||||
imgType, err := arch.GetImageType(tt.args.outputFormat)
|
||||
if (err != nil) != tt.want.wantErr {
|
||||
t.Errorf("Arch.GetImageType() error = %v, wantErr %v", err, tt.want.wantErr)
|
||||
return
|
||||
}
|
||||
if !tt.want.wantErr {
|
||||
gotFilename := imgType.Filename()
|
||||
gotMIMEType := imgType.MIMEType()
|
||||
if gotFilename != tt.want.filename {
|
||||
t.Errorf("ImageType.Filename() got = %v, want %v", gotFilename, tt.want.filename)
|
||||
}
|
||||
if gotMIMEType != tt.want.mimeType {
|
||||
t.Errorf("ImageType.MIMEType() got1 = %v, want %v", gotMIMEType, tt.want.mimeType)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageType_BuildPackages(t *testing.T) {
|
||||
x8664BuildPackages := []string{
|
||||
"dnf",
|
||||
"dosfstools",
|
||||
"e2fsprogs",
|
||||
"policycoreutils",
|
||||
"qemu-img",
|
||||
"selinux-policy-targeted",
|
||||
"systemd",
|
||||
"tar",
|
||||
"xz",
|
||||
"grub2-pc",
|
||||
}
|
||||
aarch64BuildPackages := []string{
|
||||
"dnf",
|
||||
"dosfstools",
|
||||
"e2fsprogs",
|
||||
"policycoreutils",
|
||||
"qemu-img",
|
||||
"selinux-policy-targeted",
|
||||
"systemd",
|
||||
"tar",
|
||||
"xz",
|
||||
}
|
||||
buildPackages := map[string][]string{
|
||||
"x86_64": x8664BuildPackages,
|
||||
"aarch64": aarch64BuildPackages,
|
||||
}
|
||||
for _, dist := range fedoraFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
d := dist.distro
|
||||
for _, archLabel := range d.ListArches() {
|
||||
archStruct, err := d.GetArch(archLabel)
|
||||
if assert.NoErrorf(t, err, "d.GetArch(%v) returned err = %v; expected nil", archLabel, err) {
|
||||
continue
|
||||
}
|
||||
for _, itLabel := range archStruct.ListImageTypes() {
|
||||
itStruct, err := archStruct.GetImageType(itLabel)
|
||||
if assert.NoErrorf(t, err, "d.GetArch(%v) returned err = %v; expected nil", archLabel, err) {
|
||||
continue
|
||||
}
|
||||
manifest, _, err := itStruct.Manifest(&blueprint.Blueprint{}, distro.ImageOptions{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
buildPkgs := manifest.GetPackageSetChains()["build"]
|
||||
assert.NotNil(t, buildPkgs)
|
||||
assert.Len(t, buildPkgs, 1)
|
||||
assert.ElementsMatch(t, buildPackages[archLabel], buildPkgs[0].Include)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageType_Name(t *testing.T) {
|
||||
imgMap := []struct {
|
||||
arch string
|
||||
imgNames []string
|
||||
}{
|
||||
{
|
||||
arch: "x86_64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"vhd",
|
||||
"vmdk",
|
||||
"ova",
|
||||
"ami",
|
||||
"iot-commit",
|
||||
"iot-container",
|
||||
"iot-installer",
|
||||
"iot-raw-image",
|
||||
"oci",
|
||||
"image-installer",
|
||||
"live-installer",
|
||||
"minimal-raw",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "aarch64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"ami",
|
||||
"oci",
|
||||
"iot-commit",
|
||||
"iot-container",
|
||||
"iot-installer",
|
||||
"iot-raw-image",
|
||||
"image-installer",
|
||||
"minimal-raw",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range fedoraFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, mapping := range imgMap {
|
||||
if mapping.arch == "s390x" {
|
||||
continue
|
||||
}
|
||||
arch, err := dist.distro.GetArch(mapping.arch)
|
||||
if assert.NoError(t, err) {
|
||||
for _, imgName := range mapping.imgNames {
|
||||
if imgName == "iot-commit" {
|
||||
continue
|
||||
}
|
||||
imgType, err := arch.GetImageType(imgName)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equalf(t, imgName, imgType.Name(), "arch: %s", mapping.arch)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageTypeAliases(t *testing.T) {
|
||||
type args struct {
|
||||
imageTypeAliases []string
|
||||
}
|
||||
type wantResult struct {
|
||||
imageTypeName string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want wantResult
|
||||
}{
|
||||
{
|
||||
name: "iot-commit aliases",
|
||||
args: args{
|
||||
imageTypeAliases: []string{"fedora-iot-commit"},
|
||||
},
|
||||
want: wantResult{
|
||||
imageTypeName: "iot-commit",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "iot-container aliases",
|
||||
args: args{
|
||||
imageTypeAliases: []string{"fedora-iot-container"},
|
||||
},
|
||||
want: wantResult{
|
||||
imageTypeName: "iot-container",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "iot-installer aliases",
|
||||
args: args{
|
||||
imageTypeAliases: []string{"fedora-iot-installer"},
|
||||
},
|
||||
want: wantResult{
|
||||
imageTypeName: "iot-installer",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, dist := range fedoraFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dist := dist.distro
|
||||
for _, archName := range dist.ListArches() {
|
||||
t.Run(archName, func(t *testing.T) {
|
||||
arch, err := dist.GetArch(archName)
|
||||
require.Nilf(t, err,
|
||||
"failed to get architecture '%s', previously listed as supported for the distro '%s'",
|
||||
archName, dist.Name())
|
||||
// Test image type aliases only if the aliased image type is supported for the arch
|
||||
if _, err = arch.GetImageType(tt.want.imageTypeName); err != nil {
|
||||
t.Skipf("aliased image type '%s' is not supported for architecture '%s'",
|
||||
tt.want.imageTypeName, archName)
|
||||
}
|
||||
for _, alias := range tt.args.imageTypeAliases {
|
||||
t.Run(fmt.Sprintf("'%s' alias for image type '%s'", alias, tt.want.imageTypeName),
|
||||
func(t *testing.T) {
|
||||
gotImage, err := arch.GetImageType(alias)
|
||||
require.Nilf(t, err, "arch.GetImageType() for image type alias '%s' failed: %v",
|
||||
alias, err)
|
||||
assert.Equalf(t, tt.want.imageTypeName, gotImage.Name(),
|
||||
"got unexpected image type name for alias '%s'. got = %s, want = %s",
|
||||
alias, tt.want.imageTypeName, gotImage.Name())
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Check that Manifest() function returns an error for unsupported
|
||||
// configurations.
|
||||
func TestDistro_ManifestError(t *testing.T) {
|
||||
// Currently, the only unsupported configuration is OSTree commit types
|
||||
// with Kernel boot options
|
||||
fedoraDistro := fedora.NewF37()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Kernel: &blueprint.KernelCustomization{
|
||||
Append: "debug",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, archName := range fedoraDistro.ListArches() {
|
||||
arch, _ := fedoraDistro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
imgOpts := distro.ImageOptions{
|
||||
Size: imgType.Size(0),
|
||||
}
|
||||
_, _, err := imgType.Manifest(&bp, imgOpts, nil, 0)
|
||||
if imgTypeName == "iot-commit" || imgTypeName == "iot-container" {
|
||||
assert.EqualError(t, err, "kernel boot parameter customizations are not supported for ostree types")
|
||||
} else if imgTypeName == "iot-installer" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("boot ISO image type \"%s\" requires specifying a URL from which to retrieve the OSTree commit", imgTypeName))
|
||||
} else if imgTypeName == "image-installer" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for boot ISO image type \"%s\": (allowed: User, Group)", imgTypeName))
|
||||
} else if imgTypeName == "live-installer" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for boot ISO image type \"%s\": (allowed: None)", imgTypeName))
|
||||
} else if imgTypeName == "iot-raw-image" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for image type %q: (allowed: User, Group, Directories, Files, Services)", imgTypeName))
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestArchitecture_ListImageTypes(t *testing.T) {
|
||||
imgMap := []struct {
|
||||
arch string
|
||||
imgNames []string
|
||||
fedoraAdditionalImageTypes []string
|
||||
}{
|
||||
{
|
||||
arch: "x86_64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"vhd",
|
||||
"vmdk",
|
||||
"ova",
|
||||
"ami",
|
||||
"iot-commit",
|
||||
"iot-container",
|
||||
"iot-installer",
|
||||
"iot-raw-image",
|
||||
"oci",
|
||||
"container",
|
||||
"image-installer",
|
||||
"live-installer",
|
||||
"minimal-raw",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "aarch64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"ami",
|
||||
"iot-commit",
|
||||
"iot-container",
|
||||
"iot-installer",
|
||||
"iot-raw-image",
|
||||
"oci",
|
||||
"container",
|
||||
"image-installer",
|
||||
"live-installer",
|
||||
"minimal-raw",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range fedoraFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, mapping := range imgMap {
|
||||
arch, err := dist.distro.GetArch(mapping.arch)
|
||||
require.NoError(t, err)
|
||||
imageTypes := arch.ListImageTypes()
|
||||
|
||||
var expectedImageTypes []string
|
||||
expectedImageTypes = append(expectedImageTypes, mapping.imgNames...)
|
||||
if dist.name == "fedora" {
|
||||
expectedImageTypes = append(expectedImageTypes, mapping.fedoraAdditionalImageTypes...)
|
||||
}
|
||||
|
||||
require.ElementsMatch(t, expectedImageTypes, imageTypes)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFedora_ListArches(t *testing.T) {
|
||||
arches := fedora.NewF37().ListArches()
|
||||
assert.Equal(t, []string{"aarch64", "x86_64"}, arches)
|
||||
}
|
||||
|
||||
func TestFedora37_GetArch(t *testing.T) {
|
||||
arches := []struct {
|
||||
name string
|
||||
errorExpected bool
|
||||
errorExpectedInCentos bool
|
||||
}{
|
||||
{
|
||||
name: "x86_64",
|
||||
},
|
||||
{
|
||||
name: "aarch64",
|
||||
},
|
||||
{
|
||||
name: "s390x",
|
||||
errorExpected: true,
|
||||
},
|
||||
{
|
||||
name: "ppc64le",
|
||||
errorExpected: true,
|
||||
},
|
||||
{
|
||||
name: "foo-arch",
|
||||
errorExpected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range fedoraFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, a := range arches {
|
||||
actualArch, err := dist.distro.GetArch(a.name)
|
||||
if a.errorExpected {
|
||||
assert.Nil(t, actualArch)
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.Equal(t, a.name, actualArch.Name())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFedora37_Name(t *testing.T) {
|
||||
distro := fedora.NewF37()
|
||||
assert.Equal(t, "fedora-37", distro.Name())
|
||||
}
|
||||
|
||||
func TestFedora37_KernelOption(t *testing.T) {
|
||||
distro_test_common.TestDistro_KernelOption(t, fedora.NewF37())
|
||||
}
|
||||
|
||||
func TestFedora_OSTreeOptions(t *testing.T) {
|
||||
distro_test_common.TestDistro_OSTreeOptions(t, fedora.NewF37())
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemManifestError(t *testing.T) {
|
||||
fedoraDistro := fedora.NewF37()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/etc",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range fedoraDistro.ListArches() {
|
||||
arch, _ := fedoraDistro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if imgTypeName == "iot-commit" || imgTypeName == "iot-container" {
|
||||
assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
|
||||
} else if imgTypeName == "iot-raw-image" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for image type %q: (allowed: User, Group, Directories, Files, Services)", imgTypeName))
|
||||
} else if imgTypeName == "iot-installer" || imgTypeName == "image-installer" {
|
||||
continue
|
||||
} else if imgTypeName == "live-installer" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for boot ISO image type \"%s\": (allowed: None)", imgTypeName))
|
||||
} else {
|
||||
assert.EqualError(t, err, "The following custom mountpoints are not supported [\"/etc\"]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_TestRootMountPoint(t *testing.T) {
|
||||
fedoraDistro := fedora.NewF37()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range fedoraDistro.ListArches() {
|
||||
arch, _ := fedoraDistro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if imgTypeName == "iot-commit" || imgTypeName == "iot-container" {
|
||||
assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
|
||||
} else if imgTypeName == "iot-raw-image" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for image type %q: (allowed: User, Group, Directories, Files, Services)", imgTypeName))
|
||||
} else if imgTypeName == "iot-installer" || imgTypeName == "image-installer" {
|
||||
continue
|
||||
} else if imgTypeName == "live-installer" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for boot ISO image type \"%s\": (allowed: None)", imgTypeName))
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemSubDirectories(t *testing.T) {
|
||||
fedoraDistro := fedora.NewF37()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/log",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/log/audit",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range fedoraDistro.ListArches() {
|
||||
arch, _ := fedoraDistro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if strings.HasPrefix(imgTypeName, "iot-") || strings.HasPrefix(imgTypeName, "image-") {
|
||||
continue
|
||||
} else if imgTypeName == "live-installer" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for boot ISO image type \"%s\": (allowed: None)", imgTypeName))
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_MountpointsWithArbitraryDepthAllowed(t *testing.T) {
|
||||
fedoraDistro := fedora.NewF37()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b/c",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b/c/d",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range fedoraDistro.ListArches() {
|
||||
arch, _ := fedoraDistro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if strings.HasPrefix(imgTypeName, "iot-") || strings.HasPrefix(imgTypeName, "image-") {
|
||||
continue
|
||||
} else if imgTypeName == "live-installer" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for boot ISO image type \"%s\": (allowed: None)", imgTypeName))
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_DirtyMountpointsNotAllowed(t *testing.T) {
	fedoraDistro := fedora.NewF37()
	bp := blueprint.Blueprint{
		Customizations: &blueprint.Customizations{
			Filesystem: []blueprint.FilesystemCustomization{
				{
					MinSize:    1024,
					Mountpoint: "//",
				},
				{
					MinSize:    1024,
					Mountpoint: "/var//",
				},
				{
					MinSize:    1024,
					Mountpoint: "/var//log/audit/",
				},
			},
		},
	}
	for _, archName := range fedoraDistro.ListArches() {
		arch, _ := fedoraDistro.GetArch(archName)
		for _, imgTypeName := range arch.ListImageTypes() {
			imgType, _ := arch.GetImageType(imgTypeName)
			_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
			if strings.HasPrefix(imgTypeName, "iot-") || strings.HasPrefix(imgTypeName, "image-") {
				continue
			} else if imgTypeName == "live-installer" {
				assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for boot ISO image type \"%s\": (allowed: None)", imgTypeName))
			} else {
				assert.EqualError(t, err, "The following custom mountpoints are not supported [\"//\" \"/var//\" \"/var//log/audit/\"]")
			}
		}
	}
}

func TestDistro_CustomFileSystemPatternMatching(t *testing.T) {
	fedoraDistro := fedora.NewF37()
	bp := blueprint.Blueprint{
		Customizations: &blueprint.Customizations{
			Filesystem: []blueprint.FilesystemCustomization{
				{
					MinSize:    1024,
					Mountpoint: "/variable",
				},
				{
					MinSize:    1024,
					Mountpoint: "/variable/log/audit",
				},
			},
		},
	}
	for _, archName := range fedoraDistro.ListArches() {
		arch, _ := fedoraDistro.GetArch(archName)
		for _, imgTypeName := range arch.ListImageTypes() {
			imgType, _ := arch.GetImageType(imgTypeName)
			_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
			if imgTypeName == "iot-commit" || imgTypeName == "iot-container" {
				assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
			} else if imgTypeName == "iot-raw-image" {
				assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for image type %q: (allowed: User, Group, Directories, Files, Services)", imgTypeName))
			} else if imgTypeName == "iot-installer" || imgTypeName == "image-installer" {
				continue
			} else if imgTypeName == "live-installer" {
				assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for boot ISO image type \"%s\": (allowed: None)", imgTypeName))
			} else {
				assert.EqualError(t, err, "The following custom mountpoints are not supported [\"/variable\" \"/variable/log/audit\"]")
			}
		}
	}
}

func TestDistro_CustomUsrPartitionNotLargeEnough(t *testing.T) {
	fedoraDistro := fedora.NewF37()
	bp := blueprint.Blueprint{
		Customizations: &blueprint.Customizations{
			Filesystem: []blueprint.FilesystemCustomization{
				{
					MinSize:    1024,
					Mountpoint: "/usr",
				},
			},
		},
	}
	for _, archName := range fedoraDistro.ListArches() {
		arch, _ := fedoraDistro.GetArch(archName)
		for _, imgTypeName := range arch.ListImageTypes() {
			imgType, _ := arch.GetImageType(imgTypeName)
			_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
			if imgTypeName == "iot-commit" || imgTypeName == "iot-container" {
				assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
			} else if imgTypeName == "iot-raw-image" {
				assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for image type %q: (allowed: User, Group, Directories, Files, Services)", imgTypeName))
			} else if imgTypeName == "iot-installer" || imgTypeName == "image-installer" {
				continue
			} else if imgTypeName == "live-installer" {
				assert.EqualError(t, err, fmt.Sprintf("unsupported blueprint customizations found for boot ISO image type \"%s\": (allowed: None)", imgTypeName))
			} else {
				assert.NoError(t, err)
			}
		}
	}
}
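The "dirty mountpoint" cases above reject paths such as "//", "/var//" and "/var//log/audit/". As a rough illustration of the idea only (a minimal, self-contained sketch, not the project's actual validation code), such a mountpoint can be flagged whenever it differs from its cleaned form:

package main

import (
	"fmt"
	"path"
)

// isDirtyMountpoint reports whether a mountpoint has redundant or trailing
// slashes, i.e. whether path.Clean would rewrite it.
func isDirtyMountpoint(mp string) bool {
	return path.Clean(mp) != mp
}

func main() {
	for _, mp := range []string{"/var/log", "//", "/var//", "/var//log/audit/"} {
		fmt.Printf("%q dirty=%v\n", mp, isDirtyMountpoint(mp))
	}
}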
@@ -1,218 +0,0 @@
package distro

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/osbuild/osbuild-composer/internal/common"
	"github.com/osbuild/osbuild-composer/internal/osbuild"
)

func TestImageConfigInheritFrom(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
distroConfig *ImageConfig
|
||||
imageConfig *ImageConfig
|
||||
expectedConfig *ImageConfig
|
||||
}{
|
||||
{
|
||||
name: "inheritance with overridden values",
|
||||
distroConfig: &ImageConfig{
|
||||
Timezone: common.ToPtr("America/New_York"),
|
||||
TimeSynchronization: &osbuild.ChronyStageOptions{
|
||||
Servers: []osbuild.ChronyConfigServer{{Hostname: "127.0.0.1"}},
|
||||
},
|
||||
Locale: common.ToPtr("en_US.UTF-8"),
|
||||
Keyboard: &osbuild.KeymapStageOptions{
|
||||
Keymap: "us",
|
||||
},
|
||||
EnabledServices: []string{"sshd"},
|
||||
DisabledServices: []string{"named"},
|
||||
DefaultTarget: common.ToPtr("multi-user.target"),
|
||||
Sysconfig: []*osbuild.SysconfigStageOptions{
|
||||
{
|
||||
Kernel: &osbuild.SysconfigKernelOptions{
|
||||
UpdateDefault: true,
|
||||
DefaultKernel: "kernel",
|
||||
},
|
||||
Network: &osbuild.SysconfigNetworkOptions{
|
||||
Networking: true,
|
||||
NoZeroConf: true,
|
||||
},
|
||||
NetworkScripts: &osbuild.NetworkScriptsOptions{
|
||||
IfcfgFiles: map[string]osbuild.IfcfgFile{
|
||||
"eth0": {
|
||||
Device: "eth0",
|
||||
Bootproto: osbuild.IfcfgBootprotoDHCP,
|
||||
OnBoot: common.ToPtr(true),
|
||||
Type: osbuild.IfcfgTypeEthernet,
|
||||
UserCtl: common.ToPtr(true),
|
||||
PeerDNS: common.ToPtr(true),
|
||||
IPv6Init: common.ToPtr(false),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
imageConfig: &ImageConfig{
|
||||
Timezone: common.ToPtr("UTC"),
|
||||
TimeSynchronization: &osbuild.ChronyStageOptions{
|
||||
Servers: []osbuild.ChronyConfigServer{
|
||||
{
|
||||
Hostname: "169.254.169.123",
|
||||
Prefer: common.ToPtr(true),
|
||||
Iburst: common.ToPtr(true),
|
||||
Minpoll: common.ToPtr(4),
|
||||
Maxpoll: common.ToPtr(4),
|
||||
},
|
||||
},
|
||||
LeapsecTz: common.ToPtr(""),
|
||||
},
|
||||
},
|
||||
expectedConfig: &ImageConfig{
|
||||
Timezone: common.ToPtr("UTC"),
|
||||
TimeSynchronization: &osbuild.ChronyStageOptions{
|
||||
Servers: []osbuild.ChronyConfigServer{
|
||||
{
|
||||
Hostname: "169.254.169.123",
|
||||
Prefer: common.ToPtr(true),
|
||||
Iburst: common.ToPtr(true),
|
||||
Minpoll: common.ToPtr(4),
|
||||
Maxpoll: common.ToPtr(4),
|
||||
},
|
||||
},
|
||||
LeapsecTz: common.ToPtr(""),
|
||||
},
|
||||
Locale: common.ToPtr("en_US.UTF-8"),
|
||||
Keyboard: &osbuild.KeymapStageOptions{
|
||||
Keymap: "us",
|
||||
},
|
||||
EnabledServices: []string{"sshd"},
|
||||
DisabledServices: []string{"named"},
|
||||
DefaultTarget: common.ToPtr("multi-user.target"),
|
||||
Sysconfig: []*osbuild.SysconfigStageOptions{
|
||||
{
|
||||
Kernel: &osbuild.SysconfigKernelOptions{
|
||||
UpdateDefault: true,
|
||||
DefaultKernel: "kernel",
|
||||
},
|
||||
Network: &osbuild.SysconfigNetworkOptions{
|
||||
Networking: true,
|
||||
NoZeroConf: true,
|
||||
},
|
||||
NetworkScripts: &osbuild.NetworkScriptsOptions{
|
||||
IfcfgFiles: map[string]osbuild.IfcfgFile{
|
||||
"eth0": {
|
||||
Device: "eth0",
|
||||
Bootproto: osbuild.IfcfgBootprotoDHCP,
|
||||
OnBoot: common.ToPtr(true),
|
||||
Type: osbuild.IfcfgTypeEthernet,
|
||||
UserCtl: common.ToPtr(true),
|
||||
PeerDNS: common.ToPtr(true),
|
||||
IPv6Init: common.ToPtr(false),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty image type configuration",
|
||||
distroConfig: &ImageConfig{
|
||||
Timezone: common.ToPtr("America/New_York"),
|
||||
TimeSynchronization: &osbuild.ChronyStageOptions{
|
||||
Servers: []osbuild.ChronyConfigServer{{Hostname: "127.0.0.1"}},
|
||||
},
|
||||
Locale: common.ToPtr("en_US.UTF-8"),
|
||||
Keyboard: &osbuild.KeymapStageOptions{
|
||||
Keymap: "us",
|
||||
},
|
||||
EnabledServices: []string{"sshd"},
|
||||
DisabledServices: []string{"named"},
|
||||
DefaultTarget: common.ToPtr("multi-user.target"),
|
||||
},
|
||||
imageConfig: &ImageConfig{},
|
||||
expectedConfig: &ImageConfig{
|
||||
Timezone: common.ToPtr("America/New_York"),
|
||||
TimeSynchronization: &osbuild.ChronyStageOptions{
|
||||
Servers: []osbuild.ChronyConfigServer{{Hostname: "127.0.0.1"}},
|
||||
},
|
||||
Locale: common.ToPtr("en_US.UTF-8"),
|
||||
Keyboard: &osbuild.KeymapStageOptions{
|
||||
Keymap: "us",
|
||||
},
|
||||
EnabledServices: []string{"sshd"},
|
||||
DisabledServices: []string{"named"},
|
||||
DefaultTarget: common.ToPtr("multi-user.target"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty distro configuration",
|
||||
distroConfig: &ImageConfig{},
|
||||
imageConfig: &ImageConfig{
|
||||
Timezone: common.ToPtr("America/New_York"),
|
||||
TimeSynchronization: &osbuild.ChronyStageOptions{
|
||||
Servers: []osbuild.ChronyConfigServer{{Hostname: "127.0.0.1"}},
|
||||
},
|
||||
Locale: common.ToPtr("en_US.UTF-8"),
|
||||
Keyboard: &osbuild.KeymapStageOptions{
|
||||
Keymap: "us",
|
||||
},
|
||||
EnabledServices: []string{"sshd"},
|
||||
DisabledServices: []string{"named"},
|
||||
DefaultTarget: common.ToPtr("multi-user.target"),
|
||||
},
|
||||
expectedConfig: &ImageConfig{
|
||||
Timezone: common.ToPtr("America/New_York"),
|
||||
TimeSynchronization: &osbuild.ChronyStageOptions{
|
||||
Servers: []osbuild.ChronyConfigServer{{Hostname: "127.0.0.1"}},
|
||||
},
|
||||
Locale: common.ToPtr("en_US.UTF-8"),
|
||||
Keyboard: &osbuild.KeymapStageOptions{
|
||||
Keymap: "us",
|
||||
},
|
||||
EnabledServices: []string{"sshd"},
|
||||
DisabledServices: []string{"named"},
|
||||
DefaultTarget: common.ToPtr("multi-user.target"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty distro configuration",
|
||||
distroConfig: nil,
|
||||
imageConfig: &ImageConfig{
|
||||
Timezone: common.ToPtr("America/New_York"),
|
||||
TimeSynchronization: &osbuild.ChronyStageOptions{
|
||||
Servers: []osbuild.ChronyConfigServer{{Hostname: "127.0.0.1"}},
|
||||
},
|
||||
Locale: common.ToPtr("en_US.UTF-8"),
|
||||
Keyboard: &osbuild.KeymapStageOptions{
|
||||
Keymap: "us",
|
||||
},
|
||||
EnabledServices: []string{"sshd"},
|
||||
DisabledServices: []string{"named"},
|
||||
DefaultTarget: common.ToPtr("multi-user.target"),
|
||||
},
|
||||
expectedConfig: &ImageConfig{
|
||||
Timezone: common.ToPtr("America/New_York"),
|
||||
TimeSynchronization: &osbuild.ChronyStageOptions{
|
||||
Servers: []osbuild.ChronyConfigServer{{Hostname: "127.0.0.1"}},
|
||||
},
|
||||
Locale: common.ToPtr("en_US.UTF-8"),
|
||||
Keyboard: &osbuild.KeymapStageOptions{
|
||||
Keymap: "us",
|
||||
},
|
||||
EnabledServices: []string{"sshd"},
|
||||
DisabledServices: []string{"named"},
|
||||
DefaultTarget: common.ToPtr("multi-user.target"),
|
||||
},
|
||||
},
|
||||
}
|
||||
for idx, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equalf(t, tt.expectedConfig, tt.imageConfig.InheritFrom(tt.distroConfig), "test case %q failed (idx %d)", tt.name, idx)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
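The InheritFrom table above encodes a single rule: a value set on the image-type config wins, and anything left unset falls back to the distro default (so Timezone "UTC" overrides "America/New_York" while Locale is inherited). A minimal sketch of that rule with a hypothetical two-field config, not the ImageConfig implementation itself:

package main

import "fmt"

// ExampleConfig is a hypothetical config used only to illustrate the
// override-then-fallback rule exercised by the InheritFrom tests.
type ExampleConfig struct {
	Timezone *string
	Locale   *string
}

func strPtr(s string) *string { return &s }

// inheritFrom returns a copy of c with any nil field filled from parent.
func (c ExampleConfig) inheritFrom(parent ExampleConfig) ExampleConfig {
	out := c
	if out.Timezone == nil {
		out.Timezone = parent.Timezone
	}
	if out.Locale == nil {
		out.Locale = parent.Locale
	}
	return out
}

func main() {
	distroDefaults := ExampleConfig{Timezone: strPtr("America/New_York"), Locale: strPtr("en_US.UTF-8")}
	imageCfg := ExampleConfig{Timezone: strPtr("UTC")}
	merged := imageCfg.inheritFrom(distroDefaults)
	fmt.Println(*merged.Timezone, *merged.Locale) // UTC en_US.UTF-8
}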
@@ -1,452 +0,0 @@
package rhel7_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/osbuild/osbuild-composer/internal/blueprint"
	"github.com/osbuild/osbuild-composer/internal/distro"
	"github.com/osbuild/osbuild-composer/internal/distro/distro_test_common"
	"github.com/osbuild/osbuild-composer/internal/distro/rhel7"
)

type rhelFamilyDistro struct {
	name   string
	distro distro.Distro
}

var rhelFamilyDistros = []rhelFamilyDistro{
	{
		name:   "rhel",
		distro: rhel7.New(),
	},
}

func TestFilenameFromType(t *testing.T) {
|
||||
type args struct {
|
||||
outputFormat string
|
||||
}
|
||||
type wantResult struct {
|
||||
filename string
|
||||
mimeType string
|
||||
wantErr bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want wantResult
|
||||
}{
|
||||
{
|
||||
name: "qcow2",
|
||||
args: args{"qcow2"},
|
||||
want: wantResult{
|
||||
filename: "disk.qcow2",
|
||||
mimeType: "application/x-qemu-disk",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "azure-rhui",
|
||||
args: args{"azure-rhui"},
|
||||
want: wantResult{
|
||||
filename: "disk.vhd.xz",
|
||||
mimeType: "application/xz",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-output-type",
|
||||
args: args{"foobar"},
|
||||
want: wantResult{wantErr: true},
|
||||
},
|
||||
}
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dist := dist.distro
|
||||
arch, _ := dist.GetArch("x86_64")
|
||||
imgType, err := arch.GetImageType(tt.args.outputFormat)
|
||||
if (err != nil) != tt.want.wantErr {
|
||||
t.Errorf("Arch.GetImageType() error = %v, wantErr %v", err, tt.want.wantErr)
|
||||
return
|
||||
}
|
||||
if !tt.want.wantErr {
|
||||
gotFilename := imgType.Filename()
|
||||
gotMIMEType := imgType.MIMEType()
|
||||
if gotFilename != tt.want.filename {
|
||||
t.Errorf("ImageType.Filename() got = %v, want %v", gotFilename, tt.want.filename)
|
||||
}
|
||||
if gotMIMEType != tt.want.mimeType {
|
||||
t.Errorf("ImageType.MIMEType() got1 = %v, want %v", gotMIMEType, tt.want.mimeType)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageType_BuildPackages(t *testing.T) {
|
||||
x8664BuildPackages := []string{
|
||||
"dnf",
|
||||
"dosfstools",
|
||||
"e2fsprogs",
|
||||
"grub2-efi-x64",
|
||||
"grub2-pc",
|
||||
"policycoreutils",
|
||||
"shim-x64",
|
||||
"systemd",
|
||||
"tar",
|
||||
"qemu-img",
|
||||
"xz",
|
||||
}
|
||||
buildPackages := map[string][]string{
|
||||
"x86_64": x8664BuildPackages,
|
||||
}
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
d := dist.distro
|
||||
for _, archLabel := range d.ListArches() {
|
||||
archStruct, err := d.GetArch(archLabel)
|
||||
if assert.NoErrorf(t, err, "d.GetArch(%v) returned err = %v; expected nil", archLabel, err) {
|
||||
continue
|
||||
}
|
||||
for _, itLabel := range archStruct.ListImageTypes() {
|
||||
itStruct, err := archStruct.GetImageType(itLabel)
|
||||
if assert.NoErrorf(t, err, "d.GetArch(%v) returned err = %v; expected nil", archLabel, err) {
|
||||
continue
|
||||
}
|
||||
manifest, _, err := itStruct.Manifest(&blueprint.Blueprint{}, distro.ImageOptions{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
buildPkgs := manifest.GetPackageSetChains()["build"]
|
||||
assert.NotNil(t, buildPkgs)
|
||||
assert.Len(t, buildPkgs, 1)
|
||||
assert.ElementsMatch(t, buildPackages[archLabel], buildPkgs[0].Include)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageType_Name(t *testing.T) {
|
||||
imgMap := []struct {
|
||||
arch string
|
||||
imgNames []string
|
||||
}{
|
||||
{
|
||||
arch: "x86_64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"azure-rhui",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, mapping := range imgMap {
|
||||
arch, err := dist.distro.GetArch(mapping.arch)
|
||||
if assert.NoError(t, err) {
|
||||
for _, imgName := range mapping.imgNames {
|
||||
imgType, err := arch.GetImageType(imgName)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equalf(t, imgName, imgType.Name(), "arch: %s", mapping.arch)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Check that Manifest() function returns an error for unsupported
|
||||
// configurations.
|
||||
func TestDistro_ManifestError(t *testing.T) {
|
||||
r7distro := rhel7.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Kernel: &blueprint.KernelCustomization{
|
||||
Append: "debug",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, archName := range r7distro.ListArches() {
|
||||
arch, _ := r7distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
imgOpts := distro.ImageOptions{
|
||||
Size: imgType.Size(0),
|
||||
}
|
||||
_, _, err := imgType.Manifest(&bp, imgOpts, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestArchitecture_ListImageTypes(t *testing.T) {
|
||||
imgMap := []struct {
|
||||
arch string
|
||||
imgNames []string
|
||||
rhelAdditionalImageTypes []string
|
||||
}{
|
||||
{
|
||||
arch: "x86_64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"azure-rhui",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, mapping := range imgMap {
|
||||
arch, err := dist.distro.GetArch(mapping.arch)
|
||||
require.NoError(t, err)
|
||||
imageTypes := arch.ListImageTypes()
|
||||
|
||||
var expectedImageTypes []string
|
||||
expectedImageTypes = append(expectedImageTypes, mapping.imgNames...)
|
||||
if dist.name == "rhel" {
|
||||
expectedImageTypes = append(expectedImageTypes, mapping.rhelAdditionalImageTypes...)
|
||||
}
|
||||
|
||||
require.ElementsMatch(t, expectedImageTypes, imageTypes)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRhel7_ListArches(t *testing.T) {
|
||||
arches := rhel7.New().ListArches()
|
||||
assert.Equal(t, []string{"x86_64"}, arches)
|
||||
}
|
||||
|
||||
func TestRhel7_GetArch(t *testing.T) {
|
||||
arches := []struct {
|
||||
name string
|
||||
errorExpected bool
|
||||
errorExpectedInCentos bool
|
||||
}{
|
||||
{
|
||||
name: "x86_64",
|
||||
},
|
||||
{
|
||||
name: "foo-arch",
|
||||
errorExpected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, a := range arches {
|
||||
actualArch, err := dist.distro.GetArch(a.name)
|
||||
if a.errorExpected || (a.errorExpectedInCentos && dist.name == "centos") {
|
||||
assert.Nil(t, actualArch)
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.Equal(t, a.name, actualArch.Name())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRhel7_Name(t *testing.T) {
|
||||
distro := rhel7.New()
|
||||
assert.Equal(t, "rhel-7", distro.Name())
|
||||
}
|
||||
|
||||
func TestRhel7_ModulePlatformID(t *testing.T) {
|
||||
distro := rhel7.New()
|
||||
assert.Equal(t, "platform:el7", distro.ModulePlatformID())
|
||||
}
|
||||
|
||||
func TestRhel7_KernelOption(t *testing.T) {
|
||||
distro_test_common.TestDistro_KernelOption(t, rhel7.New())
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemManifestError(t *testing.T) {
|
||||
r7distro := rhel7.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/etc",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r7distro.ListArches() {
|
||||
arch, _ := r7distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
assert.EqualError(t, err, "The following custom mountpoints are not supported [\"/etc\"]")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_TestRootMountPoint(t *testing.T) {
|
||||
r7distro := rhel7.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r7distro.ListArches() {
|
||||
arch, _ := r7distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemSubDirectories(t *testing.T) {
|
||||
r7distro := rhel7.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/log",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/log/audit",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r7distro.ListArches() {
|
||||
arch, _ := r7distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_MountpointsWithArbitraryDepthAllowed(t *testing.T) {
|
||||
r7distro := rhel7.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b/c",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b/c/d",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r7distro.ListArches() {
|
||||
arch, _ := r7distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_DirtyMountpointsNotAllowed(t *testing.T) {
|
||||
r7distro := rhel7.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "//",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var//",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var//log/audit/",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r7distro.ListArches() {
|
||||
arch, _ := r7distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
assert.EqualError(t, err, "The following custom mountpoints are not supported [\"//\" \"/var//\" \"/var//log/audit/\"]")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemPatternMatching(t *testing.T) {
|
||||
r7distro := rhel7.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/variable",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/variable/log/audit",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r7distro.ListArches() {
|
||||
arch, _ := r7distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
assert.EqualError(t, err, "The following custom mountpoints are not supported [\"/variable\" \"/variable/log/audit\"]")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_CustomUsrPartitionNotLargeEnough(t *testing.T) {
|
||||
r7distro := rhel7.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/usr",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r7distro.ListArches() {
|
||||
arch, _ := r7distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,73 +0,0 @@
package rhel8

import (
	"fmt"
	"math/rand"
	"testing"

	"github.com/osbuild/osbuild-composer/internal/blueprint"
	"github.com/osbuild/osbuild-composer/internal/distro"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var testBasicImageType = imageType{
	name:                "test",
	basePartitionTables: defaultBasePartitionTables,
}

var testEc2ImageType = imageType{
	name:                "test_ec2",
	basePartitionTables: ec2BasePartitionTables,
}

var mountpoints = []blueprint.FilesystemCustomization{
	{
		MinSize:    1024,
		Mountpoint: "/usr",
	},
}

// math/rand is good enough in this case
/* #nosec G404 */
var rng = rand.New(rand.NewSource(0))

func TestDistro_UnsupportedArch(t *testing.T) {
	testBasicImageType.arch = &architecture{
		name: "unsupported_arch",
	}
	_, err := testBasicImageType.getPartitionTable(mountpoints, distro.ImageOptions{}, rng)
	require.EqualError(t, err, fmt.Sprintf("no partition table defined for architecture %q for image type %q", testBasicImageType.arch.name, testBasicImageType.name))
}

func TestDistro_DefaultPartitionTables(t *testing.T) {
	rhel8distro := New()
	for _, archName := range rhel8distro.ListArches() {
		testBasicImageType.arch = &architecture{
			name: archName,
		}
		pt, err := testBasicImageType.getPartitionTable(mountpoints, distro.ImageOptions{}, rng)
		require.Nil(t, err)
		for _, m := range mountpoints {
			assert.True(t, pt.ContainsMountpoint(m.Mountpoint))
		}
	}
}

func TestDistro_Ec2PartitionTables(t *testing.T) {
	rhel8distro := New()
	for _, archName := range rhel8distro.ListArches() {
		testEc2ImageType.arch = &architecture{
			name: archName,
		}
		pt, err := testEc2ImageType.getPartitionTable(mountpoints, distro.ImageOptions{}, rng)
		if _, exists := testEc2ImageType.basePartitionTables[archName]; exists {
			require.Nil(t, err)
			for _, m := range mountpoints {
				assert.True(t, pt.ContainsMountpoint(m.Mountpoint))
			}
		} else {
			require.EqualError(t, err, fmt.Sprintf("no partition table defined for architecture %q for image type %q", testEc2ImageType.arch.name, testEc2ImageType.name))
		}
	}
}
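The partition-table tests above assert one property: every requested mountpoint ends up in the generated table. A self-contained sketch of that containment check with hypothetical stand-in types (the real partition table type is part of the project's internals and is not shown here):

package main

import "fmt"

// Hypothetical stand-ins for illustration only.
type partition struct{ mountpoint string }

type partitionTable struct{ partitions []partition }

// ContainsMountpoint reports whether any partition in the table is mounted
// at the given path.
func (pt partitionTable) ContainsMountpoint(mp string) bool {
	for _, p := range pt.partitions {
		if p.mountpoint == mp {
			return true
		}
	}
	return false
}

func main() {
	pt := partitionTable{partitions: []partition{{"/"}, {"/boot"}, {"/usr"}}}
	fmt.Println(pt.ContainsMountpoint("/usr"))     // true
	fmt.Println(pt.ContainsMountpoint("/var/log")) // false
}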
@@ -1,907 +0,0 @@
package rhel8_test

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/osbuild/osbuild-composer/internal/blueprint"
	"github.com/osbuild/osbuild-composer/internal/distro"
	"github.com/osbuild/osbuild-composer/internal/distro/distro_test_common"
	"github.com/osbuild/osbuild-composer/internal/distro/rhel8"
	"github.com/osbuild/osbuild-composer/internal/platform"
)

type rhelFamilyDistro struct {
	name   string
	distro distro.Distro
}

var rhelFamilyDistros = []rhelFamilyDistro{
	{
		name:   "rhel",
		distro: rhel8.New(),
	},
}

func TestFilenameFromType(t *testing.T) {
|
||||
type args struct {
|
||||
outputFormat string
|
||||
}
|
||||
type wantResult struct {
|
||||
filename string
|
||||
mimeType string
|
||||
wantErr bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want wantResult
|
||||
}{
|
||||
{
|
||||
name: "ami",
|
||||
args: args{"ami"},
|
||||
want: wantResult{
|
||||
filename: "image.raw",
|
||||
mimeType: "application/octet-stream",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ec2",
|
||||
args: args{"ec2"},
|
||||
want: wantResult{
|
||||
filename: "image.raw.xz",
|
||||
mimeType: "application/xz",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ec2-ha",
|
||||
args: args{"ec2-ha"},
|
||||
want: wantResult{
|
||||
filename: "image.raw.xz",
|
||||
mimeType: "application/xz",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ec2-sap",
|
||||
args: args{"ec2-sap"},
|
||||
want: wantResult{
|
||||
filename: "image.raw.xz",
|
||||
mimeType: "application/xz",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "qcow2",
|
||||
args: args{"qcow2"},
|
||||
want: wantResult{
|
||||
filename: "disk.qcow2",
|
||||
mimeType: "application/x-qemu-disk",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "openstack",
|
||||
args: args{"openstack"},
|
||||
want: wantResult{
|
||||
filename: "disk.qcow2",
|
||||
mimeType: "application/x-qemu-disk",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "vhd",
|
||||
args: args{"vhd"},
|
||||
want: wantResult{
|
||||
filename: "disk.vhd",
|
||||
mimeType: "application/x-vhd",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "azure-rhui",
|
||||
args: args{"azure-rhui"},
|
||||
want: wantResult{
|
||||
filename: "disk.vhd.xz",
|
||||
mimeType: "application/xz",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "azure-sap-rhui",
|
||||
args: args{"azure-sap-rhui"},
|
||||
want: wantResult{
|
||||
filename: "disk.vhd.xz",
|
||||
mimeType: "application/xz",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "vmdk",
|
||||
args: args{"vmdk"},
|
||||
want: wantResult{
|
||||
filename: "disk.vmdk",
|
||||
mimeType: "application/x-vmdk",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ova",
|
||||
args: args{"ova"},
|
||||
want: wantResult{
|
||||
filename: "image.ova",
|
||||
mimeType: "application/ovf",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "tar",
|
||||
args: args{"tar"},
|
||||
want: wantResult{
|
||||
filename: "root.tar.xz",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "image-installer",
|
||||
args: args{"image-installer"},
|
||||
want: wantResult{
|
||||
filename: "installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-commit",
|
||||
args: args{"edge-commit"},
|
||||
want: wantResult{
|
||||
filename: "commit.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
// Alias
|
||||
{
|
||||
name: "rhel-edge-commit",
|
||||
args: args{"rhel-edge-commit"},
|
||||
want: wantResult{
|
||||
filename: "commit.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-container",
|
||||
args: args{"edge-container"},
|
||||
want: wantResult{
|
||||
filename: "container.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
// Alias
|
||||
{
|
||||
name: "rhel-edge-container",
|
||||
args: args{"rhel-edge-container"},
|
||||
want: wantResult{
|
||||
filename: "container.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-installer",
|
||||
args: args{"edge-installer"},
|
||||
want: wantResult{
|
||||
filename: "installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
// Alias
|
||||
{
|
||||
name: "rhel-edge-installer",
|
||||
args: args{"rhel-edge-installer"},
|
||||
want: wantResult{
|
||||
filename: "installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gce",
|
||||
args: args{"gce"},
|
||||
want: wantResult{
|
||||
filename: "image.tar.gz",
|
||||
mimeType: "application/gzip",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gce-rhui",
|
||||
args: args{"gce-rhui"},
|
||||
want: wantResult{
|
||||
filename: "image.tar.gz",
|
||||
mimeType: "application/gzip",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-output-type",
|
||||
args: args{"foobar"},
|
||||
want: wantResult{wantErr: true},
|
||||
},
|
||||
}
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dist := dist.distro
|
||||
arch, _ := dist.GetArch("x86_64")
|
||||
imgType, err := arch.GetImageType(tt.args.outputFormat)
|
||||
if (err != nil) != tt.want.wantErr {
|
||||
t.Errorf("Arch.GetImageType() error = %v, wantErr %v", err, tt.want.wantErr)
|
||||
return
|
||||
}
|
||||
if !tt.want.wantErr {
|
||||
gotFilename := imgType.Filename()
|
||||
gotMIMEType := imgType.MIMEType()
|
||||
if gotFilename != tt.want.filename {
|
||||
t.Errorf("ImageType.Filename() got = %v, want %v", gotFilename, tt.want.filename)
|
||||
}
|
||||
if gotMIMEType != tt.want.mimeType {
|
||||
t.Errorf("ImageType.MIMEType() got1 = %v, want %v", gotMIMEType, tt.want.mimeType)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageType_BuildPackages(t *testing.T) {
|
||||
x8664BuildPackages := []string{
|
||||
"dnf",
|
||||
"dosfstools",
|
||||
"e2fsprogs",
|
||||
"grub2-efi-x64",
|
||||
"grub2-pc",
|
||||
"policycoreutils",
|
||||
"shim-x64",
|
||||
"systemd",
|
||||
"tar",
|
||||
"qemu-img",
|
||||
"xz",
|
||||
}
|
||||
aarch64BuildPackages := []string{
|
||||
"dnf",
|
||||
"dosfstools",
|
||||
"e2fsprogs",
|
||||
"policycoreutils",
|
||||
"qemu-img",
|
||||
"systemd",
|
||||
"tar",
|
||||
"xz",
|
||||
}
|
||||
buildPackages := map[string][]string{
|
||||
"x86_64": x8664BuildPackages,
|
||||
"aarch64": aarch64BuildPackages,
|
||||
}
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
d := dist.distro
|
||||
for _, archLabel := range d.ListArches() {
|
||||
archStruct, err := d.GetArch(archLabel)
|
||||
if assert.NoErrorf(t, err, "d.GetArch(%v) returned err = %v; expected nil", archLabel, err) {
|
||||
continue
|
||||
}
|
||||
for _, itLabel := range archStruct.ListImageTypes() {
|
||||
itStruct, err := archStruct.GetImageType(itLabel)
|
||||
if assert.NoErrorf(t, err, "d.GetArch(%v) returned err = %v; expected nil", archLabel, err) {
|
||||
continue
|
||||
}
|
||||
manifest, _, err := itStruct.Manifest(&blueprint.Blueprint{}, distro.ImageOptions{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
buildPkgs := manifest.GetPackageSetChains()["build"]
|
||||
assert.NotNil(t, buildPkgs)
|
||||
assert.Len(t, buildPkgs, 1)
|
||||
assert.ElementsMatch(t, buildPackages[archLabel], buildPkgs[0].Include)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageType_Name(t *testing.T) {
|
||||
imgMap := []struct {
|
||||
arch string
|
||||
imgNames []string
|
||||
}{
|
||||
{
|
||||
arch: "x86_64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"vhd",
|
||||
"azure-rhui",
|
||||
"azure-sap-rhui",
|
||||
"azure-eap7-rhui",
|
||||
"vmdk",
|
||||
"ova",
|
||||
"ami",
|
||||
"ec2",
|
||||
"ec2-ha",
|
||||
"ec2-sap",
|
||||
"gce",
|
||||
"gce-rhui",
|
||||
"edge-commit",
|
||||
"edge-container",
|
||||
"edge-installer",
|
||||
"tar",
|
||||
"image-installer",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "aarch64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"vhd",
|
||||
"azure-rhui",
|
||||
"ami",
|
||||
"ec2",
|
||||
"edge-commit",
|
||||
"edge-container",
|
||||
"tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "ppc64le",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "s390x",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"tar",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, mapping := range imgMap {
|
||||
if mapping.arch == platform.ARCH_S390X.String() && dist.name == "centos" {
|
||||
continue
|
||||
}
|
||||
arch, err := dist.distro.GetArch(mapping.arch)
|
||||
if assert.NoError(t, err) {
|
||||
for _, imgName := range mapping.imgNames {
|
||||
if imgName == "edge-commit" && dist.name == "centos" {
|
||||
continue
|
||||
}
|
||||
imgType, err := arch.GetImageType(imgName)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equalf(t, imgName, imgType.Name(), "arch: %s", mapping.arch)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageTypeAliases(t *testing.T) {
|
||||
type args struct {
|
||||
imageTypeAliases []string
|
||||
}
|
||||
type wantResult struct {
|
||||
imageTypeName string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want wantResult
|
||||
}{
|
||||
{
|
||||
name: "edge-commit aliases",
|
||||
args: args{
|
||||
imageTypeAliases: []string{"rhel-edge-commit"},
|
||||
},
|
||||
want: wantResult{
|
||||
imageTypeName: "edge-commit",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-container aliases",
|
||||
args: args{
|
||||
imageTypeAliases: []string{"rhel-edge-container"},
|
||||
},
|
||||
want: wantResult{
|
||||
imageTypeName: "edge-container",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-installer aliases",
|
||||
args: args{
|
||||
imageTypeAliases: []string{"rhel-edge-installer"},
|
||||
},
|
||||
want: wantResult{
|
||||
imageTypeName: "edge-installer",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dist := dist.distro
|
||||
for _, archName := range dist.ListArches() {
|
||||
t.Run(archName, func(t *testing.T) {
|
||||
arch, err := dist.GetArch(archName)
|
||||
require.Nilf(t, err,
|
||||
"failed to get architecture '%s', previously listed as supported for the distro '%s'",
|
||||
archName, dist.Name())
|
||||
// Test image type aliases only if the aliased image type is supported for the arch
|
||||
if _, err = arch.GetImageType(tt.want.imageTypeName); err != nil {
|
||||
t.Skipf("aliased image type '%s' is not supported for architecture '%s'",
|
||||
tt.want.imageTypeName, archName)
|
||||
}
|
||||
for _, alias := range tt.args.imageTypeAliases {
|
||||
t.Run(fmt.Sprintf("'%s' alias for image type '%s'", alias, tt.want.imageTypeName),
|
||||
func(t *testing.T) {
|
||||
gotImage, err := arch.GetImageType(alias)
|
||||
require.Nilf(t, err, "arch.GetImageType() for image type alias '%s' failed: %v",
|
||||
alias, err)
|
||||
assert.Equalf(t, tt.want.imageTypeName, gotImage.Name(),
|
||||
"got unexpected image type name for alias '%s'. got = %s, want = %s",
|
||||
alias, tt.want.imageTypeName, gotImage.Name())
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
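The alias cases above expect "rhel-edge-commit", "rhel-edge-container" and "rhel-edge-installer" to resolve to the corresponding "edge-*" image types. A minimal sketch of that lookup (hypothetical map and helper, not the distro package's actual resolution logic):

package main

import "fmt"

// Hypothetical alias table for illustration; the values mirror the pairs
// exercised by TestImageTypeAliases.
var imageTypeAliases = map[string]string{
	"rhel-edge-commit":    "edge-commit",
	"rhel-edge-container": "edge-container",
	"rhel-edge-installer": "edge-installer",
}

// resolveImageType returns the canonical name for an alias, or the input
// unchanged when it is not an alias.
func resolveImageType(name string) string {
	if canonical, ok := imageTypeAliases[name]; ok {
		return canonical
	}
	return name
}

func main() {
	fmt.Println(resolveImageType("rhel-edge-commit")) // edge-commit
	fmt.Println(resolveImageType("qcow2"))            // qcow2
}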
||||
// Check that Manifest() function returns an error for unsupported
|
||||
// configurations.
|
||||
func TestDistro_ManifestError(t *testing.T) {
|
||||
// Currently, the only unsupported configuration is OSTree commit types
|
||||
// with Kernel boot options
|
||||
r8distro := rhel8.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Kernel: &blueprint.KernelCustomization{
|
||||
Append: "debug",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, archName := range r8distro.ListArches() {
|
||||
arch, _ := r8distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
imgOpts := distro.ImageOptions{
|
||||
Size: imgType.Size(0),
|
||||
}
|
||||
_, _, err := imgType.Manifest(&bp, imgOpts, nil, 0)
|
||||
if imgTypeName == "edge-commit" || imgTypeName == "edge-container" {
|
||||
assert.EqualError(t, err, "kernel boot parameter customizations are not supported for ostree types")
|
||||
} else if imgTypeName == "edge-raw-image" {
|
||||
assert.EqualError(t, err, "edge raw images require specifying a URL from which to retrieve the OSTree commit")
|
||||
} else if imgTypeName == "edge-installer" || imgTypeName == "edge-simplified-installer" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("boot ISO image type \"%s\" requires specifying a URL from which to retrieve the OSTree commit", imgTypeName))
|
||||
} else if imgTypeName == "azure-eap7-rhui" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("image type \"%s\" does not support customizations", imgTypeName))
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestArchitecture_ListImageTypes(t *testing.T) {
|
||||
imgMap := []struct {
|
||||
arch string
|
||||
imgNames []string
|
||||
rhelAdditionalImageTypes []string
|
||||
}{
|
||||
{
|
||||
arch: "x86_64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"vhd",
|
||||
"azure-rhui",
|
||||
"azure-sap-rhui",
|
||||
"azure-eap7-rhui",
|
||||
"vmdk",
|
||||
"ova",
|
||||
"ami",
|
||||
"ec2",
|
||||
"ec2-ha",
|
||||
"ec2-sap",
|
||||
"gce",
|
||||
"gce-rhui",
|
||||
"edge-commit",
|
||||
"edge-container",
|
||||
"edge-installer",
|
||||
"edge-raw-image",
|
||||
"edge-simplified-installer",
|
||||
"tar",
|
||||
"image-installer",
|
||||
"oci",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "aarch64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"vhd",
|
||||
"azure-rhui",
|
||||
"ami",
|
||||
"ec2",
|
||||
"edge-commit",
|
||||
"edge-container",
|
||||
"edge-installer",
|
||||
"edge-simplified-installer",
|
||||
"edge-raw-image",
|
||||
"tar",
|
||||
"image-installer",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "ppc64le",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "s390x",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"tar",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, mapping := range imgMap {
|
||||
arch, err := dist.distro.GetArch(mapping.arch)
|
||||
require.NoError(t, err)
|
||||
imageTypes := arch.ListImageTypes()
|
||||
|
||||
var expectedImageTypes []string
|
||||
expectedImageTypes = append(expectedImageTypes, mapping.imgNames...)
|
||||
if dist.name == "rhel" {
|
||||
expectedImageTypes = append(expectedImageTypes, mapping.rhelAdditionalImageTypes...)
|
||||
}
|
||||
|
||||
require.ElementsMatch(t, expectedImageTypes, imageTypes)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRHEL8_ListArches(t *testing.T) {
|
||||
arches := rhel8.New().ListArches()
|
||||
assert.Equal(t, []string{"aarch64", "ppc64le", "s390x", "x86_64"}, arches)
|
||||
}
|
||||
|
||||
func TestRHEL8_GetArch(t *testing.T) {
|
||||
arches := []struct {
|
||||
name string
|
||||
errorExpected bool
|
||||
errorExpectedInCentos bool
|
||||
}{
|
||||
{
|
||||
name: "x86_64",
|
||||
},
|
||||
{
|
||||
name: "aarch64",
|
||||
},
|
||||
{
|
||||
name: "ppc64le",
|
||||
},
|
||||
{
|
||||
name: "s390x",
|
||||
},
|
||||
{
|
||||
name: "foo-arch",
|
||||
errorExpected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, a := range arches {
|
||||
actualArch, err := dist.distro.GetArch(a.name)
|
||||
if a.errorExpected || (a.errorExpectedInCentos && dist.name == "centos") {
|
||||
assert.Nil(t, actualArch)
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.Equal(t, a.name, actualArch.Name())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRhel8_Name(t *testing.T) {
|
||||
distro := rhel8.New()
|
||||
assert.Equal(t, "rhel-8", distro.Name())
|
||||
}
|
||||
|
||||
func TestRhel8_ModulePlatformID(t *testing.T) {
|
||||
distro := rhel8.New()
|
||||
assert.Equal(t, "platform:el8", distro.ModulePlatformID())
|
||||
}
|
||||
|
||||
func TestRhel86_KernelOption(t *testing.T) {
|
||||
distro_test_common.TestDistro_KernelOption(t, rhel8.New())
|
||||
}
|
||||
|
||||
func TestRhel8_OSTreeOptions(t *testing.T) {
|
||||
distro_test_common.TestDistro_OSTreeOptions(t, rhel8.New())
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemManifestError(t *testing.T) {
|
||||
r8distro := rhel8.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/etc",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
unsupported := map[string]bool{
|
||||
"edge-installer": true,
|
||||
"edge-simplified-installer": true,
|
||||
"edge-raw-image": true,
|
||||
"azure-eap7-rhui": true,
|
||||
}
|
||||
for _, archName := range r8distro.ListArches() {
|
||||
arch, _ := r8distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if imgTypeName == "edge-commit" || imgTypeName == "edge-container" {
|
||||
assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
|
||||
} else if unsupported[imgTypeName] {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.EqualError(t, err, "The following custom mountpoints are not supported [\"/etc\"]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_TestRootMountPoint(t *testing.T) {
|
||||
r8distro := rhel8.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
unsupported := map[string]bool{
|
||||
"edge-installer": true,
|
||||
"edge-simplified-installer": true,
|
||||
"edge-raw-image": true,
|
||||
"azure-eap7-rhui": true,
|
||||
}
|
||||
for _, archName := range r8distro.ListArches() {
|
||||
arch, _ := r8distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if imgTypeName == "edge-commit" || imgTypeName == "edge-container" {
|
||||
assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
|
||||
} else if unsupported[imgTypeName] {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemSubDirectories(t *testing.T) {
|
||||
r8distro := rhel8.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/log",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/log/audit",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
unsupported := map[string]bool{
|
||||
"edge-commit": true,
|
||||
"edge-container": true,
|
||||
"edge-installer": true,
|
||||
"edge-simplified-installer": true,
|
||||
"edge-raw-image": true,
|
||||
"azure-eap7-rhui": true,
|
||||
}
|
||||
for _, archName := range r8distro.ListArches() {
|
||||
arch, _ := r8distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if unsupported[imgTypeName] {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_MountpointsWithArbitraryDepthAllowed(t *testing.T) {
|
||||
r8distro := rhel8.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b/c",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b/c/d",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
unsupported := map[string]bool{
|
||||
"edge-commit": true,
|
||||
"edge-container": true,
|
||||
"edge-installer": true,
|
||||
"edge-simplified-installer": true,
|
||||
"edge-raw-image": true,
|
||||
"azure-eap7-rhui": true,
|
||||
}
|
||||
for _, archName := range r8distro.ListArches() {
|
||||
arch, _ := r8distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if unsupported[imgTypeName] {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_DirtyMountpointsNotAllowed(t *testing.T) {
|
||||
r8distro := rhel8.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "//",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var//",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var//log/audit/",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
unsupported := map[string]bool{
|
||||
"edge-commit": true,
|
||||
"edge-container": true,
|
||||
"edge-installer": true,
|
||||
"edge-simplified-installer": true,
|
||||
"edge-raw-image": true,
|
||||
"azure-eap7-rhui": true,
|
||||
}
|
||||
for _, archName := range r8distro.ListArches() {
|
||||
arch, _ := r8distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if unsupported[imgTypeName] {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.EqualError(t, err, "The following custom mountpoints are not supported [\"//\" \"/var//\" \"/var//log/audit/\"]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemPatternMatching(t *testing.T) {
|
||||
r8distro := rhel8.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/variable",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/variable/log/audit",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
unsupported := map[string]bool{
|
||||
"edge-installer": true,
|
||||
"edge-simplified-installer": true,
|
||||
"edge-raw-image": true,
|
||||
"azure-eap7-rhui": true,
|
||||
}
|
||||
for _, archName := range r8distro.ListArches() {
|
||||
arch, _ := r8distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if imgTypeName == "edge-commit" || imgTypeName == "edge-container" {
|
||||
assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
|
||||
} else if unsupported[imgTypeName] {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.EqualError(t, err, "The following custom mountpoints are not supported [\"/variable\" \"/variable/log/audit\"]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_CustomUsrPartitionNotLargeEnough(t *testing.T) {
|
||||
r8distro := rhel8.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/usr",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
unsupported := map[string]bool{
|
||||
"edge-installer": true,
|
||||
"edge-simplified-installer": true,
|
||||
"edge-raw-image": true,
|
||||
"azure-eap7-rhui": true,
|
||||
}
|
||||
for _, archName := range r8distro.ListArches() {
|
||||
arch, _ := r8distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if imgTypeName == "edge-commit" || imgTypeName == "edge-container" {
|
||||
assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
|
||||
} else if unsupported[imgTypeName] {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,848 +0,0 @@
package rhel9_test

import (
	"fmt"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/osbuild/osbuild-composer/internal/blueprint"
	"github.com/osbuild/osbuild-composer/internal/distro"
	"github.com/osbuild/osbuild-composer/internal/distro/distro_test_common"
	"github.com/osbuild/osbuild-composer/internal/distro/rhel9"
	"github.com/osbuild/osbuild-composer/internal/platform"
)

type rhelFamilyDistro struct {
	name   string
	distro distro.Distro
}

var rhelFamilyDistros = []rhelFamilyDistro{
	{
		name:   "rhel",
		distro: rhel9.New(),
	},
}

func TestFilenameFromType(t *testing.T) {
|
||||
type args struct {
|
||||
outputFormat string
|
||||
}
|
||||
type wantResult struct {
|
||||
filename string
|
||||
mimeType string
|
||||
wantErr bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want wantResult
|
||||
}{
|
||||
{
|
||||
name: "ami",
|
||||
args: args{"ami"},
|
||||
want: wantResult{
|
||||
filename: "image.raw",
|
||||
mimeType: "application/octet-stream",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ec2",
|
||||
args: args{"ec2"},
|
||||
want: wantResult{
|
||||
filename: "image.raw.xz",
|
||||
mimeType: "application/xz",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ec2-ha",
|
||||
args: args{"ec2-ha"},
|
||||
want: wantResult{
|
||||
filename: "image.raw.xz",
|
||||
mimeType: "application/xz",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ec2-sap",
|
||||
args: args{"ec2-sap"},
|
||||
want: wantResult{
|
||||
filename: "image.raw.xz",
|
||||
mimeType: "application/xz",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "qcow2",
|
||||
args: args{"qcow2"},
|
||||
want: wantResult{
|
||||
filename: "disk.qcow2",
|
||||
mimeType: "application/x-qemu-disk",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "openstack",
|
||||
args: args{"openstack"},
|
||||
want: wantResult{
|
||||
filename: "disk.qcow2",
|
||||
mimeType: "application/x-qemu-disk",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "vhd",
|
||||
args: args{"vhd"},
|
||||
want: wantResult{
|
||||
filename: "disk.vhd",
|
||||
mimeType: "application/x-vhd",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "azure-rhui",
|
||||
args: args{"azure-rhui"},
|
||||
want: wantResult{
|
||||
filename: "disk.vhd.xz",
|
||||
mimeType: "application/xz",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "vmdk",
|
||||
args: args{"vmdk"},
|
||||
want: wantResult{
|
||||
filename: "disk.vmdk",
|
||||
mimeType: "application/x-vmdk",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ova",
|
||||
args: args{"ova"},
|
||||
want: wantResult{
|
||||
filename: "image.ova",
|
||||
mimeType: "application/ovf",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "tar",
|
||||
args: args{"tar"},
|
||||
want: wantResult{
|
||||
filename: "root.tar.xz",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "image-installer",
|
||||
args: args{"image-installer"},
|
||||
want: wantResult{
|
||||
filename: "installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-commit",
|
||||
args: args{"edge-commit"},
|
||||
want: wantResult{
|
||||
filename: "commit.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
// Alias
|
||||
{
|
||||
name: "rhel-edge-commit",
|
||||
args: args{"rhel-edge-commit"},
|
||||
want: wantResult{
|
||||
filename: "commit.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-container",
|
||||
args: args{"edge-container"},
|
||||
want: wantResult{
|
||||
filename: "container.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
// Alias
|
||||
{
|
||||
name: "rhel-edge-container",
|
||||
args: args{"rhel-edge-container"},
|
||||
want: wantResult{
|
||||
filename: "container.tar",
|
||||
mimeType: "application/x-tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-installer",
|
||||
args: args{"edge-installer"},
|
||||
want: wantResult{
|
||||
filename: "installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
// Alias
|
||||
{
|
||||
name: "rhel-edge-installer",
|
||||
args: args{"rhel-edge-installer"},
|
||||
want: wantResult{
|
||||
filename: "installer.iso",
|
||||
mimeType: "application/x-iso9660-image",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gce",
|
||||
args: args{"gce"},
|
||||
want: wantResult{
|
||||
filename: "image.tar.gz",
|
||||
mimeType: "application/gzip",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-ami",
|
||||
args: args{"edge-ami"},
|
||||
want: wantResult{
|
||||
filename: "image.raw",
|
||||
mimeType: "application/octet-stream",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-output-type",
|
||||
args: args{"foobar"},
|
||||
want: wantResult{wantErr: true},
|
||||
},
|
||||
}
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dist := dist.distro
|
||||
arch, _ := dist.GetArch("x86_64")
|
||||
imgType, err := arch.GetImageType(tt.args.outputFormat)
|
||||
if (err != nil) != tt.want.wantErr {
|
||||
t.Errorf("Arch.GetImageType() error = %v, wantErr %v", err, tt.want.wantErr)
|
||||
return
|
||||
}
|
||||
if !tt.want.wantErr {
|
||||
gotFilename := imgType.Filename()
|
||||
gotMIMEType := imgType.MIMEType()
|
||||
if gotFilename != tt.want.filename {
|
||||
t.Errorf("ImageType.Filename() got = %v, want %v", gotFilename, tt.want.filename)
|
||||
}
|
||||
if gotMIMEType != tt.want.mimeType {
|
||||
t.Errorf("ImageType.MIMEType() got1 = %v, want %v", gotMIMEType, tt.want.mimeType)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageType_BuildPackages(t *testing.T) {
|
||||
x8664BuildPackages := []string{
|
||||
"dnf",
|
||||
"dosfstools",
|
||||
"e2fsprogs",
|
||||
"grub2-efi-x64",
|
||||
"grub2-pc",
|
||||
"policycoreutils",
|
||||
"shim-x64",
|
||||
"systemd",
|
||||
"tar",
|
||||
"qemu-img",
|
||||
"xz",
|
||||
}
|
||||
aarch64BuildPackages := []string{
|
||||
"dnf",
|
||||
"dosfstools",
|
||||
"e2fsprogs",
|
||||
"policycoreutils",
|
||||
"qemu-img",
|
||||
"systemd",
|
||||
"tar",
|
||||
"xz",
|
||||
}
|
||||
buildPackages := map[string][]string{
|
||||
"x86_64": x8664BuildPackages,
|
||||
"aarch64": aarch64BuildPackages,
|
||||
}
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
d := dist.distro
|
||||
for _, archLabel := range d.ListArches() {
|
||||
archStruct, err := d.GetArch(archLabel)
|
||||
if assert.NoErrorf(t, err, "d.GetArch(%v) returned err = %v; expected nil", archLabel, err) {
|
||||
continue
|
||||
}
|
||||
for _, itLabel := range archStruct.ListImageTypes() {
|
||||
itStruct, err := archStruct.GetImageType(itLabel)
|
||||
if assert.NoErrorf(t, err, "d.GetArch(%v) returned err = %v; expected nil", archLabel, err) {
|
||||
continue
|
||||
}
|
||||
manifest, _, err := itStruct.Manifest(&blueprint.Blueprint{}, distro.ImageOptions{}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
buildPkgs := manifest.GetPackageSetChains()["build"]
|
||||
assert.NotNil(t, buildPkgs)
|
||||
assert.Len(t, buildPkgs, 1)
|
||||
assert.ElementsMatch(t, buildPackages[archLabel], buildPkgs[0].Include)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageType_Name(t *testing.T) {
|
||||
imgMap := []struct {
|
||||
arch string
|
||||
imgNames []string
|
||||
}{
|
||||
{
|
||||
arch: "x86_64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"vhd",
|
||||
"azure-rhui",
|
||||
"vmdk",
|
||||
"ova",
|
||||
"ami",
|
||||
"ec2",
|
||||
"ec2-ha",
|
||||
"ec2-sap",
|
||||
"edge-commit",
|
||||
"edge-container",
|
||||
"edge-installer",
|
||||
"gce",
|
||||
"tar",
|
||||
"image-installer",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "aarch64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"ami",
|
||||
"ec2",
|
||||
"edge-commit",
|
||||
"edge-container",
|
||||
"tar",
|
||||
"image-installer",
|
||||
"vhd",
|
||||
"azure-rhui",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "ppc64le",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "s390x",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"tar",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, mapping := range imgMap {
|
||||
if mapping.arch == platform.ARCH_S390X.String() && dist.name == "centos" {
|
||||
continue
|
||||
}
|
||||
arch, err := dist.distro.GetArch(mapping.arch)
|
||||
if assert.NoError(t, err) {
|
||||
for _, imgName := range mapping.imgNames {
|
||||
if imgName == "edge-commit" && dist.name == "centos" {
|
||||
continue
|
||||
}
|
||||
imgType, err := arch.GetImageType(imgName)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equalf(t, imgName, imgType.Name(), "arch: %s", mapping.arch)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageTypeAliases(t *testing.T) {
|
||||
type args struct {
|
||||
imageTypeAliases []string
|
||||
}
|
||||
type wantResult struct {
|
||||
imageTypeName string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want wantResult
|
||||
}{
|
||||
{
|
||||
name: "edge-commit aliases",
|
||||
args: args{
|
||||
imageTypeAliases: []string{"rhel-edge-commit"},
|
||||
},
|
||||
want: wantResult{
|
||||
imageTypeName: "edge-commit",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-container aliases",
|
||||
args: args{
|
||||
imageTypeAliases: []string{"rhel-edge-container"},
|
||||
},
|
||||
want: wantResult{
|
||||
imageTypeName: "edge-container",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "edge-installer aliases",
|
||||
args: args{
|
||||
imageTypeAliases: []string{"rhel-edge-installer"},
|
||||
},
|
||||
want: wantResult{
|
||||
imageTypeName: "edge-installer",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dist := dist.distro
|
||||
for _, archName := range dist.ListArches() {
|
||||
t.Run(archName, func(t *testing.T) {
|
||||
arch, err := dist.GetArch(archName)
|
||||
require.Nilf(t, err,
|
||||
"failed to get architecture '%s', previously listed as supported for the distro '%s'",
|
||||
archName, dist.Name())
|
||||
// Test image type aliases only if the aliased image type is supported for the arch
|
||||
if _, err = arch.GetImageType(tt.want.imageTypeName); err != nil {
|
||||
t.Skipf("aliased image type '%s' is not supported for architecture '%s'",
|
||||
tt.want.imageTypeName, archName)
|
||||
}
|
||||
for _, alias := range tt.args.imageTypeAliases {
|
||||
t.Run(fmt.Sprintf("'%s' alias for image type '%s'", alias, tt.want.imageTypeName),
|
||||
func(t *testing.T) {
|
||||
gotImage, err := arch.GetImageType(alias)
|
||||
require.Nilf(t, err, "arch.GetImageType() for image type alias '%s' failed: %v",
|
||||
alias, err)
|
||||
assert.Equalf(t, tt.want.imageTypeName, gotImage.Name(),
|
||||
"got unexpected image type name for alias '%s'. got = %s, want = %s",
|
||||
alias, tt.want.imageTypeName, gotImage.Name())
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Check that Manifest() function returns an error for unsupported
|
||||
// configurations.
|
||||
func TestDistro_ManifestError(t *testing.T) {
|
||||
// Currently, the only unsupported configuration is OSTree commit types
|
||||
// with Kernel boot options
|
||||
r9distro := rhel9.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Kernel: &blueprint.KernelCustomization{
|
||||
Append: "debug",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, archName := range r9distro.ListArches() {
|
||||
arch, _ := r9distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
imgOpts := distro.ImageOptions{
|
||||
Size: imgType.Size(0),
|
||||
}
|
||||
_, _, err := imgType.Manifest(&bp, imgOpts, nil, 0)
|
||||
if imgTypeName == "edge-commit" || imgTypeName == "edge-container" {
|
||||
assert.EqualError(t, err, "kernel boot parameter customizations are not supported for ostree types")
|
||||
} else if imgTypeName == "edge-raw-image" || imgTypeName == "edge-ami" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("\"%s\" images require specifying a URL from which to retrieve the OSTree commit", imgTypeName))
|
||||
} else if imgTypeName == "edge-installer" || imgTypeName == "edge-simplified-installer" {
|
||||
assert.EqualError(t, err, fmt.Sprintf("boot ISO image type \"%s\" requires specifying a URL from which to retrieve the OSTree commit", imgTypeName))
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestArchitecture_ListImageTypes(t *testing.T) {
|
||||
imgMap := []struct {
|
||||
arch string
|
||||
imgNames []string
|
||||
rhelAdditionalImageTypes []string
|
||||
}{
|
||||
{
|
||||
arch: "x86_64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"vhd",
|
||||
"azure-rhui",
|
||||
"vmdk",
|
||||
"ova",
|
||||
"ami",
|
||||
"ec2",
|
||||
"ec2-ha",
|
||||
"ec2-sap",
|
||||
"edge-commit",
|
||||
"edge-container",
|
||||
"edge-installer",
|
||||
"edge-raw-image",
|
||||
"edge-simplified-installer",
|
||||
"edge-ami",
|
||||
"gce",
|
||||
"gce-rhui",
|
||||
"tar",
|
||||
"image-installer",
|
||||
"oci",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "aarch64",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"openstack",
|
||||
"ami",
|
||||
"ec2",
|
||||
"edge-commit",
|
||||
"edge-container",
|
||||
"edge-installer",
|
||||
"edge-simplified-installer",
|
||||
"edge-raw-image",
|
||||
"edge-ami",
|
||||
"tar",
|
||||
"image-installer",
|
||||
"vhd",
|
||||
"azure-rhui",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "ppc64le",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"tar",
|
||||
},
|
||||
},
|
||||
{
|
||||
arch: "s390x",
|
||||
imgNames: []string{
|
||||
"qcow2",
|
||||
"tar",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, mapping := range imgMap {
|
||||
arch, err := dist.distro.GetArch(mapping.arch)
|
||||
require.NoError(t, err)
|
||||
imageTypes := arch.ListImageTypes()
|
||||
|
||||
var expectedImageTypes []string
|
||||
expectedImageTypes = append(expectedImageTypes, mapping.imgNames...)
|
||||
if dist.name == "rhel" {
|
||||
expectedImageTypes = append(expectedImageTypes, mapping.rhelAdditionalImageTypes...)
|
||||
}
|
||||
|
||||
require.ElementsMatch(t, expectedImageTypes, imageTypes)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRhel9_ListArches(t *testing.T) {
|
||||
arches := rhel9.New().ListArches()
|
||||
assert.Equal(t, []string{"aarch64", "ppc64le", "s390x", "x86_64"}, arches)
|
||||
}
|
||||
|
||||
func TestRhel9_GetArch(t *testing.T) {
|
||||
arches := []struct {
|
||||
name string
|
||||
errorExpected bool
|
||||
errorExpectedInCentos bool
|
||||
}{
|
||||
{
|
||||
name: "x86_64",
|
||||
},
|
||||
{
|
||||
name: "aarch64",
|
||||
},
|
||||
{
|
||||
name: "ppc64le",
|
||||
},
|
||||
{
|
||||
name: "s390x",
|
||||
},
|
||||
{
|
||||
name: "foo-arch",
|
||||
errorExpected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, dist := range rhelFamilyDistros {
|
||||
t.Run(dist.name, func(t *testing.T) {
|
||||
for _, a := range arches {
|
||||
actualArch, err := dist.distro.GetArch(a.name)
|
||||
if a.errorExpected || (a.errorExpectedInCentos && dist.name == "centos") {
|
||||
assert.Nil(t, actualArch)
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.Equal(t, a.name, actualArch.Name())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRhel9_Name(t *testing.T) {
|
||||
distro := rhel9.New()
|
||||
assert.Equal(t, "rhel-9", distro.Name())
|
||||
}
|
||||
|
||||
func TestRhel9_ModulePlatformID(t *testing.T) {
|
||||
distro := rhel9.New()
|
||||
assert.Equal(t, "platform:el9", distro.ModulePlatformID())
|
||||
}
|
||||
|
||||
func TestRhel9_KernelOption(t *testing.T) {
|
||||
distro_test_common.TestDistro_KernelOption(t, rhel9.New())
|
||||
}
|
||||
|
||||
func TestRhel9_OSTreeOptions(t *testing.T) {
|
||||
distro_test_common.TestDistro_OSTreeOptions(t, rhel9.New())
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemManifestError(t *testing.T) {
|
||||
r9distro := rhel9.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/etc",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r9distro.ListArches() {
|
||||
arch, _ := r9distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if imgTypeName == "edge-commit" || imgTypeName == "edge-container" {
|
||||
assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
|
||||
} else if imgTypeName == "edge-installer" || imgTypeName == "edge-simplified-installer" || imgTypeName == "edge-raw-image" || imgTypeName == "edge-ami" {
|
||||
continue
|
||||
} else {
|
||||
assert.EqualError(t, err, "The following custom mountpoints are not supported [\"/etc\"]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_TestRootMountPoint(t *testing.T) {
|
||||
r9distro := rhel9.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r9distro.ListArches() {
|
||||
arch, _ := r9distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if imgTypeName == "edge-commit" || imgTypeName == "edge-container" {
|
||||
assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
|
||||
} else if imgTypeName == "edge-installer" || imgTypeName == "edge-simplified-installer" || imgTypeName == "edge-raw-image" || imgTypeName == "edge-ami" {
|
||||
continue
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemSubDirectories(t *testing.T) {
|
||||
r9distro := rhel9.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/log",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/log/audit",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r9distro.ListArches() {
|
||||
arch, _ := r9distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if strings.HasPrefix(imgTypeName, "edge-") {
|
||||
continue
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_MountpointsWithArbitraryDepthAllowed(t *testing.T) {
|
||||
r9distro := rhel9.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b/c",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var/a/b/c/d",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r9distro.ListArches() {
|
||||
arch, _ := r9distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if strings.HasPrefix(imgTypeName, "edge-") {
|
||||
continue
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_DirtyMountpointsNotAllowed(t *testing.T) {
|
||||
r9distro := rhel9.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "//",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var//",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/var//log/audit/",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r9distro.ListArches() {
|
||||
arch, _ := r9distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if strings.HasPrefix(imgTypeName, "edge-") {
|
||||
continue
|
||||
} else {
|
||||
assert.EqualError(t, err, "The following custom mountpoints are not supported [\"//\" \"/var//\" \"/var//log/audit/\"]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_CustomFileSystemPatternMatching(t *testing.T) {
|
||||
r9distro := rhel9.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/variable",
|
||||
},
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/variable/log/audit",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r9distro.ListArches() {
|
||||
arch, _ := r9distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if imgTypeName == "edge-commit" || imgTypeName == "edge-container" {
|
||||
assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
|
||||
} else if imgTypeName == "edge-installer" || imgTypeName == "edge-simplified-installer" || imgTypeName == "edge-raw-image" || imgTypeName == "edge-ami" {
|
||||
continue
|
||||
} else {
|
||||
assert.EqualError(t, err, "The following custom mountpoints are not supported [\"/variable\" \"/variable/log/audit\"]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDistro_CustomUsrPartitionNotLargeEnough(t *testing.T) {
|
||||
r9distro := rhel9.New()
|
||||
bp := blueprint.Blueprint{
|
||||
Customizations: &blueprint.Customizations{
|
||||
Filesystem: []blueprint.FilesystemCustomization{
|
||||
{
|
||||
MinSize: 1024,
|
||||
Mountpoint: "/usr",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, archName := range r9distro.ListArches() {
|
||||
arch, _ := r9distro.GetArch(archName)
|
||||
for _, imgTypeName := range arch.ListImageTypes() {
|
||||
imgType, _ := arch.GetImageType(imgTypeName)
|
||||
_, _, err := imgType.Manifest(&bp, distro.ImageOptions{}, nil, 0)
|
||||
if imgTypeName == "edge-commit" || imgTypeName == "edge-container" {
|
||||
assert.EqualError(t, err, "Custom mountpoints are not supported for ostree types")
|
||||
} else if imgTypeName == "edge-installer" || imgTypeName == "edge-simplified-installer" || imgTypeName == "edge-raw-image" || imgTypeName == "edge-ami" {
|
||||
continue
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,122 +0,0 @@
|
|||
package distroregistry
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/distro"
|
||||
"github.com/osbuild/osbuild-composer/internal/distro/rhel8"
|
||||
)
|
||||
|
||||
// Test that all distros are registered properly and that Registry.List() works.
|
||||
func TestRegistry_List(t *testing.T) {
|
||||
// build expected distros
|
||||
var expected []string
|
||||
for _, supportedDistro := range supportedDistros {
|
||||
d := supportedDistro()
|
||||
expected = append(expected, d.Name())
|
||||
}
|
||||
|
||||
distros := NewDefault()
|
||||
|
||||
require.ElementsMatch(t, expected, distros.List(), "unexpected list of distros")
|
||||
}
|
||||
|
||||
func TestRegistry_GetDistro(t *testing.T) {
|
||||
distros := NewDefault()
|
||||
|
||||
t.Run("distro exists", func(t *testing.T) {
|
||||
expectedDistro := rhel8.New()
|
||||
require.Equal(t, expectedDistro.Name(), distros.GetDistro(expectedDistro.Name()).Name())
|
||||
})
|
||||
|
||||
t.Run("distro doesn't exist", func(t *testing.T) {
|
||||
require.Nil(t, distros.GetDistro("toucan-os"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestRegistry_mangleHostDistroName(t *testing.T) {
|
||||
|
||||
type args struct {
|
||||
name string
|
||||
isBeta bool
|
||||
isStream bool
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{"fedora-33", args{"fedora-33", false, false}, "fedora-33"},
|
||||
{"fedora-33 beta", args{"fedora-33", true, false}, "fedora-33-beta"},
|
||||
{"fedora-33 stream", args{"fedora-33", false, true}, "fedora-33"},
|
||||
{"fedora-33 beta stream", args{"fedora-33", true, true}, "fedora-33-beta"},
|
||||
|
||||
{"rhel-8", args{"rhel-8", false, false}, "rhel-8"},
|
||||
{"rhel-8 beta", args{"rhel-8", true, false}, "rhel-8-beta"},
|
||||
{"rhel-8 stream", args{"rhel-8", false, true}, "rhel-8"},
|
||||
{"rhel-8 beta stream", args{"rhel-8", true, true}, "rhel-8-beta"},
|
||||
|
||||
{"rhel-84", args{"rhel-84", false, false}, "rhel-8"},
|
||||
{"rhel-84 beta", args{"rhel-84", true, false}, "rhel-8-beta"},
|
||||
{"rhel-84 stream", args{"rhel-84", false, true}, "rhel-8"},
|
||||
{"rhel-84 beta stream", args{"rhel-84", true, true}, "rhel-8-beta"},
|
||||
|
||||
{"centos-8", args{"centos-8", false, false}, "centos-8"},
|
||||
{"centos-8 beta", args{"centos-8", true, false}, "centos-8-beta"},
|
||||
{"centos-8 stream", args{"centos-8", false, true}, "centos-stream-8"},
|
||||
{"centos-8 beta stream", args{"centos-8", true, true}, "centos-8-beta"},
|
||||
|
||||
{"rhel-90", args{"rhel-90", false, false}, "rhel-90"},
|
||||
{"rhel-90 beta", args{"rhel-90", true, false}, "rhel-90-beta"},
|
||||
{"rhel-90 stream", args{"rhel-90", false, true}, "rhel-90"},
|
||||
{"rhel-90 beta stream", args{"rhel-90", true, true}, "rhel-90-beta"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
mangledName := mangleHostDistroName(tt.args.name, tt.args.isBeta, tt.args.isStream)
|
||||
require.Equalf(
|
||||
t,
|
||||
tt.want,
|
||||
mangledName,
|
||||
"mangleHostDistroName() name:%s, isBeta:%s, isStream:%s =\nExpected: %s\nGot: %s\n",
|
||||
tt.args.name,
|
||||
tt.args.isBeta,
|
||||
tt.args.isStream,
|
||||
tt.want,
|
||||
mangledName,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegistry_FromHost(t *testing.T) {
|
||||
// expected distros
|
||||
var distros []distro.Distro
|
||||
for _, supportedDistro := range supportedDistros {
|
||||
distros = append(distros, supportedDistro())
|
||||
}
|
||||
|
||||
t.Run("host distro is nil", func(t *testing.T) {
|
||||
registry, err := New(nil, distros...)
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, registry)
|
||||
require.Nil(t, registry.FromHost())
|
||||
})
|
||||
|
||||
t.Run("host distro not nil", func(t *testing.T) {
|
||||
hostDistro := rhel8.New()
|
||||
fmt.Println(hostDistro.Name())
|
||||
registry, err := New(hostDistro, distros...)
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, registry)
|
||||
|
||||
gotDistro := registry.FromHost()
|
||||
require.NotNil(t, gotDistro)
|
||||
require.Equal(t, gotDistro.Name(), hostDistro.Name())
|
||||
})
|
||||
}
|
||||
|
|
@@ -9,7 +9,7 @@ import (
 	"sync"
 	"time"

-	"github.com/osbuild/osbuild-composer/internal/rpmmd"
+	"github.com/osbuild/images/pkg/rpmmd"

 	"github.com/gobwas/glob"
 )

@@ -9,7 +9,7 @@ import (
 	"testing"
 	"time"

-	"github.com/osbuild/osbuild-composer/internal/rpmmd"
+	"github.com/osbuild/images/pkg/rpmmd"

 	"github.com/stretchr/testify/assert"
 )

@@ -25,8 +25,8 @@ import (
 	"strings"
 	"time"

-	"github.com/osbuild/osbuild-composer/internal/rhsm"
-	"github.com/osbuild/osbuild-composer/internal/rpmmd"
+	"github.com/osbuild/images/pkg/rhsm"
+	"github.com/osbuild/images/pkg/rpmmd"
 )

 // BaseSolver defines the basic solver configuration without platform

@@ -8,9 +8,9 @@ import (
 	"strings"
 	"testing"

+	"github.com/osbuild/images/pkg/rpmmd"
 	"github.com/osbuild/osbuild-composer/internal/common"
 	"github.com/osbuild/osbuild-composer/internal/mocks/rpmrepo"
-	"github.com/osbuild/osbuild-composer/internal/rpmmd"
 	"github.com/stretchr/testify/assert"
 )

@@ -1,6 +1,6 @@
 package environment

-import "github.com/osbuild/osbuild-composer/internal/rpmmd"
+import "github.com/osbuild/images/pkg/rpmmd"

 type Environment interface {
 	GetPackages() []string

@@ -1,154 +0,0 @@
|
|||
package manifest
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/osbuild"
|
||||
"github.com/osbuild/osbuild-composer/internal/platform"
|
||||
"github.com/osbuild/osbuild-composer/internal/rpmmd"
|
||||
"github.com/osbuild/osbuild-composer/internal/runner"
|
||||
"github.com/osbuild/osbuild-composer/internal/subscription"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// NewTestOS returns a minimally populated OS struct for use in testing
|
||||
func NewTestOS() *OS {
|
||||
repos := []rpmmd.RepoConfig{}
|
||||
manifest := New()
|
||||
runner := &runner.Fedora{Version: 37}
|
||||
build := NewBuild(&manifest, runner, repos)
|
||||
build.Checkpoint()
|
||||
|
||||
// create an x86_64 platform with bios boot
|
||||
platform := &platform.X86{
|
||||
BIOS: true,
|
||||
}
|
||||
|
||||
os := NewOS(&manifest, build, platform, repos)
|
||||
packages := []rpmmd.PackageSpec{
|
||||
{Name: "pkg1", Checksum: "sha1:c02524e2bd19490f2a7167958f792262754c5f46"},
|
||||
}
|
||||
os.serializeStart(packages, nil, nil)
|
||||
|
||||
return os
|
||||
}
|
||||
|
||||
// CheckFirstBootStageOptions checks the Command strings
|
||||
func CheckFirstBootStageOptions(t *testing.T, stages []*osbuild.Stage, commands []string) {
|
||||
// Find the FirstBootStage
|
||||
for _, s := range stages {
|
||||
if s.Type == "org.osbuild.first-boot" {
|
||||
require.NotNil(t, s.Options)
|
||||
options, ok := s.Options.(*osbuild.FirstBootStageOptions)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, len(options.Commands), len(commands))
|
||||
|
||||
// Make sure the commands are the same
|
||||
for idx, cmd := range commands {
|
||||
assert.Equal(t, cmd, options.Commands[idx])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CheckPkgSetInclude makes sure the packages named in pkgs are all included
|
||||
func CheckPkgSetInclude(t *testing.T, pkgSetChain []rpmmd.PackageSet, pkgs []string) {
|
||||
|
||||
// Gather up all the includes
|
||||
var includes []string
|
||||
for _, ps := range pkgSetChain {
|
||||
includes = append(includes, ps.Include...)
|
||||
}
|
||||
|
||||
for _, p := range pkgs {
|
||||
assert.Contains(t, includes, p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubscriptionManagerCommands(t *testing.T) {
|
||||
os := NewTestOS()
|
||||
os.Subscription = &subscription.ImageOptions{
|
||||
Organization: "2040324",
|
||||
ActivationKey: "my-secret-key",
|
||||
ServerUrl: "subscription.rhsm.redhat.com",
|
||||
BaseUrl: "http://cdn.redhat.com/",
|
||||
}
|
||||
pipeline := os.serialize()
|
||||
CheckFirstBootStageOptions(t, pipeline.Stages, []string{
|
||||
"/usr/sbin/subscription-manager register --org=2040324 --activationkey=my-secret-key --serverurl subscription.rhsm.redhat.com --baseurl http://cdn.redhat.com/",
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubscriptionManagerInsightsCommands(t *testing.T) {
|
||||
os := NewTestOS()
|
||||
os.Subscription = &subscription.ImageOptions{
|
||||
Organization: "2040324",
|
||||
ActivationKey: "my-secret-key",
|
||||
ServerUrl: "subscription.rhsm.redhat.com",
|
||||
BaseUrl: "http://cdn.redhat.com/",
|
||||
Insights: true,
|
||||
}
|
||||
pipeline := os.serialize()
|
||||
CheckFirstBootStageOptions(t, pipeline.Stages, []string{
|
||||
"/usr/sbin/subscription-manager register --org=2040324 --activationkey=my-secret-key --serverurl subscription.rhsm.redhat.com --baseurl http://cdn.redhat.com/",
|
||||
"/usr/bin/insights-client --register",
|
||||
"restorecon -R /root/.gnupg",
|
||||
})
|
||||
}
|
||||
|
||||
func TestRhcInsightsCommands(t *testing.T) {
|
||||
os := NewTestOS()
|
||||
os.Subscription = &subscription.ImageOptions{
|
||||
Organization: "2040324",
|
||||
ActivationKey: "my-secret-key",
|
||||
ServerUrl: "subscription.rhsm.redhat.com",
|
||||
BaseUrl: "http://cdn.redhat.com/",
|
||||
Insights: false,
|
||||
Rhc: true,
|
||||
}
|
||||
pipeline := os.serialize()
|
||||
CheckFirstBootStageOptions(t, pipeline.Stages, []string{
|
||||
"/usr/bin/rhc connect -o=2040324 -a=my-secret-key --server subscription.rhsm.redhat.com",
|
||||
"restorecon -R /root/.gnupg",
|
||||
"/usr/sbin/semanage permissive --add rhcd_t",
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubscriptionManagerPackages(t *testing.T) {
|
||||
os := NewTestOS()
|
||||
os.Subscription = &subscription.ImageOptions{
|
||||
Organization: "2040324",
|
||||
ActivationKey: "my-secret-key",
|
||||
ServerUrl: "subscription.rhsm.redhat.com",
|
||||
BaseUrl: "http://cdn.redhat.com/",
|
||||
}
|
||||
|
||||
CheckPkgSetInclude(t, os.getPackageSetChain(DISTRO_NULL), []string{"subscription-manager"})
|
||||
}
|
||||
|
||||
func TestSubscriptionManagerInsightsPackages(t *testing.T) {
|
||||
os := NewTestOS()
|
||||
os.Subscription = &subscription.ImageOptions{
|
||||
Organization: "2040324",
|
||||
ActivationKey: "my-secret-key",
|
||||
ServerUrl: "subscription.rhsm.redhat.com",
|
||||
BaseUrl: "http://cdn.redhat.com/",
|
||||
Insights: true,
|
||||
}
|
||||
CheckPkgSetInclude(t, os.getPackageSetChain(DISTRO_NULL), []string{"subscription-manager", "insights-client"})
|
||||
}
|
||||
|
||||
func TestRhcInsightsPackages(t *testing.T) {
|
||||
os := NewTestOS()
|
||||
os.Subscription = &subscription.ImageOptions{
|
||||
Organization: "2040324",
|
||||
ActivationKey: "my-secret-key",
|
||||
ServerUrl: "subscription.rhsm.redhat.com",
|
||||
BaseUrl: "http://cdn.redhat.com/",
|
||||
Insights: false,
|
||||
Rhc: true,
|
||||
}
|
||||
CheckPkgSetInclude(t, os.getPackageSetChain(DISTRO_NULL), []string{"rhc", "subscription-manager", "insights-client"})
|
||||
}
|
||||
|
|
@@ -1,8 +1,8 @@
 package distro_mock

 import (
-	"github.com/osbuild/osbuild-composer/internal/distro/test_distro"
-	"github.com/osbuild/osbuild-composer/internal/distroregistry"
+	"github.com/osbuild/images/pkg/distro/test_distro"
+	"github.com/osbuild/images/pkg/distroregistry"
 )

 func NewDefaultRegistry() (*distroregistry.Registry, error) {

@@ -8,8 +8,8 @@ import (
 	"path/filepath"
 	"time"

+	"github.com/osbuild/images/pkg/rpmmd"
 	"github.com/osbuild/osbuild-composer/internal/dnfjson"
-	"github.com/osbuild/osbuild-composer/internal/rpmmd"
 )

 func generatePackageList() rpmmd.PackageList {

@@ -6,8 +6,8 @@ import (
 	"net/http/httptest"
 	"os"

+	"github.com/osbuild/images/pkg/rpmmd"
 	"github.com/osbuild/osbuild-composer/internal/common"
-	"github.com/osbuild/osbuild-composer/internal/rpmmd"
 )

 type testRepoServer struct {

@@ -1,16 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewAuthconfigStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.authconfig",
|
||||
Options: &AuthconfigStageOptions{},
|
||||
}
|
||||
actualStage := NewAuthconfigStage(&AuthconfigStageOptions{})
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
|
@@ -1,16 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewAuthselectStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.authselect",
|
||||
Options: &AuthselectStageOptions{},
|
||||
}
|
||||
actualStage := NewAuthselectStage(&AuthselectStageOptions{})
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
|
@@ -1,244 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewChownStage(t *testing.T) {
|
||||
stageOptions := &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"/etc/foobar": {
|
||||
User: "root",
|
||||
Group: int64(12345),
|
||||
Recursive: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.chown",
|
||||
Options: stageOptions,
|
||||
}
|
||||
actualStage := NewChownStage(stageOptions)
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestChownStageOptionsValidate(t *testing.T) {
|
||||
validPathOptions := ChownStagePathOptions{
|
||||
User: "root",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
options *ChownStageOptions
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
name: "empty-options",
|
||||
options: &ChownStageOptions{},
|
||||
},
|
||||
{
|
||||
name: "no-items",
|
||||
options: &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-item-path-1",
|
||||
options: &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"": validPathOptions,
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-item-path-2",
|
||||
options: &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"foobar": validPathOptions,
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-item-path-3",
|
||||
options: &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"/../foobar": validPathOptions,
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-item-path-4",
|
||||
options: &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"/etc/../foobar": validPathOptions,
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-item-path-5",
|
||||
options: &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"/etc/..": validPathOptions,
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-item-path-6",
|
||||
options: &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"../etc/foo/../bar": validPathOptions,
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "valid-item-path-1",
|
||||
options: &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"/etc/foobar": validPathOptions,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid-item-path-2",
|
||||
options: &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"/etc/foo/bar/baz": validPathOptions,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid-item-path-3",
|
||||
options: &ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"/etc": validPathOptions,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := tc.options.validate()
|
||||
if tc.err {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestChownStagePathOptionsValidate(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
options ChownStagePathOptions
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
name: "empty-options",
|
||||
options: ChownStagePathOptions{},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-user-string-1",
|
||||
options: ChownStagePathOptions{
|
||||
User: "",
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-user-string-2",
|
||||
options: ChownStagePathOptions{
|
||||
User: "r@@t",
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-user-id",
|
||||
options: ChownStagePathOptions{
|
||||
User: int64(-1),
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "valid-user-string",
|
||||
options: ChownStagePathOptions{
|
||||
User: "root",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid-user-id",
|
||||
options: ChownStagePathOptions{
|
||||
User: int64(0),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid-group-string-1",
|
||||
options: ChownStagePathOptions{
|
||||
Group: "",
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-group-string-2",
|
||||
options: ChownStagePathOptions{
|
||||
Group: "r@@t",
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-group-id",
|
||||
options: ChownStagePathOptions{
|
||||
Group: int64(-1),
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "valid-group-string",
|
||||
options: ChownStagePathOptions{
|
||||
Group: "root",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid-group-id",
|
||||
options: ChownStagePathOptions{
|
||||
Group: int64(0),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid-both-1",
|
||||
options: ChownStagePathOptions{
|
||||
User: "root",
|
||||
Group: int64(12345),
|
||||
Recursive: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid-both-2",
|
||||
options: ChownStagePathOptions{
|
||||
User: int64(12345),
|
||||
Group: "root",
|
||||
Recursive: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := tc.options.validate()
|
||||
if tc.err {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,16 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewChronyStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.chrony",
|
||||
Options: &ChronyStageOptions{},
|
||||
}
|
||||
actualStage := NewChronyStage(&ChronyStageOptions{})
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
|
@@ -1,216 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
)
|
||||
|
||||
func TestNewCloudInitStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.cloud-init",
|
||||
Options: &CloudInitStageOptions{
|
||||
Filename: "aaa",
|
||||
Config: CloudInitConfigFile{
|
||||
SystemInfo: &CloudInitConfigSystemInfo{
|
||||
DefaultUser: &CloudInitConfigDefaultUser{
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
actualStage := NewCloudInitStage(&CloudInitStageOptions{
|
||||
Filename: "aaa",
|
||||
Config: CloudInitConfigFile{
|
||||
SystemInfo: &CloudInitConfigSystemInfo{
|
||||
DefaultUser: &CloudInitConfigDefaultUser{
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestCloudInitStage_NewStage_Invalid(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
options CloudInitStageOptions
|
||||
}{
|
||||
{
|
||||
name: "empty-options",
|
||||
options: CloudInitStageOptions{},
|
||||
},
|
||||
{
|
||||
name: "no-config-file-section",
|
||||
options: CloudInitStageOptions{
|
||||
Filename: "00-default_user.cfg",
|
||||
Config: CloudInitConfigFile{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no-system-info-section-option",
|
||||
options: CloudInitStageOptions{
|
||||
Filename: "00-default_user.cfg",
|
||||
Config: CloudInitConfigFile{
|
||||
SystemInfo: &CloudInitConfigSystemInfo{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no-default-user-section-option",
|
||||
options: CloudInitStageOptions{
|
||||
Filename: "00-default_user.cfg",
|
||||
Config: CloudInitConfigFile{
|
||||
SystemInfo: &CloudInitConfigSystemInfo{
|
||||
DefaultUser: &CloudInitConfigDefaultUser{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for idx, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Panics(t, func() { NewCloudInitStage(&tt.options) }, "NewCloudInitStage didn't panic, but it should [idx: %d]", idx)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudInitStage_MarshalJSON(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
options CloudInitStageOptions
|
||||
json string
|
||||
}{
|
||||
{
|
||||
name: "simple-cloud-init-config-with-system-info",
|
||||
options: CloudInitStageOptions{
|
||||
Config: CloudInitConfigFile{
|
||||
SystemInfo: &CloudInitConfigSystemInfo{
|
||||
DefaultUser: &CloudInitConfigDefaultUser{
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
json: `{"filename":"","config":{"system_info":{"default_user":{"name":"foo"}}}}`,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotBytes, err := json.Marshal(tt.options)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.json, string(gotBytes))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloudInitStage_UnmarshalJSON(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
options CloudInitStageOptions
|
||||
json string
|
||||
}{
|
||||
{
|
||||
name: "simple-cloud-init-config-with-system-info",
|
||||
options: CloudInitStageOptions{
|
||||
Config: CloudInitConfigFile{
|
||||
SystemInfo: &CloudInitConfigSystemInfo{
|
||||
DefaultUser: &CloudInitConfigDefaultUser{
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
json: `{"filename":"","config":{"system_info":{"default_user":{"name":"foo"}}}}`,
|
||||
},
|
||||
{
|
||||
name: "osbuild-test-suite-1",
|
||||
options: CloudInitStageOptions{
|
||||
Filename: "10-azure-kfp.cfg",
|
||||
Config: CloudInitConfigFile{
|
||||
Reporting: &CloudInitConfigReporting{
|
||||
Logging: &CloudInitConfigReportingHandlers{
|
||||
Type: "log",
|
||||
},
|
||||
Telemetry: &CloudInitConfigReportingHandlers{
|
||||
Type: "hyperv",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
json: `{
|
||||
"filename": "10-azure-kfp.cfg",
|
||||
"config": {
|
||||
"reporting": {
|
||||
"logging": {
|
||||
"type": "log"
|
||||
},
|
||||
"telemetry": {
|
||||
"type": "hyperv"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`,
|
||||
},
|
||||
{
|
||||
name: "osbuild-test-suite-2",
|
||||
options: CloudInitStageOptions{
|
||||
Filename: "91-azure_datasource.cfg",
|
||||
Config: CloudInitConfigFile{
|
||||
DatasourceList: []string{"Azure"},
|
||||
Datasource: &CloudInitConfigDatasource{
|
||||
Azure: &CloudInitConfigDatasourceAzure{
|
||||
ApplyNetworkConfig: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
json: `{
|
||||
"filename": "91-azure_datasource.cfg",
|
||||
"config": {
|
||||
"datasource_list": [
|
||||
"Azure"
|
||||
],
|
||||
"datasource": {
|
||||
"Azure": {
|
||||
"apply_network_config": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}`,
|
||||
},
|
||||
{
|
||||
name: "osbuild-test-suite-3",
|
||||
options: CloudInitStageOptions{
|
||||
Filename: "06_logging_override.cfg",
|
||||
Config: CloudInitConfigFile{
|
||||
Output: &CloudInitConfigOutput{
|
||||
All: common.ToPtr(">> /var/log/cloud-init-all.log"),
|
||||
},
|
||||
},
|
||||
},
|
||||
json: `{
|
||||
"filename": "06_logging_override.cfg",
|
||||
"config": {
|
||||
"output": {
|
||||
"all": ">> /var/log/cloud-init-all.log"
|
||||
}
|
||||
}
|
||||
}`,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var gotOptions CloudInitStageOptions
|
||||
err := json.Unmarshal([]byte(tt.json), &gotOptions)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, reflect.DeepEqual(tt.options, gotOptions))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,342 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import "github.com/osbuild/osbuild-composer/internal/disk"
|
||||
|
||||
// This is a copy of `internal/disk/disk_test.go`
|
||||
// (but ours has one more entry: "luks+lvm+clevisBind"):
|
||||
var testPartitionTables = map[string]disk.PartitionTable{
|
||||
|
||||
"plain": {
|
||||
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
|
||||
Type: "gpt",
|
||||
Partitions: []disk.Partition{
|
||||
{
|
||||
Size: 1048576, // 1MB
|
||||
Bootable: true,
|
||||
Type: disk.BIOSBootPartitionGUID,
|
||||
UUID: disk.BIOSBootPartitionUUID,
|
||||
},
|
||||
{
|
||||
Size: 209715200, // 200 MB
|
||||
Type: disk.EFISystemPartitionGUID,
|
||||
UUID: disk.EFISystemPartitionUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "vfat",
|
||||
UUID: disk.EFIFilesystemUUID,
|
||||
Mountpoint: "/boot/efi",
|
||||
Label: "EFI-SYSTEM",
|
||||
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
Size: 1024000, // 500 MB
|
||||
Type: disk.FilesystemDataGUID,
|
||||
UUID: disk.FilesystemDataUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "xfs",
|
||||
Mountpoint: "/boot",
|
||||
Label: "boot",
|
||||
FSTabOptions: "defaults",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: disk.FilesystemDataGUID,
|
||||
UUID: disk.RootPartitionUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "xfs",
|
||||
Label: "root",
|
||||
Mountpoint: "/",
|
||||
FSTabOptions: "defaults",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"luks": {
|
||||
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
|
||||
Type: "gpt",
|
||||
Partitions: []disk.Partition{
|
||||
{
|
||||
Size: 1048576, // 1MB
|
||||
Bootable: true,
|
||||
Type: disk.BIOSBootPartitionGUID,
|
||||
UUID: disk.BIOSBootPartitionUUID,
|
||||
},
|
||||
{
|
||||
Size: 209715200, // 200 MB
|
||||
Type: disk.EFISystemPartitionGUID,
|
||||
UUID: disk.EFISystemPartitionUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "vfat",
|
||||
UUID: disk.EFIFilesystemUUID,
|
||||
Mountpoint: "/boot/efi",
|
||||
Label: "EFI-SYSTEM",
|
||||
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
Size: 1024000, // 500 MB
|
||||
Type: disk.FilesystemDataGUID,
|
||||
UUID: disk.FilesystemDataUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "xfs",
|
||||
Mountpoint: "/boot",
|
||||
Label: "boot",
|
||||
FSTabOptions: "defaults",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: disk.FilesystemDataGUID,
|
||||
UUID: disk.RootPartitionUUID,
|
||||
Payload: &disk.LUKSContainer{
|
||||
Label: "crypt-root",
|
||||
Passphrase: "osbuild",
|
||||
PBKDF: disk.Argon2id{
|
||||
Memory: 32,
|
||||
Iterations: 4,
|
||||
Parallelism: 1,
|
||||
},
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "xfs",
|
||||
Label: "root",
|
||||
Mountpoint: "/",
|
||||
FSTabOptions: "defaults",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"luks+lvm": {
|
||||
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
|
||||
Type: "gpt",
|
||||
Partitions: []disk.Partition{
|
||||
{
|
||||
Size: 1048576, // 1MB
|
||||
Bootable: true,
|
||||
Type: disk.BIOSBootPartitionGUID,
|
||||
UUID: disk.BIOSBootPartitionUUID,
|
||||
},
|
||||
{
|
||||
Size: 209715200, // 200 MB
|
||||
Type: disk.EFISystemPartitionGUID,
|
||||
UUID: disk.EFISystemPartitionUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "vfat",
|
||||
UUID: disk.EFIFilesystemUUID,
|
||||
Mountpoint: "/boot/efi",
|
||||
Label: "EFI-SYSTEM",
|
||||
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
Size: 1024000, // 500 MB
|
||||
Type: disk.FilesystemDataGUID,
|
||||
UUID: disk.FilesystemDataUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "xfs",
|
||||
Mountpoint: "/boot",
|
||||
Label: "boot",
|
||||
FSTabOptions: "defaults",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: disk.FilesystemDataGUID,
|
||||
UUID: disk.RootPartitionUUID,
|
||||
Size: 5 * 1024 * 1024 * 1024,
|
||||
Payload: &disk.LUKSContainer{
|
||||
Label: "crypt-root",
|
||||
Passphrase: "osbuild",
|
||||
PBKDF: disk.Argon2id{
|
||||
Memory: 32,
|
||||
Iterations: 4,
|
||||
Parallelism: 1,
|
||||
},
|
||||
Payload: &disk.LVMVolumeGroup{
|
||||
Name: "root",
|
||||
Description: "root volume group",
|
||||
LogicalVolumes: []disk.LVMLogicalVolume{
|
||||
{
|
||||
Size: 2 * 1024 * 1024 * 1024,
|
||||
Name: "rootlv",
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "xfs",
|
||||
Label: "root",
|
||||
Mountpoint: "/",
|
||||
FSTabOptions: "defaults",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Size: 2 * 1024 * 1024 * 1024,
|
||||
Name: "homelv",
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "xfs",
|
||||
Label: "root",
|
||||
Mountpoint: "/home",
|
||||
FSTabOptions: "defaults",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"luks+lvm+clevisBind": {
|
||||
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
|
||||
Type: "gpt",
|
||||
Partitions: []disk.Partition{
|
||||
{
|
||||
Size: 1048576, // 1MB
|
||||
Bootable: true,
|
||||
Type: disk.BIOSBootPartitionGUID,
|
||||
UUID: disk.BIOSBootPartitionUUID,
|
||||
},
|
||||
{
|
||||
Size: 209715200, // 200 MB
|
||||
Type: disk.EFISystemPartitionGUID,
|
||||
UUID: disk.EFISystemPartitionUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "vfat",
|
||||
UUID: disk.EFIFilesystemUUID,
|
||||
Mountpoint: "/boot/efi",
|
||||
Label: "EFI-SYSTEM",
|
||||
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
Size: 1024000, // 500 MB
|
||||
Type: disk.FilesystemDataGUID,
|
||||
UUID: disk.FilesystemDataUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "xfs",
|
||||
Mountpoint: "/boot",
|
||||
Label: "boot",
|
||||
FSTabOptions: "defaults",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: disk.FilesystemDataGUID,
|
||||
UUID: disk.RootPartitionUUID,
|
||||
Payload: &disk.LUKSContainer{
|
||||
Label: "crypt_root",
|
||||
Cipher: "cipher_null",
|
||||
Passphrase: "osbuild",
|
||||
PBKDF: disk.Argon2id{
|
||||
Memory: 32,
|
||||
Iterations: 4,
|
||||
Parallelism: 1,
|
||||
},
|
||||
Clevis: &disk.ClevisBind{
|
||||
Pin: "null",
|
||||
Policy: "{}",
|
||||
RemovePassphrase: true,
|
||||
},
|
||||
Payload: &disk.LVMVolumeGroup{
|
||||
Name: "rootvg",
|
||||
Description: "built with lvm2 and osbuild",
|
||||
LogicalVolumes: []disk.LVMLogicalVolume{
|
||||
{
|
||||
Size: 9 * 1024 * 1024 * 1024, // 9 GB
|
||||
Name: "rootlv",
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "xfs",
|
||||
Label: "root",
|
||||
Mountpoint: "/",
|
||||
FSTabOptions: "defaults",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"btrfs": {
|
||||
UUID: "D209C89E-EA5E-4FBD-B161-B461CCE297E0",
|
||||
Type: "gpt",
|
||||
Partitions: []disk.Partition{
|
||||
{
|
||||
Size: 1048576, // 1MB
|
||||
Bootable: true,
|
||||
Type: disk.BIOSBootPartitionGUID,
|
||||
UUID: disk.BIOSBootPartitionUUID,
|
||||
},
|
||||
{
|
||||
Size: 209715200, // 200 MB
|
||||
Type: disk.EFISystemPartitionGUID,
|
||||
UUID: disk.EFISystemPartitionUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "vfat",
|
||||
UUID: disk.EFIFilesystemUUID,
|
||||
Mountpoint: "/boot/efi",
|
||||
Label: "EFI-SYSTEM",
|
||||
FSTabOptions: "defaults,uid=0,gid=0,umask=077,shortname=winnt",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
Size: 1024000, // 500 MB
|
||||
Type: disk.FilesystemDataGUID,
|
||||
UUID: disk.FilesystemDataUUID,
|
||||
Payload: &disk.Filesystem{
|
||||
Type: "xfs",
|
||||
Mountpoint: "/boot",
|
||||
Label: "boot",
|
||||
FSTabOptions: "defaults",
|
||||
FSTabFreq: 0,
|
||||
FSTabPassNo: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: disk.FilesystemDataGUID,
|
||||
UUID: disk.RootPartitionUUID,
|
||||
Size: 10 * 1024 * 1024 * 1024,
|
||||
Payload: &disk.Btrfs{
|
||||
Label: "rootfs",
|
||||
Subvolumes: []disk.BtrfsSubvolume{
|
||||
{
|
||||
Size: 0,
|
||||
Mountpoint: "/",
|
||||
GroupID: 0,
|
||||
},
|
||||
{
|
||||
Size: 5 * 1024 * 1024 * 1024,
|
||||
Mountpoint: "/var",
|
||||
GroupID: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@@ -1,31 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestContainersStorageConfStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.containers.storage.conf",
|
||||
Options: &ContainersStorageConfStageOptions{
|
||||
Filename: "/usr/share/containers/storage.conf",
|
||||
Config: ContainersStorageConfig{
|
||||
Storage: CSCStorage{
|
||||
Options: &CSCStorageOptions{
|
||||
AdditionalImageStores: []string{
|
||||
"/usr/share/containers/storage/",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
actualStage := NewContainersStorageConfStage(
|
||||
NewContainerStorageOptions("/usr/share/containers/storage.conf",
|
||||
"/usr/share/containers/storage/"),
|
||||
)
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
|
@@ -1,71 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewCopyStage(t *testing.T) {
|
||||
|
||||
paths := []CopyStagePath{
|
||||
{
|
||||
From: "input://tree-input/",
|
||||
To: "mount://root/",
|
||||
},
|
||||
}
|
||||
|
||||
devices := make(map[string]Device)
|
||||
devices["root"] = Device{
|
||||
Type: "org.osbuild.loopback",
|
||||
Options: LoopbackDeviceOptions{
|
||||
Filename: "/somekindofimage.img",
|
||||
Start: 0,
|
||||
Size: 1073741824,
|
||||
},
|
||||
}
|
||||
|
||||
mounts := []Mount{
|
||||
*NewBtrfsMount("root", "root", "/"),
|
||||
}
|
||||
|
||||
treeInput := NewTreeInput("name:input-pipeline")
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.copy",
|
||||
Options: &CopyStageOptions{paths},
|
||||
Inputs: &PipelineTreeInputs{"tree-input": *treeInput},
|
||||
Devices: devices,
|
||||
Mounts: mounts,
|
||||
}
|
||||
// convert to alias types
|
||||
stageMounts := Mounts(mounts)
|
||||
stageDevices := Devices(devices)
|
||||
actualStage := NewCopyStage(&CopyStageOptions{paths}, NewPipelineTreeInputs("tree-input", "input-pipeline"), &stageDevices, &stageMounts)
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestNewCopyStageSimpleSourcesInputs(t *testing.T) {
|
||||
fileSum := "sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
|
||||
|
||||
paths := []CopyStagePath{
|
||||
{
|
||||
From: fmt.Sprintf("input://inlinefile/%x", fileSum),
|
||||
To: "tree://etc/inlinefile",
|
||||
},
|
||||
}
|
||||
|
||||
filesInputs := CopyStageFilesInputs{
|
||||
"inlinefile": NewFilesInput(NewFilesInputSourceArrayRef([]FilesInputSourceArrayRefEntry{
|
||||
NewFilesInputSourceArrayRefEntry(fileSum, nil),
|
||||
})),
|
||||
}
|
||||
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.copy",
|
||||
Options: &CopyStageOptions{paths},
|
||||
Inputs: &filesInputs,
|
||||
}
|
||||
actualStage := NewCopyStageSimple(&CopyStageOptions{paths}, &filesInputs)
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
|
@@ -1,141 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/blueprint"
|
||||
"github.com/osbuild/osbuild-composer/internal/disk"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGenDeviceCreationStages(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
// math/rand is good enough in this case
|
||||
/* #nosec G404 */
|
||||
rng := rand.New(rand.NewSource(13))
|
||||
|
||||
luks_lvm := testPartitionTables["luks+lvm"]
|
||||
|
||||
pt, err := disk.NewPartitionTable(&luks_lvm, []blueprint.FilesystemCustomization{}, 0, false, make(map[string]uint64), rng)
|
||||
assert.NoError(err)
|
||||
|
||||
stages := GenDeviceCreationStages(pt, "image.raw")
|
||||
|
||||
// we should have two stages
|
||||
assert.Equal(len(stages), 2)
|
||||
|
||||
// first one should be a "org.osbuild.luks2.format"
|
||||
luks := stages[0]
|
||||
assert.Equal(luks.Type, "org.osbuild.luks2.format")
|
||||
|
||||
// it needs to have one device
|
||||
assert.Equal(len(luks.Devices), 1)
|
||||
|
||||
// the device should be called `device`
|
||||
device, ok := luks.Devices["device"]
|
||||
assert.True(ok, "Need device called `device`")
|
||||
|
||||
// device should be a loopback device
|
||||
assert.Equal(device.Type, "org.osbuild.loopback")
|
||||
|
||||
lvm := stages[1]
|
||||
assert.Equal(lvm.Type, "org.osbuild.lvm2.create")
|
||||
lvmOptions, ok := lvm.Options.(*LVM2CreateStageOptions)
|
||||
assert.True(ok, "Need LVM2CreateStageOptions for org.osbuild.lvm2.create")
|
||||
|
||||
// LVM should have two volumes
|
||||
assert.Equal(len(lvmOptions.Volumes), 2)
|
||||
rootlv := lvmOptions.Volumes[0]
|
||||
assert.Equal(rootlv.Name, "rootlv")
|
||||
|
||||
homelv := lvmOptions.Volumes[1]
|
||||
assert.Equal(homelv.Name, "homelv")
|
||||
|
||||
// it needs to have two(!) devices, the loopback and the luks
|
||||
assert.Equal(len(lvm.Devices), 2)
|
||||
|
||||
// this is the target one, which should be the luks one
|
||||
device, ok = lvm.Devices["device"]
|
||||
assert.True(ok, "Need device called `device`")
|
||||
assert.Equal(device.Type, "org.osbuild.luks2")
|
||||
assert.NotEmpty(device.Parent, "Need a parent device for LUKS on loopback")
|
||||
|
||||
luksOptions, ok := device.Options.(*LUKS2DeviceOptions)
|
||||
assert.True(ok, "Need LUKS2DeviceOptions for luks device")
|
||||
assert.Equal(luksOptions.Passphrase, "osbuild")
|
||||
|
||||
parent, ok := lvm.Devices[device.Parent]
|
||||
assert.True(ok, "Need device called `device`")
|
||||
assert.Equal(parent.Type, "org.osbuild.loopback")
|
||||
|
||||
}
|
||||
|
||||
func TestGenDeviceFinishStages(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
// math/rand is good enough in this case
|
||||
/* #nosec G404 */
|
||||
rng := rand.New(rand.NewSource(13))
|
||||
|
||||
luks_lvm := testPartitionTables["luks+lvm"]
|
||||
|
||||
pt, err := disk.NewPartitionTable(&luks_lvm, []blueprint.FilesystemCustomization{}, 0, false, make(map[string]uint64), rng)
|
||||
assert.NoError(err)
|
||||
|
||||
stages := GenDeviceFinishStages(pt, "image.raw")
|
||||
|
||||
// we should have one stage
|
||||
assert.Equal(1, len(stages))
|
||||
|
||||
// it should be a "org.osbuild.lvm2.metadata"
|
||||
lvm := stages[0]
|
||||
assert.Equal("org.osbuild.lvm2.metadata", lvm.Type)
|
||||
|
||||
// it should have two devices
|
||||
assert.Equal(2, len(lvm.Devices))
|
||||
|
||||
// this is the target one, which should be the luks one
|
||||
device, ok := lvm.Devices["device"]
|
||||
assert.True(ok, "Need device called `device`")
|
||||
assert.Equal("org.osbuild.luks2", device.Type)
|
||||
assert.NotEmpty(device.Parent, "Need a parent device for LUKS on loopback")
|
||||
|
||||
luksOptions, ok := device.Options.(*LUKS2DeviceOptions)
|
||||
assert.True(ok, "Need LUKS2DeviceOptions for luks device")
|
||||
assert.Equal("osbuild", luksOptions.Passphrase)
|
||||
|
||||
parent, ok := lvm.Devices[device.Parent]
|
||||
assert.True(ok, "Need device called `device`")
|
||||
assert.Equal("org.osbuild.loopback", parent.Type)
|
||||
|
||||
opts, ok := lvm.Options.(*LVM2MetadataStageOptions)
|
||||
assert.True(ok, "Need LVM2MetadataStageOptions for org.osbuild.lvm2.metadata")
|
||||
assert.Equal("root", opts.VGName)
|
||||
}
|
||||
|
||||
func TestGenDeviceFinishStagesOrderWithLVMClevisBind(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
// math/rand is good enough in this case
|
||||
/* #nosec G404 */
|
||||
rng := rand.New(rand.NewSource(13))
|
||||
|
||||
luks_lvm := testPartitionTables["luks+lvm+clevisBind"]
|
||||
|
||||
pt, err := disk.NewPartitionTable(&luks_lvm, []blueprint.FilesystemCustomization{}, 0, false, make(map[string]uint64), rng)
|
||||
assert.NoError(err)
|
||||
|
||||
stages := GenDeviceFinishStages(pt, "image.raw")
|
||||
|
||||
// we should have two stages
|
||||
assert.Equal(2, len(stages))
|
||||
lvm := stages[0]
|
||||
luks := stages[1]
|
||||
|
||||
// the first one should be "org.osbuild.lvm2.metadata"
|
||||
assert.Equal("org.osbuild.lvm2.metadata", lvm.Type)
|
||||
// followed by "org.osbuild.luks2.remove-key"
|
||||
assert.Equal("org.osbuild.luks2.remove-key", luks.Type)
|
||||
}
|
||||
|
|
@@ -1,40 +0,0 @@
package osbuild

import (
	"math/rand"
	"testing"

	"github.com/osbuild/osbuild-composer/internal/blueprint"
	"github.com/osbuild/osbuild-composer/internal/disk"
	"github.com/stretchr/testify/assert"
)

func TestGenImageKernelOptions(t *testing.T) {
	assert := assert.New(t)

	// math/rand is good enough in this case
	/* #nosec G404 */
	rng := rand.New(rand.NewSource(13))

	luks_lvm := testPartitionTables["luks+lvm"]

	pt, err := disk.NewPartitionTable(&luks_lvm, []blueprint.FilesystemCustomization{}, 0, false, make(map[string]uint64), rng)
	assert.NoError(err)

	var uuid string

	findLuksUUID := func(e disk.Entity, path []disk.Entity) error {
		switch ent := e.(type) {
		case *disk.LUKSContainer:
			uuid = ent.UUID
		}

		return nil
	}
	_ = pt.ForEachEntity(findLuksUUID)

	assert.NotEmpty(uuid, "Could not find LUKS container")
	cmdline := GenImageKernelOptions(pt)

	assert.Subset(cmdline, []string{"luks.uuid=" + uuid})
}
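Note (reviewer aside, not part of the diff): the removed test above still pulls the partition-table helpers from the osbuild-composer internal packages. As orientation only, below is a minimal sketch of the same setup written against the split-out library; the github.com/osbuild/images/pkg/... import paths, the unchanged NewPartitionTable signature, and the testPartitionTables fixture are assumptions here, since this diff does not show where (or whether) the test itself is ported.

package osbuild

import (
	"math/rand"
	"testing"

	// Assumed post-split homes of the moved packages; the removed test
	// imported internal/blueprint and internal/disk instead.
	"github.com/osbuild/images/pkg/blueprint"
	"github.com/osbuild/images/pkg/disk"

	"github.com/stretchr/testify/assert"
)

func TestGenImageKernelOptionsAfterSplit(t *testing.T) {
	assert := assert.New(t)

	// math/rand is good enough in this case
	/* #nosec G404 */
	rng := rand.New(rand.NewSource(13))

	// Same fixture and call as in the removed test; only the import paths differ.
	luksLVM := testPartitionTables["luks+lvm"]
	pt, err := disk.NewPartitionTable(&luksLVM, []blueprint.FilesystemCustomization{}, 0, false, make(map[string]uint64), rng)
	assert.NoError(err)

	assert.NotEmpty(GenImageKernelOptions(pt))
}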
@@ -1,80 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
)
|
||||
|
||||
func TestNewDNFAutomaticConfigStage(t *testing.T) {
|
||||
stageOptions := NewDNFAutomaticConfigStageOptions(&DNFAutomaticConfig{})
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.dnf-automatic.config",
|
||||
Options: stageOptions,
|
||||
}
|
||||
actualStage := NewDNFAutomaticConfigStage(stageOptions)
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestDNFAutomaticConfigStageOptionsValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
options DNFAutomaticConfigStageOptions
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
name: "empty-options",
|
||||
options: DNFAutomaticConfigStageOptions{},
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
name: "invalid-upgrade_type",
|
||||
options: DNFAutomaticConfigStageOptions{
|
||||
Config: &DNFAutomaticConfig{
|
||||
Commands: &DNFAutomaticConfigCommands{
|
||||
ApplyUpdates: common.ToPtr(true),
|
||||
UpgradeType: "invalid",
|
||||
},
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "valid-data-1",
|
||||
options: DNFAutomaticConfigStageOptions{
|
||||
Config: &DNFAutomaticConfig{
|
||||
Commands: &DNFAutomaticConfigCommands{
|
||||
ApplyUpdates: common.ToPtr(true),
|
||||
UpgradeType: DNFAutomaticUpgradeTypeDefault,
|
||||
},
|
||||
},
|
||||
},
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
name: "valid-data-2",
|
||||
options: DNFAutomaticConfigStageOptions{
|
||||
Config: &DNFAutomaticConfig{
|
||||
Commands: &DNFAutomaticConfigCommands{
|
||||
ApplyUpdates: common.ToPtr(false),
|
||||
UpgradeType: DNFAutomaticUpgradeTypeSecurity,
|
||||
},
|
||||
},
|
||||
},
|
||||
err: false,
|
||||
},
|
||||
}
|
||||
for idx, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.err {
|
||||
assert.Errorf(t, tt.options.validate(), "%q didn't return an error [idx: %d]", tt.name, idx)
|
||||
assert.Panics(t, func() { NewDNFAutomaticConfigStage(&tt.options) })
|
||||
} else {
|
||||
assert.NoErrorf(t, tt.options.validate(), "%q returned an error [idx: %d]", tt.name, idx)
|
||||
assert.NotPanics(t, func() { NewDNFAutomaticConfigStage(&tt.options) })
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,133 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewDNFConfigStageOptions(t *testing.T) {
|
||||
variables := []DNFVariable{
|
||||
{
|
||||
Name: "release",
|
||||
Value: "8.4",
|
||||
},
|
||||
}
|
||||
|
||||
dnfconfig := &DNFConfig{
|
||||
Main: &DNFConfigMain{
|
||||
IPResolve: "4",
|
||||
},
|
||||
}
|
||||
|
||||
expectedOptions := &DNFConfigStageOptions{
|
||||
Variables: variables,
|
||||
Config: dnfconfig,
|
||||
}
|
||||
actualOptions := NewDNFConfigStageOptions(variables, dnfconfig)
|
||||
assert.Equal(t, expectedOptions, actualOptions)
|
||||
}
|
||||
|
||||
func TestNewDNFConfigStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.dnf.config",
|
||||
Options: &DNFConfigStageOptions{},
|
||||
}
|
||||
actualStage := NewDNFConfigStage(&DNFConfigStageOptions{})
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestJSONDNFConfigStage(t *testing.T) {
|
||||
expectedOptions := DNFConfigStageOptions{
|
||||
Variables: []DNFVariable{
|
||||
{
|
||||
Name: "release",
|
||||
Value: "8.4",
|
||||
},
|
||||
},
|
||||
Config: &DNFConfig{
|
||||
Main: &DNFConfigMain{
|
||||
IPResolve: "4",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
inputString := `{"variables":[{"name":"release","value":"8.4"}],"config":{"main":{"ip_resolve":"4"}}}`
|
||||
var inputOptions DNFConfigStageOptions
|
||||
err := json.Unmarshal([]byte(inputString), &inputOptions)
|
||||
assert.NoError(t, err, "failed to parse JSON dnf config")
|
||||
assert.True(t, reflect.DeepEqual(expectedOptions, inputOptions))
|
||||
|
||||
inputBytes, err := json.Marshal(expectedOptions)
|
||||
assert.NoError(t, err, "failed to marshal YUM config into JSON")
|
||||
assert.Equal(t, inputString, string(inputBytes))
|
||||
}
|
||||
|
||||
func TestDNFConfigValidate(t *testing.T) {
|
||||
variables := []DNFVariable{
|
||||
{
|
||||
Name: "release",
|
||||
Value: "8.4",
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
options DNFConfigStageOptions
|
||||
valid bool
|
||||
}{
|
||||
{
|
||||
DNFConfigStageOptions{},
|
||||
true,
|
||||
},
|
||||
{
|
||||
DNFConfigStageOptions{
|
||||
Variables: variables,
|
||||
Config: &DNFConfig{
|
||||
Main: nil,
|
||||
},
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
DNFConfigStageOptions{
|
||||
Variables: variables,
|
||||
Config: &DNFConfig{
|
||||
Main: &DNFConfigMain{},
|
||||
},
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
DNFConfigStageOptions{
|
||||
Variables: variables,
|
||||
Config: &DNFConfig{
|
||||
Main: &DNFConfigMain{
|
||||
IPResolve: "4",
|
||||
},
|
||||
},
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
DNFConfigStageOptions{
|
||||
Variables: variables,
|
||||
Config: &DNFConfig{
|
||||
Main: &DNFConfigMain{
|
||||
IPResolve: "urgh",
|
||||
},
|
||||
},
|
||||
},
|
||||
false,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
if test.valid {
|
||||
require.NotPanics(t, func() { NewDNFConfigStage(&test.options) })
|
||||
} else {
|
||||
require.Panics(t, func() { NewDNFConfigStage(&test.options) })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,42 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewDracutConfStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.dracut.conf",
|
||||
Options: &DracutConfStageOptions{},
|
||||
}
|
||||
actualStage := NewDracutConfStage(&DracutConfStageOptions{})
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestDracutConfStage_MarshalJSON_Invalid(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
options DracutConfStageOptions
|
||||
}{
|
||||
{
|
||||
name: "empty-options",
|
||||
options: DracutConfStageOptions{},
|
||||
},
|
||||
{
|
||||
name: "no-options-in-config",
|
||||
options: DracutConfStageOptions{
|
||||
Filename: "testing.conf",
|
||||
Config: DracutConfigFile{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for idx, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotBytes, err := json.Marshal(tt.options)
|
||||
assert.NotNilf(t, err, "json.Marshal() didn't return an error, but %s [idx: %d]", string(gotBytes), idx)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,16 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewDracutStage(t *testing.T) {
	expectedStage := &Stage{
		Type:    "org.osbuild.dracut",
		Options: &DracutStageOptions{},
	}
	actualStage := NewDracutStage(&DracutStageOptions{})
	assert.Equal(t, expectedStage, actualStage)
}
@@ -1,32 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewFDOStageForRootCerts(t *testing.T) {

	assert := assert.New(t)

	tests := []struct {
		data string
		hash string
	}{
		{"42\n", "sha256:084c799cd551dd1d8d5c5f9a5d593b2e931f5e36122ee5c793c1d08a19839cc0"},
		{"Hallo Welt\n", "sha256:f950375066d74787f31cbd8f9f91c71819357cad243fb9d4a0d9ef4fa76709e0"},
	}

	for _, tt := range tests {
		stage := NewFDOStageForRootCerts(tt.data)

		inputs := stage.Inputs.(*FDOStageInputs)
		certs := inputs.RootCerts
		refs := certs.References.(*FilesInputSourcePlainRef)

		assert.Len(*refs, 1)
		assert.Equal((*refs)[0], tt.hash)

	}
}
@@ -1,93 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFilesInput_UnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
ref FilesInputRef
|
||||
rawJson []byte
|
||||
}{
|
||||
{
|
||||
name: "pipeline-object-ref",
|
||||
ref: NewFilesInputPipelineObjectRef("os", "image.raw", nil),
|
||||
rawJson: []byte(`{"type":"org.osbuild.files","origin":"org.osbuild.pipeline","references":{"name:os":{"file":"image.raw"}}}`),
|
||||
},
|
||||
{
|
||||
name: "pipeline-array-ref",
|
||||
ref: NewFilesInputPipelineArrayRef("os", "image.raw", nil),
|
||||
rawJson: []byte(`{"type":"org.osbuild.files","origin":"org.osbuild.pipeline","references":[{"id":"name:os","options":{"file":"image.raw"}}]}`),
|
||||
},
|
||||
{
|
||||
name: "source-plain-ref",
|
||||
ref: NewFilesInputSourcePlainRef([]string{"sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"}),
|
||||
rawJson: []byte(`{"type":"org.osbuild.files","origin":"org.osbuild.source","references":["sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"]}`),
|
||||
},
|
||||
{
|
||||
name: "source-array-ref",
|
||||
ref: NewFilesInputSourceArrayRef([]FilesInputSourceArrayRefEntry{
|
||||
NewFilesInputSourceArrayRefEntry("sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", nil),
|
||||
}),
|
||||
rawJson: []byte(`{"type":"org.osbuild.files","origin":"org.osbuild.source","references":[{"id":"sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"}]}`),
|
||||
},
|
||||
{
|
||||
name: "source-object-ref",
|
||||
ref: NewFilesInputSourceObjectRef(map[string]FilesInputRefMetadata{
|
||||
"sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef": nil,
|
||||
}),
|
||||
rawJson: []byte(`{"type":"org.osbuild.files","origin":"org.osbuild.source","references":{"sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef":{}}}`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range testCases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var gotInput FilesInput
|
||||
err := json.Unmarshal(tt.rawJson, &gotInput)
|
||||
assert.NoErrorf(t, err, "FilesInput.UnmarshalJSON() error = %v", err)
|
||||
|
||||
input := NewFilesInput(tt.ref)
|
||||
gotBytes, err := json.Marshal(input)
|
||||
assert.NoErrorf(t, err, "FilesInput.MarshalJSON() error = %v", err)
|
||||
|
||||
assert.EqualValuesf(t, tt.rawJson, gotBytes, "Expected JSON `%v`, got JSON `%v`", string(tt.rawJson), string(gotBytes))
|
||||
assert.EqualValuesf(t, input, &gotInput, "Expected input `%v`, got input `%v` [test: %q]", input, &gotInput, tt.name)
|
||||
})
|
||||
}
|
||||
|
||||
// test invalid cases
|
||||
invalidTestCases := []struct {
|
||||
name string
|
||||
rawJson []byte
|
||||
}{
|
||||
{
|
||||
name: "invalid-pipeline-ref",
|
||||
rawJson: []byte(`{"type":"org.osbuild.files","origin":"org.osbuild.pipeline","references":1}`),
|
||||
},
|
||||
{
|
||||
name: "invalid-source-ref",
|
||||
rawJson: []byte(`{"type":"org.osbuild.files","origin":"org.osbuild.source","references":2}`),
|
||||
},
|
||||
{
|
||||
name: "invalid-origin",
|
||||
rawJson: []byte(`{"type":"org.osbuild.files","origin":"org.osbuild.invalid","references":{}}`),
|
||||
},
|
||||
{
|
||||
name: "invalid-input",
|
||||
rawJson: []byte(`[]`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range invalidTestCases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var gotInput FilesInput
|
||||
err := json.Unmarshal(tt.rawJson, &gotInput)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@@ -1,27 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewFirewallStage(t *testing.T) {
	expectedFirewall := &Stage{
		Type:    "org.osbuild.firewall",
		Options: &FirewallStageOptions{},
	}
	actualFirewall := NewFirewallStage(&FirewallStageOptions{})
	assert.Equal(t, expectedFirewall, actualFirewall)
}

func TestFirewallStageZones_ValidateInvalid(t *testing.T) {
	options := FirewallStageOptions{}
	var sources []string
	options.Zones = append(options.Zones, FirewallZone{
		Name:    "",
		Sources: sources,
	})
	assert := assert.New(t)
	assert.Error(options.validate())
}
@@ -1,16 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewFirstBootStage(t *testing.T) {
	expectedStage := &Stage{
		Type:    "org.osbuild.first-boot",
		Options: &FirstBootStageOptions{},
	}
	actualStage := NewFirstBootStage(&FirstBootStageOptions{})
	assert.Equal(t, expectedStage, actualStage)
}
@@ -1,16 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewFixBLSStage(t *testing.T) {
	expectedStage := &Stage{
		Type:    "org.osbuild.fix-bls",
		Options: &FixBLSStageOptions{},
	}
	actualStage := NewFixBLSStage(&FixBLSStageOptions{})
	assert.Equal(t, expectedStage, actualStage)
}
@@ -1,272 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
"github.com/osbuild/osbuild-composer/internal/fsnode"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGenFileNodesStages(t *testing.T) {
|
||||
fileData1 := []byte("hello world")
|
||||
fileData2 := []byte("hello world 2")
|
||||
|
||||
ensureFileCreation := func(file *fsnode.File, err error) *fsnode.File {
|
||||
t.Helper()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, file)
|
||||
return file
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
files []*fsnode.File
|
||||
expected []*Stage
|
||||
}{
|
||||
{
|
||||
name: "empty-files-list",
|
||||
files: []*fsnode.File{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "nil-files-list",
|
||||
files: nil,
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "single-file-simple",
|
||||
files: []*fsnode.File{
|
||||
ensureFileCreation(fsnode.NewFile("/etc/file", nil, nil, nil, []byte(fileData1))),
|
||||
},
|
||||
expected: []*Stage{
|
||||
NewCopyStageSimple(&CopyStageOptions{
|
||||
Paths: []CopyStagePath{
|
||||
{
|
||||
From: fmt.Sprintf("input://file-%[1]x/sha256:%[1]x", sha256.Sum256(fileData1)),
|
||||
To: "tree:///etc/file",
|
||||
RemoveDestination: true,
|
||||
},
|
||||
},
|
||||
}, &CopyStageFilesInputs{
|
||||
fmt.Sprintf("file-%x", sha256.Sum256(fileData1)): NewFilesInput(NewFilesInputSourceArrayRef([]FilesInputSourceArrayRefEntry{
|
||||
NewFilesInputSourceArrayRefEntry(fmt.Sprintf("sha256:%x", sha256.Sum256(fileData1)), nil),
|
||||
})),
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple-files-simple",
|
||||
files: []*fsnode.File{
|
||||
ensureFileCreation(fsnode.NewFile("/etc/file", nil, nil, nil, []byte(fileData1))),
|
||||
ensureFileCreation(fsnode.NewFile("/etc/file2", nil, nil, nil, []byte(fileData2))),
|
||||
},
|
||||
expected: []*Stage{
|
||||
NewCopyStageSimple(&CopyStageOptions{
|
||||
Paths: []CopyStagePath{
|
||||
{
|
||||
From: fmt.Sprintf("input://file-%[1]x/sha256:%[1]x", sha256.Sum256(fileData1)),
|
||||
To: "tree:///etc/file",
|
||||
RemoveDestination: true,
|
||||
},
|
||||
{
|
||||
From: fmt.Sprintf("input://file-%[1]x/sha256:%[1]x", sha256.Sum256(fileData2)),
|
||||
To: "tree:///etc/file2",
|
||||
RemoveDestination: true,
|
||||
},
|
||||
},
|
||||
}, &CopyStageFilesInputs{
|
||||
fmt.Sprintf("file-%x", sha256.Sum256(fileData1)): NewFilesInput(NewFilesInputSourceArrayRef([]FilesInputSourceArrayRefEntry{
|
||||
NewFilesInputSourceArrayRefEntry(fmt.Sprintf("sha256:%x", sha256.Sum256(fileData1)), nil),
|
||||
})),
|
||||
fmt.Sprintf("file-%x", sha256.Sum256(fileData2)): NewFilesInput(NewFilesInputSourceArrayRef([]FilesInputSourceArrayRefEntry{
|
||||
NewFilesInputSourceArrayRefEntry(fmt.Sprintf("sha256:%x", sha256.Sum256(fileData2)), nil),
|
||||
})),
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple-files-with-all-options",
|
||||
files: []*fsnode.File{
|
||||
ensureFileCreation(fsnode.NewFile("/etc/file", common.ToPtr(os.FileMode(0644)), "root", int64(12345), []byte(fileData1))),
|
||||
ensureFileCreation(fsnode.NewFile("/etc/file2", common.ToPtr(os.FileMode(0755)), int64(12345), "root", []byte(fileData2))),
|
||||
},
|
||||
expected: []*Stage{
|
||||
NewCopyStageSimple(&CopyStageOptions{
|
||||
Paths: []CopyStagePath{
|
||||
{
|
||||
From: fmt.Sprintf("input://file-%[1]x/sha256:%[1]x", sha256.Sum256(fileData1)),
|
||||
To: "tree:///etc/file",
|
||||
RemoveDestination: true,
|
||||
},
|
||||
{
|
||||
From: fmt.Sprintf("input://file-%[1]x/sha256:%[1]x", sha256.Sum256(fileData2)),
|
||||
To: "tree:///etc/file2",
|
||||
RemoveDestination: true,
|
||||
},
|
||||
},
|
||||
}, &CopyStageFilesInputs{
|
||||
fmt.Sprintf("file-%x", sha256.Sum256(fileData1)): NewFilesInput(NewFilesInputSourceArrayRef([]FilesInputSourceArrayRefEntry{
|
||||
NewFilesInputSourceArrayRefEntry(fmt.Sprintf("sha256:%x", sha256.Sum256(fileData1)), nil),
|
||||
})),
|
||||
fmt.Sprintf("file-%x", sha256.Sum256(fileData2)): NewFilesInput(NewFilesInputSourceArrayRef([]FilesInputSourceArrayRefEntry{
|
||||
NewFilesInputSourceArrayRefEntry(fmt.Sprintf("sha256:%x", sha256.Sum256(fileData2)), nil),
|
||||
})),
|
||||
}),
|
||||
NewChmodStage(&ChmodStageOptions{
|
||||
Items: map[string]ChmodStagePathOptions{
|
||||
"/etc/file": {
|
||||
Mode: fmt.Sprintf("%#o", os.FileMode(0644)),
|
||||
},
|
||||
"/etc/file2": {
|
||||
Mode: fmt.Sprintf("%#o", os.FileMode(0755)),
|
||||
},
|
||||
},
|
||||
}),
|
||||
NewChownStage(&ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"/etc/file": {
|
||||
User: "root",
|
||||
Group: int64(12345),
|
||||
},
|
||||
"/etc/file2": {
|
||||
User: int64(12345),
|
||||
Group: "root",
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
gotStages := GenFileNodesStages(tc.files)
|
||||
assert.EqualValues(t, tc.expected, gotStages)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenDirectoryNodesStages(t *testing.T) {
|
||||
|
||||
ensureDirCreation := func(dir *fsnode.Directory, err error) *fsnode.Directory {
|
||||
t.Helper()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
return dir
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
dirs []*fsnode.Directory
|
||||
expected []*Stage
|
||||
}{
|
||||
{
|
||||
name: "empty-dirs-list",
|
||||
dirs: []*fsnode.Directory{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "nil-dirs-list",
|
||||
dirs: nil,
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "single-dir-simple",
|
||||
dirs: []*fsnode.Directory{
|
||||
ensureDirCreation(fsnode.NewDirectory("/etc/dir", nil, nil, nil, false)),
|
||||
},
|
||||
expected: []*Stage{
|
||||
NewMkdirStage(&MkdirStageOptions{
|
||||
Paths: []MkdirStagePath{
|
||||
{
|
||||
Path: "/etc/dir",
|
||||
ExistOk: true,
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple-dirs-simple",
|
||||
dirs: []*fsnode.Directory{
|
||||
ensureDirCreation(fsnode.NewDirectory("/etc/dir", nil, nil, nil, false)),
|
||||
ensureDirCreation(fsnode.NewDirectory("/etc/dir2", nil, nil, nil, false)),
|
||||
},
|
||||
expected: []*Stage{
|
||||
NewMkdirStage(&MkdirStageOptions{
|
||||
Paths: []MkdirStagePath{
|
||||
{
|
||||
Path: "/etc/dir",
|
||||
ExistOk: true,
|
||||
},
|
||||
{
|
||||
Path: "/etc/dir2",
|
||||
ExistOk: true,
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple-dirs-with-all-options",
|
||||
dirs: []*fsnode.Directory{
|
||||
ensureDirCreation(fsnode.NewDirectory("/etc/dir", common.ToPtr(os.FileMode(0700)), "root", int64(12345), true)),
|
||||
ensureDirCreation(fsnode.NewDirectory("/etc/dir2", common.ToPtr(os.FileMode(0755)), int64(12345), "root", false)),
|
||||
ensureDirCreation(fsnode.NewDirectory("/etc/dir3", nil, nil, nil, true)),
|
||||
},
|
||||
expected: []*Stage{
|
||||
NewMkdirStage(&MkdirStageOptions{
|
||||
Paths: []MkdirStagePath{
|
||||
{
|
||||
Path: "/etc/dir",
|
||||
Parents: true,
|
||||
ExistOk: false,
|
||||
},
|
||||
{
|
||||
Path: "/etc/dir2",
|
||||
ExistOk: false,
|
||||
},
|
||||
{
|
||||
Path: "/etc/dir3",
|
||||
Parents: true,
|
||||
ExistOk: true,
|
||||
},
|
||||
},
|
||||
}),
|
||||
NewChmodStage(&ChmodStageOptions{
|
||||
Items: map[string]ChmodStagePathOptions{
|
||||
"/etc/dir": {
|
||||
Mode: fmt.Sprintf("%#o", os.FileMode(0700)),
|
||||
},
|
||||
"/etc/dir2": {
|
||||
Mode: fmt.Sprintf("%#o", os.FileMode(0755)),
|
||||
},
|
||||
},
|
||||
}),
|
||||
NewChownStage(&ChownStageOptions{
|
||||
Items: map[string]ChownStagePathOptions{
|
||||
"/etc/dir": {
|
||||
User: "root",
|
||||
Group: int64(12345),
|
||||
},
|
||||
"/etc/dir2": {
|
||||
User: int64(12345),
|
||||
Group: "root",
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
gotStages := GenDirectoryNodesStages(tc.dirs)
|
||||
assert.EqualValues(t, tc.expected, gotStages)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
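Aside (not part of the diff): all of the cases in the removed test above go through the same two fsnode constructors, whose positional arguments are easy to misread. Below is a minimal sketch of just those calls, kept on the import paths the removed test still uses; the /etc/example paths are illustrative only.

package osbuild

import (
	"os"
	"testing"

	"github.com/osbuild/osbuild-composer/internal/common"
	"github.com/osbuild/osbuild-composer/internal/fsnode"
)

func TestFsnodeConstructorShapes(t *testing.T) {
	// NewFile(path, mode, user, group, data): user and group accept either
	// a name (string) or a numeric ID (int64), as the cases above show.
	file, err := fsnode.NewFile("/etc/example", common.ToPtr(os.FileMode(0644)), "root", int64(12345), []byte("hello world"))
	if err != nil || file == nil {
		t.Fatal(err)
	}

	// NewDirectory(path, mode, user, group, <parents flag>): the trailing
	// bool is what the expected mkdir stage above maps to Parents: true.
	dir, err := fsnode.NewDirectory("/etc/example.d", common.ToPtr(os.FileMode(0755)), int64(12345), "root", true)
	if err != nil || dir == nil {
		t.Fatal(err)
	}
}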
@@ -1,52 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewFSTabStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.fstab",
|
||||
Options: &FSTabStageOptions{},
|
||||
}
|
||||
actualStage := NewFSTabStage(&FSTabStageOptions{})
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestAddFilesystem(t *testing.T) {
|
||||
options := &FSTabStageOptions{}
|
||||
filesystems := []*FSTabEntry{
|
||||
{
|
||||
UUID: "76a22bf4-f153-4541-b6c7-0332c0dfaeac",
|
||||
VFSType: "ext4",
|
||||
Path: "/",
|
||||
Options: "defaults",
|
||||
Freq: 1,
|
||||
PassNo: 1,
|
||||
},
|
||||
{
|
||||
UUID: "bba22bf4-f153-4541-b6c7-0332c0dfaeac",
|
||||
VFSType: "xfs",
|
||||
Path: "/home",
|
||||
Options: "defaults",
|
||||
Freq: 1,
|
||||
PassNo: 2,
|
||||
},
|
||||
{
|
||||
UUID: "cca22bf4-f153-4541-b6c7-0332c0dfaeac",
|
||||
VFSType: "xfs",
|
||||
Path: "/var",
|
||||
Options: "defaults",
|
||||
Freq: 1,
|
||||
PassNo: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for i, fs := range filesystems {
|
||||
options.AddFilesystem(fs.UUID, fs.VFSType, fs.Path, fs.Options, fs.Freq, fs.PassNo)
|
||||
assert.Equal(t, options.FileSystems[i], fs)
|
||||
}
|
||||
assert.Equal(t, len(filesystems), len(options.FileSystems))
|
||||
}
|
||||
|
|
@@ -1,84 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewGcpGuestAgentConfigOptionsValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
options GcpGuestAgentConfigOptions
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
name: "empty-config",
|
||||
options: GcpGuestAgentConfigOptions{},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "empty-config",
|
||||
options: GcpGuestAgentConfigOptions{
|
||||
ConfigScope: GcpGuestAgentConfigScopeDistro,
|
||||
Config: &GcpGuestAgentConfig{},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-ConfigScope",
|
||||
options: GcpGuestAgentConfigOptions{
|
||||
ConfigScope: "incorrect",
|
||||
Config: &GcpGuestAgentConfig{
|
||||
Accounts: &GcpGuestAgentConfigAccounts{
|
||||
Groups: []string{"group1", "group2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "valid-data",
|
||||
options: GcpGuestAgentConfigOptions{
|
||||
ConfigScope: GcpGuestAgentConfigScopeDistro,
|
||||
Config: &GcpGuestAgentConfig{
|
||||
Accounts: &GcpGuestAgentConfigAccounts{
|
||||
Groups: []string{"group1", "group2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
err: false,
|
||||
},
|
||||
}
|
||||
for idx, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.err {
|
||||
assert.Errorf(t, tt.options.validate(), "%q didn't return an error [idx: %d]", tt.name, idx)
|
||||
assert.Panics(t, func() { NewGcpGuestAgentConfigStage(&tt.options) })
|
||||
} else {
|
||||
assert.NoErrorf(t, tt.options.validate(), "%q returned an error [idx: %d]", tt.name, idx)
|
||||
assert.NotPanics(t, func() { NewGcpGuestAgentConfigStage(&tt.options) })
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestNewGcpGuestAgentConfigStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.gcp.guest-agent.conf",
|
||||
Options: &GcpGuestAgentConfigOptions{
|
||||
Config: &GcpGuestAgentConfig{
|
||||
Accounts: &GcpGuestAgentConfigAccounts{
|
||||
Groups: []string{"group1", "group2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
actualStage := NewGcpGuestAgentConfigStage(&GcpGuestAgentConfigOptions{
|
||||
Config: &GcpGuestAgentConfig{
|
||||
Accounts: &GcpGuestAgentConfigAccounts{
|
||||
Groups: []string{"group1", "group2"},
|
||||
},
|
||||
},
|
||||
})
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
|
@@ -1,16 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewGroupsStage(t *testing.T) {
	expectedStage := &Stage{
		Type:    "org.osbuild.groups",
		Options: &GroupsStageOptions{},
	}
	actualStage := NewGroupsStage(&GroupsStageOptions{})
	assert.Equal(t, expectedStage, actualStage)
}
@@ -1,113 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
)
|
||||
|
||||
func TestNewGrub2InstStage(t *testing.T) {
|
||||
options := Grub2InstStageOptions{
|
||||
Filename: "img.raw",
|
||||
Platform: "i386-pc",
|
||||
Location: 2048,
|
||||
Core: CoreMkImage{
|
||||
Type: "mkimage",
|
||||
PartLabel: "gpt",
|
||||
Filesystem: "ext4",
|
||||
},
|
||||
Prefix: PrefixPartition{
|
||||
Type: "partition",
|
||||
PartLabel: "gpt",
|
||||
Number: 1,
|
||||
Path: "/boot/grub2",
|
||||
},
|
||||
SectorSize: common.ToPtr(uint64(512)),
|
||||
}
|
||||
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.grub2.inst",
|
||||
Options: &options,
|
||||
}
|
||||
|
||||
actualStage := NewGrub2InstStage(&options)
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestMarshalGrub2InstStage(t *testing.T) {
|
||||
goodOptions := func() Grub2InstStageOptions {
|
||||
return Grub2InstStageOptions{
|
||||
Filename: "img.raw",
|
||||
Platform: "i386-pc",
|
||||
Location: 2048,
|
||||
Core: CoreMkImage{
|
||||
Type: "mkimage",
|
||||
PartLabel: "gpt",
|
||||
Filesystem: "ext4",
|
||||
},
|
||||
Prefix: PrefixPartition{
|
||||
Type: "partition",
|
||||
PartLabel: "gpt",
|
||||
Number: 1,
|
||||
Path: "/boot/grub2",
|
||||
},
|
||||
SectorSize: common.ToPtr(uint64(512)),
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
options := goodOptions()
|
||||
|
||||
stage := NewGrub2InstStage(&options)
|
||||
_, err := json.Marshal(stage)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
{
|
||||
options := goodOptions()
|
||||
options.Core.Type = "notmkimage"
|
||||
|
||||
stage := NewGrub2InstStage(&options)
|
||||
_, err := json.Marshal(stage)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
{
|
||||
options := goodOptions()
|
||||
options.Core.PartLabel = "notgpt"
|
||||
|
||||
stage := NewGrub2InstStage(&options)
|
||||
_, err := json.Marshal(stage)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
{
|
||||
options := goodOptions()
|
||||
options.Core.Filesystem = "apfs"
|
||||
|
||||
stage := NewGrub2InstStage(&options)
|
||||
_, err := json.Marshal(stage)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
{
|
||||
options := goodOptions()
|
||||
options.Prefix.Type = "notpartition"
|
||||
|
||||
stage := NewGrub2InstStage(&options)
|
||||
_, err := json.Marshal(stage)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
{
|
||||
options := goodOptions()
|
||||
options.Prefix.PartLabel = "notdos"
|
||||
|
||||
stage := NewGrub2InstStage(&options)
|
||||
_, err := json.Marshal(stage)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,34 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestGrub2LegacyStage_Validation(t *testing.T) {

	options := GRUB2LegacyStageOptions{}

	err := options.validate()
	assert.Error(t, err)

	options.RootFS.Device = "/dev/sda"
	err = options.validate()
	assert.Error(t, err)

	prod := GRUB2Product{
		Name:    "Fedora",
		Nick:    "Foo",
		Version: "1",
	}
	options.Entries = MakeGrub2MenuEntries("id", "kernel", prod, false)
	err = options.validate()
	assert.Error(t, err)

	options.BIOS = &GRUB2BIOS{
		Platform: "i386-pc",
	}
	err = options.validate()
	assert.NoError(t, err)
}
@@ -1,16 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewGRUB2Stage(t *testing.T) {
	expectedStage := &Stage{
		Type:    "org.osbuild.grub2",
		Options: &GRUB2StageOptions{},
	}
	actualStage := NewGRUB2Stage(&GRUB2StageOptions{})
	assert.Equal(t, expectedStage, actualStage)
}
@@ -1,16 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewHostnameStage(t *testing.T) {
	expectedStage := &Stage{
		Type:    "org.osbuild.hostname",
		Options: &HostnameStageOptions{},
	}
	actualStage := NewHostnameStage(&HostnameStageOptions{})
	assert.Equal(t, expectedStage, actualStage)
}
@@ -1,32 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestInlineSource(t *testing.T) {

	assert := assert.New(t)

	tests := []struct {
		data    string
		hash    string
		encoded string
	}{
		{"42\n", "sha256:084c799cd551dd1d8d5c5f9a5d593b2e931f5e36122ee5c793c1d08a19839cc0", "NDIK"},
		{"Hallo Welt\n", "sha256:f950375066d74787f31cbd8f9f91c71819357cad243fb9d4a0d9ef4fa76709e0", "SGFsbG8gV2VsdAo="},
	}

	ils := NewInlineSource()

	for _, tt := range tests {
		hash := ils.AddItem(tt.data)
		assert.Equal(tt.hash, hash)

		item := ils.Items[hash]
		assert.Equal(item.Data, tt.encoded)
	}

}
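Aside (not part of the diff): the expected values in the removed test above pair each input with a "sha256:"-prefixed digest of the raw data and a base64 payload ("42\n" → "NDIK"). A self-contained sketch that reproduces the first vector with only the standard library, handy when checking such fixtures by hand:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	data := "42\n"

	// Item key: "sha256:" + hex digest of the raw bytes.
	hash := fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(data)))

	// Item payload: base64 of the same raw bytes.
	encoded := base64.StdEncoding.EncodeToString([]byte(data))

	fmt.Println(hash)    // sha256:084c799cd551dd1d8d5c5f9a5d593b2e931f5e36122ee5c793c1d08a19839cc0
	fmt.Println(encoded) // NDIK
}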
@@ -1,16 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewKernelCmdlineStage(t *testing.T) {
	expectedStage := &Stage{
		Type:    "org.osbuild.kernel-cmdline",
		Options: &KernelCmdlineStageOptions{},
	}
	actualStage := NewKernelCmdlineStage(&KernelCmdlineStageOptions{})
	assert.Equal(t, expectedStage, actualStage)
}
@@ -1,37 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewKeymapStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.keymap",
|
||||
Options: &KeymapStageOptions{},
|
||||
}
|
||||
actualStage := NewKeymapStage(&KeymapStageOptions{})
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestKeymapStage_MarshalJSON_Invalid(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
options KeymapStageOptions
|
||||
}{
|
||||
{
|
||||
name: "x11-keymap-empty-layout-list",
|
||||
options: KeymapStageOptions{
|
||||
X11Keymap: &X11KeymapOptions{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for idx, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotBytes, err := json.Marshal(tt.options)
|
||||
assert.NotNilf(t, err, "json.Marshal() didn't return an error, but: %s [idx: %d]", string(gotBytes), idx)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,16 +0,0 @@
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewLocaleStage(t *testing.T) {
	expectedStage := &Stage{
		Type:    "org.osbuild.locale",
		Options: &LocaleStageOptions{},
	}
	actualStage := NewLocaleStage(&LocaleStageOptions{})
	assert.Equal(t, expectedStage, actualStage)
}
@@ -1,60 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewLVM2CreateStageValidation(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
okOptions := LVM2CreateStageOptions{
|
||||
Volumes: []LogicalVolume{
|
||||
{
|
||||
Name: "a_volume_name",
|
||||
Size: "",
|
||||
},
|
||||
{
|
||||
Name: "good-volume.name",
|
||||
Size: "10G",
|
||||
},
|
||||
{
|
||||
Name: "99-luft+volumes",
|
||||
Size: "10737418240",
|
||||
},
|
||||
{
|
||||
Name: "++",
|
||||
Size: "1337",
|
||||
},
|
||||
{
|
||||
Name: "_",
|
||||
Size: "0",
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.NoError(okOptions.validate())
|
||||
|
||||
badVolumes := []LogicalVolume{
|
||||
{
|
||||
Name: "!bad-bad-volume-name",
|
||||
Size: "1337",
|
||||
},
|
||||
{
|
||||
Name: "even worse",
|
||||
},
|
||||
{
|
||||
Name: "-",
|
||||
},
|
||||
}
|
||||
|
||||
for _, vol := range badVolumes {
|
||||
options := LVM2CreateStageOptions{
|
||||
Volumes: []LogicalVolume{vol},
|
||||
}
|
||||
assert.Error(options.validate(), vol.Name)
|
||||
}
|
||||
|
||||
empty := LVM2CreateStageOptions{}
|
||||
assert.Error(empty.validate())
|
||||
}
|
||||
|
|
@@ -1 +0,0 @@
package osbuild
@@ -1,55 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewLVM2MetadataStageValidation(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
okOptions := []LVM2MetadataStageOptions{
|
||||
{
|
||||
VGName: "a_volume_name",
|
||||
CreationTime: "0",
|
||||
},
|
||||
{
|
||||
VGName: "good-volume.name",
|
||||
CreationTime: "1629282647",
|
||||
},
|
||||
{
|
||||
VGName: "99-luft+volumes",
|
||||
CreationTime: "2147483648",
|
||||
},
|
||||
{
|
||||
VGName: "++",
|
||||
CreationTime: "42",
|
||||
},
|
||||
{
|
||||
VGName: "_",
|
||||
CreationTime: "4294967297",
|
||||
},
|
||||
}
|
||||
for _, o := range okOptions {
|
||||
assert.NoError(o.validate(), o)
|
||||
}
|
||||
|
||||
badOptions := []LVM2MetadataStageOptions{
|
||||
{
|
||||
VGName: "ok-name-bad-time",
|
||||
CreationTime: "-10",
|
||||
},
|
||||
{
|
||||
VGName: "!bad-name",
|
||||
CreationTime: "1629282647",
|
||||
},
|
||||
{
|
||||
VGName: "worse.time",
|
||||
CreationTime: "TIME",
|
||||
},
|
||||
}
|
||||
for _, o := range badOptions {
|
||||
assert.Error(o.validate(), o)
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,73 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/osbuild/osbuild-composer/internal/common"
|
||||
)
|
||||
|
||||
func TestNewMkfsStage(t *testing.T) {
|
||||
devOpts := LoopbackDeviceOptions{
|
||||
Filename: "file.img",
|
||||
Start: 0,
|
||||
Size: 1024,
|
||||
SectorSize: common.ToPtr(uint64(512)),
|
||||
}
|
||||
device := NewLoopbackDevice(&devOpts)
|
||||
|
||||
devices := map[string]Device{
|
||||
"device": *device,
|
||||
}
|
||||
|
||||
btrfsOptions := &MkfsBtrfsStageOptions{
|
||||
UUID: uuid.New().String(),
|
||||
Label: "test",
|
||||
}
|
||||
mkbtrfs := NewMkfsBtrfsStage(btrfsOptions, devices)
|
||||
mkbtrfsExpected := &Stage{
|
||||
Type: "org.osbuild.mkfs.btrfs",
|
||||
Options: btrfsOptions,
|
||||
Devices: Devices{"device": *device},
|
||||
}
|
||||
assert.Equal(t, mkbtrfsExpected, mkbtrfs)
|
||||
|
||||
ext4Options := &MkfsExt4StageOptions{
|
||||
UUID: uuid.New().String(),
|
||||
Label: "test",
|
||||
}
|
||||
mkext4 := NewMkfsExt4Stage(ext4Options, devices)
|
||||
mkext4Expected := &Stage{
|
||||
Type: "org.osbuild.mkfs.ext4",
|
||||
Options: ext4Options,
|
||||
Devices: Devices{"device": *device},
|
||||
}
|
||||
assert.Equal(t, mkext4Expected, mkext4)
|
||||
|
||||
fatOptions := &MkfsFATStageOptions{
|
||||
VolID: "7B7795E7",
|
||||
Label: "test",
|
||||
FATSize: common.ToPtr(12),
|
||||
}
|
||||
mkfat := NewMkfsFATStage(fatOptions, devices)
|
||||
mkfatExpected := &Stage{
|
||||
Type: "org.osbuild.mkfs.fat",
|
||||
Options: fatOptions,
|
||||
Devices: Devices{"device": *device},
|
||||
}
|
||||
assert.Equal(t, mkfatExpected, mkfat)
|
||||
|
||||
xfsOptions := &MkfsXfsStageOptions{
|
||||
UUID: uuid.New().String(),
|
||||
Label: "test",
|
||||
}
|
||||
mkxfs := NewMkfsXfsStage(xfsOptions, devices)
|
||||
mkxfsExpected := &Stage{
|
||||
Type: "org.osbuild.mkfs.xfs",
|
||||
Options: xfsOptions,
|
||||
Devices: Devices{"device": *device},
|
||||
}
|
||||
assert.Equal(t, mkxfsExpected, mkxfs)
|
||||
}
|
||||
|
|
@@ -1,147 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewModprobeStage(t *testing.T) {
|
||||
stageOptions := &ModprobeStageOptions{
|
||||
Filename: "testing.conf",
|
||||
Commands: ModprobeConfigCmdList{
|
||||
NewModprobeConfigCmdBlacklist("testing_module"),
|
||||
},
|
||||
}
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.modprobe",
|
||||
Options: stageOptions,
|
||||
}
|
||||
actualStage := NewModprobeStage(stageOptions)
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestModprobeStageOptionsValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
options ModprobeStageOptions
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
name: "empty-options",
|
||||
options: ModprobeStageOptions{},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "no-commands",
|
||||
options: ModprobeStageOptions{
|
||||
Filename: "disallow-modules.conf",
|
||||
Commands: ModprobeConfigCmdList{},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "no-filename",
|
||||
options: ModprobeStageOptions{
|
||||
Commands: ModprobeConfigCmdList{NewModprobeConfigCmdBlacklist("module_name")},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "incorrect-filename",
|
||||
options: ModprobeStageOptions{
|
||||
Filename: "disallow-modules.ccoonnff",
|
||||
Commands: ModprobeConfigCmdList{NewModprobeConfigCmdBlacklist("module_name")},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "good-options",
|
||||
options: ModprobeStageOptions{
|
||||
Filename: "disallow-modules.conf",
|
||||
Commands: ModprobeConfigCmdList{NewModprobeConfigCmdBlacklist("module_name")},
|
||||
},
|
||||
err: false,
|
||||
},
|
||||
}
|
||||
for idx, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.err {
|
||||
assert.Errorf(t, tt.options.validate(), "%q didn't return an error [idx: %d]", tt.name, idx)
|
||||
assert.Panics(t, func() { NewModprobeStage(&tt.options) })
|
||||
} else {
|
||||
assert.NoErrorf(t, tt.options.validate(), "%q returned an error [idx: %d]", tt.name, idx)
|
||||
assert.NotPanics(t, func() { NewModprobeStage(&tt.options) })
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewModprobeConfigCmdBlacklist(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modulename string
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
name: "empty-modulename",
|
||||
modulename: "",
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "non-empty-modulename",
|
||||
modulename: "module_name",
|
||||
err: false,
|
||||
},
|
||||
}
|
||||
for idx, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.err {
|
||||
assert.Errorf(t, ModprobeConfigCmdBlacklist{Command: "blacklist", Modulename: tt.modulename}.validate(), "%q didn't return an error [idx: %d]", tt.name, idx)
|
||||
assert.Panics(t, func() { NewModprobeConfigCmdBlacklist(tt.modulename) })
|
||||
} else {
|
||||
assert.NoErrorf(t, ModprobeConfigCmdBlacklist{Command: "blacklist", Modulename: tt.modulename}.validate(), "%q returned an error [idx: %d]", tt.name, idx)
|
||||
assert.NotPanics(t, func() { NewModprobeConfigCmdBlacklist(tt.modulename) })
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewModprobeConfigCmdInstall(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modulename string
|
||||
cmdline string
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
name: "empty-modulename",
|
||||
modulename: "",
|
||||
cmdline: "/usr/bin/true",
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "empty-cmdline",
|
||||
modulename: "module_name",
|
||||
cmdline: "",
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "non-empty-modulename",
|
||||
modulename: "module_name",
|
||||
cmdline: "/usr/bin/true",
|
||||
err: false,
|
||||
},
|
||||
}
|
||||
for idx, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.err {
|
||||
assert.Errorf(t, ModprobeConfigCmdInstall{Command: "install", Modulename: tt.modulename, Cmdline: tt.cmdline}.validate(), "%q didn't return an error [idx: %d]", tt.name, idx)
|
||||
assert.Panics(t, func() { NewModprobeConfigCmdInstall(tt.modulename, tt.cmdline) })
|
||||
} else {
|
||||
assert.NoErrorf(t, ModprobeConfigCmdInstall{Command: "install", Modulename: tt.modulename, Cmdline: tt.cmdline}.validate(), "%q returned an error [idx: %d]", tt.name, idx)
|
||||
assert.NotPanics(t, func() { NewModprobeConfigCmdInstall(tt.modulename, tt.cmdline) })
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,55 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewMounts(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
{ // btrfs
|
||||
actual := NewBtrfsMount("btrfs", "/dev/sda1", "/mnt/btrfs")
|
||||
expected := &Mount{
|
||||
Name: "btrfs",
|
||||
Type: "org.osbuild.btrfs",
|
||||
Source: "/dev/sda1",
|
||||
Target: "/mnt/btrfs",
|
||||
}
|
||||
assert.Equal(expected, actual)
|
||||
}
|
||||
|
||||
{ // ext4
|
||||
actual := NewExt4Mount("ext4", "/dev/sda2", "/mnt/ext4")
|
||||
expected := &Mount{
|
||||
Name: "ext4",
|
||||
Type: "org.osbuild.ext4",
|
||||
Source: "/dev/sda2",
|
||||
Target: "/mnt/ext4",
|
||||
}
|
||||
assert.Equal(expected, actual)
|
||||
}
|
||||
|
||||
{ // fat
|
||||
actual := NewFATMount("fat", "/dev/sda3", "/mnt/fat")
|
||||
expected := &Mount{
|
||||
Name: "fat",
|
||||
Type: "org.osbuild.fat",
|
||||
Source: "/dev/sda3",
|
||||
Target: "/mnt/fat",
|
||||
}
|
||||
assert.Equal(expected, actual)
|
||||
}
|
||||
|
||||
{ // xfs
|
||||
actual := NewXfsMount("xfs", "/dev/sda4", "/mnt/xfs")
|
||||
expected := &Mount{
|
||||
Name: "xfs",
|
||||
Type: "org.osbuild.xfs",
|
||||
Source: "/dev/sda4",
|
||||
Target: "/mnt/xfs",
|
||||
}
|
||||
assert.Equal(expected, actual)
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,105 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestOCIArchiveStage(t *testing.T) {
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.oci-archive",
|
||||
Options: &OCIArchiveStageOptions{},
|
||||
Inputs: &OCIArchiveStageInputs{},
|
||||
}
|
||||
actualStage := NewOCIArchiveStage(&OCIArchiveStageOptions{}, &OCIArchiveStageInputs{})
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestOCIArchiveInputs(t *testing.T) {
|
||||
exp := `{
|
||||
"base": {
|
||||
"type": "org.osbuild.oci-archive",
|
||||
"origin":"org.osbuild.pipeline",
|
||||
"references": ["name:container-tree"]
|
||||
},
|
||||
"layer.1": {
|
||||
"type": "org.osbuild.tree",
|
||||
"origin": "org.osbuild.pipeline",
|
||||
"references": ["name:container-ostree"]
|
||||
},
|
||||
"layer.2": {
|
||||
"type": "org.osbuild.tree",
|
||||
"origin": "org.osbuild.pipeline",
|
||||
"references": ["name:container-ostree2"]
|
||||
}
|
||||
}`
|
||||
inputs := new(OCIArchiveStageInputs)
|
||||
base := &TreeInput{
|
||||
References: []string{
|
||||
"name:container-tree",
|
||||
},
|
||||
}
|
||||
base.Type = "org.osbuild.oci-archive"
|
||||
base.Origin = "org.osbuild.pipeline"
|
||||
|
||||
layer1 := TreeInput{
|
||||
References: []string{
|
||||
"name:container-ostree",
|
||||
},
|
||||
}
|
||||
layer1.Type = "org.osbuild.tree"
|
||||
layer1.Origin = "org.osbuild.pipeline"
|
||||
layer2 := TreeInput{
|
||||
References: []string{
|
||||
"name:container-ostree2",
|
||||
},
|
||||
}
|
||||
layer2.Type = "org.osbuild.tree"
|
||||
layer2.Origin = "org.osbuild.pipeline"
|
||||
|
||||
inputs.Base = base
|
||||
inputs.Layers = []TreeInput{layer1, layer2}
|
||||
|
||||
data, err := json.Marshal(inputs)
|
||||
assert.NoError(t, err)
|
||||
assert.JSONEq(t, exp, string(data))
|
||||
|
||||
inputsRead := new(OCIArchiveStageInputs)
|
||||
err = json.Unmarshal([]byte(exp), inputsRead)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, inputs, inputsRead)
|
||||
}
|
||||
|
||||
func TestOCIArchiveInputsErrors(t *testing.T) {
|
||||
noBase := `{
|
||||
"layer.10": {
|
||||
"type": "org.osbuild.tree",
|
||||
"origin": "org.osbuild.pipeline",
|
||||
"references": ["name:container-ostree"]
|
||||
},
|
||||
"layer.2": {
|
||||
"type": "org.osbuild.tree",
|
||||
"origin": "org.osbuild.pipeline",
|
||||
"references": ["name:container-ostree2"]
|
||||
}
|
||||
}`
|
||||
|
||||
inputsRead := new(OCIArchiveStageInputs)
|
||||
assert.Error(t, json.Unmarshal([]byte(noBase), inputsRead))
|
||||
|
||||
invalidKey := `{
|
||||
"base": {
|
||||
"type": "org.osbuild.oci-archive",
|
||||
"origin":"org.osbuild.pipeline",
|
||||
"references": ["name:container-tree"]
|
||||
},
|
||||
"not-a-layer": {
|
||||
"type": "org.osbuild.tree",
|
||||
"origin": "org.osbuild.pipeline",
|
||||
"references": ["name:container-ostree2"]
|
||||
}
|
||||
}`
|
||||
assert.Error(t, json.Unmarshal([]byte(invalidKey), inputsRead))
|
||||
}
|
||||
|
|
@@ -1,28 +0,0 @@
// Package osbuild provides primitives for representing and (un)marshalling
// OSBuild types.
package osbuild

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestPipeline_AddStage(t *testing.T) {
	expectedPipeline := &Pipeline{
		Build: "name:build",
		Stages: []*Stage{
			{
				Type: "org.osbuild.rpm",
			},
		},
	}
	actualPipeline := &Pipeline{
		Build: "name:build",
	}
	actualPipeline.AddStage(&Stage{
		Type: "org.osbuild.rpm",
	})
	assert.Equal(t, expectedPipeline, actualPipeline)
	assert.Equal(t, 1, len(actualPipeline.Stages))
}
@@ -1,85 +0,0 @@
|
|||
package osbuild
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewOscapRemediationStage(t *testing.T) {
|
||||
stageOptions := &OscapRemediationStageOptions{DataDir: "/var/tmp", Config: OscapConfig{
|
||||
Datastream: "test_stream",
|
||||
ProfileID: "test_profile",
|
||||
}}
|
||||
expectedStage := &Stage{
|
||||
Type: "org.osbuild.oscap.remediation",
|
||||
Options: stageOptions,
|
||||
}
|
||||
actualStage := NewOscapRemediationStage(stageOptions)
|
||||
assert.Equal(t, expectedStage, actualStage)
|
||||
}
|
||||
|
||||
func TestOscapRemediationStageOptionsValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
options OscapRemediationStageOptions
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
name: "empty-options",
|
||||
options: OscapRemediationStageOptions{},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "empty-datastream",
|
||||
options: OscapRemediationStageOptions{
|
||||
Config: OscapConfig{
|
||||
ProfileID: "test-profile",
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "empty-profile-id",
|
||||
options: OscapRemediationStageOptions{
|
||||
Config: OscapConfig{
|
||||
Datastream: "test-datastream",
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "invalid-verbosity-level",
|
||||
options: OscapRemediationStageOptions{
|
||||
Config: OscapConfig{
|
||||
Datastream: "test-datastream",
|
||||
ProfileID: "test-profile",
|
||||
VerboseLevel: "FAKE",
|
||||
},
|
||||
},
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
name: "valid-data",
|
||||
options: OscapRemediationStageOptions{
|
||||
Config: OscapConfig{
|
||||
Datastream: "test-datastream",
|
||||
ProfileID: "test-profile",
|
||||
VerboseLevel: "INFO",
|
||||
},
|
||||
},
|
||||
err: false,
|
||||
},
|
||||
}
|
||||
for idx, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.err {
|
||||
assert.Errorf(t, tt.options.Config.validate(), "%q didn't return an error [idx: %d]", tt.name, idx)
|
||||
assert.Panics(t, func() { NewOscapRemediationStage(&tt.options) })
|
||||
} else {
|
||||
assert.NoErrorf(t, tt.options.Config.validate(), "%q returned an error [idx: %d]", tt.name, idx)
|
||||
assert.NotPanics(t, func() { NewOscapRemediationStage(&tt.options) })
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.