Add STAGE_DESC, STAGE_INFO, and STAGE_OPTS to stages

This commit adds semi-structured documentation to all osbuild stages and
assemblers. The variables added work like this:

* STAGE_DESC: Short description of the stage.
* STAGE_INFO: Longer documentation of the stage, including expected
              behavior, required binaries, etc.
* STAGE_OPTS: A JSON Schema describing the stage's expected/allowed
              options. (see https://json-schema.org/ for details)

It also has a little unit test to check the stage info - specifically:

1. All (executable) stages in stages/* and assemblers/* must define strings named
   STAGE_DESC, STAGE_INFO, and STAGE_OPTS
2. The contents of STAGE_OPTS must be valid JSON once wrapped in '{' and '}'
   (see the sketch after this list)
3. STAGE_OPTS, if non-empty, should have a "properties" object
4. If STAGE_OPTS lists "required" properties, those must be present
   in the "properties" object.

The test is *not* included in .travis.yml because I'm not sure we want
to fail the build for this, but it's still helpful as a lint-style
check.
Author: Will Woods, 2019-11-04 18:10:06 -05:00 (committed by Tom Gundersen)
parent 9d4b526a25
commit 6164b38fb9
25 changed files with 893 additions and 0 deletions


@@ -3,6 +3,12 @@
import json
import sys
STAGE_DESC = "No-op assembler"
STAGE_INFO = """
No-op assembler. Produces no output, just prints a JSON dump of its options
and then exits.
"""
STAGE_OPTS = ""
def main(_tree, _output_dir, options):
print("Not doing anything with these options:", json.dumps(options))


@@ -10,6 +10,48 @@ import sys
import tempfile
import osbuild.remoteloop as remoteloop
STAGE_DESC = "Assemble a bootable partitioned disk image with qemu-img"
STAGE_INFO = """
Assemble a bootable partitioned disk image using `qemu-img`.
Creates a sparse MBR-partitioned disk image of the given `size`, with a single
bootable partition containing an ext4 root filesystem.
Installs GRUB2 (using the buildhost's `/usr/lib/grub/i386-pc/boot.img` etc.) as
the bootloader.
Copies the tree contents into the root filesystem and then converts the raw
sparse image into the format requested with the `format` option.
Buildhost commands used: `truncate`, `mount`, `umount`, `sfdisk`,
`grub2-mkimage`, `mkfs.ext4`, `qemu-img`.
"""
STAGE_OPTS = """
"required": ["format", "filename", "ptuuid", "root_fs_uuid", "size"],
"properties": {
"format": {
"description": "Image file format to use",
"type": "string",
"enum": ["raw", "qcow2", "vdi", "vmdk"]
},
"filename": {
"description": "Image filename",
"type": "string"
},
"ptuuid": {
"description": "UUID for the disk image's partition table",
"type": "string"
},
"root_fs_uuid": {
"description": "UUID for the root filesystem",
"type": "string"
},
"size": {
"description": "Virtual disk size",
"type": "string"
}
}
"""
@contextlib.contextmanager
def mount(source):
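
For illustration, a hypothetical options object that would satisfy the schema
above (the filename, partition-table id, and size are invented; the root
filesystem UUID simply reuses the example value from the sibling schemas):

    options = {
        "format": "qcow2",                                       # one of: raw, qcow2, vdi, vmdk
        "filename": "disk.qcow2",
        "ptuuid": "0x14fc63d2",                                  # invented partition-table id
        "root_fs_uuid": "9c6ae55b-cf88-45b8-84e8-64990759f39d",
        "size": "3221225472",                                    # a string, per the schema
    }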


@@ -8,6 +8,44 @@ import subprocess
import sys
import osbuild.remoteloop as remoteloop
STAGE_DESC = "Assemble tree into a raw ext4 filesystem image"
STAGE_INFO = """
Assemble the tree into a raw ext4 filesystem image named `filename`, with the
UUID `root_fs_uuid`.
The image is a sparse file of the given `size`, which is created using the
`truncate(1)` command. The `size` is an integer with an optional suffix:
K,M,G,T,... (for powers of 1024) or KB,MB,GB,TB,... (powers of 1000).
NOTE: If the tree contents are larger than `size`, this assembler will fail.
On the other hand, since the image is a sparse file, the unused parts of the
image take up almost no disk space - so a 1GB tree in a 20GB image should not
use much more than 1GB disk space.
The filesystem UUID should be a standard (RFC4122) UUID, which you can
generate with uuid.uuid4() in Python, `uuidgen(1)` in a shell script, or
read from `/proc/sys/kernel/random/uuid` if your kernel provides it.
"""
STAGE_OPTS = """
"required": ["filename", "root_fs_uuid", "size"],
"properties": {
"filename": {
"description": "Raw ext4 filesystem image filename",
"type": "string"
},
"root_fs_uuid": {
"description": "UUID for the filesystem",
"type": "string",
"pattern": "^[0-9A-Za-z]{8}(-[0-9A-Za-z]{4}){3}-[0-9A-Za-z]{12}$",
"examples": ["9c6ae55b-cf88-45b8-84e8-64990759f39d"]
},
"size": {
"description": "Maximum size of the filesystem",
"type": "string",
"examples": ["500M", "20GB"]
}
}
"""
@contextlib.contextmanager
def mount(source, dest, *options):
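
For illustration, a hypothetical options object matching the schema above
(the filename is invented; the UUID is the schema's own example value):

    options = {
        "filename": "rootfs.img",
        "root_fs_uuid": "9c6ae55b-cf88-45b8-84e8-64990759f39d",
        "size": "2G",    # truncate(1)-style size; the tree contents must fit inside it
    }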


@@ -4,6 +4,37 @@ import json
import subprocess
import sys
STAGE_DESC = "Assemble a tar archive"
STAGE_INFO = """
Assembles the tree into a tar archive named `filename`.
Uses the buildhost's `tar` command, like: `tar -cf $FILENAME -C $TREE`
If the `compression` option is given, the archive will be compressed by passing
the `--{compression}` option to `tar`. (This option is non-standard and might
not work for anything other than GNU tar.)
Known options for `compression`: "bzip2", "xz", "lzip", "lzma", "lzop", "gzip".
Note that using `compression` does not add an extension to `filename`, so the
caller is responsible for making sure that `compression` and `filename` match.
Buildhost commands used: `tar` and any named `compression` program.
"""
STAGE_OPTS = """
"required": ["filename"],
"properties": {
"filename": {
"description": "Filename for tar archive",
"type": "string"
},
"compression": {
"description": "Name of compression program",
"type": "string",
"enum": ["bzip2", "xz", "lzip", "lzma", "lzop", "gzip"]
}
}
"""
def main(tree, output_dir, options):
filename = options["filename"]
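
A hypothetical options object for this assembler (the filename is invented);
note that the compressed-file extension has to be chosen by the caller:

    options = {
        "filename": "image.tar.xz",
        "compression": "xz",    # optional; should agree with the extension chosen above
    }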


@@ -3,6 +3,22 @@ import json
import sys
import re
STAGE_DESC = "Configure chrony to set system time from the network"
STAGE_INFO = """
Configures `chrony` to set the system time from the given `timeservers`.
Modifies /etc/chrony.conf, removing all "server" or "pool" lines and adding
a "server" line for each server listed in `timeservers`.
"""
STAGE_OPTS = """
"properties": {
"timeservers": {
"type": "array",
"items": { "type": "string" },
"description": "Array of NTP server addresses"
}
}
"""
DELETE_LINE_REGEX = re.compile(r"(server|pool) ")
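
A hypothetical options object for this stage (the pool addresses are only
examples of NTP server names, not a recommendation):

    options = {
        "timeservers": ["0.pool.ntp.org", "1.pool.ntp.org"],
    }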


@@ -4,6 +4,23 @@ import json
import os
import sys
STAGE_DESC = "Set up an early root shell on a certain tty"
STAGE_INFO = """
Creates a systemd unit file at /etc/systemd/system/osbuild-debug-shell.service
which starts an early-boot root shell on the given `tty`.
Also symlinks the service file into /etc/systemd/system/sysinit.target.wants/.
"""
STAGE_OPTS = """
"required": ["tty"],
"properties": {
"tty": {
"type": "string",
"description": "Absolute path of the tty device to start a root shell on."
}
}
"""
def main(tree, options):
tty = options["tty"]


@@ -8,6 +8,119 @@ import subprocess
import sys
import tempfile
STAGE_DESC = "Install packages using DNF"
STAGE_INFO = """
Depsolves, downloads, and installs packages (and dependencies) using DNF.
Writes the `repos` into `/tmp/dnf.conf`, does some tree setup, and then runs
the buildhost's `dnf` command with `--installroot`, plus the following
arguments generated from the stage options:
* `--forcearch {basearch}`
* `--releasever {releasever}`
* `--setopt install_weak_deps={install_weak_deps}`
* `--config /tmp/dnf.conf`
* `--exclude {pkg}` for each item in `exclude_packages`
Also disables the "generate_completion_cache" plugin, and sets `reposdir` to ""
to ensure the buildhost's repo files are *not* being used.
To prepare the tree, this stage sets `/etc/machine-id` to "ffff..." (32 chars)
and bind-mounts `/dev`, `/sys`, and `/proc` from the buildhost into the tree.
Each repo listed in `repos` needs to have a `checksum` and at least one of
`mirrorlist`, `metalink`, or `baseurl`. If a `gpgkey` is provided, `gpgcheck`
will be turned on for that repo, and DNF will exit with an error unless every
package downloaded from that repo is signed by one of the trusted `gpgkey`s.
The provided `checksum` must start with "sha256:" and then have the hex-encoded
SHA256 of the repo's `repomd.xml` file. If the metadata for any repo has
changed and no longer matches `checksum`, this stage will fail after package
installation.
NOTE: Any pipeline that uses a repo that changes frequently (like Fedora's
"updates") will quickly become un-reproduceable. This is an unavoidable
consequence of Fedora removing "out-of-date" metadata and packages: it's
impossible to reproduce a build that requires files that have been deleted.
To quote Douglas Adams: "We apologize for the inconvenience."
After DNF finishes, this stage cleans up the tree by removing
`/etc/machine-id`, `/var/lib/systemd/random-seed`, and everything under
`/var/cache/dnf`.
Buildhost commands used: `/bin/sh`, `dnf`, `mkdir`, `mount`, `chmod`.
"""
STAGE_OPTS = """
"required": ["repos", "packages", "releasever", "basearch"],
"properties": {
"repos": {
"description": "Array of repo objects to set up",
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"metalink": {
"description": "metalink URL for this repo",
"type": "string"
},
"mirrorlist": {
"description": "mirrorlist URL for this repo",
"type": "string"
},
"baseurl": {
"description": "baseurl for this repo",
"type": "string"
},
"checksum": {
"description": "checksum for the expected repo metadata",
"type": "string",
"pattern": "sha256:[a-fA-F0-9]{32}"
},
"gpgkey": {
"description": "GPG public key contents (to check signatures)",
"type": "string"
}
},
"anyOf": [
{"required": ["checksum", "metalink"]},
{"required": ["checksum", "mirrorlist"]},
{"required": ["checksum", "baseurl"]}
]
}
},
"packages": {
"description": "List of package-specs to pass to DNF",
"type": "array",
"minItems": 1,
"items": { "type": "string" }
},
"releasever": {
"description": "DNF $releasever value",
"type": "string"
},
"basearch": {
"description": "DNF $basearch value",
"type": "string"
},
"operation": {
"description": "DNF command to use",
"type": "string",
"default": "install"
},
"install_weak_deps": {
"description": "Whether DNF should install weak deps",
"type": "boolean",
"default": true
},
"exclude_packages": {
"description": "List of package-specs to --exclude",
"type": "array",
"items": { "type": "string" },
"default": []
}
}
"""
def write_repofile(f, repoid, repo, keydir):
f.write(f"[{repoid}]\n")


@@ -4,6 +4,59 @@ import json
import subprocess
import sys
STAGE_DESC = "Configure firewall"
STAGE_INFO = """
Configure firewalld using the `firewall-offline-cmd` from inside the target.
This stage adds each of the given `ports` and `enabled_services` to the default
firewall zone using the `--port` and `--service` options, then removes the
services listed in `disabled_services` with `--remove-service`.
Ports should be specified as "portid:protocol" or "portid-portid:protocol",
where "portid" is a number (or a port name from `/etc/services`, like "ssh" or
"echo") and "protocol" is one of "tcp", "udp", "sctp", or "dccp".
Enabling or disabling a service that is already enabled or disabled will not
cause an error.
Attempting to enable/disable an unknown service name will cause this stage to
fail. Known service names are determined by the contents of firewalld's
configuration directories, usually `/{lib,etc}/firewalld/services/*.xml`, and
may vary from release to release.
WARNING: this stage uses `chroot` to run `firewall-offline-cmd` inside the
target tree, which means it may fail unexpectedly when the buildhost and target
are different arches or OSes.
"""
STAGE_OPTS = """
"properties": {
"ports": {
"description": "Ports (or port ranges) to open",
"type": "array",
"items": {
"type": "string",
"description": "A port or port range: 'portid[-portid]:protocol'",
"pattern": ".:(tcp|udp|sctp|dccp)$"
}
},
"enabled_services": {
"description": "Network services to allow in the default firewall zone",
"type": "array",
"items": {
"type": "string",
"description": "Service name (from /{lib,etc}/firewalld/services/*.xml)"
}
},
"disabled_services": {
"description": "Network services to remove from the default firewall zone",
"type": "array",
"items": {
"type": "string",
"description": "Service name (from /{lib,etc}/firewalld/services/*.xml)"
}
}
}
"""
def main(tree, options):
# Takes a list of <port|application protocol>:<transport protocol> pairs
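
A hypothetical options object showing the "portid[-portid]:protocol" form and
some firewalld service names (which services exist depends on the firewalld
configuration inside the tree):

    options = {
        "ports": ["2222:tcp", "60000-61000:udp"],
        "enabled_services": ["ssh", "https"],
        "disabled_services": ["dhcpv6-client"],
    }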


@@ -5,6 +5,18 @@ import json
import re
import sys
STAGE_DESC = "Fix paths in /boot/loader/entries"
STAGE_INFO = """
Fixes paths in /boot/loader/entries that have incorrect paths for /boot.
This happens because some boot loader config tools (e.g. grub2-mkrelpath)
examine /proc/self/mountinfo to find the "real" path to /boot, and find the
path to the osbuild tree - which won't be valid at boot time for this image.
This stage reads and (re)writes all .conf files in /boot/loader/entries.
"""
STAGE_OPTS = ""
def main(tree, _options):
"""Fix broken paths in /boot/loader/entries.


@@ -3,6 +3,57 @@
import json
import sys
STAGE_DESC = "Create /etc/fstab entries for filesystems"
STAGE_INFO = """
Create /etc/fstab entries for the given `filesystems`.
Each filesystem item must have at least a `uuid` and a `path` (mount point).
This stage replaces /etc/fstab, removing any existing entries.
"""
STAGE_OPTS = """
"required": ["filesystems"],
"properties": {
"filesystems": {
"type": "array",
"description": "array of filesystem objects",
"items": {
"type": "object",
"required": ["uuid", "path"],
"properties": {
"uuid": {
"description": "Filesystem UUID",
"type": "string"
},
"path": {
"description": "Filesystem mountpoint",
"type": "string"
},
"vfs_type": {
"description": "Filesystem type",
"type": "string",
"default": "none"
},
"options": {
"description": "Filesystem options (comma-separated)",
"type": "string",
"default": "defaults"
},
"freq": {
"description": "dump(8) period in days",
"type": "number",
"default": 0
},
"passno": {
"description": "pass number on parallel fsck(8)",
"type": "number",
"default": 0
}
}
}
}
}
"""
def main(tree, options):
filesystems = options["filesystems"]
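
For illustration, a hypothetical `filesystems` list for a single root
filesystem (the UUID reuses the example value from the other schemas; it would
normally match the UUID given to the filesystem assembler):

    options = {
        "filesystems": [
            {
                "uuid": "9c6ae55b-cf88-45b8-84e8-64990759f39d",
                "path": "/",
                "vfs_type": "ext4",
                "options": "defaults",
                "freq": 1,
                "passno": 1,
            }
        ]
    }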


@@ -3,6 +3,35 @@ import json
import subprocess
import sys
STAGE_DESC = "Create group accounts"
STAGE_INFO = """
Create group accounts, optionally assigning them static GIDs.
Runs `groupadd` from the buildhost to create the groups listed in `groups`.
If no `gid` is given, `groupadd` will choose one.
If the specified group name or GID is already in use, this stage will fail.
"""
STAGE_OPTS = """
"properties": {
"groups": {
"type": "object",
"description": "Keys are group names, values are objects with group info",
"propertyNames": {
"pattern": "^[A-Za-z0-9_][A-Za-z0-9_-]{0,31}$"
},
"additionalProperties": {
"type": "object",
"properties": {
"gid": {
"type": "number",
"description": "GID for this group"
}
}
}
}
}
"""
def groupadd(root, name, gid=None):
arguments = []
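
A hypothetical options object for this stage (both group names and the GID are
invented; group names must not already exist in the tree):

    options = {
        "groups": {
            "builders": {},                # let groupadd choose a GID
            "deployers": {"gid": 1042},    # invented group with a fixed GID
        }
    }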


@@ -5,6 +5,48 @@ import os
import shutil
import sys
STAGE_DESC = "Configure GRUB2 bootloader and set boot options"
STAGE_INFO = """
Configure the system to use GRUB2 as the bootloader, and set boot options.
Sets the GRUB2 boot/root filesystem to `root_fs_uuid` and sets kernel boot
arguments to "root=UUID={root_fs_uuid} {kernel_opts}".
Configures GRUB2 to boot via the Boot Loader Specification
(https://systemd.io/BOOT_LOADER_SPECIFICATION), which is the default
behavior in Fedora 30 and later.
This stage will overwrite `/etc/default/grub`, `/boot/grub2/grubenv`, and
`/boot/grub2/grub.cfg`. (Leading directories will be created if not present.)
This stage also copies GRUB2 files from the buildhost into the target tree:
* `/usr/share/grub/unicode.pf2` -> `/boot/grub2/fonts/`
* `/usr/lib/grub/i386-pc/*.{mod,lst}` -> `/boot/grub2/i386-pc/`
* NOTE: skips `fdt.lst`, which is an empty file
This stage will fail if the buildhost doesn't have `/usr/lib/grub/i386-pc/`
and `/usr/share/grub/unicode.pf2`.
"""
STAGE_OPTS = """
"required": ["root_fs_uuid"],
"properties": {
"root_fs_uuid": {
"description": "UUID of the root filesystem image",
"type": "string",
"oneOf": [
{ "pattern": "^[0-9A-Za-z]{8}(-[0-9A-Za-z]{4}){3}-[0-9A-Za-z]{12}$",
"examples": ["9c6ae55b-cf88-45b8-84e8-64990759f39d"] },
{ "pattern": "^[0-9A-Za-z]{4}-[0-9A-Za-z]{4}$",
"examples": ["6699-AFB5"] }
]
},
"kernel_opts": {
"description": "Additional kernel boot options",
"type": "string",
"default": ""
}
}
"""
def main(tree, options):
root_fs_uuid = options["root_fs_uuid"]
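
A hypothetical options object for this stage (the UUID is the schema's own
example; the kernel options are invented):

    options = {
        "root_fs_uuid": "9c6ae55b-cf88-45b8-84e8-64990759f39d",
        "kernel_opts": "ro console=ttyS0",
    }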


@@ -5,6 +5,23 @@ import os
import subprocess
import sys
STAGE_DESC = "Set system hostname"
STAGE_INFO = """
Sets system hostname.
Deletes /etc/hostname if present, then runs `systemd-firstboot` from the
buildhost with `--hostname={hostname}`, which checks the validity of the
hostname and writes it to /etc/hostname.
"""
STAGE_OPTS = """
"required": ["hostname"],
"properties": {
"hostname": {
"type": "string",
"description": "hostname for the target system"
}
}
"""
def main(tree, options):
hostname = options["hostname"]


@@ -5,6 +5,24 @@ import subprocess
import sys
import os
STAGE_DESC = "Set image's default keymap"
STAGE_INFO = """
Sets the default console keyboard layout to `keymap`, like 'us' or 'de-latin1'.
Removes any existing /etc/vconsole.conf, then runs `systemd-firstboot` with the
`--keymap` option, which sets KEYMAP in /etc/vconsole.conf.
Valid keymaps are generally found in /lib/kbd/keymaps.
"""
STAGE_OPTS = """
"required": ["keymap"],
"properties": {
"keymap": {
"type": "string",
"description": "Name of keymap to use"
}
}
"""
def main(tree, options):
keymap = options["keymap"]


@@ -5,6 +5,24 @@ import subprocess
import sys
import os
STAGE_DESC = "Set system language."
STAGE_INFO = """
Sets the system language to the given `language`, which must be a valid locale
identifier, like "en_US.UTF-8".
Removes `/etc/locale.conf` and then uses `systemd-firstboot` from the buildhost,
with the `--locale` flag, which will write a new `/etc/locale.conf` in the
target system with `LANG={language}`.
"""
STAGE_OPTS = """
"required": ["language"],
"properties": {
"language": {
"type": "string",
"description": "Locale identifier (like 'en_US.UTF-8') for system LANG"
}
}
"""
def main(tree, options):
language = options["language"]


@@ -3,6 +3,13 @@
import json
import sys
STAGE_DESC = "Do Nothing"
STAGE_INFO = """
No-op stage. Prints a JSON dump of the options passed into this stage and
leaves the tree untouched. Useful for testing, debugging, and wasting time.
"""
STAGE_OPTS = ""
def main(_tree, options):
print("Not doing anything with these options:", json.dumps(options))


@@ -9,6 +9,59 @@ import subprocess
import sys
import tempfile
STAGE_DESC = "Download, verify, and install RPM packages"
STAGE_INFO = """
Download, verify, and install RPM packages.
`gpgkeys` should be an array of strings containing each GPG key to be used
to verify the downloaded packages.
`packages` is an array of objects; each item must have a `url` to download
the .rpm file and a `checksum` to verify the integrity of the downloaded
data.
This stage will fail if any of the URLs can't be reached, or if any downloaded
RPM has a signature or digest that cannot be verified.
NOTE: this stage currently does _not_ fail if a package is unsigned, only if
the package is signed but the signature cannot be verified. A future version
of this stage will fail on unsigned packages by default, but may support a
flag to skip signature checks for packages that are known to be unsigned.
Uses the following binaries from the host:
* `curl` to fetch RPMs
* `sha256sum` (or `sha1sum`, `md5sum`, etc.) to check RPM checksums
* `rpmkeys` to import keys and to verify signatures for each package
* `sh`, `mkdir`, `mount`, `chmod` to prepare the target tree for `rpm`
* `rpm` to install packages into the target tree
"""
STAGE_OPTS = """
"properties": {
"gpgkeys": {
"description": "Array of GPG key contents to import",
"type": "array",
"items": { "type": "string" }
},
"packages": {
"description": "Array of package objects",
"type": "array",
"items": {
"type": "object",
"required": ["url", "checksum"],
"properties": {
"url": {
"type": "string",
"description": "URL to download a .rpm package file"
},
"checksum": {
"type": "string",
"description": ".rpm file checksum, prefixed with 'md5:', 'sha1:', 'sha256:', 'sha384:', or 'sha512:', indicating the algorithm used."
}
}
}
}
}
"""
RPM_CACHE_DIR = "/var/cache/org.osbuild.rpm"
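
For illustration, a hypothetical options object for this stage. The key text,
package URL, and checksum are placeholders, not real values:

    options = {
        "gpgkeys": ["-----BEGIN PGP PUBLIC KEY BLOCK-----\n..."],   # full key contents go here
        "packages": [
            {
                "url": "https://example.com/packages/example-1.0-1.noarch.rpm",
                "checksum": "sha256:" + "0" * 64,                   # placeholder digest
            }
        ],
    }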


@@ -6,6 +6,33 @@ import os
import subprocess
import sys
STAGE_DESC = "Run an arbitrary script inside the target tree"
STAGE_INFO = """
Runs an arbitrary script inside the target tree.
Writes the contents of the `script` item to `/osbuild-script`, sets the
permissions of the script to 0550 (-r-xr-x---), then uses the host's `chroot`
binary to chroot into the tree and execute the script. The script is removed
after it completes.
WARNING: running code inside the tree is unsafe, unreliable, and generally
discouraged. Using this stage may result in unexplained failures or other
undefined behavior, and should only be done as a last resort.
NOTE: if `script` does not start with a line like '#!/bin/bash\n', executing
it will fail with ENOEXEC. Some `chroot` binaries will try to run the script
through `/bin/sh` in that case, so it might still work, but that behavior is
not guaranteed.
"""
STAGE_OPTS = """
"required": ["script"],
"properties": {
"script": {
"type": "string",
"description": "contents of the script file to be executed."
}
}
"""
def main(tree, options):
script = options["script"]
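
A hypothetical options object for this stage; note the shebang line, as
described in the NOTE above (the script body is invented):

    options = {
        "script": "#!/bin/bash\nset -euo pipefail\necho 'hello from inside the tree'\n",
    }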


@@ -5,6 +5,34 @@ import os
import subprocess
import sys
STAGE_DESC = "Set SELinux file contexts"
STAGE_INFO = """
Sets correct SELinux labels for every file in the tree, according to the
SELinux policy installed inside the tree.
Uses the host's `setfiles` program and the tree's `file_contexts`, usually
/etc/selinux/<SELINUXTYPE>/contexts/files/file_contexts
where <SELINUXTYPE> is the value set in /etc/selinux/config (usually "targeted"
but may also be "minimum" or "mls").
This stage may set or modify xattrs for any file inside the tree, but should
not need to create files, modify file contents, or read any files other than
`file_contexts`.
This stage should run after all other stages that create (or move) files, since
labels for newly-created files are determined by the host's SELinux policy and
may not match the tree's policy.
"""
STAGE_OPTS = """
"required": ["file_contexts"],
"properties": {
"file_contexts": {
"type": "string",
"description": "Path to the active SELinux policy's `file_contexts`"
}
}
"""
def main(tree, options):
file_contexts = os.path.join(f"{tree}", options["file_contexts"])


@@ -4,6 +4,32 @@ import json
import subprocess
import sys
STAGE_DESC = "Enable or disable systemd services"
STAGE_INFO = """
Enable or disable systemd units (service, socket, path, etc.)
This stage runs `systemctl enable` for all `enabled_services` items, which may
create symlinks under `/etc/systemd/system`.
After enabling units, it runs `systemctl disable` for all `disabled_services`
items, which will delete _all_ symlinks to the named services.
Uses `systemctl` from the buildhost.
"""
STAGE_OPTS = """
"required": ["enabled_services"],
"properties": {
"enabled_services": {
"type": "array",
"items": { "type": "string" },
"description": "Array of systemd unit names to be enabled"
},
"disabled_services": {
"type": "array",
"items": { "type": "string" },
"description": "Array of systemd unit names to be enabled"
}
}
"""
def main(tree, options):
enabled_services = options["enabled_services"]
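
A hypothetical options object for this stage (the unit names are examples;
which units exist depends on what the image has installed):

    options = {
        "enabled_services": ["sshd.service", "chronyd.service"],
        "disabled_services": ["systemd-resolved.service"],
    }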


@@ -4,6 +4,23 @@ import json
import os
import sys
STAGE_DESC = "Enable osbuild Boot Test service"
STAGE_INFO = """
Creates a Boot Test service that executes the given `script` (sending output to
/dev/vport0p1) then immediately shuts down the system.
Creates `/etc/systemd/system/osbuild-test.service`, and a symlink to it in
`/etc/systemd/system/multi-user.target.wants/`.
"""
STAGE_OPTS = """
"required": ["script"],
"properties": {
"script": {
"type": "string",
"description": "Full path to a script that verifies successful bootup"
}
}
"""
def main(tree, options):
script = options["script"]


@@ -5,6 +5,23 @@ import subprocess
import sys
import os
STAGE_DESC = "Set system timezone"
STAGE_INFO = """
Set the system's timezone to `zone`, which should be a valid time zone
identifier from the tz database - like "America/New_York" or "Europe/Berlin".
Removes `/etc/localtime`, then runs the host's `systemd-firstboot` binary with
the `--timezone` option, which will re-create `/etc/localtime`.
"""
STAGE_OPTS = """
"required": ["zone"],
"properties": {
"zone": {
"type": "string",
"description": "Timezone identifier (from tzdb/zoneinfo)"
}
}
"""
def main(tree, options):
zone = options["zone"]


@@ -5,6 +5,67 @@ import subprocess
import sys
import os
STAGE_DESC = "Add or modify user accounts"
STAGE_INFO = """
Add or modify user accounts inside the tree.
WARNING: This stage uses chroot() to run the `useradd` or `usermod` binary
from inside the tree. This will fail for cross-arch builds and may fail or
misbehave if the `usermod`/`useradd` binary inside the tree makes incorrect
assumptions about its host system.
"""
STAGE_OPTS = """
"properties": {
"users": {
"type": "object",
"description": "Keys are usernames, values are objects giving user info.",
"propertyNames": {
"pattern": "^[A-Za-z0-9_][A-Za-z0-9_-]{0,31}$"
},
"additionalProperties": {
"type": "object",
"properties": {
"uid": {
"description": "User UID",
"type": "number"
},
"gid": {
"description": "User GID",
"type": "number"
},
"groups": {
"description": "Array of group names for this user",
"type": "array",
"items": {
"type": "string"
}
},
"description": {
"description": "User account description (or full name)",
"type": "string"
},
"home": {
"description": "Path to user's home directory",
"type": "string"
},
"shell": {
"description": "User's login shell",
"type": "string"
},
"password": {
"description": "User's encrypted password, as returned by crypt(3)",
"type": "string"
},
"key": {
"description": "SSH Public Key to add to ~/.ssh/authorized_keys",
"type": "string"
}
}
}
}
}
"""
def getpwnam(root, name):
"""Similar to pwd.getpwnam, but takes a @root parameter"""


@@ -5,6 +5,85 @@ import subprocess
import sys
import tempfile
STAGE_DESC = "Install packages using YUM"
STAGE_INFO = """
Depsolves, downloads, and installs packages (and dependencies) using YUM.
Writes the `repos` into `/tmp/yum.conf`, does some tree setup, and then runs
the buildhost's `yum` command with `--installroot`, plus the following
arguments generated from the stage options:
* `--releasever={releasever}`
* `--rpmverbosity={verbosity}`
* `--config=/tmp/yum.conf`
To prepare the tree, this stage mounts `devtmpfs`, `sysfs`, and `proc` at
`/dev`, `/sys`, and `/proc` (respectively).
Each repo listed in `repos` needs at least one of `mirrorlist`, `metalink`, or
`baseurl`. If a `gpgkey` is provided, `gpgcheck` will be turned on for that
repo, and YUM will exit with an error unless every package downloaded from that
repo is signed by one of the trusted `gpgkey`s.
Buildhost commands used: `/bin/sh`, `yum`, `mkdir`, `mount`.
"""
STAGE_OPTS = """
"required": ["repos", "packages", "releasever"],
"properties": {
"repos": {
"description": "Array of repo objects to set up",
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"metalink": {
"description": "metalink URL for this repo",
"type": "string"
},
"mirrorlist": {
"description": "mirrorlist URL for this repo",
"type": "string"
},
"baseurl": {
"description": "baseurl for this repo",
"type": "string"
},
"gpgkey": {
"description": "GPG public key contents (to check signatures)",
"type": "string"
}
},
"anyOf": [
{"required": ["metalink"]},
{"required": ["mirrorlist"]},
{"required": ["baseurl"]}
]
}
},
"packages": {
"description": "List of package-specs to pass to yum",
"type": "array",
"minItems": 1,
"items": { "type": "string" }
},
"releasever": {
"description": "yum $releasever value",
"type": "string"
},
"operation": {
"description": "yum command to use",
"type": "string",
"default": "install"
},
"verbosity": {
"description": "Set yum's --rpmverbosity",
"type": "string",
"default": "info"
}
}
"""
def write_repofile(f, repoid, repo, keydir):
f.write(f"[{repoid}]\n")

test/test_stageinfo.py (new file)

@@ -0,0 +1,75 @@
import json
import unittest
from pathlib import Path
class TestStageInfo(unittest.TestCase):
    @staticmethod
    def iter_stages(stagedir):
        '''Yield executable files in `stagedir`'''
        for p in Path(stagedir).iterdir():
            if p.is_file() and not p.is_symlink() and p.stat().st_mode & 1:  # world-executable
                yield p

    @staticmethod
    def get_stage_info(stage: Path) -> dict:
        '''Return the STAGE_* variables from the given stage.'''
        # TODO: This works for now, but stages should probably have some
        # standard way of dumping this info so we (and other higher-level
        # tools) don't have to parse the code and walk through the AST
        # to find these values.
        import ast
        stage_info = {}
        with open(stage) as fobj:
            stage_ast = ast.parse(fobj.read(), filename=stage)
        # STAGE_* assignments are at the toplevel, no need to walk()
        for node in stage_ast.body:
            if isinstance(node, ast.Assign) and isinstance(node.value, ast.Str):
                for target in node.targets:
                    if target.id.startswith("STAGE_"):
                        stage_info[target.id] = node.value.s
        return stage_info

    @staticmethod
    def parse_stage_opts(stage_opts: str) -> dict:
        if not stage_opts.lstrip().startswith('{'):
            stage_opts = '{' + stage_opts + '}'
        return json.loads(stage_opts)

    def setUp(self):
        self.topdir = Path(".")  # NOTE: this could be smarter...
        self.stages_dir = self.topdir / "stages"
        self.assemblers_dir = self.topdir / "assemblers"
        self.stages = list(self.iter_stages(self.stages_dir))
        self.assemblers = list(self.iter_stages(self.assemblers_dir))

    def check_stage_info(self, stage):
        with self.subTest(check="STAGE_{INFO,DESC,OPTS} vars present"):
            stage_info = self.get_stage_info(stage)
            self.assertIn("STAGE_DESC", stage_info)
            self.assertIn("STAGE_INFO", stage_info)
            self.assertIn("STAGE_OPTS", stage_info)
        with self.subTest(check="STAGE_OPTS is valid JSON"):
            stage_opts = self.parse_stage_opts(stage_info["STAGE_OPTS"])
            self.assertIsNotNone(stage_opts)
        # TODO: we probably want an actual JSON Schema validator but
        # a nicely basic sanity test for our current STAGE_OPTS is:
        # 1. If it's not empty, there should be a "properties" object,
        # 2. If "required" exists, each item should be a property name
        with self.subTest(check="STAGE_OPTS is valid JSON Schema"):
            if stage_opts:
                self.assertIn("properties", stage_opts)
                self.assertIsInstance(stage_opts["properties"], dict)
                for prop in stage_opts.get("required", []):
                    self.assertIn(prop, stage_opts["properties"])

    def test_stage_info(self):
        for stage in self.stages:
            with self.subTest(stage=stage.name):
                self.check_stage_info(stage)
        for assembler in self.assemblers:
            with self.subTest(assembler=assembler.name):
                self.check_stage_info(assembler)