Pipeline: add support for a build pipeline
The build pipeline is a sub-pipeline used to generate the build tree to use rather than the current root directory. This can be nested arbitrarily deep, but ultimately we will fall back to the current logic when no build property is found. Just as the tree after the last stage of a regular pipeline ends up in the object store, so currently does each build tree (as the build sub-pipeline really is just a regular pipeline in its own right). We may want to avoid both of these instances of the implicit storing semantics, and instead make them something the caller opts in to. However, for now that is left as a future optimization. Signed-off-by: Tom Gundersen <teg@jklm.no>
This commit is contained in:
parent
3669978577
commit
dcc9384ba8
3 changed files with 116 additions and 36 deletions
|
|
@ -22,3 +22,6 @@ jobs:
|
|||
- name: pipeline-yum
|
||||
before_install: sudo apt-get install -y systemd-container yum
|
||||
script: sudo env "PATH=$PATH" python3 -m osbuild --libdir . --output . samples/build-from-yum.json
|
||||
- name: pipeline-base-from-yum
|
||||
before_install: sudo apt-get install -y systemd-container yum
|
||||
script: sudo env "PATH=$PATH" python3 -m osbuild --libdir . --output . samples/base-from-yum.json
|
||||
|
|
|
|||
|
|
@ -274,9 +274,10 @@ def print_header(title, options):
|
|||
|
||||
|
||||
class Stage:
|
||||
def __init__(self, name, base, options):
|
||||
def __init__(self, name, base, build, options):
|
||||
m = hashlib.sha256()
|
||||
m.update(json.dumps(name, sort_keys=True).encode())
|
||||
m.update(json.dumps(build, sort_keys=True).encode())
|
||||
m.update(json.dumps(base, sort_keys=True).encode())
|
||||
m.update(json.dumps(options, sort_keys=True).encode())
|
||||
|
||||
|
|
@ -359,17 +360,39 @@ class Assembler:
|
|||
class Pipeline:
|
||||
def __init__(self, base=None):
|
||||
self.base = base
|
||||
self.build = None
|
||||
self.stages = []
|
||||
self.assembler = None
|
||||
|
||||
def get_id(self):
|
||||
return self.stages[-1].id if self.stages else self.base
|
||||
|
||||
def set_build(self, pipeline):
|
||||
if self.stages:
|
||||
raise ValueError("Must set build before stages.")
|
||||
self.build = pipeline
|
||||
|
||||
def add_stage(self, name, options=None):
|
||||
base = self.stages[-1].id if self.stages else self.base
|
||||
stage = Stage(name, base, options or {})
|
||||
build = self.build.get_id() if self.build else None
|
||||
stage = Stage(name, build, self.get_id(), options or {})
|
||||
self.stages.append(stage)
|
||||
|
||||
def set_assembler(self, name, options=None):
|
||||
self.assembler = Assembler(name, options or {})
|
||||
|
||||
@contextlib.contextmanager
|
||||
def get_buildtree(self, object_store):
|
||||
if self.build:
|
||||
with object_store.get_tree(self.build.get_id()) as tree:
|
||||
yield tree
|
||||
else:
|
||||
with tempfile.TemporaryDirectory(dir=object_store.store) as tmp:
|
||||
subprocess.run(["mount", "-o", "bind,ro,mode=0755", "/", tmp], check=True)
|
||||
try:
|
||||
yield tmp
|
||||
finally:
|
||||
subprocess.run(["umount", "--lazy", tmp], check=True)
|
||||
|
||||
def run(self, output_dir, store, interactive=False, check=True, libdir=None):
|
||||
os.makedirs("/run/osbuild", exist_ok=True)
|
||||
if self.base and not store:
|
||||
|
|
@ -378,40 +401,45 @@ class Pipeline:
|
|||
results = {
|
||||
"stages": []
|
||||
}
|
||||
if self.stages:
|
||||
tree_id = self.stages[-1].id
|
||||
if not object_store.has_tree(tree_id):
|
||||
# The tree does not exist. Create it and save it to the object store. If
|
||||
# two run() calls race each-other, two trees may be generated, and it
|
||||
# is nondeterministic which of them will end up referenced by the tree_id
|
||||
# in the content store. However, we guarantee that all tree_id's and all
|
||||
# generated trees remain valid.
|
||||
with object_store.new_tree(tree_id, base_id=self.base) as tree:
|
||||
for stage in self.stages:
|
||||
r = stage.run(tree,
|
||||
"/",
|
||||
interactive=interactive,
|
||||
check=check,
|
||||
libdir=libdir)
|
||||
results["stages"].append(r)
|
||||
if r["returncode"] != 0:
|
||||
results["returncode"] = r["returncode"]
|
||||
return results
|
||||
else:
|
||||
tree_id = None
|
||||
if self.build:
|
||||
r = self.build.run(None, store, interactive, check, libdir)
|
||||
results["build"] = r
|
||||
if r["returncode"] != 0:
|
||||
results["returncode"] = r["returncode"]
|
||||
return results
|
||||
|
||||
if self.assembler:
|
||||
with object_store.get_tree(tree_id) as tree:
|
||||
r = self.assembler.run(tree,
|
||||
"/",
|
||||
output_dir=output_dir,
|
||||
interactive=interactive,
|
||||
check=check,
|
||||
libdir=libdir)
|
||||
results["assembler"] = r
|
||||
if r["returncode"] != 0:
|
||||
results["returncode"] = r["returncode"]
|
||||
return results
|
||||
with self.get_buildtree(object_store) as build_tree:
|
||||
if self.stages:
|
||||
if not object_store.has_tree(self.get_id()):
|
||||
# The tree does not exist. Create it and save it to the object store. If
|
||||
# two run() calls race each-other, two trees may be generated, and it
|
||||
# is nondeterministic which of them will end up referenced by the tree_id
|
||||
# in the content store. However, we guarantee that all tree_id's and all
|
||||
# generated trees remain valid.
|
||||
with object_store.new_tree(self.get_id(), base_id=self.base) as tree:
|
||||
for stage in self.stages:
|
||||
r = stage.run(tree,
|
||||
build_tree,
|
||||
interactive=interactive,
|
||||
check=check,
|
||||
libdir=libdir)
|
||||
results["stages"].append(r)
|
||||
if r["returncode"] != 0:
|
||||
results["returncode"] = r["returncode"]
|
||||
return results
|
||||
|
||||
if self.assembler:
|
||||
with object_store.get_tree(self.get_id()) as tree:
|
||||
r = self.assembler.run(tree,
|
||||
build_tree,
|
||||
output_dir=output_dir,
|
||||
interactive=interactive,
|
||||
check=check,
|
||||
libdir=libdir)
|
||||
results["assembler"] = r
|
||||
if r["returncode"] != 0:
|
||||
results["returncode"] = r["returncode"]
|
||||
return results
|
||||
|
||||
results["returncode"] = 0
|
||||
return results
|
||||
|
|
@ -420,6 +448,10 @@ class Pipeline:
|
|||
def load(description):
|
||||
pipeline = Pipeline(description.get("base"))
|
||||
|
||||
b = description.get("build")
|
||||
if b:
|
||||
pipeline.set_build(load(b))
|
||||
|
||||
for s in description.get("stages", []):
|
||||
pipeline.add_stage(s["name"], s.get("options", {}))
|
||||
|
||||
|
|
|
|||
45
samples/base-from-yum.json
Normal file
45
samples/base-from-yum.json
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
{
|
||||
"name": "base",
|
||||
"build": {
|
||||
"name": "build",
|
||||
"stages": [
|
||||
{
|
||||
"name": "org.osbuild.yum",
|
||||
"options": {
|
||||
"releasever": "27",
|
||||
"repos": {
|
||||
"fedora": {
|
||||
"name": "Fedora",
|
||||
"baseurl": "https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/$releasever/Everything/$basearch/os/",
|
||||
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch"
|
||||
}
|
||||
},
|
||||
"packages": [
|
||||
"dnf",
|
||||
"systemd"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"stages": [
|
||||
{
|
||||
"name": "org.osbuild.dnf",
|
||||
"options": {
|
||||
"releasever": "30",
|
||||
"repos": {
|
||||
"fedora": {
|
||||
"name": "Fedora",
|
||||
"metalink": "https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever&arch=$basearch",
|
||||
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch"
|
||||
}
|
||||
},
|
||||
"packages": [
|
||||
"@Core",
|
||||
"selinux-policy-targeted",
|
||||
"grub2-pc"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue