Support loading repositories from a root tree instead of supplying them
with the request. The repositories should be in the standard yum repo
format. Both repository sources can be defined simultaneously, but at
least one is required.
The root_dir is expected to contain files necessary for depsolving in
the standard paths.
These files are:
- Repository (.repo) configurations in <root_dir>/etc/yum.repos.d/
- GPG key files in <root_dir>/etc/pki/rpm-gpg/
- These are used to resolve GPG key paths specified in the .repo files
relative to the root_dir.
- (Optional) Custom dnf config variables in <root_dir>/etc/dnf/vars or
<root_dir>/etc/yum/vars.
- This is used by CentOS Stream to set the value of $stream.
Custom repository configurations in arbitrary (non-root) paths will have
to follow this directory structure.
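
For illustration, a .repo file under <root_dir>/etc/yum.repos.d/ could
look like this (the repository id, name, and URL below are placeholders,
not values from any real distribution):

    [baseos]
    name=Example BaseOS $releasever
    baseurl=https://example.com/repos/$releasever/BaseOS/$basearch/os/
    gpgcheck=1
    gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-example

Here the gpgkey path is resolved inside the root_dir as described above,
and $releasever, $basearch, and custom variables such as $stream are
substituted by dnf.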
A new variable is added to the request, `releasever`, which is mandatory
when using `root_dir`. This variable is used in repository URLs and GPG
key paths. By default, dnf reads this variable from the rpm database. We
override it in the Solver the same way we override arch and basearch for
variable substitution. In the future, we will make this variable
mandatory in all cases, which will also make it available to repo
configs defined in the request.
The root_dir is used in two ways:
- Set the base.conf.reposdir to <root_dir>/etc/yum.repos.d.
- Call update_from_etc() with root_dir to read custom variables in
<root_dir>/etc/yum/vars and <root_dir>/etc/dnf/vars.
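
Putting it together, a depsolve request that combines both repository
sources might look like this (all paths, URLs, and ids are illustrative);
the request is read from stdin and the result is printed to stdout:

    {
      "command": "depsolve",
      "arch": "x86_64",
      "releasever": "9",
      "module_platform_id": "platform:el9",
      "cachedir": "/var/tmp/dnf-cache",
      "arguments": {
        "root_dir": "/path/to/root",
        "repos": [
          {"id": "custom", "baseurl": "https://example.com/custom/$releasever/$basearch/"}
        ],
        "transactions": [
          {"package-specs": ["@core", "vim-enhanced"], "exclude-specs": []}
        ]
      }
    }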
#!/usr/bin/python3
# pylint: disable=invalid-name

"""
A JSON-based interface for depsolving using DNF.

Reads a request through stdin and prints the result to stdout.
In case of error, a structured error is printed to stdout as well.
"""
import json
import os
import sys
import tempfile
from datetime import datetime
from typing import List

import dnf
import hawkey


class Solver():

    def __init__(self, request, persistdir, cache_dir):
        arch = request["arch"]
        releasever = request.get("releasever")
        module_platform_id = request["module_platform_id"]

        arguments = request["arguments"]
        repos = arguments.get("repos", [])
        root_dir = arguments.get("root_dir")

        self.base = dnf.Base()

        # Enable fastestmirror to ensure we choose the fastest mirrors for
        # downloading metadata (when depsolving) and downloading packages.
        self.base.conf.fastestmirror = True

        # We use the same cachedir for multiple architectures. Unfortunately,
        # this is something that doesn't work well in certain situations
        # with zchunk:
        # Imagine that we already have cache for arch1. Then, we use dnf-json
        # to depsolve for arch2. If ZChunk is enabled and available (that's
        # the case for Fedora), dnf will try to download only differences
        # between arch1 and arch2 metadata. But, as these are completely
        # different, dnf must basically redownload everything.
        # For downloading deltas, zchunk uses HTTP range requests. Unfortunately,
        # if the mirror doesn't support multi-range requests, then zchunk will
        # download one small segment per request. Because we need to update
        # the whole metadata (10s of MB), this can be extremely slow in some cases.
        # I think that we can come up with a better fix but let's just disable
        # zchunk for now. As we are already downloading a lot of data when
        # building images, I don't care if we download even more.
        self.base.conf.zchunk = False

        # Set the rest of the dnf configuration.
        self.base.conf.module_platform_id = module_platform_id
        self.base.conf.config_file_path = "/dev/null"
        self.base.conf.persistdir = persistdir
        self.base.conf.cachedir = cache_dir
        self.base.conf.substitutions['arch'] = arch
        self.base.conf.substitutions['basearch'] = dnf.rpm.basearch(arch)
        if releasever:
            self.base.conf.substitutions['releasever'] = releasever

        for repo in repos:
            self.base.repos.add(self._dnfrepo(repo, self.base.conf))

        if root_dir:
            # This sets the varsdir to ("{root_dir}/etc/yum/vars/", "{root_dir}/etc/dnf/vars/")
            # for custom variable substitution (e.g. CentOS Stream 9's $stream variable).
            self.base.conf.substitutions.update_from_etc(root_dir)

            repos_dir = os.path.join(root_dir, "etc/yum.repos.d")
            self.base.conf.reposdir = repos_dir
            self.base.read_all_repos()

        self.base.fill_sack(load_system_repo=False)

    # pylint: disable=too-many-branches
    @staticmethod
    def _dnfrepo(desc, parent_conf=None):
        """Makes a dnf.repo.Repo out of a JSON repository description"""

        repo = dnf.repo.Repo(desc["id"], parent_conf)

        if "name" in desc:
            repo.name = desc["name"]

        # at least one of these is required
        if "baseurl" in desc:
            repo.baseurl = desc["baseurl"]
        elif "metalink" in desc:
            repo.metalink = desc["metalink"]
        elif "mirrorlist" in desc:
            repo.mirrorlist = desc["mirrorlist"]
        else:
            raise ValueError("missing either `baseurl`, `metalink`, or `mirrorlist` in repo")

        if desc.get("ignoressl", False):
            repo.sslverify = False
        if "sslcacert" in desc:
            repo.sslcacert = desc["sslcacert"]
        if "sslclientkey" in desc:
            repo.sslclientkey = desc["sslclientkey"]
        if "sslclientcert" in desc:
            repo.sslclientcert = desc["sslclientcert"]

        if "check_gpg" in desc:
            repo.gpgcheck = desc["check_gpg"]
        if "check_repogpg" in desc:
            repo.repo_gpgcheck = desc["check_repogpg"]
        if "gpgkey" in desc:
            repo.gpgkey = [desc["gpgkey"]]
        if "gpgkeys" in desc:
            # gpgkeys can contain either a full key or a URL.
            # dnf expects URLs, so write any inline key to a temporary location
            # and add the file:// path to repo.gpgkey.
            keydir = os.path.join(parent_conf.persistdir, "gpgkeys")
            if not os.path.exists(keydir):
                os.makedirs(keydir, mode=0o700, exist_ok=True)

            for key in desc["gpgkeys"]:
                if key.startswith("-----BEGIN PGP PUBLIC KEY BLOCK-----"):
                    # Not using `with` because the file needs to remain valid for
                    # the duration. It is inside the temporary persistdir, so it
                    # will be cleaned up on exit.
                    # pylint: disable=consider-using-with
                    keyfile = tempfile.NamedTemporaryFile(dir=keydir, delete=False)
                    keyfile.write(key.encode("utf-8"))
                    repo.gpgkey.append(f"file://{keyfile.name}")
                    keyfile.close()
                else:
                    repo.gpgkey.append(key)

        # In dnf, the default metadata expiration time is 48 hours. However,
        # some repositories never expire the metadata, and others expire it much
        # sooner than that. We therefore allow this to be configured. If nothing
        # is provided, we err on the side of checking whether we should invalidate
        # the cache. If cache invalidation is not necessary, the overhead of
        # checking is in the hundreds of milliseconds. In order to avoid this
        # overhead accumulating for API calls that consist of several dnf calls,
        # we set the expiration to a short time period, rather than 0.
        repo.metadata_expire = desc.get("metadata_expire", "20s")

        # If set to True, this option disables modularity filtering, effectively
        # disabling modularity for the given repository.
        if "module_hotfixes" in desc:
            repo.module_hotfixes = desc["module_hotfixes"]

        return repo

    @staticmethod
    def _timestamp_to_rfc3339(timestamp):
        return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%SZ')

    def dump(self):
        packages = []
        for package in self.base.sack.query().available():
            packages.append({
                "name": package.name,
                "summary": package.summary,
                "description": package.description,
                "url": package.url,
                "repo_id": package.repoid,
                "epoch": package.epoch,
                "version": package.version,
                "release": package.release,
                "arch": package.arch,
                "buildtime": self._timestamp_to_rfc3339(package.buildtime),
                "license": package.license
            })
        return packages

    def search(self, args):
        """ Perform a search on the available packages

        args contains a "search" dict with parameters to use for searching.
        "packages" is a list of package name globs to search for.
        "latest" is a boolean that will return only the latest NEVRA instead
        of all matching builds in the metadata.

        eg.

        "search": {
            "latest": false,
            "packages": ["tmux", "vim*", "*ssh*"]
        },
        """
        pkg_globs = args.get("packages", [])

        packages = []

        # NOTE: Build the query one piece at a time; don't pass everything to
        # filterm at the same time.
        available = self.base.sack.query().available()
        for name in pkg_globs:
            # If the package name glob has a * in it, use glob matching.
            # If it is of the form *name*, use substring matching.
            # If it has neither, use an exact match.
            if "*" in name:
                if name[0] != "*" or name[-1] != "*":
                    q = available.filter(name__glob=name)
                else:
                    q = available.filter(name__substr=name.replace("*", ""))
            else:
                q = available.filter(name__eq=name)

            if args.get("latest", False):
                q = q.latest()

            for package in q:
                packages.append({
                    "name": package.name,
                    "summary": package.summary,
                    "description": package.description,
                    "url": package.url,
                    "repo_id": package.repoid,
                    "epoch": package.epoch,
                    "version": package.version,
                    "release": package.release,
                    "arch": package.arch,
                    "buildtime": self._timestamp_to_rfc3339(package.buildtime),
                    "license": package.license
                })
        return packages

    def depsolve(self, transactions):
        last_transaction: List = []

        for transaction in transactions:
            self.base.reset(goal=True)
            self.base.sack.reset_excludes()

            self.base.conf.install_weak_deps = transaction.get("install_weak_deps", False)

            # set the packages from the last transaction as installed
            for installed_pkg in last_transaction:
                self.base.package_install(installed_pkg, strict=True)

            # depsolve the current transaction
            self.base.install_specs(
                transaction.get("package-specs"),
                transaction.get("exclude-specs"),
                reponame=transaction.get("repo-ids"),
            )
            self.base.resolve()

            # store the current transaction result
            last_transaction.clear()
            for tsi in self.base.transaction:
                # Avoid using the install_set() helper, as it does not guarantee
                # a stable order
                if tsi.action not in dnf.transaction.FORWARD_ACTIONS:
                    continue
                last_transaction.append(tsi.pkg)

        dependencies = []
        for package in last_transaction:
            dependencies.append({
                "name": package.name,
                "epoch": package.epoch,
                "version": package.version,
                "release": package.release,
                "arch": package.arch,
                "repo_id": package.repoid,
                "path": package.relativepath,
                "remote_location": package.remote_location(),
                "checksum": (
                    f"{hawkey.chksum_name(package.chksum[0])}:"
                    f"{package.chksum[1].hex()}"
                )
            })

        return dependencies


def setup_cachedir(request):
    arch = request["arch"]
    # If dnf-json is run as a service, we don't want users to be able to set the cache
    cache_dir = os.environ.get("OVERWRITE_CACHE_DIR", "")
    if cache_dir:
        cache_dir = os.path.join(cache_dir, arch)
    else:
        cache_dir = request.get("cachedir", "")

    if not cache_dir:
        return "", {"kind": "Error", "reason": "No cache dir set"}

    return cache_dir, None


def solve(request, cache_dir):
    command = request["command"]
    arguments = request["arguments"]

    transactions = arguments.get("transactions")
    with tempfile.TemporaryDirectory() as persistdir:
        try:
            solver = Solver(request, persistdir, cache_dir)
            if command == "dump":
                result = solver.dump()
            elif command == "depsolve":
                result = solver.depsolve(transactions)
            elif command == "search":
                result = solver.search(arguments.get("search", {}))

        except dnf.exceptions.MarkingErrors as e:
            printe("error install_specs")
            return None, {
                "kind": "MarkingErrors",
                "reason": f"Error occurred when marking packages for installation: {e}"
            }
        except dnf.exceptions.DepsolveError as e:
            printe("error depsolve")
            # collect the list of packages for the error message
            pkgs = []
            for t in transactions:
                pkgs.extend(t["package-specs"])
            return None, {
                "kind": "DepsolveError",
                "reason": f"There was a problem depsolving {', '.join(pkgs)}: {e}"
            }
        except dnf.exceptions.RepoError as e:
            return None, {
                "kind": "RepoError",
                "reason": f"There was a problem reading a repository: {e}"
            }
        except dnf.exceptions.Error as e:
            printe("error repository setup")
            return None, {
                "kind": type(e).__name__,
                "reason": str(e)
            }
    return result, None


def printe(*msg):
    print(*msg, file=sys.stderr)


def fail(err):
    printe(f"{err['kind']}: {err['reason']}")
    print(json.dumps(err))
    sys.exit(1)


def respond(result):
    print(json.dumps(result))


# pylint: disable=too-many-return-statements
def validate_request(request):
    command = request.get("command")
    valid_cmds = ("depsolve", "dump", "search")
    if command not in valid_cmds:
        return {
            "kind": "InvalidRequest",
            "reason": f"invalid command '{command}': must be one of {', '.join(valid_cmds)}"
        }

    if not request.get("arch"):
        return {
            "kind": "InvalidRequest",
            "reason": "no 'arch' specified"
        }

    if not request.get("module_platform_id"):
        return {
            "kind": "InvalidRequest",
            "reason": "no 'module_platform_id' specified"
        }

    arguments = request.get("arguments")
    if not arguments:
        return {
            "kind": "InvalidRequest",
            "reason": "empty 'arguments'"
        }

    if not arguments.get("repos") and not arguments.get("root_dir"):
        return {
            "kind": "InvalidRequest",
            "reason": "no 'repos' or 'root_dir' specified"
        }

    # if root_dir is used, we also need releasever
    # pylint: disable=fixme
    # TODO: Make releasever mandatory in all cases.
    # We temporarily keep it tied to root_dir for short-term backwards compatibility.
    if arguments.get("root_dir") and not request.get("releasever"):
        return {
            "kind": "InvalidRequest",
            "reason": "'root_dir' requires setting 'releasever'"
        }

    return None


def main():
    request = json.load(sys.stdin)
    err = validate_request(request)
    if err:
        fail(err)

    cachedir, err = setup_cachedir(request)
    if err:
        fail(err)
    result, err = solve(request, cachedir)
    if err:
        fail(err)
    else:
        respond(result)


if __name__ == "__main__":
    main()