PR#4033: kojira on demand

Merges #4033 (https://pagure.io/koji/pull-request/4033)
Fixes #3176 (https://pagure.io/koji/issue/3176): Kojira redesign
Fixes #3866 (https://pagure.io/koji/issue/3866): Broken repos in expired state
commit f2da4b8e44
39 changed files with 5874 additions and 1337 deletions
@@ -5824,9 +5824,24 @@ class NewRepoTask(BaseTaskHandler):
             self.logger.debug('Arch repo test passed %s' % arch)
         return True
 
-    def handler(self, tag, event=None, src=False, debuginfo=False, separate_src=False):
+    def handler(self, tag, event=None, src=None, debuginfo=None, separate_src=None, opts=None):
         tinfo = self.session.getTag(tag, strict=True, event=event)
+
+        # handle deprecated opts
+        _opts = {}
+        if src is not None:
+            _opts['src'] = bool(src)
+        if debuginfo is not None:
+            _opts['debuginfo'] = bool(debuginfo)
+        if separate_src is not None:
+            _opts['separate_src'] = bool(separate_src)
+        if _opts:
+            if opts is not None:
+                raise koji.ParameterError('opts parameter cannot be combined with legacy options')
+            self.logger.warning('The src, debuginfo, and separate_src parameters for newRepo '
+                                'tasks are deprecated. Use the opts parameter.')
+            opts = _opts
 
         # check for fs access before we try calling repoInit
         top_repos_dir = joinpath(self.options.topdir, "repos")
         if not os.path.isdir(top_repos_dir):
@@ -5835,17 +5850,10 @@ class NewRepoTask(BaseTaskHandler):
             raise RefuseTask("No access to repos dir %s" % top_repos_dir)
 
         # call repoInit
-        kwargs = {}
+        kwargs = {'opts': opts, 'task_id': self.id}
         if event is not None:
             kwargs['event'] = event
-        if src:
-            kwargs['with_src'] = True
-        if separate_src:
-            kwargs['with_separate_src'] = True
-        # generate debuginfo repo if requested or if specified in sidetag's extra
-        if debuginfo or tinfo['extra'].get('with_debuginfo'):
-            kwargs['with_debuginfo'] = True
-        repo_id, event_id = self.session.host.repoInit(tinfo['id'], task_id=self.id, **kwargs)
+        repo_id, event_id = self.session.host.repoInit(tinfo['id'], **kwargs)
 
         path = koji.pathinfo.repo(repo_id, tinfo['name'])
         if not os.path.isdir(path):
@@ -5855,12 +5863,7 @@ class NewRepoTask(BaseTaskHandler):
             if fn != 'groups' and os.path.isfile("%s/%s/pkglist" % (path, fn)):
                 arches.append(fn)
         # see if we can find a previous repo to update from
-        # only shadowbuild tags should start with SHADOWBUILD, their repos are auto
-        # expired. so lets get the most recent expired tag for newRepo shadowbuild tasks.
-        if tinfo['name'].startswith('SHADOWBUILD'):
-            oldrepo_state = koji.REPO_EXPIRED
-        else:
-            oldrepo_state = koji.REPO_READY
+        oldrepo_state = koji.REPO_READY
         oldrepo = self.session.getRepo(tinfo['id'], state=oldrepo_state)
         oldrepo_path = None
         if oldrepo:
@@ -5912,8 +5915,6 @@ class NewRepoTask(BaseTaskHandler):
 
         # finalize
         kwargs = {}
-        if event is not None:
-            kwargs['expire'] = True
         if cloned_archs:
             kwargs['repo_json_updates'] = {
                 'cloned_from_repo_id': oldrepo['id'],
@@ -6167,7 +6168,7 @@ class NewDistRepoTask(BaseTaskHandler):
                 continue
             upload_dir = koji.pathinfo.taskrelpath(subtasks[arch])
             self.session.host.distRepoMove(repo_id, upload_dir, arch)
-        self.session.host.repoDone(repo_id, {}, expire=False)
+        self.session.host.repoDone(repo_id, {})
         return 'Dist repository #%s successfully generated' % repo_id
@@ -46,8 +46,8 @@ from koji_cli.lib import (
     print_task_headers,
     print_task_recurse,
     unique_path,
-    warn,
     wait_repo,
+    warn,
     watch_logs,
     watch_tasks,
     truncate_string
@@ -562,7 +562,7 @@ def handle_build(options, session, args):
                       help="Wait on the build, even if running in the background")
     parser.add_option("--nowait", action="store_false", dest="wait", help="Don't wait on build")
     parser.add_option("--wait-repo", action="store_true",
-                      help="Wait for the actual buildroot repo of given target")
+                      help="Wait for a current repo for the build tag")
     parser.add_option("--wait-build", metavar="NVR", action="append", dest="wait_builds",
                       default=[], help="Wait for the given nvr to appear in buildroot repo")
     parser.add_option("--quiet", action="store_true",
@@ -4347,6 +4347,13 @@ def _print_histline(entry, **kwargs):
             fmt = "new external repo: %(external_repo.name)s"
         else:
             fmt = "external repo deleted: %(external_repo.name)s"
+    elif table == 'external_repo_data':
+        if edit:
+            fmt = "tracking data for external repo %(external_repo.name)s altered"
+        elif create:
+            fmt = "new tracking data for external repo %(external_repo.name)s"
+        else:
+            fmt = "deleted tracking data for external repo %(external_repo.name)s"
     elif table == 'tag_external_repos':
         if edit:
             fmt = "external repo entry for %(external_repo.name)s in tag %(tag.name)s updated"
@@ -4450,6 +4457,7 @@ _table_keys = {
    'tag_extra': ['tag_id', 'key'],
    'build_target_config': ['build_target_id'],
    'external_repo_config': ['external_repo_id'],
+   'external_repo_data': ['external_repo_id'],
    'host_config': ['host_id'],
    'host_channels': ['host_id', 'channel_id'],
    'tag_external_repos': ['tag_id', 'external_repo_id'],
@@ -4934,13 +4942,16 @@ def anon_handle_taginfo(goptions, session, args):
         build_targets = session.getBuildTargets(buildTagID=info['id'], **event_opts)
         repos = {}
         if not event:
-            for target in dest_targets + build_targets:
-                if target['build_tag'] not in repos:
-                    repo = session.getRepo(target['build_tag'])
+            # find related repos
+            repo_tags = [tg['build_tag'] for tg in dest_targets + build_targets]
+            repo_tags.append(info['id'])
+            for repo_tag_id in repo_tags:
+                if repo_tag_id not in repos:
+                    repo = session.getRepo(repo_tag_id)
                     if repo is None:
-                        repos[target['build_tag']] = "no active repo"
+                        repos[repo_tag_id] = "no active repo"
                     else:
-                        repos[target['build_tag']] = "repo#%(id)i: %(creation_time)s" % repo
+                        repos[repo_tag_id] = "repo#%(id)i: %(creation_time)s" % repo
         if dest_targets:
             print("Targets that build into this tag:")
             for target in dest_targets:
@@ -4956,6 +4967,8 @@ def anon_handle_taginfo(goptions, session, args):
         print("Targets that build from this tag:")
         for target in build_targets:
             print(" %s" % target['name'])
+    elif info['id'] in repos:
+        print("Current repo: %s" % repos[info['id']])
     external_repos = session.getTagExternalRepos(tag_info=info['id'], **event_opts)
     if external_repos:
         print("External repos:")
@@ -7254,9 +7267,14 @@ def anon_handle_wait_repo(options, session, args):
                            "(may be used multiple times)")
     parser.add_option("--target", action="store_true",
                       help="Interpret the argument as a build target name")
+    parser.add_option("--request", action="store_true",
+                      help="Create a repo request (requires auth)")
+    parser.add_option("--no-request", action="store_false", dest="request",
+                      help="Do not create a repo request (the default)")
     parser.add_option("--timeout", type="int", default=120,
                       help="Amount of time to wait (in minutes) before giving up "
                            "(default: 120)")
+    parser.add_option("-v", "--verbose", action="store_true", help="Be verbose")
     parser.add_option("--quiet", action="store_true", default=options.quiet,
                       help="Suppress output, success or failure will be indicated by the return "
                            "value only")
@@ -7268,52 +7286,164 @@ def anon_handle_wait_repo(options, session, args):
     elif len(args) > 1:
         parser.error("Only one tag may be specified")
 
-    tag = args[0]
+    tag_arg = args[0]
+
+    compat = False
+    if session.hub_version < (1, 35, 0):
+        compat = True
+
+    anon = True
+    if suboptions.request:
+        if compat:
+            error('Hub does not support repo requests')
+        # requires auth
+        options.noauth = False
+        activate_session(session, options)
+        anon = False
+    elif suboptions.request is None and not compat:
+        warn('The --request option is recommended for faster results')
 
     ensure_connection(session, options)
 
+    # get tag
     if suboptions.target:
-        target_info = session.getBuildTarget(tag)
+        # treat as a target
+        target_info = session.getBuildTarget(tag_arg)
         if not target_info:
-            parser.error("No such build target: %s" % tag)
-        tag = target_info['build_tag_name']
-        tag_id = target_info['build_tag']
+            parser.error("No such build target: %s" % tag_arg)
+        tag = session.getTag(target_info['build_tag'], strict=True)
     else:
-        tag_info = session.getTag(tag)
-        if not tag_info:
-            parser.error("No such tag: %s" % tag)
-        targets = session.getBuildTargets(buildTagID=tag_info['id'])
+        tag = session.getTag(tag_arg)
+        if not tag:
+            parser.error("No such tag: %s" % tag_arg)
+        # warn if not a build target
+        targets = session.getBuildTargets(buildTagID=tag['id'])
         if not targets:
-            warn("%(name)s is not a build tag for any target" % tag_info)
-            targets = session.getBuildTargets(destTagID=tag_info['id'])
+            warn("%(name)s is not a build tag for any target" % tag)
+            targets = session.getBuildTargets(destTagID=tag['id'])
             if targets:
                 maybe = {}.fromkeys([t['build_tag_name'] for t in targets])
                 maybe = sorted(maybe.keys())
                 warn("Suggested tags: %s" % ', '.join(maybe))
             error()
-        tag_id = tag_info['id']
+
+    if not suboptions.request:
+        # do we expect automatic regen?
+        if not tag['extra'].get('repo.auto') and not compat:
+            warn("This tag is not configured for automatic regeneration")
 
     for nvr in builds:
-        data = session.getLatestBuilds(tag_id, package=nvr["name"])
+        data = session.getLatestBuilds(tag['id'], package=nvr["name"])
         if len(data) == 0:
-            warn("No %s builds in tag %s" % (nvr["name"], tag))
+            warn("No %s builds in tag %s" % (nvr["name"], tag['name']))
         else:
             present_nvr = [x["nvr"] for x in data][0]
             expected_nvr = '%(name)s-%(version)s-%(release)s' % nvr
             if present_nvr != expected_nvr:
-                warn("nvr %s is not current in tag %s\n latest build in %s is %s" %
-                     (expected_nvr, tag, tag, present_nvr))
+                warn("nvr %s is not current in tag %s\n latest build is %s" %
+                     (expected_nvr, tag['name'], present_nvr))
 
-    success, msg = wait_repo(session, tag_id, builds,
-                             poll_interval=options.poll_interval, timeout=suboptions.timeout)
-    if success:
-        if not suboptions.quiet:
-            print(msg)
-    else:
-        error('' if suboptions.quiet else msg)
+    if compat:
+        # compat for 1.34 and below
+        success, msg = wait_repo(session, tag['id'], builds, poll_interval=options.poll_interval,
+                                 timeout=suboptions.timeout)
+        if success:
+            if not suboptions.quiet:
+                print(msg)
+        else:
+            error('' if suboptions.quiet else msg)
+        return
+
+    watcher = _get_watcher(options, suboptions, session, tag['id'], nvrs=suboptions.builds,
+                           min_event=None)
+
+    try:
+        repoinfo = watcher.waitrepo(anon=anon)
+    except koji.GenericError as err:
+        msg = 'Failed to get repo -- %s' % err
+        error('' if suboptions.quiet else msg)
+
+    if not suboptions.quiet:
+        print('Got repo %(id)i' % repoinfo)
+        print("Repo info: %s/repoinfo?repoID=%s" % (options.weburl, repoinfo['id']))
 
 
-def handle_regen_repo(options, session, args):
-    "[admin] Force a repo to be regenerated"
+def handle_wait_repo_request(goptions, session, args):
+    """[monitor] Wait for an existing repo request"""
+    usage = "usage: %prog wait-repo-request [options] <request-id>"
+    parser = OptionParser(usage=get_usage_str(usage))
+    parser.add_option("--timeout", type="int", default=120,
+                      help="Wait timeout (default: 120)")
+    parser.add_option("-v", "--verbose", action="store_true", help="More verbose output")
+    parser.add_option("--quiet", action="store_true", default=goptions.quiet,
+                      help="Reduced output")
+    (options, args) = parser.parse_args(args)
+
+    if len(args) == 0:
+        parser.error("A request id must be specified")
+    elif len(args) > 1:
+        parser.error("This command only accepts one argument")
+
+    activate_session(session, goptions)
+
+    req_id = args[0]
+
+    # first check the request
+    check = session.repo.checkRequest(req_id)
+
+    repo = check['repo']
+    if repo:
+        print('Got repo %(id)i' % repo)
+        print("Repo info: %s/repoinfo?repoID=%s" % (goptions.weburl, repo['id']))
+        return
+
+    # otherwise
+    req = check['request']
+    tag_id = req['tag_id']
+
+    watcher = _get_watcher(goptions, options, session, tag_id)
+
+    try:
+        repo = watcher.wait_request(req)
+    except koji.GenericError as err:
+        msg = 'Failed to get repo -- %s' % err
+        error('' if options.quiet else msg)
+
+
+def _get_watcher(goptions, options, *a, **kw):
+    """Get RepoWatcher instance"""
+
+    def check_opt(key):
+        for opts in options, goptions:
+            val = getattr(opts, key, None)
+            if val is not None:
+                return val
+        return None
+
+    logger = logging.getLogger("waitrepo")  # not under koji.*
+    handler = logging.StreamHandler(sys.stdout)
+    handler.setFormatter(logging.Formatter('%(message)s'))
+    handler.setLevel(logging.DEBUG)
+    logger.addHandler(handler)
+    if check_opt('debug'):
+        logger.setLevel(logging.DEBUG)
+    elif check_opt('quiet'):
+        logger.setLevel(logging.ERROR)
+    elif check_opt('verbose'):
+        logger.setLevel(logging.INFO)
+    else:
+        logger.setLevel(logging.WARNING)
+
+    watcher = koji.util.RepoWatcher(*a, logger=logger, **kw)
+    watcher.PAUSE = goptions.poll_interval
+    timeout = check_opt('timeout')
+    if timeout is not None:
+        watcher.TIMEOUT = timeout
+
+    return watcher
+
+
+def handle_regen_repo(goptions, session, args):
+    "[admin] Generate a current repo if there is not one"
     usage = "usage: %prog regen-repo [options] <tag>"
     parser = OptionParser(usage=get_usage_str(usage))
     parser.add_option("--target", action="store_true",
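Note: a typical invocation of the reworked command is something like
`koji wait-repo --request --timeout 60 f40-build` (the tag name is hypothetical);
without `--request` the command only watches anonymously and does not trigger
regeneration.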
@@ -7322,23 +7452,44 @@ def handle_regen_repo(options, session, args):
                       help="Wait on for regen to finish, even if running in the background")
     parser.add_option("--nowait", action="store_false", dest="wait",
                       help="Don't wait on for regen to finish")
+    parser.add_option("--make-task", action="store_true", help="Directly create a newRepo task")
     parser.add_option("--debuginfo", action="store_true", help="Include debuginfo rpms in repo")
     parser.add_option("--source", "--src", action="store_true",
                       help="Include source rpms in each of repos")
     parser.add_option("--separate-source", "--separate-src", action="store_true",
                       help="Include source rpms in separate src repo")
-    (suboptions, args) = parser.parse_args(args)
+    parser.add_option("--timeout", type="int", default=120,
+                      help="Wait timeout (default: 120)")
+    parser.add_option("-v", "--verbose", action="store_true", help="More verbose output")
+    parser.add_option("--quiet", action="store_true", default=goptions.quiet,
+                      help="Reduced output")
+    (options, args) = parser.parse_args(args)
+
+    if not options.make_task:
+        if session.hub_version >= (1, 35, 0):
+            # alias for request-repo --current
+            options.at_event = None
+            options.min_event = None
+            options.current = True
+            return _request_repo(goptions, session, parser, options, args)
+        else:
+            warn('Hub does not support repo requests, attempting to create a task directly.')
+
+    # otherwise we still have the old way
 
     if len(args) == 0:
         parser.error("A tag name must be specified")
     elif len(args) > 1:
-        if suboptions.target:
+        if options.target:
             parser.error("Only a single target may be specified")
         else:
             parser.error("Only a single tag name may be specified")
-    activate_session(session, options)
+
+    activate_session(session, goptions)
 
     tag = args[0]
     repo_opts = {}
-    if suboptions.target:
+    if options.target:
         info = session.getBuildTarget(tag)
         if not info:
             parser.error("No such build target: %s" % tag)
@@ -7354,20 +7505,127 @@ def handle_regen_repo(options, session, args):
             warn("%s is not a build tag" % tag)
     if not info['arches']:
         warn("Tag %s has an empty arch list" % info['name'])
-    if suboptions.debuginfo:
+    if options.debuginfo:
         repo_opts['debuginfo'] = True
-    if suboptions.source:
+    if options.source:
         repo_opts['src'] = True
-    if suboptions.separate_source:
+    if options.separate_source:
         repo_opts['separate_src'] = True
 
     task_id = session.newRepo(tag, **repo_opts)
     print("Regenerating repo for tag: %s" % tag)
     print("Created task: %d" % task_id)
-    print("Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id))
-    if suboptions.wait or (suboptions.wait is None and not _running_in_bg()):
+    print("Task info: %s/taskinfo?taskID=%s" % (goptions.weburl, task_id))
+    if options.wait or (options.wait is None and not _running_in_bg()):
         session.logout()
-        return watch_tasks(session, [task_id], quiet=options.quiet,
-                           poll_interval=options.poll_interval, topurl=options.topurl)
+        return watch_tasks(session, [task_id], quiet=goptions.quiet,
+                           poll_interval=goptions.poll_interval, topurl=goptions.topurl)
+
+
+def handle_request_repo(goptions, session, args):
+    """Request a repo for a tag"""
+    usage = "usage: %prog request-repo [options] <tag>"
+    parser = OptionParser(usage=get_usage_str(usage))
+    parser.add_option("--target", action="store_true",
+                      help="Interpret the argument as a build target name")
+    parser.add_option("--wait", action="store_true",
+                      help="Wait on for regen to finish, even if running in the background")
+    parser.add_option("--nowait", action="store_false", dest="wait",
+                      help="Don't wait on for regen to finish")
+    parser.add_option("--min-event", type="int", help="Minimum event id for repo")
+    parser.add_option("--at-event", type="int", help="Specific event id for repo")
+    parser.add_option("--current", "--last", action="store_true", help="Use current event for tag")
+    parser.add_option("--debuginfo", action="store_true", help="Include debuginfo rpms in repo")
+    parser.add_option("--source", "--src", action="store_true",
+                      help="Include source rpms in each of repos")
+    parser.add_option("--separate-source", "--separate-src", action="store_true",
+                      help="Include source rpms in separate src repo")
+    parser.add_option("--timeout", type="int", default=120,
+                      help="Wait timeout (default: 120)")
+    parser.add_option("-v", "--verbose", action="store_true", help="More verbose output")
+    parser.add_option("--quiet", action="store_true", default=goptions.quiet,
+                      help="Reduced output")
+    (options, args) = parser.parse_args(args)
+
+    _request_repo(goptions, session, parser, options, args)
+
+
+def _request_repo(goptions, session, parser, options, args):
+    """Handle the request-repo command"""
+    if len(args) == 0:
+        parser.error("A tag name must be specified")
+    elif len(args) > 1:
+        if options.target:
+            parser.error("Only a single target may be specified")
+        else:
+            parser.error("Only a single tag name may be specified")
+
+    # get the request parameters
+    params = {}
+    if options.at_event:
+        if options.min_event or options.current:
+            parser.error('Cannot specify both min-event and at-event')
+        params['at_event'] = options.at_event
+    elif options.current:
+        if options.min_event:
+            parser.error('Cannot specify both min-event and current')
+        params['min_event'] = "last"
+    elif options.min_event:
+        params['min_event'] = options.min_event
+    repo_opts = {}
+    if options.debuginfo:
+        repo_opts['debuginfo'] = True
+    if options.source:
+        repo_opts['src'] = True
+    if options.separate_source:
+        repo_opts['separate_src'] = True
+    if repo_opts:
+        params['opts'] = repo_opts
+
+    activate_session(session, goptions)
+
+    # get the tag
+    if options.target:
+        # treat first arg as a target
+        target = session.getBuildTarget(args[0])
+        if not target:
+            parser.error("No such build target: %s" % args[0])
+        tag = session.getTag(target['build_tag'], strict=True)
+    else:
+        tag = session.getTag(args[0])
+        if not tag:
+            parser.error("No such tag: %s" % args[0])
+    if not tag['arches']:
+        warn("Tag %s has an empty arch list" % tag['name'])
+
+    watcher = _get_watcher(goptions, options, session, tag['id'], **params)
+
+    # first make the request
+    check = watcher.request()
+
+    repo = check['repo']
+    if repo:
+        print('Got repo %(id)i' % repo)
+        print("Repo info: %s/repoinfo?repoID=%s" % (goptions.weburl, repo['id']))
+        return
+
+    # otherwise we should have a request
+    req = check['request']
+    if not options.wait:
+        print('Got request: %(id)s' % req)
+        if req.get('task_id'):
+            print('Got task: %(task_id)s' % req)
+            print('Task info: %s/taskinfo?taskID=%s' % (goptions.weburl, req['task_id']))
+        return
+    else:
+        try:
+            repo = watcher.wait_request(req)
+        except koji.GenericError as err:
+            msg = 'Failed to get repo -- %s' % err
+            error('' if options.quiet else msg)
+
+    print('Got repo %(id)i' % repo)
+    print("Repo info: %s/repoinfo?repoID=%s" % (goptions.weburl, repo['id']))
 
 
 def handle_dist_repo(options, session, args):
@@ -35,6 +35,17 @@ def handle_rpc(self, environ):
         raise
 
 
+def override_load_config(opts):
+    original_load_config = kojixmlrpc.load_config
+
+    def my_load_config(environ):
+        oldopts = original_load_config(environ)
+        oldopts.update(opts)
+        return oldopts
+
+    kojixmlrpc.load_config = my_load_config
+
+
 # Fake session for authenticated calls
 class FakeSession(auth.Session):


@@ -106,6 +117,8 @@ def get_options():
                       help='emulate an exclusive session')
     parser.add_option('-n', '--no-commit', action='store_true',
                       help='skip commit')
+    parser.add_option('-o', '--config-option', help='override config option',
+                      action='append', metavar='NAME=VALUE')
     opts, args = parser.parse_args()
 
     # parse request from args


@@ -123,6 +136,13 @@ def get_options():
         callargs.append(nice_literal(s))
     callargs = koji.encode_args(*callargs, **kwargs)
     opts.request = koji.xmlrpcplus.dumps(callargs, method, allow_none=1)
+    if opts.config_option:
+        overrides = {}
+        for s in opts.config_option:
+            k, v = s.split('=', 1)
+            v = nice_literal(v)
+            overrides[k] = v
+        opts.config_option = overrides
 
     return opts


@@ -161,6 +181,8 @@ def main():
     kojixmlrpc.ModXMLRPCRequestHandler.handle_rpc = handle_rpc
     if options.no_commit:
         db.DBWrapper.commit = skip_commit
+    if options.config_option:
+        override_load_config(options.config_option)
 
     environ = {}
     environ['SCRIPT_FILENAME'] = kojixmlrpc.__file__
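Note: the new `-o`/`--config-option` flag takes repeated NAME=VALUE pairs; the
values are parsed with `nice_literal` and merged over the loaded hub config,
e.g. `-o MaxRepoTasks=2` (the value here is illustrative).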
@@ -30,6 +30,7 @@ Contents
    image_build
    winbuild
    draft_builds
+   repo_generation
    exporting_repositories
    tag_inheritance
    misc
docs/source/repo_generation.rst (new file, 342 lines)
@@ -0,0 +1,342 @@

Repo Generation
===============

Koji generates repositories based on tag content. For the most part, this means yum repos
from the rpm content, but Koji can also generate maven repos if configured to do so.

The *primary* purpose of these repos is to facilitate Koji's own build process.
Most builds utilize a buildroot generated by the ``mock`` tool, which needs a yum repository
to pull packages from.

Repositories can be triggered in different ways and with different parameters, but all
repositories represent the contents of a tag at a specific point in time (i.e. an event).


On demand generation
--------------------

When Koji needs a repo for a tag, it files a *request* via a hub call.
Typically this is done in a build process, but requests can also be triggered automatically
without a build if configured. They can also be triggered manually.

::

    repo.request(tag, min_event=None, at_event=None, opts=None, priority=None, force=False)
    description: Request a repo for a tag

    :param int|str taginfo: tag id or name
    :param int|str min_event: minimum event for the repo (optional)
    :param int at_event: specific event for the repo (optional)
    :param dict opts: custom repo options (optional)
    :param bool force: force request creation, even if a matching repo exists

    The special value min_event="last" uses the most recent event for the tag.
    Otherwise min_event should be an integer.

    Use opts=None (the default) to get default options for the tag.
    If opts is given, it should be a dictionary of repo options. These will override
    the defaults.


Each repo request is for a single tag. The optional ``min_event`` parameter specifies how recent the
repo needs to be. If not given, Koji chooses a suitably recent event. The optional ``opts`` specifies
options for creating the repo. If not given, Koji uses the default options based on the tag.

When the hub responds to this call, it first checks to see if an existing repo satisfies the
request. If so, then information for that repo is returned and no further action is taken.
If there is no such repo yet, then Koji records the request and returns the request data.
If an identical active request already exists, then Koji will return that.
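For illustration, a minimal request from Python might look like the following
sketch (the hub URL and tag name are hypothetical, and the call requires an
authenticated session on a 1.35+ hub)::

    import koji

    session = koji.ClientSession('https://koji.example.com/kojihub')
    session.gssapi_login()  # repo requests require auth

    check = session.repo.request('f40-build', min_event='last')
    if check['repo']:
        print('Repo ready: %(id)i' % check['repo'])
    else:
        print('Request pending: %(id)s' % check['request'])
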
Build parameters
----------------

For some types of builds, the user can affect the parameters of the repo request.

For rpm builds, the ``--wait-repo`` option will cause the build to request a *current* repo.
That is, the ``min_event`` for the request will be the most recent event that affected the tag.
For example, if a previous build has just been tagged into the buildroot, then this option will
ensure that the new build gets a repo containing the previous one.

It's worth noting that rpm builds also accept ``--wait-build`` option(s) that will cause the build
to wait for specific NVRs to be present in the repo. This option is not actually handled by the
request mechanism. Instead, the build will wait for these NVRs to be tagged and then request a
current repo.


Repository Options
------------------

There are a few options that govern how the repo is generated. At present these are:

src
    whether to include srpms in the repos

debuginfo
    whether to include debuginfo rpms

separate_src
    whether to create a separate src repo

maven
    whether to also create a maven repo

These options are normally determined by the tag that the repo is based on.
Administrators can set ``repo.opts`` for a given tag to control these options.

Additionally the following pattern based hub options can be used:

SourceTags
    Tags matching these glob patterns will have the src option set

DebuginfoTags
    Tags matching these glob patterns will have the debuginfo option set

SeparateSourceTags
    Tags matching these glob patterns will have the separate_src option set

For historical reasons, the ``maven`` option can also be controlled by setting the ``maven_support``
field for the tag. E.g. ``koji edit-tag --maven-support MYTAG``

Note that the ``maven`` option is ignored if Maven support is disabled on the hub.

Manually requested repos can specify their own custom options.
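A manual request that overrides the tag defaults might look like this sketch
(the tag name is hypothetical; options not specified keep their tag defaults)::

    check = session.repo.request('f40-build', opts={'src': True, 'debuginfo': True})
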
Automatic generation
--------------------

Automatic generation can be configured by setting ``repo.auto=True`` for a given tag.
This requires administrative access.
The system regularly requests repos for such tags.
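Since ``repo.auto`` lives in the tag's extra data, one way to set it is via the
``editTag2`` call (a sketch; the tag name is hypothetical and the session needs
admin privileges)::

    session.editTag2('f40-build', extra={'repo.auto': True})
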
From Requests to Repos
----------------------

All repo requests go into a queue that Koji regularly checks.
As long as there is sufficient capacity, Koji will create ``newRepo`` tasks for these
requests.

The status of a request can be checked with the ``repo.checkRequest`` API call.

::

    repo.checkRequest(req_id)
    description: Report status of repo request

    :param int req_id: the request id
    :return: status dictionary

    The return dictionary will include 'request' and 'repo' fields.

If the return includes a non-None ``repo`` field, then that repo satisfies the request.
The ``request`` field will include ``task_id`` and ``task_state`` (may be None) to indicate
progress.
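A caller holding a request id can poll this call directly. The helper below is
a sketch (``wait_for_request`` is not part of the koji API)::

    import time

    def wait_for_request(session, req_id, pause=6):
        """Poll until the request is satisfied by a repo"""
        while True:
            check = session.repo.checkRequest(req_id)
            if check['repo']:
                return check['repo']
            if not check['request']['active']:
                raise RuntimeError('repo request is no longer active')
            time.sleep(pause)
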
Repository Data
---------------

The hub stores key data about each repo in the database and this can be reported numerous ways.
One common way is the ``repoInfo`` call, which returns data about a single repository. E.g.

::

    $ koji call repoInfo 2398
    {'begin_event': 497152,
     'begin_ts': 1707888890.306149,
     'create_event': 497378,
     'create_ts': 1710216388.543129,
     'creation_time': '2024-03-12 00:06:28.541893-04:00',
     'creation_ts': 1710216388.541893,
     'custom_opts': None,
     'dist': False,
     'end_event': None,
     'end_ts': None,
     'id': 2398,
     'opts': {'debuginfo': False, 'separate_src': False, 'src': False},
     'state': 3,
     'state_time': '2024-03-17 17:03:49.820435-04:00',
     'state_ts': 1710709429.820435,
     'tag_id': 2,
     'tag_name': 'f24-build',
     'task_id': 13611,
     'task_state': 2}

Key fields:

.. glossary::

    id
        The integer id of the repo itself

    tag_id
        The integer id of the tag the repo was created from

    tag_name
        The name of the tag the repo was created from

    state
        The (integer) state of the repo. Corresponds to ``koji.REPO_STATES`` values

    create_event
        The event id (moment in koji history) that the repo was created from. I.e. the contents
        of the repo come from the contents of the tag at this event.

    create_ts
        This is the timestamp for the create_event.

    creation_ts / creation_time
        This is the time that the repo was created, which may be quite different from the time
        of the repo's create_event. The ``creation_ts`` field is the numeric value and
        ``creation_time`` is a string representation of that.

    state_ts / state_time
        This is the time that the repo last changed state.

    begin_event / end_event
        These events define the *range of validity* for the repo. Individual events do not
        necessarily affect a given tag, so for each repo there is actually a range of events
        where it accurately represents the tag contents.
        The ``begin_event`` is the first event in the range. This will often be the same as
        the create_event, but might not be.
        The ``end_event`` is the first event after creation that changes the tag. This is
        often None when a repo is created. Koji will update this field as tags change.

    begin_ts / end_ts
        These are the numeric timestamps for the begin and end events.

    opts
        This is a dictionary of repo creation options

    custom_opts
        This dictionary indicates which options were overridden by the request

    task_id
        The numeric id of the task that created the repo

    dist
        A boolean flag. True for dist repos.
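The integer ``state`` values can be translated with ``koji.REPO_STATES``, which
maps in both directions. E.g. (a sketch; the values shown assume the standard
state ordering)::

    >>> import koji
    >>> koji.REPO_STATES['READY']
    1
    >>> koji.REPO_STATES[3]
    'DELETED'
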
Repository Lifecycle
--------------------

Generally, the lifecycle looks like:

::

    INIT -> READY -> EXPIRED -> DELETED

Repositories begin in the ``INIT`` state when the ``newRepo`` task first initializes them.
Repos in this state are incomplete and not ready to be used.

When Koji finishes creating a repo, it is moved to the ``READY`` state. Such repos are ready
to be used. Their contents will remain unchanged until they are deleted.
Note that this state does not mean the repo is current for its tag.

When a repo is no longer relevant, Koji will move it to the ``EXPIRED`` state. This means the
repo is marked for deletion and should no longer be used.

Once a repo has been expired for a waiting period, Koji will move it to the ``DELETED`` state
and remove its files from disk. The database entry will remain.

In cases of unusual errors, a repo might be moved to the ``PROBLEM`` state. Such repos should
not be used and will eventually be deleted.


Hub Configuration
-----------------

There are several hub configuration options governing repo generation behavior:

MaxRepoTasks
    The maximum number of ``newRepo`` tasks to run at one time. Default: ``10``

MaxRepoTasksMaven
    The maximum number of ``newRepo`` tasks for maven tags to run at one time. Default: ``2``

RepoRetries
    The number of times to retry a failed ``newRepo`` task per request. Default: ``3``

RequestCleanTime
    The number of minutes to wait before clearing an inactive repo request. Default: ``1440``

AllowNewRepo
    Whether to allow the legacy ``newRepo`` call. Default: ``True``

RepoLag
    This affects the default ``min_event`` value for normal repo requests.
    An event roughly this many seconds in the past is used. Default: ``3600``

RepoAutoLag
    Same as RepoLag, but for automatic requests. Default: ``7200``

RepoLagWindow
    This affects the granularity of the ``RepoLag`` and ``RepoAutoLag`` settings. Default: ``600``

RepoQueueUser
    The user that should own the ``newRepo`` tasks generated by repo requests. Default: ``kojira``

SourceTags
    Tags matching these glob patterns will have the src option set. Default: ``''``

DebuginfoTags
    Tags matching these glob patterns will have the debuginfo option set. Default: ``''``

SeparateSourceTags
    Tags matching these glob patterns will have the separate_src option set. Default: ``''``
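In the hub configuration file these are plain ``name = value`` entries, e.g.
(a sketch; the values shown are examples, not recommendations)::

    [hub]
    MaxRepoTasks = 10
    RepoLag = 3600
    SourceTags = *-with-src
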
Repository Layout
-----------------

Koji's repositories live under ``/mnt/koji/repos``. From there, they are indexed by tag name and repo id.
So, the full path to a given repository would look something like

::

    /mnt/koji/repos/f40-build/6178041/

This directory will contain:

* ``repo.json`` -- data about the repo itself
* ``groups`` -- a directory containing comps data
* ``<ARCH>`` -- a directory for each tag arch containing a yum repo

The full path to an actual yum repo would be something like:

::

    /mnt/koji/repos/f40-build/6178041/x86_64

This directory will contain:

* ``pkglist`` -- file listing the relative paths to the rpms for the repo
* ``blocklist`` -- file listing the blocked package names for the tag
* ``rpmlist.jsonl`` -- json data for the rpms in the repo
* ``toplink`` -- a relative symlink to the top of Koji's directory tree (i.e. up to /mnt/koji)
* ``repodata`` -- yum repo data

By default, source rpms are omitted. This can be controlled by repository options.
If the ``src`` option is True, then source rpms will be added to each arch repo separately,
similar to noarch rpms.
If the ``separate_src`` option is True, then a separate ``src`` repo is created.
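Clients should not hardcode these paths; ``koji.PathInfo`` computes them (a
sketch; the topdir and ids here are examples)::

    import koji

    pathinfo = koji.PathInfo(topdir='/mnt/koji')
    print(pathinfo.repo(6178041, 'f40-build'))
    # -> /mnt/koji/repos/f40-build/6178041
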
Dist Repos
----------

Dist repos are managed by a separate process.
See :doc:`exporting_repositories` for more details.


Older Koji Versions
-------------------

Prior to Koji 1.35, the triggering of repo generation was quite different.
The kojira service monitored all build tags and triggered ``newRepo`` tasks
whenever the tag content changed. The work queue was managed in kojira.
For large systems, this could lead to significant regeneration backlogs.
koji/tasks.py (145 lines changed)
@@ -154,7 +154,8 @@ LEGACY_SIGNATURES = {
        [['srcs', 'target', 'opts'], None, None, (None,)],
    ],
    'waitrepo': [
-       [['tag', 'newer_than', 'nvrs'], None, None, (None, None)],
+       [['tag', 'newer_than', 'nvrs', 'min_event'], None, None, (None, None, None)],
+       # [['tag', 'newer_than', 'nvrs'], None, None, (None, None)],
    ],
    'createLiveMedia': [
        [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile',
@@ -217,6 +218,8 @@ LEGACY_SIGNATURES = {
    'newRepo': [
        [['tag', 'event', 'src', 'debuginfo', 'separate_src'],
         None, None, (None, False, False, False)],
+       [['tag', 'event', 'src', 'debuginfo', 'separate_src', 'opts'],
+        None, None, (None, None, None, None, None)],
    ],
    'createImage': [
        [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info',
@@ -581,35 +584,34 @@ class BaseTaskHandler(object):
 
     def getRepo(self, tag, builds=None, wait=False):
         """
-        Get the active repo for the given tag. If there is no repo available,
-        wait for a repo to be created.
+        Get a repo that satisfies the given conditions. If there is no matching
+        repo available, wait for one (via a waitrepo subtask).
 
-        if wait is True - always wait for new repo
-        if builds are present, wait until repo doesn't contain these
+        :param int|str tag: the tag for the requested repo
+        :param list builds: require that the repo contain these builds
+        :param bool wait: (misnamed) get a repo that is current as of our start time
         """
-        if wait:
-            create_ts = time.time()
-        else:
-            create_ts = None
-        repo_info = self.session.getRepo(tag)
-        taginfo = self.session.getTag(tag, strict=True)
-        if not repo_info:
-            # make sure there is a target
-            targets = self.session.getBuildTargets(buildTagID=taginfo['id'])
-            if not targets:
-                raise koji.BuildError('no repo (and no target) for tag %s' % taginfo['name'])
-            wait = True
-        elif builds:
-            build_infos = [koji.parse_NVR(build) for build in builds]
-            if not koji.util.checkForBuilds(self.session, taginfo['id'],
-                                            build_infos, repo_info['create_event']):
-                wait = True
-
         if wait:
-            task_id = self.session.host.subtask(method='waitrepo',
-                                                arglist=[tag, create_ts, builds],
-                                                parent=self.id)
-            repo_info = self.wait(task_id)[task_id]
+            # This option is now misnamed. Previously we would always wait to ensure a
+            # current repo, but we have better options now
+            min_event = "last"
+        else:
+            min_event = None
+
+        watcher = koji.util.RepoWatcher(self.session, tag, nvrs=builds, min_event=min_event,
+                                        logger=self.logger)
+        repoinfo = watcher.getRepo()
+
+        # Did we get a repo?
+        if repoinfo:
+            return repoinfo
+
+        # otherwise, we create a subtask to continue waiting for us
+        # this makes the process more visible to the user
+        args = watcher.task_args()
+        task_id = self.session.host.subtask(method='waitrepo', arglist=args, parent=self.id)
+        repo_info = self.wait(task_id)[task_id]
         return repo_info
 
     def run_callbacks(self, plugin, *args, **kwargs):
@@ -865,75 +867,40 @@ class WaitrepoTask(BaseTaskHandler):
     # time in minutes before we fail this task
     TIMEOUT = 120
 
-    def handler(self, tag, newer_than=None, nvrs=None):
+    def handler(self, tag, newer_than=None, nvrs=None, min_event=None):
         """Wait for a repo for the tag, subject to given conditions
 
-        newer_than: create_event timestamp should be newer than this
-        nvr: repo should contain this nvr (which may not exist at first)
+        tag: the tag for the repo
+        newer_than: (legacy) create_event timestamp should be newer than this
+        nvrs: repo should contain these nvrs (which may not exist at first)
+        min_event: minimum event for the repo
 
-        Only one of the options may be specified. If neither is, then
-        the call will wait for the first ready repo.
+        The newer_than arg is provided for backward compatibility. The min_event arg is preferred.
 
-        Returns the repo info (from getRepo) of the chosen repo
+        Returns the repo info of the chosen repo
         """
 
-        start = time.time()
+        # handle legacy newer_than arg
+        if newer_than is not None:
+            if min_event is not None:
+                raise koji.GenericError('newer_than and min_event args conflict')
+            if isinstance(newer_than, six.string_types) and newer_than.lower() == "now":
+                min_event = "last"
+            elif isinstance(newer_than, six.integer_types + (float,)):
+                # here, we look for the first event where the tag changed after this time
+                # or, if the tag has not changed since that time, we use its last change event
+                base = self.session.getLastEvent(before=newer_than)
+                min_event = self.session.tagFirstChangeEvent(tag, after=base) or "last"
+            else:
+                raise koji.GenericError("Invalid value for newer_than: %s" % newer_than)
+
+        taginfo = self.session.getTag(tag, strict=True)
+        targets = self.session.getBuildTargets(buildTagID=taginfo['id'])
+        if not targets:
+            raise koji.GenericError("No build target for tag: %s" % taginfo['name'])
+        watcher = koji.util.RepoWatcher(self.session, tag, nvrs=nvrs, min_event=min_event,
+                                        logger=self.logger)
+        watcher.PAUSE = self.PAUSE
+        watcher.TIMEOUT = self.TIMEOUT
+        # TODO config?
+        repoinfo = watcher.waitrepo()
+        return repoinfo
 
-        if isinstance(newer_than, six.string_types) and newer_than.lower() == "now":
-            newer_than = start
-        if not isinstance(newer_than, six.integer_types + (type(None), float)):
-            raise koji.GenericError("Invalid value for newer_than: %s" % newer_than)
-
-        if newer_than and nvrs:
-            raise koji.GenericError("only one of (newer_than, nvrs) may be specified")
-
-        if not nvrs:
-            nvrs = []
-        builds = [koji.parse_NVR(nvr) for nvr in nvrs]
-
-        last_repo = None
-
-        while True:
-            try:
-                taginfo = self.session.getTag(tag, strict=True)
-            except koji.GenericError:
-                self.logger.debug("Tag %s got lost while waiting for newrepo", tag)
-                raise koji.GenericError("Unsuccessfully waited %s for %s repo. "
-                                        "Tag was probably deleted meanwhile." %
-                                        (koji.util.duration(start), tag))
-            repo = self.session.getRepo(taginfo['id'])
-            if repo and repo != last_repo:
-                if builds:
-                    if koji.util.checkForBuilds(
-                            self.session, taginfo['id'], builds, repo['create_event']):
-                        self.logger.debug("Successfully waited %s for %s to appear "
-                                          "in the %s repo" %
-                                          (koji.util.duration(start), koji.util.printList(nvrs),
-                                           taginfo['name']))
-                        return repo
-                elif newer_than:
-                    if repo['create_ts'] > newer_than:
-                        self.logger.debug("Successfully waited %s for a new %s repo" %
-                                          (koji.util.duration(start), taginfo['name']))
-                        return repo
-                else:
-                    # no check requested -- return first ready repo
-                    return repo
-
-            if (time.time() - start) > (self.TIMEOUT * 60.0):
-                if builds:
-                    raise koji.GenericError("Unsuccessfully waited %s for %s to appear "
-                                            "in the %s repo" %
-                                            (koji.util.duration(start), koji.util.printList(nvrs),
-                                             taginfo['name']))
-                else:
-                    raise koji.GenericError("Unsuccessfully waited %s for a new %s repo" %
-                                            (koji.util.duration(start), taginfo['name']))
-
-            time.sleep(self.PAUSE)
-            last_repo = repo
-        # the end
koji/util.py (240 lines changed)
@@ -130,6 +130,240 @@ def checkForBuilds(session, tag, builds, event, latest=False):
     return True
 
 
+class RepoWatcher(object):
+
+    # timing defaults
+    PAUSE = 6
+    TIMEOUT = 120
+
+    def __init__(self, session, tag, nvrs=None, min_event=None, at_event=None, opts=None,
+                 logger=None):
+        self.session = session
+        self.taginfo = session.getTag(tag, strict=True)
+        self.start = None
+        if nvrs is None:
+            nvrs = []
+        self.nvrs = nvrs
+        self.builds = [koji.parse_NVR(nvr) for nvr in nvrs]
+        # note that we don't assume the nvrs exist yet
+        self.at_event = at_event
+        if min_event is None:
+            self.min_event = None
+        elif at_event is not None:
+            raise koji.ParameterError('Cannot specify both min_event and at_event')
+        elif min_event == "last":
+            # TODO pass through?
+            self.min_event = session.tagLastChangeEvent(self.taginfo['id'])
+        else:
+            self.min_event = int(min_event)
+        # if opts is None we'll get the default opts
+        self.opts = opts
+        self.logger = logger or logging.getLogger('koji')
+
+    def get_start(self):
+        # we don't necessarily want to start the clock in init
+        if not self.start:
+            self.start = time.time()
+        return self.start
+
+    def getRepo(self):
+        """Return repo if available now, without waiting
+
+        Returns repoinfo or None
+        """
+
+        self.logger.info('Need a repo for %s', self.taginfo['name'])
+
+        # check builds first
+        if self.builds:
+            # there is no point in requesting a repo if the builds aren't even tagged
+            if not koji.util.checkForBuilds(self.session, self.taginfo['id'], self.builds,
+                                            event=None):
+                self.logger.debug('Builds %s not present in tag %s', self.nvrs,
+                                  self.taginfo['name'])
+                return None
+
+        check = self.request()
+        repoinfo = check.get('repo')
+        if repoinfo:
+            # "he says they've already got one"
+            self.logger.info('Request yielded repo: %r', check)
+            if self.check_repo(repoinfo):
+                return repoinfo
+
+        # TODO save our request to avoid duplication later
+        # otherwise
+        return None
+
+    def task_args(self):
+        """Return args for a waitrepo task matching our data"""
+        tag = self.taginfo['name']
+        newer_than = None  # this legacy arg doesn't make sense for us
+        if self.at_event:
+            raise koji.GenericError('at_event not supported by waitrepo task')
+        if self.opts:
+            # TODO?
+            raise koji.GenericError('opts not supported by waitrepo task')
+        return [tag, newer_than, self.nvrs, self.min_event]
+
+    def waitrepo(self, anon=False):
+        self.logger.info('Waiting on repo for %s', self.taginfo['name'])
+        self.get_start()
+        min_event = self.min_event
+        self.logger.debug('min_event = %r, nvrs = %r', min_event, self.nvrs)
+        repoinfo = None
+        req = None
+        while True:
+            # wait on existing request if we have one
+            if req:
+                repoinfo = self.wait_request(req)
+                if self.check_repo(repoinfo):
+                    break
+                elif self.at_event is not None:
+                    # shouldn't happen
+                    raise koji.GenericError('Failed at_event request')
+                else:
+                    min_event = self.session.tagLastChangeEvent(self.taginfo['id'])
+                    # we should have waited for builds before creating the request
+                    # this could indicate further tagging/untagging, or a bug
+                    self.logger.error('Repo request did not satisfy conditions')
+            else:
+                # check for repo directly
+                # either first pass or anon mode
+                repoinfo = self.session.repo.get(self.taginfo['id'], min_event=min_event,
+                                                 at_event=self.at_event, opts=self.opts)
+                if repoinfo and self.check_repo(repoinfo):
+                    break
+                # Otherwise, we'll need a new request
+                if self.builds:
+                    # No point in requesting a repo if the builds aren't tagged yet
+                    self.wait_builds(self.builds)
+                    min_event = self.session.tagLastChangeEvent(self.taginfo['id'])
+                    self.logger.debug('Updated min_event to last change: %s', min_event)
+                if not anon:
+                    # Request a repo
+                    check = self.request(min_event)
+                    repoinfo = check.get('repo')
+                    if repoinfo:
+                        self.logger.debug('Request yielded repo: %r', check)
+                        if self.check_repo(repoinfo):
+                            break
+                        # otherwise we'll loop and try again
+                    else:
+                        req = check['request']
+                        self.logger.info('Got request: %(id)s', req)
+                        self.logger.debug('Request data: %s', req)
+                        if min_event in ('last', None):
+                            min_event = req['min_event']
+                            self.logger.info('Updated min_event from hub: %s', min_event)
+            self.pause()
+
+        self.logger.debug('Got repo: %r', repoinfo)
+        return repoinfo
+
+    def request(self, min_event=None):
+        if min_event is None:
+            min_event = self.min_event
+        self.logger.info('Requesting a repo')
+        self.logger.debug('self.session.repo.request(%s, min_event=%s, at_event=%s, opts=%r)',
+                          self.taginfo['id'], min_event, self.at_event, self.opts)
+        check = self.session.repo.request(self.taginfo['id'], min_event=min_event,
+                                          at_event=self.at_event, opts=self.opts)
+        return check
+
+    def wait_request(self, req):
+        watch_fields = ('task_id', 'task_state', 'repo_id', 'active', 'tries')
+        self.get_start()
+        watch_data = dict([(f, req.get(f)) for f in watch_fields])
+        while True:
+            check = self.session.repo.checkRequest(req['id'])
+            self.logger.debug('Request check: %r', check)
+            repo = check.get('repo')
+            if repo:
+                return repo
+            for f in watch_fields:
+                val1 = watch_data[f]
+                val2 = check['request'][f]
+                if val1 != val2:
+                    watch_data[f] = val2
+                    if f == 'task_state':
+                        # convert if we can
+                        val1 = koji.TASK_STATES[val1] if val1 is not None else val1
+                        val2 = koji.TASK_STATES[val2] if val2 is not None else val2
+                    self.logger.info('Request updated: %s: %s -> %s', f, val1, val2)
+            if self.check_timeout():
+                raise koji.GenericError("Unsuccessfully waited %s for a new %s repo" %
+                                        (koji.util.duration(self.start), self.taginfo['name']))
+            if not check['request']['active']:
+                raise koji.GenericError("Repo request no longer active")
+            self.pause()
+
+    def wait_builds(self, builds):
+        self.get_start()
+        self.logger.info('Waiting for nvrs %s in tag %s', self.nvrs, self.taginfo['name'])
+        while True:
+            if koji.util.checkForBuilds(self.session, self.taginfo['id'], builds, event=None):
+                self.logger.debug('Successfully waited for nvrs %s in tag %s', self.nvrs,
+                                  self.taginfo['name'])
+                return
+            if self.check_timeout():
+                raise koji.GenericError("Unsuccessfully waited %s for %s to appear in the %s repo"
+                                        % (koji.util.duration(self.start),
+                                           koji.util.printList(self.nvrs),
+                                           self.taginfo['name']))
+            self.logger.debug('Waiting for nvrs %s in tag %s', self.nvrs, self.taginfo['name'])
+            self.pause()
+
+    def check_repo(self, repoinfo):
+        """See if the repo satisfies our conditions"""
+
+        # Correct tag?
+        if repoinfo['tag_id'] != self.taginfo['id']:
+            # should not happen
+            self.logger.error('Got repo for wrong tag, expected %s, got %s',
+                              self.taginfo['id'], repoinfo['tag_id'])
+            return False
+
+        # Matching event?
+        if self.at_event is not None:
+            if repoinfo['create_event'] != self.at_event:
+                self.logger.info('Got repo with wrong event. %s != %s',
+                                 repoinfo['create_event'], self.at_event)
+                return False
+        elif self.min_event is not None:
+            if repoinfo['create_event'] < self.min_event:
+                self.logger.info('Got repo before min event. %s < %s',
+                                 repoinfo['create_event'], self.min_event)
+                return False
+
+        # Matching opts
+        if self.opts is not None:
+            if repoinfo['opts'] != self.opts:
+                self.logger.info('Got repo with wrong opts. %s != %s',
+                                 repoinfo['opts'], self.opts)
+                return False
+
+        # Needed builds?
+        if self.builds:
+            if not koji.util.checkForBuilds(self.session, self.taginfo['id'], self.builds,
+                                            event=repoinfo['create_event']):
+                self.logger.info('Got repo without needed builds')
+                return False
+
+        self.logger.debug('Repo satisfies our conditions')
+        return True
+
+    def pause(self):
+        self.logger.debug('Pausing')
+        time.sleep(self.PAUSE)
+
+    def check_timeout(self):
+        if (time.time() - self.start) > (self.TIMEOUT * 60.0):
+            return True
+        # else
+        return False
+
+
 def duration(start):
     """Return the duration between start and now in MM:SS format"""
     elapsed = time.time() - start
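Note: RepoWatcher is the common entry point for both the new CLI commands and the
task handlers. A minimal standalone use might look like the following sketch,
assuming an authenticated ClientSession named `session` and a hypothetical tag name:

    # wait up to 30 minutes for a repo current as of the tag's last change
    watcher = koji.util.RepoWatcher(session, 'f40-build', min_event='last')
    watcher.TIMEOUT = 30
    repoinfo = watcher.waitrepo()
    print('Got repo %(id)i at event %(create_event)i' % repoinfo)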
@@ -780,13 +1014,13 @@ def eventFromOpts(session, opts):
         ts: an event timestamp (int)
         repo: pull event from given repo
     """
-    event_id = getattr(opts, 'event')
+    event_id = getattr(opts, 'event', None)
     if event_id:
         return session.getEvent(event_id)
-    ts = getattr(opts, 'ts')
+    ts = getattr(opts, 'ts', None)
     if ts is not None:
         return session.getLastEvent(before=ts)
-    repo = getattr(opts, 'repo')
+    repo = getattr(opts, 'repo', None)
     if repo is not None:
         rinfo = session.repoInfo(repo, strict=True)
         return {'id': rinfo['create_event'],
kojihub/db.py (147 lines changed)
@@ -114,7 +114,13 @@ class CursorWrapper:
             def quote(a, b):
                 return a % b
         try:
-            return quote(operation, parameters)
+            sql = quote(operation, parameters)
+            if isinstance(sql, bytes):
+                try:
+                    sql = koji.util.decode_bytes(sql)
+                except Exception:
+                    pass
+            return sql
         except Exception:
             self.logger.exception(
                 'Unable to quote query:\n%s\nParameters: %s', operation, parameters)
@ -902,31 +908,47 @@ class QueryView:
        return self.get_query()

    def get_query(self):
        self.extra_joins = []
        self.values = {}
        self.order_map = {}
        self._implicit_joins = []
        self._values = {}
        self._order_map = {}

        self.check_opts()

        tables = list(self.tables)  # copy
        fields = self.get_fields(self.fields)
        columns, aliases = zip(*fields.items())
        clauses = self.get_clauses()
        # get_fields needs to be after clauses because it might consider other implicit joins
        fields = self.get_fields(self.fields)
        aliases, columns = zip(*fields.items())
        joins = self.get_joins()
        self._query = QueryProcessor(
            columns=columns, aliases=aliases,
            tables=tables, joins=joins,
            clauses=clauses, values=self.values,
            opts=self.opts, order_map=self.order_map)
            clauses=clauses, values=self._values,
            opts=self.opts, order_map=self._order_map)

        return self._query

    def get_fields(self, fields):
        fields = fields or self.default_fields
        if not fields or fields == '*':
            fields = sorted(self.fieldmap.keys())
        fields = fields or self.default_fields or ['*']
        if isinstance(fields, str):
            fields = [fields]

        return {self.map_field(f): f for f in fields}
        # handle special field names
        flist = []
        for field in fields:
            if field == '*':
                # all fields that don't require additional joins
                for f in self.fieldmap:
                    joinkey = self.fieldmap[f][1]
                    if joinkey is None or joinkey in self._implicit_joins:
                        flist.append(f)
            elif field == '**':
                # all fields
                flist.extend(self.fieldmap)
            else:
                flist.append(field)

        return {f: self.map_field(f) for f in set(flist)}

    def check_opts(self):
        # some options may trigger joins
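A sketch of how the reworked field selection behaves once a QueryView subclass is queried (clause values are illustrative; RepoQuery is the subclass added in kojihub/repos.py later in this change):

    from kojihub.repos import RepoQuery
    clauses = [['tag_id', '=', 42]]
    # '*' selects only the join-free columns from fieldmap
    basic = RepoQuery(clauses, fields='*').execute()
    # '**' selects everything, pulling in the implicit joins (tag, task, ...)
    full = RepoQuery(clauses, fields='**').execute()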
@ -936,7 +958,7 @@ class QueryView:
            for key in self.opts['order'].split(','):
                if key.startswith('-'):
                    key = key[1:]
                self.order_map[key] = self.map_field(key)
                self._order_map[key] = self.map_field(key)
        if 'group' in self.opts:
            for key in self.opts['group'].split(','):
                self.map_field(key)
@ -948,7 +970,7 @@ class QueryView:
        fullname, joinkey = f_info
        fullname = fullname or field
        if joinkey:
            self.extra_joins.append(joinkey)
            self._implicit_joins.append(joinkey)
            # duplicates removed later
        return fullname
@ -968,13 +990,13 @@ class QueryView:
            elif len(clause) == 3:
                field, op, value = clause
                op = op.upper()
                if op not in ('IN', '=', '!=', '>', '<', '>=', '<='):
                if op not in ('IN', '=', '!=', '>', '<', '>=', '<=', 'IS', 'IS NOT', '@>', '<@'):
                    raise koji.ParameterError(f'Invalid operator: {op}')
            else:
                raise koji.ParameterError(f'Invalid clause: {clause}')
            fullname = self.map_field(field)
            key = f'v_{field}_{n}'
            self.values[key] = value
            self._values[key] = value
            result.append(f'{fullname} {op} %({key})s')

        return result
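The widened operator list lets callers express NULL tests and JSONB containment directly in clause triples; an illustrative example:

    import json
    clauses = [
        ['repo_id', 'IS', None],                    # NULL test (value None renders as NULL)
        ['opts', '@>', json.dumps({'src': True})],  # JSONB containment test
    ]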
@ -982,8 +1004,8 @@ class QueryView:
    def get_joins(self):
        joins = list(self.joins)
        seen = set()
        # note we preserve the order that extra joins were added
        for joinkey in self.extra_joins:
        # note we preserve the order that implicit joins were added
        for joinkey in self._implicit_joins:
            if joinkey in seen:
                continue
            seen.add(joinkey)
@ -1128,3 +1150,92 @@ def _applyQueryOpts(results, queryOpts):
        return len(results)
    else:
        return results


class BulkUpdateProcessor(object):
    """Build a bulk update statement using a from clause

    table - the table to update
    data - list of dictionaries of update data (keys = row names)
    match_keys - the fields that are used to match

    The row data is provided as a list of dictionaries. Each entry
    must contain the same keys.

    The match_keys value indicates which keys are used to select the
    rows to update. The remaining keys are the actual updates.
    I.e. if you have data = [{'a':1, 'b':2}] with match_keys=['a'],
    this will set b=2 for rows where a=1
    """

    def __init__(self, table, data=None, match_keys=None):
        self.table = table
        self.data = data or []
        if match_keys is None:
            self.match_keys = []
        else:
            self.match_keys = list(match_keys)
        self._values = {}

    def __str__(self):
        return self.get_sql()

    def get_sql(self):
        if not self.data or not self.match_keys:
            return "-- incomplete bulk update"
        set_keys, all_keys = self.get_keys()
        match_keys = list(self.match_keys)
        match_keys.sort()

        utable = f'__kojibulk_{self.table}'
        utable = utable.replace('.', '_')  # in case schema qualified
        assigns = [f'{key} = {utable}.{key}' for key in set_keys]
        values = {}  # values for lookup
        fdata = []  # data for VALUES clause
        for n, row in enumerate(self.data):
            # each row is a dictionary with all keys
            parts = []
            for key in all_keys:
                v_key = f'val_{key}_{n}'
                values[v_key] = row[key]
                parts.append(f'%({v_key})s')
            fdata.append('(%s)' % ', '.join(parts))

        clauses = [f'{self.table}.{key} = {utable}.{key}' for key in match_keys]

        parts = [
            'UPDATE %s SET %s\n' % (self.table, ', '.join(assigns)),
            'FROM (VALUES %s)\nAS %s (%s)\n' % (
                ', '.join(fdata), utable, ', '.join(all_keys)),
            'WHERE (%s)' % ' AND '.join(clauses),
        ]
        self._values = values
        return ''.join(parts)

    def get_keys(self):
        if not self.data:
            raise ValueError('no update data')
        all_keys = list(self.data[0].keys())
        for key in all_keys:
            if not isinstance(key, str):
                raise TypeError('update data must use string keys')
        all_keys.sort()
        set_keys = [k for k in all_keys if k not in self.match_keys]
        set_keys.sort()
        # also check that data is sane
        required = set(all_keys)
        for row in self.data:
            if set(row.keys()) != required:
                raise ValueError('mismatched update keys')
        return set_keys, all_keys

    def __repr__(self):
        return "<BulkUpdateProcessor: %r>" % vars(self)

    def execute(self):
        sql = self.get_sql()  # sets self._values
        return _dml(sql, self._values)


# the end
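For illustration, a two-row update matched on id would render roughly like this (a sketch, not captured output):

    proc = BulkUpdateProcessor(
        'repo',
        data=[{'id': 1, 'end_event': 100}, {'id': 2, 'end_event': 101}],
        match_keys=('id',))
    print(proc.get_sql())
    # UPDATE repo SET end_event = __kojibulk_repo.end_event
    # FROM (VALUES (%(val_end_event_0)s, %(val_id_0)s), (%(val_end_event_1)s, %(val_id_1)s))
    # AS __kojibulk_repo (end_event, id)
    # WHERE (repo.id = __kojibulk_repo.id)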
kojihub/kojihub.py

@ -76,12 +76,14 @@ from koji.util import (
    safer_move,
)
from . import scheduler
from . import repos
from .auth import get_user_perms, get_user_groups
from .db import (  # noqa: F401
    BulkInsertProcessor,
    DeleteProcessor,
    InsertProcessor,
    QueryProcessor,
    QueryView,
    Savepoint,
    UpdateProcessor,
    UpsertProcessor,
@ -646,9 +648,9 @@ def make_task(method, arglist, **opts):
    opts.setdefault('priority', koji.PRIO_DEFAULT)
    # calling function should enforce priority limitations, if applicable
    opts.setdefault('arch', 'noarch')
    if not context.session.logged_in:
        raise koji.GenericError('task must have an owner')
    else:
    if 'owner' not in opts:
        if not context.session.logged_in:
            raise koji.GenericError('task must have an owner')
        opts['owner'] = context.session.user_id
    opts['label'] = None
    opts['parent'] = None
@ -2678,22 +2680,29 @@ def maven_tag_archives(tag_id, event_id=None, inherit=True):
    return _iter_archives()


def repo_init(tag, task_id=None, with_src=False, with_debuginfo=False, event=None,
              with_separate_src=False):
    """Create a new repo entry in the INIT state, return full repo data
def repo_init(tag, task_id=None, event=None, opts=None):
    """Create a new repo entry in the INIT state

    :param int|None task_id: (optional) the task that is creating the repo
    :param int|None event: (optional) specify the event to create the repo from
    :param dict|None opts: (optional) repo options (None for default settings)

    Returns a dictionary containing
        repo_id, event_id
    """
    task_id = convert_value(task_id, cast=int, none_allowed=True)
    event = convert_value(event, cast=int, none_allowed=True)
    state = koji.REPO_INIT
    tinfo = get_tag(tag, strict=True, event=event)
    koji.plugin.run_callbacks('preRepoInit', tag=tinfo, with_src=with_src,
                              with_debuginfo=with_debuginfo, event=event, repo_id=None,
                              with_separate_src=with_separate_src, task_id=task_id)
    opts, custom = repos.get_repo_opts(tinfo, override=opts)

    # TODO: do we need to provide old callback opt params for compatibility?
    koji.plugin.run_callbacks('preRepoInit', tag=tinfo, event=event, repo_id=None, task_id=task_id,
                              opts=opts)

    tag_id = tinfo['id']
    repo_arches = {}
    if with_separate_src:
    if opts['separate_src']:
        repo_arches['src'] = 1
    if tinfo['arches']:
        for arch in tinfo['arches'].split():

@ -2710,9 +2719,22 @@ def repo_init(tag, task_id=None, with_src=False, with_debuginfo=False, event=Non
            clauses=['id=%(event)s'], values={'event': event})
        query.singleValue(strict=True)
        event_id = event
    insert = InsertProcessor('repo')
    insert.set(id=repo_id, create_event=event_id, tag_id=tag_id, state=state, task_id=task_id)

    # do the insert
    data = {
        'id': repo_id,
        'create_event': event_id,
        'begin_event': tag_last_change_event(tag_id, before=event_id) or event_id,
        'end_event': tag_first_change_event(tag_id, after=event_id),  # None if unchanged
        'tag_id': tag_id,
        'state': state,
        'task_id': task_id,
        'opts': json.dumps(opts),
        'custom_opts': json.dumps(custom),
    }
    insert = InsertProcessor('repo', data=data)
    insert.execute()

    # Need to pass event_id because even though this is a single transaction,
    # it is possible to see the results of other committed transactions
    latest = not tinfo['extra'].get('repo_include_all', False)

@ -2741,9 +2763,12 @@ def repo_init(tag, task_id=None, with_src=False, with_debuginfo=False, event=Non
        'tag_id': tinfo['id'],
        'task_id': task_id,
        'event_id': event_id,
        'with_src': bool(with_src),
        'with_separate_src': bool(with_separate_src),
        'with_debuginfo': bool(with_debuginfo),
        'opts': opts,
        'custom_opts': custom,
        # also include these for compat:
        'with_src': opts['src'],
        'with_separate_src': opts['separate_src'],
        'with_debuginfo': opts['debuginfo'],
    }
    with open('%s/repo.json' % repodir, 'wt', encoding='utf-8') as fp:
        json.dump(repo_info, fp, indent=2)

@ -2768,19 +2793,19 @@ def repo_init(tag, task_id=None, with_src=False, with_debuginfo=False, event=Non
        rpmlist[repoarch] = open(joinpath(archdir, 'rpmlist.jsonl'), 'wt', encoding='utf-8')
    # NOTE - rpms is a generator
    for rpminfo in rpms:
        if not with_debuginfo and koji.is_debuginfo(rpminfo['name']):
        if not opts['debuginfo'] and koji.is_debuginfo(rpminfo['name']):
            continue
        relpath = "%s/%s\n" % (builddirs[rpminfo['build_id']], relpathinfo.rpm(rpminfo))
        rpm_json = json.dumps(rpminfo, indent=None)
        # must be one line for nl-delimited json
        arch = rpminfo['arch']
        if arch == 'src':
            if with_src:
            if opts['src']:
                for repoarch in repo_arches:
                    pkglist[repoarch].write(relpath)
                    rpmlist[repoarch].write(rpm_json)
                    rpmlist[repoarch].write('\n')
            if with_separate_src:
            if opts['separate_src']:
                pkglist[arch].write(relpath)
                rpmlist[arch].write(rpm_json)
                rpmlist[arch].write('\n')

@ -2811,7 +2836,10 @@ def repo_init(tag, task_id=None, with_src=False, with_debuginfo=False, event=Non
            blocklist.write('\n')
        blocklist.close()

    if context.opts.get('EnableMaven') and tinfo['maven_support']:
    if opts['maven']:
        if not context.opts.get('EnableMaven'):
            # either option override or recently disabled
            raise koji.GenericError("Maven support not enabled")
        artifact_dirs = {}
        dir_links = set()
        for archive in maven_tag_archives(tinfo['id'], event_id):

@ -2844,9 +2872,9 @@ def repo_init(tag, task_id=None, with_src=False, with_debuginfo=False, event=Non
    for artifact_dir, artifacts in artifact_dirs.items():
        _write_maven_repo_metadata(artifact_dir, artifacts)

    koji.plugin.run_callbacks('postRepoInit', tag=tinfo, with_src=with_src,
                              with_debuginfo=with_debuginfo, event=event, repo_id=repo_id,
                              with_separate_src=with_separate_src, task_id=task_id)
    koji.plugin.run_callbacks('postRepoInit', tag=tinfo, event=event, repo_id=repo_id,
                              task_id=task_id, opts=opts)

    return [repo_id, event_id]
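Callers now pass a single opts dictionary instead of the old boolean flags; a minimal sketch of the reworked host call (ids and values illustrative):

    repo_id, event_id = session.host.repoInit(
        tag_id, task_id=task_id,
        opts={'src': False, 'debuginfo': True, 'separate_src': False, 'maven': False})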
@ -2940,22 +2968,47 @@ def dist_repo_init(tag, keys, task_opts):


def repo_set_state(repo_id, state, check=True):
    """Set repo state"""
    """Set repo state

    :param int repo_id: repo id to change
    :param int state: the repo state to change to (from koji.REPO_STATES)
    :param bool check: deprecated
    """
    repo_id = convert_value(repo_id, cast=int)
    try:
        name = koji.REPO_STATES[state]
    except IndexError:
        raise koji.ParameterError(f'Unknown repo state: {state}')

    # we are always called with check=True
    if check:
        # The repo states are sequential, going backwards makes no sense
        # sanity check the state transition
        query = QueryProcessor(
            tables=['repo'], columns=['state'], clauses=['id = %(repo_id)i'],
            values={'repo_id': repo_id}, opts={'rowlock': True})
        oldstate = query.singleValue()
        if oldstate > state:
            raise koji.GenericError("Invalid repo state transition %s->%s"
                                    % (oldstate, state))
        oldname = koji.REPO_STATES[oldstate]
        # for the most part states should progress upward
        if oldstate > state and state != koji.REPO_DELETED:
            raise koji.GenericError(f'Invalid repo state transition for repo {repo_id}: '
                                    f'{oldname} -> {name}')
        elif oldstate == state:
            # historically we have allowed this no-op
            logger.warning(f'Repo {repo_id} is already in state {name}')
            return
        elif oldstate == koji.REPO_DELETED:
            # DELETED is a terminal state
            raise koji.GenericError(f'Repo {repo_id} is deleted')

    update = UpdateProcessor('repo', clauses=['id=%(repo_id)s'],
                             values={'repo_id': repo_id},
                             data={'state': state})
                             data={'state': state},
                             rawdata={'state_time': 'NOW()'})
    update.execute()

    if state == koji.REPO_READY:
        repos.repo_done_hook(repo_id)
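The net effect is that states only move forward, with DELETED as the sole backward exception; a sketch of the checks (constants from koji.REPO_STATES):

    repo_set_state(repo_id, koji.REPO_READY)    # INIT -> READY: ok, fires repo_done_hook
    repo_set_state(repo_id, koji.REPO_READY)    # READY -> READY: logged no-op
    repo_set_state(repo_id, koji.REPO_INIT)     # READY -> INIT: GenericError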
def repo_info(repo_id, strict=False):
    """Get repo information

@ -2966,22 +3019,9 @@ def repo_info(repo_id, strict=False):
    :returns: dict (id, state, create_event, creation_time, tag_id, tag_name,
        dist)
    """
    fields = [
        ('repo.id', 'id'),
        ('repo.state', 'state'),
        ('repo.task_id', 'task_id'),
        ('repo.create_event', 'create_event'),
        ('events.time', 'creation_time'),  # for compatibility with getRepo
        ("date_part('epoch', events.time)", 'create_ts'),
        ('repo.tag_id', 'tag_id'),
        ('tag.name', 'tag_name'),
        ('repo.dist', 'dist'),
    ]
    columns, aliases = zip(*fields)
    joins = ['tag ON tag_id=tag.id', 'events ON repo.create_event = events.id']
    query = QueryProcessor(tables=['repo'], columns=columns, aliases=aliases, joins=joins,
                           clauses=['repo.id = %(repo_id)s'], values={'repo_id': repo_id})
    return query.executeOne(strict=strict)
    repo_id = convert_value(repo_id, cast=int)
    clauses = [['id', '=', repo_id]]
    return repos.RepoQuery(clauses, fields='**').executeOne(strict=strict)


def repo_ready(repo_id):
@ -3019,6 +3059,8 @@ def repo_expire_older(tag_id, event_id, dist=None):

    If dist is not None, then only expire repos with the given dist value
    """
    # this function is no longer used
    logger.warning("repo_expire_older is deprecated")
    st_ready = koji.REPO_READY
    clauses = ['tag_id = %(tag_id)s',
               'create_event < %(event_id)s',
@ -3036,6 +3078,7 @@ def repo_references(repo_id):
    fields = [
        ('buildroot_id', 'id'),
        ('host_id', 'host_id'),
        ('task_id', 'task_id'),
        ('create_event', 'create_event'),
        ('state', 'state'),
    ]
@ -3060,23 +3103,10 @@ def get_active_repos():

    This is a list of all the repos that the repo daemon needs to worry about.
    """
    fields = (
        ('repo.id', 'id'),
        ('repo.state', 'state'),
        ('repo.task_id', 'task_id'),
        ('repo.create_event', 'create_event'),
        ("date_part('epoch', events.time)", 'create_ts'),
        ('repo.tag_id', 'tag_id'),
        ('repo.dist', 'dist'),
        ('tag.name', 'tag_name'),
    )
    fields, aliases = zip(*fields)
    values = {'st_deleted': koji.REPO_DELETED}
    joins = ['tag ON repo.tag_id=tag.id', 'events ON repo.create_event = events.id']
    clauses = ['repo.state != %(st_deleted)s']
    query = QueryProcessor(columns=fields, aliases=aliases, tables=['repo'],
                           joins=joins, clauses=clauses, values=values)
    return query.execute()
    clauses = [['state', '!=', koji.REPO_DELETED]]
    fields = ('id', 'tag_id', 'create_event', 'create_ts', 'state', 'dist', 'task_id', 'tag_name',
              'creation_ts', 'state_ts', 'end_event', 'opts')
    return repos.RepoQuery(clauses, fields).execute()


def tag_changed_since_event(event, taglist):
@ -3118,6 +3148,237 @@ def tag_changed_since_event(event, taglist):
    return False


def tag_last_change_event(tag, before=None, inherit=True):
    """Report the most recent event that changed the tag, or None

    :param tag: tag to consider
    :type tag: int or str
    :param before: only consider events before this value
    :type before: int, optional
    :param inherit: follow inheritance
    :type inherit: bool

    :returns: event id or None
    :rtype: int or NoneType
    """
    taginfo = get_tag(tag, strict=True, event="auto")
    tag_id = taginfo['id']
    before = convert_value(before, int, none_allowed=True)
    tag_delete = taginfo.get('revoke_event')
    if before is None and tag_delete:
        return tag_delete

    # get inheritance at the event
    tags = [tag_id]
    if inherit:
        tags += [link['parent_id'] for link in readFullInheritance(tag_id, event=before)]

    data = {
        'before': before,
        'tags': tags,
    }

    # first check the tag_updates table
    tag_clause = 'tag_id IN %(tags)s'
    clauses = [tag_clause]
    if before is not None:
        clauses.append('update_event < %(before)s')
    query = QueryProcessor(tables=['tag_updates'], clauses=clauses,
                           columns=['max(update_event)'], values=data)
    update_event = query.singleValue()
    logger.debug('tag_update event %s', update_event)
    events = [update_event]

    # check for changes in versioned tables
    tables = (
        'tag_listing',
        'tag_inheritance',
        'tag_config',
        'tag_packages',
        'tag_external_repos',
        'tag_extra',
        'group_package_listing',
        'group_req_listing',
        'group_config',
    )
    for table in tables:
        # create events
        clauses = [tag_clause]
        if before is not None:
            clauses.append('create_event < %(before)s')
        query = QueryProcessor(tables=[table], columns=['max(create_event)'],
                               clauses=clauses, values=data)
        events.append(query.singleValue())
        logger.debug('%s create event %s', table, events[-1])

        # revoke events
        clauses = [tag_clause]
        if before is not None:
            clauses.append('revoke_event < %(before)s')
        query = QueryProcessor(tables=[table], columns=['max(revoke_event)'],
                               clauses=clauses, values=data)
        events.append(query.singleValue())
        logger.debug('%s revoke event %s', table, events[-1])

    # also check external repo changes
    repos = set()
    for tag_id in tags:
        for tag_repo in get_tag_external_repos(tag_info=tag_id, event=before):
            repos.add(tag_repo['external_repo_id'])
    if repos:
        repos = list(repos)
        repos.sort()
        repo_clause = 'external_repo_id IN %(repos)s'
        data['repos'] = repos
        tables = (
            'external_repo_config',
            'external_repo_data',
        )
        for table in tables:
            # create events
            clauses = [repo_clause]
            if before is not None:
                clauses.append('create_event < %(before)s')
            query = QueryProcessor(tables=[table], columns=['max(create_event)'],
                                   clauses=clauses, values=data)
            events.append(query.singleValue())
            logger.debug('%s create event %s', table, events[-1])

            # revoke events
            clauses = [repo_clause]
            if before is not None:
                clauses.append('revoke_event < %(before)s')
            query = QueryProcessor(tables=[table], columns=['max(revoke_event)'],
                                   clauses=clauses, values=data)
            events.append(query.singleValue())
            logger.debug('%s revoke event %s', table, events[-1])

    # return the most recent event
    events = [ev for ev in events if ev is not None]
    if not events:
        # this could happen if our before value is before the tag existed
        return None
    elif tag_delete:
        return min(tag_delete, max(events))
    else:
        return max(events)
def tag_first_change_event(tag, after=None, inherit=True):
    """Report the earliest event that changed the tag, or None if unchanged

    :param tag: tag to consider
    :type tag: int or str
    :param after: only consider events after this value
    :type after: int, optional
    :param inherit: follow inheritance
    :type inherit: bool

    :returns: event id or None
    :rtype: int or NoneType
    """
    tag_id = get_tag_id(tag, strict=True)
    after = convert_value(after, int, none_allowed=True)

    query = QueryProcessor(tables=['tag_config'], columns=['min(create_event)'],
                           clauses=['tag_id = %(tag_id)s'], values={'tag_id': tag_id})
    tag_create = query.singleValue()

    if after is None:
        return tag_create

    # get tag list
    tags = [tag_id]
    if inherit:
        tags += [link['parent_id'] for link in readFullInheritance(tag_id, event=after)]

    data = {
        'after': after,
        'tags': tags,
    }

    # first check the tag_updates table
    tag_clause = 'tag_id IN %(tags)s'
    clauses = [tag_clause]
    if after:
        clauses.append('update_event > %(after)s')
    query = QueryProcessor(tables=['tag_updates'], clauses=clauses,
                           columns=['min(update_event)'], values=data)
    update_event = query.singleValue()
    logger.debug('tag_update event %s', update_event)
    events = [update_event]

    # check for changes in versioned tables
    tables = (
        'tag_listing',
        'tag_inheritance',
        'tag_config',
        'tag_packages',
        'tag_external_repos',
        'tag_extra',
        'group_package_listing',
        'group_req_listing',
        'group_config',
    )
    for table in tables:
        clauses = [tag_clause]
        if after is not None:
            clauses.append('create_event > %(after)s')
        query = QueryProcessor(tables=[table], columns=['min(create_event)'],
                               clauses=clauses, values=data)
        events.append(query.singleValue())
        logger.debug('%s create event %s', table, events[-1])

        clauses = [tag_clause]
        if after is not None:
            clauses.append('revoke_event > %(after)s')
        query = QueryProcessor(tables=[table], columns=['min(revoke_event)'],
                               clauses=clauses, values=data)
        events.append(query.singleValue())
        logger.debug('%s revoke event %s', table, events[-1])

    # also check external repo changes
    repos = set()
    for tag_id in tags:
        for tag_repo in get_tag_external_repos(tag_info=tag_id, event=after):
            repos.add(tag_repo['external_repo_id'])
    if repos:
        repos = list(repos)
        repos.sort()
        repo_clause = 'external_repo_id IN %(repos)s'
        data['repos'] = repos
        tables = (
            'external_repo_config',
            'external_repo_data',
        )
        for table in tables:
            # create events
            clauses = [repo_clause]
            if after is not None:
                clauses.append('create_event > %(after)s')
            query = QueryProcessor(tables=[table], columns=['min(create_event)'],
                                   clauses=clauses, values=data)
            events.append(query.singleValue())
            logger.debug('%s create event %s', table, events[-1])

            # revoke events
            clauses = [repo_clause]
            if after is not None:
                clauses.append('revoke_event > %(after)s')
            query = QueryProcessor(tables=[table], columns=['min(revoke_event)'],
                                   clauses=clauses, values=data)
            events.append(query.singleValue())
            logger.debug('%s revoke event %s', table, events[-1])

    # return the earliest event
    events = [ev for ev in events if ev is not None]
    if not events:
        # no subsequent changes found
        return None
    else:
        return max(min(events), tag_create)
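Together the two helpers bound a repo's window of validity; repo_init uses them for the new begin_event/end_event columns, roughly:

    begin = tag_last_change_event(tag_id, before=event_id) or event_id
    end = tag_first_change_event(tag_id, after=event_id)  # None while the tag is unchanged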
def set_tag_update(tag_id, utype, event_id=None, user_id=None):
    """Record a non-versioned tag update"""
    utype_id = koji.TAG_UPDATE_TYPES.getnum(utype)
@ -4085,8 +4346,7 @@ def get_tag_external_repos(tag_info=None, repo_info=None, event=None):
    clauses = [eventCondition(event, table='tag_external_repos'),
               eventCondition(event, table='external_repo_config')]
    if tag_info:
        tag = get_tag(tag_info, strict=True, event=event)
        tag_id = tag['id']
        tag_id = get_tag_id(tag_info, strict=True)
        clauses.append('tag.id = %(tag_id)i')
    if repo_info:
        repo = get_external_repo(repo_info, strict=True, event=event)
@ -8305,6 +8565,7 @@ def query_history(tables=None, **kwargs):
        'tag_extra': ['tag_id', 'key', 'value'],
        'build_target_config': ['build_target_id', 'build_tag', 'dest_tag'],
        'external_repo_config': ['external_repo_id', 'url'],
        'external_repo_data': ['external_repo_id', 'data'],
        'host_config': ['host_id', 'arches', 'capacity', 'description', 'comment', 'enabled'],
        'host_channels': ['host_id', 'channel_id'],
        'tag_external_repos': ['tag_id', 'external_repo_id', 'priority', 'merge_mode'],
@ -10775,25 +11036,32 @@ class RootExports(object):
        else:
            return context.opts

    def getEvent(self, id):
    def getEvent(self, id, strict=True):
        """
        Get information about the event with the given id.

        :param int id: the event id
        :param bool strict: if True (the default), error on invalid event
        :returns: dict or None

        A map will be returned with the following keys:
          - id (integer): id of the event
          - ts (float): timestamp the event was created, in
                        seconds since the epoch

        If no event with the given id exists, an error will be raised.
        If the event is not in the database, an error will be raised in the strict
        case, otherwise the call will return None.
        """
        event_id = convert_value(id, cast=int)
        strict = convert_value(strict, cast=bool)
        fields = [
            ('id', 'id'),
            ("date_part('epoch', time)", 'ts')
        ]
        columns, aliases = zip(*fields)
        query = QueryProcessor(tables=['events'], columns=columns, aliases=aliases,
                               clauses=['id = %(id)i'], values={'id': id})
        return query.executeOne(strict=True)
                               clauses=['id = %(id)s'], values={'id': event_id})
        return query.executeOne(strict=strict)

    def getLastEvent(self, before=None):
        """
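With the new flag a caller can probe for an event without trapping a fault (id illustrative):

    ev = session.getEvent(123456, strict=False)
    if ev is None:
        print('no such event')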
@ -10838,6 +11106,8 @@ class RootExports(object):
        can make arbitrary tasks. You need to supply all *args and **opts
        accordingly to the task."""
        context.session.assertPerm('admin')
        if 'owner' in opts:
            raise koji.ActionNotAllowed('The owner option is not allowed here')
        return make_task(*args, **opts)

    def uploadFile(self, path, name, size, md5sum, offset, data, volume=None, checksum=None):
@ -13214,45 +13484,7 @@ class RootExports(object):
                taginfo['extra'][k] = v[1]
        return taginfo

    def getRepo(self, tag, state=None, event=None, dist=False):
        """Get individual repository data based on tag and additional filters.
        If more repos fits, most recent is returned.

        :param int|str tag: tag ID or name
        :param int state: value from koji.REPO_STATES
        :param int event: event ID
        :param bool dist: True = dist repo, False = regular repo

        :returns: dict with repo data (id, state, create_event, time, dist)
        """
        if isinstance(tag, int):
            id = tag
        else:
            id = get_tag_id(tag, strict=True)

        fields = ['repo.id', 'repo.state', 'repo.task_id', 'repo.create_event', 'events.time',
                  "date_part('epoch', events.time)", 'repo.dist']
        aliases = ['id', 'state', 'task_id', 'create_event', 'creation_time', 'create_ts', 'dist']
        joins = ['events ON repo.create_event = events.id']
        clauses = ['repo.tag_id = %(id)i']
        if dist:
            clauses.append('repo.dist is true')
        else:
            clauses.append('repo.dist is false')
        if event:
            # the repo table doesn't have all the fields of a _config table, just create_event
            clauses.append('create_event <= %(event)i')
        else:
            if state is None:
                state = koji.REPO_READY
            clauses.append('repo.state = %(state)s')

        query = QueryProcessor(columns=fields, aliases=aliases,
                               tables=['repo'], joins=joins, clauses=clauses,
                               values=locals(),
                               opts={'order': '-creation_time', 'limit': 1})
        return query.executeOne()

    getRepo = staticmethod(repos.old_get_repo)
    repoInfo = staticmethod(repo_info)
    getActiveRepos = staticmethod(get_active_repos)
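The relocated implementation also grows a min_event filter; a client-side sketch (tag name and event id illustrative):

    rinfo = session.getRepo('f40-build', min_event=5000000)
    if rinfo is None:
        # no ready repo that recent; with this change, clients can request one
        # through the new repo namespace registered in kojixmlrpc below
        print('repo not ready yet')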
@ -13281,6 +13513,8 @@ class RootExports(object):

    def newRepo(self, tag, event=None, src=False, debuginfo=False, separate_src=False):
        """Create a newRepo task. returns task id"""
        if not context.opts.get('AllowNewRepo', False):
            raise koji.ActionNotAllowed('newRepo api call is disabled')
        if context.session.hasPerm('repo'):
            pass
        else:
@ -13318,6 +13552,8 @@ class RootExports(object):
            repo_problem(repo_id)

    tagChangedSinceEvent = staticmethod(tag_changed_since_event)
    tagLastChangeEvent = staticmethod(tag_last_change_event)
    tagFirstChangeEvent = staticmethod(tag_first_change_event)
    createBuildTarget = staticmethod(create_build_target)
    editBuildTarget = staticmethod(edit_build_target)
    deleteBuildTarget = staticmethod(delete_build_target)
@ -14870,6 +15106,8 @@ class HostExports(object):
        ptask = Task(parent)
        ptask.assertHost(host.id)
        opts['parent'] = parent
        if 'owner' in opts:
            raise koji.ActionNotAllowed('The owner option is not allowed here')
        if 'label' in opts:
            # first check for existing task with this parent/label
            query = QueryProcessor(tables=['task'], columns=['id'],
@ -15700,34 +15938,30 @@ class HostExports(object):

        return br.updateArchiveList(archives, project)

    def repoInit(self, tag, task_id=None, with_src=False, with_debuginfo=False, event=None,
                 with_separate_src=False):
    def repoInit(self, tag, task_id=None, event=None, opts=None):
        """Initialize a new repo for tag"""
        host = Host()
        host.verify()
        return repo_init(tag, task_id=task_id, with_src=with_src, with_debuginfo=with_debuginfo,
                         event=event, with_separate_src=with_separate_src)
        return repo_init(tag, task_id=task_id, event=event, opts=opts)

    def repoDone(self, repo_id, data, expire=False, repo_json_updates=None):
        """Finalize a repo

        repo_id: the id of the repo
        data: a dictionary of repo files in the form:
              { arch: [uploadpath, [file1, file2, ...]], ...}
        expire: if set to true, mark the repo expired immediately [*]
        repo_json_updates: dict - if provided it will be shallow copied
            into repo.json file
        :param int repo_id: the id of the repo
        :param dict data: a dictionary of repo files in the form:
        :param bool expire: (legacy) if true, mark repo expired
        :param dict repo_json_updates: updates for repo.json file

        The data parameter should be of the form:
            { arch: [uploadpath, [file1, file2, ...]], ...}

        Actions:
        * Move uploaded repo files into place
        * Mark repo ready
        * Expire earlier repos
        * Move/create 'latest' symlink
        * Mark repo ready (or expired)
        * Move/create 'latest' symlink if appropriate

        For dist repos, the move step is skipped (that is handled in
        distRepoMove).

        * This is used when a repo from an older event is generated
        """
        host = Host()
        host.verify()

@ -15759,26 +15993,14 @@ class HostExports(object):
                    raise koji.GenericError("uploaded file missing: %s" % src)
                safer_move(src, dst)
        if expire:
            logger.warning("expire option for repoDone call is deprecated")
            repo_expire(repo_id)
            koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire)
            return
        # else:
        repo_ready(repo_id)
        repo_expire_older(rinfo['tag_id'], rinfo['create_event'], rinfo['dist'])

        # make a latest link
        if rinfo['dist']:
            latestrepolink = koji.pathinfo.distrepo('latest', rinfo['tag_name'])
        else:
            latestrepolink = koji.pathinfo.repo('latest', rinfo['tag_name'])
            # XXX - this is a slight abuse of pathinfo
        try:
            if os.path.lexists(latestrepolink):
                os.unlink(latestrepolink)
            os.symlink(str(repo_id), latestrepolink)
        except OSError:
            # making this link is nonessential
            log_error("Unable to create latest link for repo: %s" % repodir)
        repo_ready(repo_id)
        repos.symlink_if_latest(rinfo)

        koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire)

    def distRepoMove(self, repo_id, uploadpath, arch):
kojihub/kojixmlrpc.py

@ -42,6 +42,7 @@ from koji.xmlrpcplus import ExtendedMarshaller, Fault, dumps, getparser
from . import auth
from . import db
from . import scheduler
from . import repos


# HTTP headers included in every request
@ -516,6 +517,20 @@ def load_config(environ):
        ['SoftRefusalTimeout', 'integer', 900],
        ['HostTimeout', 'integer', 900],
        ['RunInterval', 'integer', 60],

        # repo options
        ['MaxRepoTasks', 'integer', 10],
        ['MaxRepoTasksMaven', 'integer', 2],
        ['RepoRetries', 'integer', 3],
        ['RequestCleanTime', 'integer', 60 * 24],  # in minutes
        ['AllowNewRepo', 'bool', True],
        ['RepoLag', 'int', 3600],
        ['RepoAutoLag', 'int', 7200],
        ['RepoLagWindow', 'int', 600],
        ['RepoQueueUser', 'str', 'kojira'],
        ['DebuginfoTags', 'str', ''],
        ['SourceTags', 'str', ''],
        ['SeparateSourceTags', 'str', ''],
    ]
    opts = {}
    for name, dtype, default in cfgmap:
@ -864,9 +879,11 @@ def get_registry(opts, plugins):
    functions = kojihub.RootExports()
    hostFunctions = kojihub.HostExports()
    schedulerFunctions = scheduler.SchedulerExports()
    repoFunctions = repos.RepoExports()
    registry.register_instance(functions)
    registry.register_module(hostFunctions, "host")
    registry.register_module(schedulerFunctions, "scheduler")
    registry.register_module(repoFunctions, "repo")
    registry.register_function(auth.login)
    registry.register_function(auth.sslLogin)
    registry.register_function(auth.logout)
915 kojihub/repos.py Normal file
@ -0,0 +1,915 @@
import json
import logging
import os
import os.path
import time

import koji
from . import kojihub

from koji.context import context
from kojihub.db import (QueryView, UpdateProcessor, BulkUpdateProcessor, InsertProcessor, nextval,
                        Savepoint, QueryProcessor, db_lock, DeleteProcessor)


logger = logging.getLogger('koji.repo')


class RepoQuery(QueryView):

    tables = ['repo']
    joinmap = {
        'tag': 'tag ON repo.tag_id = tag.id',
        'create_ev': 'events AS create_ev ON repo.create_event = create_ev.id',
        'begin_ev': 'LEFT JOIN events AS begin_ev ON repo.begin_event = begin_ev.id',
        'end_ev': 'LEFT JOIN events AS end_ev ON repo.end_event = end_ev.id',
        'task': 'LEFT JOIN task ON repo.task_id = task.id',
    }
    fieldmap = {
        'id': ['repo.id', None],
        'tag_id': ['repo.tag_id', None],
        'creation_time': ['repo.creation_time', None],
        'creation_ts': ["date_part('epoch', repo.creation_time)", None],
        'state_time': ['repo.state_time', None],
        'state_ts': ["date_part('epoch', repo.state_time)", None],
        'create_event': ['repo.create_event', None],
        'create_ts': ["date_part('epoch', create_ev.time)", 'create_ev'],
        'begin_event': ['repo.begin_event', None],
        'begin_ts': ["date_part('epoch', begin_ev.time)", 'begin_ev'],
        'end_event': ['repo.end_event', None],
        'end_ts': ["date_part('epoch', end_ev.time)", 'end_ev'],
        'state': ['repo.state', None],
        'dist': ['repo.dist', None],
        'opts': ['repo.opts', None],
        'custom_opts': ['repo.custom_opts', None],
        'task_id': ['repo.task_id', None],
        'task_state': ['task.state', 'task'],
        'tag_name': ['tag.name', 'tag'],
    }
    default_fields = ('id', 'tag_id', 'create_event', 'state', 'dist', 'task_id', 'opts',
                      'custom_opts')
    # Note that we avoid joins by default
class RepoQueueQuery(QueryView):

    tables = ['repo_queue']
    joinmap = {
        'tag': 'tag ON repo_queue.tag_id = tag.id',
        'task': 'LEFT JOIN task ON repo_queue.task_id = task.id',
        'users': 'users ON repo_queue.owner = users.id',
    }
    fieldmap = {
        'id': ['repo_queue.id', None],
        'create_time': ['repo_queue.create_time', None],
        'create_ts': ["date_part('epoch', repo_queue.create_time)", None],
        'owner': ['repo_queue.owner', None],
        'owner_name': ['users.name', 'users'],
        'priority': ['repo_queue.priority', None],
        'tag_id': ['repo_queue.tag_id', None],
        'tag_name': ['tag.name', 'tag'],
        'at_event': ['repo_queue.at_event', None],
        'min_event': ['repo_queue.min_event', None],
        'opts': ['repo_queue.opts', None],
        'update_time': ['repo_queue.update_time', None],
        'update_ts': ["date_part('epoch', repo_queue.update_time)", None],
        'active': ['repo_queue.active', None],
        'task_id': ['repo_queue.task_id', None],
        'task_state': ['task.state', 'task'],
        'tries': ['repo_queue.tries', None],
        'repo_id': ['repo_queue.repo_id', None],
    }
    default_fields = ('id', 'tag_id', 'at_event', 'min_event', 'create_ts',
                      'task_id', 'tries', 'repo_id', 'opts', 'active', 'update_ts')
def check_repo_queue():
    if not db_lock('repo-queue', wait=False):
        return

    clauses = [['repo_id', 'IS', None], ['active', 'IS', True]]
    fields = ('*', 'task_state')
    waiting = RepoQueueQuery(clauses, fields=fields, opts={'order': 'priority,id'}).execute()
    logger.debug('Got %i waiting repo requests', len(waiting))

    n_tasks = 0
    n_maven = 0
    max_tasks = context.opts['MaxRepoTasks']
    max_maven = context.opts['MaxRepoTasksMaven']
    q_updates = {}

    # first pass -- check on tasks
    for req in waiting:
        updates = q_updates.setdefault(req['id'], {})

        # check on task, if any
        if not req['task_id']:
            continue

        logger.debug('Req with task: %r', req)
        retry = False
        if req['task_state'] == koji.TASK_STATES['CLOSED']:
            # we don't normally expect to hit this because of repo_done_hook
            logger.warning('Repo task did not trigger repo_done_hook: %(task_id)s', req)
            repo = get_repo_from_task(req['task_id'])
            if not repo:
                logger.error('Repo task did not produce repo: %(task_id)s', req)
                retry = True
            else:
                if valid_repo(req, repo):
                    logger.info('Got valid repo for request: %r', req)
                    # record repo_id and mark inactive
                    updates['repo_id'] = repo['id']
                    updates['active'] = False
                else:
                    # (valid_repo already logged an error)
                    retry = True
        elif req['task_state'] in (koji.TASK_STATES['CANCELED'], koji.TASK_STATES['FAILED']):
            logger.warning('Repo task did not complete: %(task_id)s', req)
            retry = True
        else:
            # task still active
            n_tasks += 1
            if req['opts'].get('maven'):
                n_maven += 1

        if retry:
            # something went wrong with the task. retry if we can
            if req['tries'] > context.opts['RepoRetries']:
                logger.error('Retries exhausted for repo request: %r', req)
                updates['active'] = False
            else:
                # forget task id so it can be rescheduled
                updates['task_id'] = None
                req['task_id'] = None
                # tries is incremented later when we make the task

    logger.debug('Found %i active repo request tasks', n_tasks)

    # second pass -- trigger new tasks if we can
    for req in waiting:
        if req['task_id']:
            continue
        if n_tasks >= max_tasks:
            logger.debug('Reached max_tasks=%i', max_tasks)
            break
        if req['opts'].get('maven') and n_maven >= max_maven:
            logger.debug('Reached max_maven=%i', max_maven)
            continue

        logger.debug('Request needs task: %r', req)

        task_id = repo_queue_task(req)
        n_tasks += 1
        if req['opts'].get('maven'):
            n_maven += 1

        updates = q_updates.setdefault(req['id'], {})
        tries = req['tries'] or 0
        updates['task_id'] = task_id
        updates['tries'] = tries + 1
        logger.info('Created task %i for repo request %i', task_id, req['id'])

    # third pass -- apply updates
    made_updates = False
    for req in waiting:
        updates = q_updates.get(req['id'])
        if not updates:
            continue
        made_updates = True
        upd = UpdateProcessor('repo_queue', data=updates, clauses=['id = %(id)s'], values=req)
        upd.rawset(update_time='NOW()')
        upd.execute()

    # clean up
    if made_updates:
        clean_repo_queue()
def clean_repo_queue():
    """Delete old inactive entries from the repo queue"""
    # these entries need to persist for at least a little while after fulfillment so that
    # clients can find the results of their requests
    delete = DeleteProcessor(
        'repo_queue',
        clauses=['active IS FALSE', 'update_time < NOW() - %(age)s::interval'],
        values={'age': '%s minutes' % context.opts['RequestCleanTime']},
    )
    n = delete.execute()
    if n:
        logger.info('Cleaned %s repo queue entries', n)
def get_repo_from_task(task_id):
    """Get repo from finished newRepo task"""
    try:
        result = kojihub.Task(task_id).getResult()
        # task returns [repo_id, event_id]
        repo_id = int(result[0])
    except Exception as e:
        logger.error('Error reading result for task %s: %s', task_id, e)
        return None

    result = RepoQuery([['id', '=', repo_id]]).execute()
    if not result:
        logger.error('Task %s reported invalid repo: %r', task_id, repo_id)
        return None

    return result[0]
def valid_repo(req, repo):
    # right tag
    if repo['tag_id'] != req['tag_id']:
        logger.error('Request %i got repo %i with wrong tag: got %s, expected %s',
                     req['id'], repo['id'], repo['tag_id'], req['tag_id'])
        return False

    # right state
    if repo['state'] != koji.REPO_STATES['READY']:
        logger.error('Request %i got repo %i with wrong state: got %s',
                     req['id'], repo['id'], repo['state'])
        return False

    # matching event
    if req['at_event'] is not None:
        if repo['create_event'] != req['at_event']:
            logger.error('Request %i got repo %i at wrong event: %s != %s',
                         req['id'], repo['id'], repo['create_event'], req['at_event'])
            return False
    elif repo['create_event'] < req['min_event']:
        logger.error('Request %i got repo %i before min_event: %s < %s',
                     req['id'], repo['id'], repo['create_event'], req['min_event'])
        return False

    # matching opts
    if not repo['opts']:
        # should not happen
        logger.error('Requested repo has no opts: %r %r', req, repo)
        return False
    for key in req['opts']:
        # all request options should have applied
        if key not in repo['opts']:
            # should not happen
            logger.error('Requested repo has missing opts: %r %r', req, repo)
            return False
        elif req['opts'][key] != repo['opts'][key]:
            logger.error('Requested repo has wrong opts: %r %r', req, repo)
            return False
    for key in repo.get('custom_opts', {}):
        # any custom options should come from request
        if key not in req['opts'] or repo['custom_opts'][key] != req['opts'][key]:
            logger.error('Requested repo has wrong opts: %r %r', req, repo)
            return False

    return True
def repo_done_hook(repo_id):
    """Check if newly ready repo satisfies requests"""
    savepoint = Savepoint('repo_done_hook')
    try:
        repo = RepoQuery([['id', '=', repo_id]]).executeOne()
        if not repo:
            # shouldn't happen, but...
            logger.error('No such repo: %i', repo_id)
            return
        if repo['dist']:
            return
        opts = repo['opts']
        custom = repo['custom_opts']
        if opts is None or custom is None:
            # should not happen
            logger.error('Repo with invalid opts values: %r', repo)
            return

        # query for matching requests
        fields = ['id']
        qopts = {'order': 'id'}
        base_clauses = [
            ['tag_id', '=', repo['tag_id']],
            ['active', 'IS', True],
            ['repo_id', 'IS', None],
            ['opts', '<@', json.dumps(opts)],
            ['opts', '@>', json.dumps(custom)],
            # i.e. repo matches all opts in request and request matches all custom opts in repo
        ]
        # TODO adjust this once QueryView supports OR
        clauses = base_clauses + [['min_event', '<=', repo['create_event']]]
        reqs1 = RepoQueueQuery(clauses, fields, qopts).execute()
        clauses = base_clauses + [['at_event', '=', repo['create_event']]]
        reqs2 = RepoQueueQuery(clauses, fields, qopts).execute()
        reqs = reqs1 + reqs2

        # and update!
        if reqs:
            update = UpdateProcessor('repo_queue',
                                     clauses=['id IN %(ids)s'],
                                     values={'ids': [r['id'] for r in reqs]},
                                     data={'repo_id': repo['id'], 'active': False},
                                     rawdata={'update_time': 'NOW()'})
            update.execute()
    except Exception:
        # We're being very careful since we're a callback
        savepoint.rollback()
        logger.exception('Failed to update repo queue')
def symlink_if_latest(repo):
    """Point latest symlink at repo, if appropriate

    :param dict repo: repo data
    :returns: bool

    Returns True if the latest symlink was updated, False otherwise
    """

    if repo['custom_opts'] and not repo['dist']:
        # only symlink non-dist repos if they have the default opts
        logger.debug('Skipping latest symlink. Not default opts.')
        return False

    # only symlink if we are actually latest
    clauses = [
        ['tag_id', '=', repo['tag_id']],
        ['state', '=', koji.REPO_READY],
        ['create_event', '>', repo['create_event']]]
    if repo['dist']:
        clauses.append(['dist', 'IS', True])
    else:
        clauses.append(['custom_opts', '=', '{}'])
    query = RepoQuery(clauses)
    newer = query.execute()
    if newer:
        logger.debug('Skipping latest symlink, %i newer repos found', len(newer))
        return False

    if repo['dist']:
        latestrepolink = koji.pathinfo.distrepo('latest', repo['tag_name'])
    else:
        latestrepolink = koji.pathinfo.repo('latest', repo['tag_name'])
    # TODO - avoid abusing pathinfo like this
    try:
        if os.path.lexists(latestrepolink):
            os.unlink(latestrepolink)
        os.symlink(str(repo['id']), latestrepolink)
    except OSError:
        # making this link is nonessential
        logger.error("Unable to create latest link: %s" % latestrepolink)
        return False
    return True
def repo_queue_task(req):
    opts = req['opts'] or {}
    # should already be valid, but just in case
    opts = convert_repo_opts(opts, strict=True)
    kwargs = {'opts': opts}
    if req['at_event'] is not None:
        kwargs['event'] = req['at_event']
    # otherwise any new repo will satisfy any valid min_event

    args = koji.encode_args(req['tag_id'], **kwargs)
    taskopts = {'priority': 15, 'channel': 'createrepo'}
    user_id = kojihub.get_id('users', context.opts['RepoQueueUser'], strict=False)
    # TODO should we error if user doesn't exist
    if user_id:
        taskopts['owner'] = user_id
    task_id = kojihub.make_task('newRepo', args, **taskopts)
    return task_id
    # caller should update request entry if needed
def update_end_events():
    """Update end_event for all ready repos that don't have one yet"""
    query = RepoQuery(
        clauses=[['end_event', 'IS', None], ['state', '=', koji.REPO_READY]],
        fields=('id', 'tag_id', 'create_event'),
        opts={'order': 'id'})
    repos = query.execute()
    n_cached = 0
    tag_last = {}
    updates = []
    for repo in repos:
        tag_id = repo['tag_id']
        # use cache to avoid redundant calls
        if tag_id in tag_last and tag_last[tag_id] <= repo['create_event']:
            # we already know that tag hasn't changed
            n_cached += 1
            continue
        end_event = kojihub.tag_first_change_event(repo['tag_id'], after=repo['create_event'])
        if end_event is None:
            tag_last[tag_id] = kojihub.tag_last_change_event(tag_id)
        else:
            updates.append({'id': repo['id'], 'end_event': end_event})
    if updates:
        BulkUpdateProcessor('repo', data=updates, match_keys=('id',)).execute()
    logger.debug('Checked end events for %i repos', len(repos))
    logger.debug('Got no change for %i distinct tags', len(tag_last))
    logger.debug('Avoided %i checks due to cache', n_cached)
    logger.debug('Added end events for %i repos', len(updates))
def get_external_repo_data(erepo):
    external_repo_id = kojihub.get_external_repo_id(erepo, strict=True)
    query = QueryProcessor(
        tables=['external_repo_data'],
        clauses=['external_repo_id = %(id)s', 'active IS TRUE'],
        columns=['data'],
        values={'id': external_repo_id})
    return query.singleValue(strict=False)
def set_external_repo_data(erepo, data):
    """Update tracking data for an external repo

    This is intended to be used by kojira
    """

    external_repo_id = kojihub.get_external_repo_id(erepo, strict=True)
    data = kojihub.convert_value(data, cast=dict)

    values = {
        'external_repo_id': external_repo_id,
        'data': json.dumps(data),
    }

    # revoke old entry, if any
    update = UpdateProcessor(
        table='external_repo_data',
        clauses=['external_repo_id = %(external_repo_id)s'],
        values=values)
    update.make_revoke()
    update.execute()

    # insert new entry
    insert = InsertProcessor(table='external_repo_data', data=values)
    insert.make_create()
    insert.execute()
def do_auto_requests():
    """Request repos for tags configured to auto-regen"""

    # query the extra configs we need
    query = QueryProcessor(
        tables=['tag_extra'],
        columns=['tag_id', 'key', 'value'],
        clauses=['key IN %(keys)s', 'active IS TRUE'],
        values={'keys': ['repo.auto', 'repo.lag']})

    # figure out which tags to handle and if they have lag settings
    auto_tags = []
    lags = {}
    for row in query.execute():
        if row['value'] is None:
            # blocked entry, ignore
            continue
        # tag_extra values are TEXT, but contain json values
        try:
            value = json.loads(row['value'])
        except Exception:
            # logging will be too noisy if it actually happens
            continue
        if row['key'] == 'repo.auto':
            if value:
                auto_tags.append(row['tag_id'])
        elif row['key'] == 'repo.lag':
            if not isinstance(value, int):
                # just ignore
                continue
            lags[row['tag_id']] = value

    logger.debug('Found %i tags for automatic repos', len(auto_tags))

    reqs = {}
    dups = {}
    default_lag = context.opts['RepoAutoLag']
    window = context.opts['RepoLagWindow']
    for tag_id in auto_tags:
        # choose min_event similar to default_min_event, but different lag
        # TODO unify code?
        last = kojihub.tag_last_change_event(tag_id)
        if last is None:
            # shouldn't happen
            # last event cannot be None for a valid tag, but we only queried tag_extra
            logger.error('No last event for tag %i', tag_id)
            continue
        lag = lags.get(tag_id, default_lag)
        base_ts = time.time() - lag
        base_ts = (base_ts // window) * window
        base = context.handlers.get('getLastEvent')(before=base_ts)['id']
        check = request_repo(tag_id, min_event=min(base, last), priority=5)
        # lower priority so they don't block on-demand
        if check['duplicate']:
            dups[tag_id] = check
        elif check['request']:
            reqs[tag_id] = check

    logger.debug('Auto repo requests: %s', len(reqs))
    logger.debug('Auto repo duplicates: %s', len(dups))
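Tags opt in to automatic regeneration through tag extra settings; an illustrative client-side sketch using the existing editTag2 call (tag name and values are examples):

    session.editTag2('f40-build', extra={'repo.auto': True, 'repo.lag': 3600})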
def old_get_repo(tag, state=None, event=None, dist=False, min_event=None):
|
||||
"""Get individual repository data based on tag and additional filters.
|
||||
If more repos fits, most recent is returned.
|
||||
|
||||
:param int|str tag: tag ID or name
|
||||
:param int state: value from koji.REPO_STATES
|
||||
:param int event: maximum event ID. legacy arg
|
||||
:param bool dist: True = dist repo, False = regular repo
|
||||
:param int min_event: minimum event ID
|
||||
|
||||
:returns: dict with repo data
|
||||
"""
|
||||
tag_id = kojihub.get_tag_id(tag, strict=True)
|
||||
state = kojihub.convert_value(state, int, none_allowed=True)
|
||||
event = kojihub.convert_value(event, int, none_allowed=True)
|
||||
min_event = kojihub.convert_value(min_event, int, none_allowed=True)
|
||||
dist = kojihub.convert_value(dist, bool)
|
||||
|
||||
fields = '**'
|
||||
clauses = [['tag_id', '=', tag_id]]
|
||||
if dist:
|
||||
clauses.append(['dist', 'IS', True])
|
||||
else:
|
||||
clauses.append(['dist', 'IS', False])
|
||||
if event:
|
||||
# the event arg was originally used to report a repo for an old tag event
|
||||
# hence, looking for events before that and omitting the state
|
||||
clauses.append(['create_event', '<=', event])
|
||||
else:
|
||||
if state is None:
|
||||
state = koji.REPO_READY
|
||||
clauses.append(['state', '=', state])
|
||||
if min_event is not None:
|
||||
clauses.append(['create_event', '>=', min_event])
|
||||
|
||||
opts = {'order': '-creation_time', 'limit': 1}
|
||||
return RepoQuery(clauses, fields, opts).executeOne()


def get_repo(tag, min_event=None, at_event=None, opts=None):
    """Get the best ready repo matching the given requirements

    :param int|str tag: tag ID or name
    :param int min_event: minimum event ID
    :param int at_event: specific event ID
    :param dict opts: repo options

    :returns: dict with repo data
    """
    tag_id = kojihub.get_tag_id(tag, strict=True)
    min_event = kojihub.convert_value(min_event, int, none_allowed=True)
    at_event = kojihub.convert_value(at_event, int, none_allowed=True)
    opts = convert_repo_opts(opts, strict=True)

    fields = '**'
    clauses = [
        ['tag_id', '=', tag_id],
        ['dist', 'IS', False],
        ['state', '=', koji.REPO_READY],
        # note the slightly esoteric containment operators for the opts conditions
        ['opts', '@>', json.dumps(opts)],
        ['custom_opts', '<@', json.dumps(opts)],
        # i.e. you'll get all the options you asked for, but you won't get any
        # custom options you didn't ask for
    ]
    # TODO: should we expand usage to include dist?
    if at_event is not None:
        clauses.append(['create_event', '=', at_event])
    elif min_event is not None:
        clauses.append(['create_event', '>=', min_event])

    qopts = {'order': '-create_event', 'limit': 1}
    return RepoQuery(clauses, fields, qopts).executeOne()
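
# --- aside: illustration, not part of this file ---
# The two JSONB containment clauses deserve a gloss: opts @> wanted requires
# the repo to provide every option asked for, while custom_opts <@ wanted
# forbids custom options that were not asked for. A rough pure-Python
# equivalent of the pair (the real check happens in PostgreSQL):
#
#     def opts_match(repo_opts, repo_custom, wanted):
#         # repo_opts @> wanted: the repo provides every requested option
#         provides_all = all(repo_opts.get(k) == v for k, v in wanted.items())
#         # custom_opts <@ wanted: no custom option beyond those requested
#         no_surprises = all(wanted.get(k) == v for k, v in repo_custom.items())
#         return provides_all and no_surprises
#
#     # a repo built with a custom src=True does not satisfy a default request...
#     assert not opts_match({'src': True}, {'src': True}, {})
#     # ...but does satisfy a request that explicitly asked for src
#     assert opts_match({'src': True}, {'src': True}, {'src': True})
# --- end aside ---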


def get_repo_opts(tag, override=None):
    """Determine repo options from taginfo and apply the given overrides

    :param dict tag: taginfo (via get_tag)
    :param dict|None override: repo options to override. optional.
    :returns: opts, custom

    Returns a pair of option dictionaries: opts, custom
    - opts gives the repo options with overrides applied
    - custom gives the effective overrides (those that differed from the tag default)
    """

    # base options
    opts = {
        'src': False,
        'debuginfo': False,
        'separate_src': False,
        'maven': False,
    }

    # emulate the original kojira config
    debuginfo_pat = context.opts['DebuginfoTags'].split()
    src_pat = context.opts['SourceTags'].split()
    separate_src_pat = context.opts['SeparateSourceTags'].split()
    if debuginfo_pat:
        if koji.util.multi_fnmatch(tag['name'], debuginfo_pat):
            opts['debuginfo'] = True
    if src_pat:
        if koji.util.multi_fnmatch(tag['name'], src_pat):
            opts['src'] = True
    if separate_src_pat:
        if koji.util.multi_fnmatch(tag['name'], separate_src_pat):
            opts['separate_src'] = True

    # also consider tag config
    tag_opts = tag['extra'].get('repo.opts', {})
    if 'with_debuginfo' in tag['extra']:
        # for compat with old newRepo
        if 'repo.opts' in tag['extra']:
            logger.warning('Ignoring legacy with_debuginfo config, overridden by repo.opts')
        else:
            tag_opts['debuginfo'] = bool(tag['extra']['with_debuginfo'])
    tag_opts = convert_repo_opts(tag_opts, strict=False)
    opts.update(tag_opts)

    # maven opt
    if not context.opts.get('EnableMaven'):
        opts['maven'] = False
        if override and override.get('maven'):
            logger.warning('maven repo override requested but maven support is not enabled')
            # we don't error here, but repo_init will
    elif tag['maven_support']:
        opts['maven'] = True
        # might also have been set via repo.opts above

    # apply overrides
    custom = {}
    if override is not None:
        override = convert_repo_opts(override)
        custom = {k: override[k] for k in override if override[k] != opts[k]}
        opts.update(custom)

    return opts, custom


def convert_repo_opts(opts, strict=False):
    """Ensure repo opts have the correct form

    :param dict|None opts: repo options
    :param bool strict: error if opts are invalid
    :returns: updated opts dictionary

    If strict is true, invalid opt values raise an error; otherwise they are ignored.
    """

    if opts is None:
        return {}

    if not isinstance(opts, dict):
        if strict:
            raise koji.ParameterError('Repo opts must be a dictionary')
        else:
            logger.warning('Ignoring invalid repo opts: %r', opts)
            return {}

    all_opts = {'src', 'debuginfo', 'separate_src', 'maven'}
    new_opts = {}
    for key in opts:
        if key not in all_opts:
            if strict:
                raise koji.ParameterError(f'Invalid repo option: {key}')
            else:
                logger.warning('Ignoring invalid repo opt: %s', key)
                continue
        # at the moment, all known opts are boolean, so this is fairly easy
        value = opts[key]
        if value is None:
            # treat as unspecified
            logger.info('Received None value in repo opts: %r', opts)
            continue
        new_opts[key] = kojihub.convert_value(value, bool)

    return new_opts
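
# --- aside: illustration, not part of this file ---
# The two validation modes, following directly from the loop above (this
# assumes kojihub.convert_value applies the bool cast to truthy values):
#
#     import koji
#
#     assert convert_repo_opts({'src': 1, 'bogus': True}, strict=False) == {'src': True}
#     # the unknown key was logged and dropped; the value coerced to bool
#     try:
#         convert_repo_opts({'bogus': True}, strict=True)
#     except koji.ParameterError as err:
#         print(err)  # Invalid repo option: bogus
# --- end aside ---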


def request_repo(tag, min_event=None, at_event=None, opts=None, priority=None, force=False):
    """Request a repo for a tag

    :param int|str tag: tag id or name
    :param int|str min_event: minimum event for the repo (optional)
    :param int at_event: specific event for the repo (optional)
    :param dict opts: custom repo options (optional)
    :param int priority: request priority, relative to the default (optional)
    :param bool force: force request creation, even if a matching repo exists

    The special value min_event="last" uses the most recent event for the tag.
    Otherwise min_event should be an integer.

    Use opts=None (the default) to get the default options for the tag.
    If opts is given, it should be a dictionary of repo options. These will
    override the defaults.
    """

    context.session.assertLogin()
    taginfo = kojihub.get_tag(tag, strict=True)
    opts = convert_repo_opts(opts, strict=True)
    if opts.get('maven') and not context.opts.get('EnableMaven'):
        raise koji.GenericError('Maven support not enabled')
    if at_event is not None:
        if min_event is not None:
            raise koji.ParameterError('The min_event and at_event options conflict')
        at_event = kojihub.convert_value(at_event, cast=int)
        ev = context.handlers.get('getEvent')(at_event, strict=False)
        if not ev:
            raise koji.ParameterError(f'Invalid event: {at_event}')
    elif min_event == "last":
        min_event = kojihub.tag_last_change_event(taginfo['id'])
        logger.debug('Using last event %s for repo request', min_event)
    elif min_event is None:
        min_event = default_min_event(taginfo)
        logger.debug('Using event %s for repo request', min_event)
    else:
        min_event = kojihub.convert_value(min_event, cast=int)
        ev = context.handlers.get('getEvent')(min_event, strict=False)
        if not ev:
            raise koji.ParameterError(f'Invalid event: {min_event}')

    if priority is None:
        priority = koji.PRIO_DEFAULT
    else:
        priority = kojihub.convert_value(priority, cast=int)
        # similar to the various build calls, we treat the input priority as relative
        if priority < 0:
            if not context.session.hasPerm('admin'):
                raise koji.ActionNotAllowed('only admins may create high-priority requests')
        priority = koji.PRIO_DEFAULT + priority

    ret = {'repo': None, 'request': None, 'duplicate': False}

    if not force:
        # do we have an existing repo?
        repo = get_repo(taginfo['id'], min_event=min_event, at_event=at_event, opts=opts)
        if repo:
            ret['repo'] = repo
            # TODO: do we need to record a request entry for stats?
            return ret

        # do we have a matching request already?
        clauses = [
            ['tag_id', '=', taginfo['id']],
            ['active', 'IS', True],
            ['opts', '=', json.dumps(opts)],
        ]
        if at_event is not None:
            clauses.append(['at_event', '=', at_event])
        else:
            clauses.append(['min_event', '>=', min_event])
        check = RepoQueueQuery(clauses, fields='**', opts={'order': 'priority,id'}).execute()
        if check:
            # if there is more than one, pick the first
            # (we've queried in queue order)
            req = check[0]
            # TODO stats?
            # note that active requests should not have a repo yet
            if req['priority'] > priority:
                # update the priority if appropriate (a lower number means higher priority)
                set_request_priority(req['id'], priority)
                # also update our return data
                req['priority'] = priority
            ret['request'] = req
            ret['duplicate'] = True
            return ret

    # otherwise we make one
    req_id = nextval('repo_queue_id_seq')
    data = {
        'id': req_id,
        'owner': context.session.user_id,
        'priority': priority,
        'tag_id': taginfo['id'],
        'at_event': at_event,
        'min_event': min_event,
        'opts': json.dumps(opts),
    }
    insert = InsertProcessor('repo_queue', data=data)
    insert.execute()
    logger.info('New repo request for %(name)s', taginfo)

    # query to make the return value consistent with the above
    req = RepoQueueQuery(clauses=[['id', '=', req_id]], fields='**').executeOne()
    ret['request'] = req
    return ret
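
# --- aside: illustration, not part of this file ---
# A client-side sketch of the request/check cycle implied by the return
# contract above, assuming the RepoExports class below is mounted under the
# 'repo' namespace on the hub; the hub URL and tag name are hypothetical:
#
#     import time
#     import koji
#
#     session = koji.ClientSession('https://koji.example.com/kojihub')
#     session.gssapi_login()
#
#     check = session.repo.request('f40-build', min_event='last')
#     if check['repo']:
#         repo = check['repo']       # a matching repo already existed
#     else:
#         req = check['request']     # new or duplicate queue entry
#         while True:
#             status = session.repo.checkRequest(req['id'])
#             if status['repo']:
#                 repo = status['repo']
#                 break
#             time.sleep(10)
#     print(repo['id'])
# --- end aside ---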


def default_min_event(taginfo):
    """Get the default min_event for repo requests"""
    last = kojihub.tag_last_change_event(taginfo['id'])
    # the last event cannot be None for a valid tag
    lag = taginfo['extra'].get('repo.lag')
    if lag is not None and not isinstance(lag, int):
        logger.warning('Invalid repo.lag setting for tag %s: %r', taginfo['name'], lag)
        lag = None
    if lag is None:
        lag = context.opts['RepoLag']
    window = context.opts['RepoLagWindow']
    base_ts = time.time() - lag
    # We round base_ts to the nearest window so that duplicate requests will
    # get the same event if they are close in time.
    base_ts = (base_ts // window) * window
    base = context.handlers.get('getLastEvent')(before=base_ts)['id']
    # If the tag has changed recently, we allow a bit of lag.
    # Otherwise, we use the most recent event for the tag.
    return min(base, last)
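
# --- aside: illustration, not part of this file ---
# A worked instance of the rounding above, with illustrative config values
# RepoLag=3600 and RepoLagWindow=600:
#
#     lag, window = 3600, 600     # illustrative config values
#     now = 1700003905            # pretend time.time()
#     base_ts = ((now - lag) // window) * window
#     assert base_ts == 1699999800
#     # any request arriving in the same 600-second window computes the
#     # identical base_ts, and therefore resolves to the same event
# --- end aside ---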


def check_repo_request(req_id):
    """Report the status of a repo request

    :param int req_id: the request id
    :return: status dictionary

    The returned dictionary will include 'request' and 'repo' fields.
    """
    req_id = kojihub.convert_value(req_id, int)
    clauses = [['id', '=', req_id]]
    req = RepoQueueQuery(clauses, fields='**').executeOne()
    if not req:
        raise koji.GenericError('No such request')

    ret = {'repo': None, 'request': req}

    # do we have a repo yet?
    if req['repo_id']:
        ret['repo'] = kojihub.repo_info(req['repo_id'])

    # do we have a task?
    elif req['task_id']:
        ret['task'] = kojihub.Task(req['task_id']).getInfo()

    return ret


def set_request_priority(req_id, priority):
    """Set the priority for a request

    :param int req_id: the request id
    :param int priority: the new priority
    """
    req_id = kojihub.convert_value(req_id, int)
    priority = kojihub.convert_value(priority, int)
    upd = UpdateProcessor('repo_queue',
                          data={'priority': priority},
                          clauses=['id = %(id)s'],
                          values={'id': req_id})
    upd.execute()


class RepoExports:

    request = staticmethod(request_repo)
    get = staticmethod(get_repo)
    checkRequest = staticmethod(check_repo_request)

    def setRequestPriority(self, req_id, priority):
        context.session.assertPerm('admin')
        set_request_priority(req_id, priority)

    getExternalRepoData = staticmethod(get_external_repo_data)

    def references(self, repo_id):
        """Return a list of buildroots that reference the repo"""
        return kojihub.repo_references(repo_id)

    def setState(self, repo_id, state):
        """Set repo state"""
        context.session.assertPerm('repo')
        kojihub.repo_set_state(repo_id, state)

    def query(self, clauses, fields=None, opts=None):
        query = RepoQuery(clauses, fields, opts)
        return query.iterate()

    def setExternalRepoData(self, external_repo_id, data):
        """Update tracking data for an external repo"""
        context.session.assertPerm('repo')
        set_external_repo_data(external_repo_id, data)

    def autoRequests(self):
        """[kojira] trigger automatic repo requests"""
        context.session.assertPerm('repo')
        do_auto_requests()

    def checkQueue(self):
        """[kojira] check the repo request queue"""
        context.session.assertPerm('repo')
        check_repo_queue()

    def queryQueue(self, clauses=None, fields=None, opts=None):
        query = RepoQueueQuery(clauses, fields, opts)
        return query.execute()

    def updateEndEvents(self):
        """[kojira] update end events for repos"""
        context.session.assertPerm('repo')
        update_end_events()


# the end


@@ -18,11 +18,11 @@ from kojihub import (
    get_build_target,
    get_tag,
    get_user,
    make_task,
    policy_get_user,
    readInheritanceData,
)
from kojihub.db import QueryProcessor, nextval
from kojihub.repos import request_repo

CONFIG_FILE = "/etc/koji-hub/plugins/sidetag.conf"
CONFIG = None
@@ -156,13 +156,14 @@ def createSideTag(basetag, debuginfo=False, suffix=None):
    _create_build_target(sidetag_name, sidetag_id, sidetag_id)

    if TRIGGER_NEW_REPO:
        # little higher priority than other newRepo tasks
        args = koji.encode_args(sidetag_name, debuginfo=debuginfo)
        task_id = make_task('newRepo', args, priority=14, channel='createrepo')
        check = request_repo(sidetag_id)
        request = check['request']
    else:
        task_id = None
        request = None
    # in our case, the request will not have a task yet
    task_id = None

    return {"name": sidetag_name, "id": sidetag_id, 'task_id': task_id}
    return {"name": sidetag_name, "id": sidetag_id, 'task_id': task_id, 'request': request}


@export
@@ -3,7 +3,51 @@

BEGIN;

CREATE INDEX CONCURRENTLY IF NOT EXISTS rpminfo_nvra
    ON rpminfo(name,version,release,arch,external_repo_id);
-- repos on demand
ALTER TABLE repo ADD COLUMN creation_time TIMESTAMPTZ DEFAULT NOW();
ALTER TABLE repo ADD COLUMN state_time TIMESTAMPTZ DEFAULT NOW();
ALTER TABLE repo ADD COLUMN begin_event INTEGER REFERENCES events(id);
ALTER TABLE repo ADD COLUMN end_event INTEGER REFERENCES events(id);
ALTER TABLE repo ADD COLUMN opts JSONB;
ALTER TABLE repo ADD COLUMN custom_opts JSONB;

CREATE TABLE repo_queue (
    id SERIAL NOT NULL PRIMARY KEY,
    create_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    owner INTEGER REFERENCES users(id) NOT NULL,
    priority INTEGER NOT NULL,
    tag_id INTEGER NOT NULL REFERENCES tag(id),
    at_event INTEGER REFERENCES events(id),
    min_event INTEGER REFERENCES events(id),
    opts JSONB NOT NULL,
    CONSTRAINT only_one_event CHECK (at_event IS NULL OR min_event IS NULL),
    update_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    active BOOLEAN NOT NULL DEFAULT TRUE,
    task_id INTEGER REFERENCES task(id),
    tries INTEGER NOT NULL DEFAULT 0,
    repo_id INTEGER REFERENCES repo(id),
    CONSTRAINT active_sane CHECK (NOT active OR repo_id IS NULL)
) WITHOUT OIDS;

CREATE TABLE external_repo_data (
    external_repo_id INTEGER NOT NULL REFERENCES external_repo(id),
    data JSONB,
    create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
    revoke_event INTEGER REFERENCES events(id),
    creator_id INTEGER NOT NULL REFERENCES users(id),
    revoker_id INTEGER REFERENCES users(id),
    active BOOLEAN DEFAULT 'true' CHECK (active),
    CONSTRAINT active_revoke_sane CHECK (
        (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
        OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
    PRIMARY KEY (create_event, external_repo_id),
    UNIQUE (external_repo_id, active)
) WITHOUT OIDS;

INSERT INTO locks(name) VALUES('repo-queue');

COMMIT;

-- this must be outside the transaction block
CREATE INDEX CONCURRENTLY IF NOT EXISTS rpminfo_nvra
    ON rpminfo(name,version,release,arch,external_repo_id);

@@ -464,14 +464,51 @@ CREATE TABLE build_target_config (

-- track repos
CREATE TABLE repo (
    id SERIAL NOT NULL PRIMARY KEY,
    create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
    tag_id INTEGER NOT NULL REFERENCES tag(id),
    state INTEGER,
    dist BOOLEAN DEFAULT 'false',
    task_id INTEGER NULL REFERENCES task(id)
    id SERIAL NOT NULL PRIMARY KEY,
    creation_time TIMESTAMPTZ DEFAULT NOW(),
    create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
    -- creation_time is the time that the repo entry was created
    -- create_event is the event that the repo was created *from*
    -- because a repo can be created from an old event, the two can refer to
    -- quite different points in time
    state_time TIMESTAMPTZ DEFAULT NOW(),
    -- state_time is changed when the repo changes state
    begin_event INTEGER REFERENCES events(id),
    end_event INTEGER REFERENCES events(id),
    -- begin_event records the "tag last changed" event for the tag at creation
    -- end_event records the first event where the tag changes after creation
    -- i.e. these are the event boundaries where the repo matches its tag
    tag_id INTEGER NOT NULL REFERENCES tag(id),
    state INTEGER,
    dist BOOLEAN DEFAULT 'false',
    opts JSONB,
    custom_opts JSONB,
    task_id INTEGER REFERENCES task(id)
) WITHOUT OIDS;
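
-- aside: illustration, not part of the schema.
-- Per the comments above, a repo matches its tag's content for events E with
--   begin_event <= E AND (end_event IS NULL OR E < end_event).
-- For example, READY (state = 1) repos that are still current for a tag:
--   SELECT id FROM repo
--   WHERE tag_id = 137 AND state = 1 AND dist IS FALSE AND end_event IS NULL;
-- end aside.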

-- repo requests
CREATE TABLE repo_queue (
    id SERIAL NOT NULL PRIMARY KEY,
    create_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    owner INTEGER REFERENCES users(id) NOT NULL,
    priority INTEGER NOT NULL,
    tag_id INTEGER NOT NULL REFERENCES tag(id),
    at_event INTEGER REFERENCES events(id),
    min_event INTEGER REFERENCES events(id),
    opts JSONB NOT NULL,
    CONSTRAINT only_one_event CHECK (at_event IS NULL OR min_event IS NULL),
    -- the above should be constant for the life of the entry
    update_time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    active BOOLEAN NOT NULL DEFAULT TRUE,
    task_id INTEGER REFERENCES task(id),
    tries INTEGER NOT NULL DEFAULT 0,
    repo_id INTEGER REFERENCES repo(id),
    CONSTRAINT active_sane CHECK (NOT active OR repo_id IS NULL)
    -- active requests shouldn't already have a repo_id
) WITHOUT OIDS;


-- external yum repos
create table external_repo (
    id SERIAL NOT NULL PRIMARY KEY,

@@ -480,6 +517,7 @@ create table external_repo (
-- fake repo id for internal stuff (needed for unique index)
INSERT INTO external_repo (id, name) VALUES (0, 'INTERNAL');


CREATE TABLE external_repo_config (
    external_repo_id INTEGER NOT NULL REFERENCES external_repo(id),
    url TEXT NOT NULL,

@@ -496,6 +534,25 @@ CREATE TABLE external_repo_config (
    UNIQUE (external_repo_id, active)
) WITHOUT OIDS;


-- kojira uses this table to record info about external repos
CREATE TABLE external_repo_data (
    external_repo_id INTEGER NOT NULL REFERENCES external_repo(id),
    data JSONB,
    -- versioned - see the earlier description of versioning
    create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
    revoke_event INTEGER REFERENCES events(id),
    creator_id INTEGER NOT NULL REFERENCES users(id),
    revoker_id INTEGER REFERENCES users(id),
    active BOOLEAN DEFAULT 'true' CHECK (active),
    CONSTRAINT active_revoke_sane CHECK (
        (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
        OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
    PRIMARY KEY (create_event, external_repo_id),
    UNIQUE (external_repo_id, active)
) WITHOUT OIDS;


CREATE TABLE tag_external_repos (
    tag_id INTEGER NOT NULL REFERENCES tag(id),
    external_repo_id INTEGER NOT NULL REFERENCES external_repo(id),

@@ -1052,5 +1109,6 @@ CREATE TABLE locks (
) WITHOUT OIDS;
INSERT INTO locks(name) VALUES('protonmsg-plugin');
INSERT INTO locks(name) VALUES('scheduler');
INSERT INTO locks(name) VALUES('repo-queue');

COMMIT WORK;

@@ -8291,7 +8291,7 @@
        "varkw": null
    },
    "host.repoInit": {
        "desc": "(tag, task_id=None, with_src=False, with_debuginfo=False, event=None, with_separate_src=False)",
        "desc": "(tag, task_id=None, event=None, opts=None)",
        "args": [
            {
                "name": "tag"

@@ -8300,21 +8300,13 @@
                "name": "task_id",
                "default": "None"
            },
            {
                "name": "with_src",
                "default": "False"
            },
            {
                "name": "with_debuginfo",
                "default": "False"
            },
            {
                "name": "event",
                "default": "None"
            },
            {
                "name": "with_separate_src",
                "default": "False"
                "name": "opts",
                "default": "None"
            }
        ],
        "varargs": null,

@@ -63,7 +63,8 @@ class TestParseTaskParams(unittest.TestCase):
                # match
                self.assertIn(list(spec), koji.tasks.LEGACY_SIGNATURES[method])

        if len(missing) > 0.1 * len(koji.tasks.LEGACY_SIGNATURES):
            # we should hit most of the legacy entries this way
            raise Exception('Unable to test enough legacy signatures. Missing: '
        external = ['runroot', 'saveFailedTree', 'vmExec', 'winbuild']
        missing = [m for m in missing if m not in external]
        if missing:
            raise Exception('Unable to test legacy signatures. Missing: '
                            '%r' % missing)

@@ -46,7 +46,7 @@ admin commands:
        lock-tag                  Lock a tag
        make-task                 Create an arbitrary task
        prune-signed-copies       Prune signed copies
        regen-repo                Force a repo to be regenerated
        regen-repo                Generate a current repo if there is not one
        remove-external-repo      Remove an external repo from a tag or tags, or remove entirely
        remove-group              Remove group from tag
        remove-host-from-channel  Remove a host from a channel

@@ -46,7 +46,7 @@ admin commands:
        lock-tag                  Lock a tag
        make-task                 Create an arbitrary task
        prune-signed-copies       Prune signed copies
        regen-repo                Force a repo to be regenerated
        regen-repo                Generate a current repo if there is not one
        remove-external-repo      Remove an external repo from a tag or tags, or remove entirely
        remove-group              Remove group from tag
        remove-host-from-channel  Remove a host from a channel

@@ -130,6 +130,7 @@ miscellaneous commands:
        import-comps              Import group/package information from a comps file
        moshimoshi                Introduce yourself
        promote-build             Promote a draft build
        request-repo              Request a repo for a tag
        version                   Report client and hub versions

monitor commands:

@@ -142,6 +143,7 @@ monitor commands:
        scheduler-logs            Query scheduler logs
        unblock-notification      Unblock user's notification
        wait-repo                 Wait for a repo to be regenerated
        wait-repo-request         Wait for an existing repo request
        watch-logs                Watch logs in realtime
        watch-task                Track progress of particular tasks

@@ -165,7 +165,7 @@ Options:
  --no-rebuild-srpm     Force not to rebuild srpm for scratch build only
  --wait                Wait on the build, even if running in the background
  --nowait              Don't wait on build
  --wait-repo           Wait for the actual buildroot repo of given target
  --wait-repo           Wait for a current repo for the build tag
  --wait-build=NVR      Wait for the given nvr to appear in buildroot repo
  --quiet               Do not print the task information
  --arch-override=ARCH_OVERRIDE

@@ -39,6 +39,7 @@ class TestRegenRepo(utils.CliTestCase):
        self.options.weburl = 'https://localhost.local'

        self.session = mock.MagicMock()
        self.session.hub_version = (1, 35, 0)
        self.session.getTag.return_value = copy.deepcopy(self.TAG)
        self.session.newRepo.return_value = self.task_id
        self.session.getBuildTarget.return_value = {'build_tag_name': self.tag_name}

@@ -104,7 +105,7 @@ class TestRegenRepo(utils.CliTestCase):
        self.session.getTag.return_value = copy.copy(self.TAG)
        self.session.getBuildTargets.return_value = []
        expected_warn = "%s is not a build tag" % self.tag_name + "\n"
        self.__run_test_handle_regen_repo([self.tag_name], return_value=True,
        self.__run_test_handle_regen_repo([self.tag_name, '--make-task'], return_value=True,
                                          expected_warn=expected_warn)

        self.resetMocks()

@@ -113,12 +114,12 @@ class TestRegenRepo(utils.CliTestCase):
        noarch_tag.update({'arches': ''})
        self.session.getTag.return_value = noarch_tag
        expected_warn += "Tag %s has an empty arch list" % noarch_tag['name'] + "\n"
        self.__run_test_handle_regen_repo([self.tag_name], return_value=True,
        self.__run_test_handle_regen_repo([self.tag_name, '--make-task'], return_value=True,
                                          expected_warn=expected_warn)

    def test_handle_regen_repo_with_target_opt(self):
        """Test handle_regen_repo function with --target option"""
        arguments = [self.tag_name, '--target']
        arguments = [self.tag_name, '--target', '--make-task']

        # show error if target is not matched
        self.session.getBuildTarget.return_value = {}

@@ -138,11 +139,11 @@ class TestRegenRepo(utils.CliTestCase):
    def test_handle_regen_repo_with_other_opts(self):
        """Test handle_regen_repo function with options"""
        # --nowait
        self.__run_test_handle_regen_repo([self.tag_name, '--nowait'], return_value=None)
        self.__run_test_handle_regen_repo([self.tag_name, '--nowait', '--make-task'], return_value=None)
        self.resetMocks()

        # --source && --debuginfo
        self.__run_test_handle_regen_repo([self.tag_name, '--source', '--debuginfo'],
        self.__run_test_handle_regen_repo([self.tag_name, '--source', '--debuginfo', '--make-task'],
                                          return_value=True)
        self.session.newRepo.assert_called_with(self.tag_name, **{'debuginfo': True, 'src': True})

@@ -150,10 +151,10 @@ class TestRegenRepo(utils.CliTestCase):
        """Test handle_regen_repo function errors and exceptions"""
        tests = [
            # [ arguments, error_string ]
            [[], self.format_error_message("A tag name must be specified")],
            [['tag1', 'tag2'],
            [['--make-task'], self.format_error_message("A tag name must be specified")],
            [['tag1', 'tag2', '--make-task'],
             self.format_error_message("Only a single tag name may be specified")],
            [['tag1', 'tag2', '--target'],
            [['tag1', 'tag2', '--target', '--make-task'],
             self.format_error_message("Only a single target may be specified")],
        ]

@@ -180,10 +181,14 @@ Options:
  --wait             Wait on for regen to finish, even if running in the
                     background
  --nowait           Don't wait on for regen to finish
  --make-task        Directly create a newRepo task
  --debuginfo        Include debuginfo rpms in repo
  --source, --src    Include source rpms in each of repos
  --separate-source, --separate-src
                     Include source rpms in separate src repo
  --timeout=TIMEOUT  Wait timeout (default: 120)
  -v, --verbose      More verbose output
  --quiet            Reduced output
""" % self.progname)

@@ -6,14 +6,19 @@ import unittest
import copy

import mock
import pytest
import six

import koji
from koji_cli.commands import anon_handle_wait_repo
from . import utils


class TestWaitRepo(utils.CliTestCase):

    """
    These older tests cover the non-request code path for the cli handler
    """

    # Show long diffs in error output...
    maxDiff = None
    longMessage = True

@@ -22,7 +27,7 @@ class TestWaitRepo(utils.CliTestCase):
        'maven_support': False,
        'locked': False,
        'name': 'fedora26-build',
        'extra': {},
        'extra': {'repo.auto': True},
        'perm': None,
        'id': 2,
        'arches': 'x86_64',

@@ -40,6 +45,7 @@ class TestWaitRepo(utils.CliTestCase):
        self.options.weburl = 'https://localhost.local'

        self.session = mock.MagicMock()
        self.session.hub_version = (1, 35, 0)
        self.session.getTag.return_value = copy.deepcopy(self.TAG)
        self.session.newRepo.return_value = self.task_id
        self.session.getBuildTarget.return_value = {'build_tag_name': self.tag_name}

@@ -55,18 +61,19 @@ class TestWaitRepo(utils.CliTestCase):
    def setUpMocks(self):
        self.activate_session = mock.patch('koji_cli.commands.activate_session').start()
        self.ensure_connection = mock.patch('koji_cli.commands.ensure_connection').start()
        self.checkForBuilds = mock.patch('koji.util.checkForBuilds').start()
        self.watcher = mock.MagicMock()
        self.RepoWatcher = mock.patch('koji.util.RepoWatcher', return_value=self.watcher).start()
        self.wait_logger = mock.MagicMock()
        self.getLogger = mock.patch('logging.getLogger', return_value=self.wait_logger).start()

    def tearDown(self):
        mock.patch.stopall()

    @mock.patch('time.time')
    @mock.patch('sys.stdout', new_callable=six.StringIO)
    @mock.patch('sys.stderr', new_callable=six.StringIO)
    def __test_wait_repo(self, args, expected, stderr, stdout, time_mock, ret_code=0,
    def __test_wait_repo(self, args, expected, stderr, stdout, ret_code=0,
                         expected_warn=''):
        self.options.quiet = False
        time_mock.side_effect = [0, 1, 2, 3]
        if ret_code:
            with self.assertRaises(SystemExit) as ex:
                anon_handle_wait_repo(self.options, self.session, args)

@@ -79,59 +86,46 @@ class TestWaitRepo(utils.CliTestCase):
        self.assert_console_message(stderr, expected_warn)
        self.assertIn(rv, [0, None])

    @mock.patch('time.time')
    @mock.patch('sys.stdout', new_callable=six.StringIO)
    @mock.patch('sys.stderr', new_callable=six.StringIO)
    def __test_wait_repo_timeout(self, args, expected, stderr, stdout, time_mock, ret_code=0):
        self.options.quiet = False
        time_mock.side_effect = [0, 61, 62]
        if ret_code:
            with self.assertRaises(SystemExit) as ex:
                anon_handle_wait_repo(self.options, self.session, args + ['--timeout', '1'])
            self.assertExitCode(ex, ret_code)
            self.assert_console_message(stderr, expected)
            self.assert_console_message(stdout, '')
        else:
            rv = anon_handle_wait_repo(self.options, self.session, args + ['--timeout', '1'])
            self.assert_console_message(stdout, expected)
            self.assert_console_message(stderr, '')
            self.assertIn(rv, [0, None])

    def test_anon_handle_wait_repo(self):
        """Test anon_handle_wait_repo function"""
        arguments = [self.tag_name]
        arguments = [self.tag_name, '--no-request']

        self.options.quiet = False
        self.session.getRepo.side_effect = [{}, {}, {'id': 1, 'name': 'DEFAULT'}]
        expected = 'Successfully waited 0:03 for a new %s repo' % self.tag_name + '\n'
        self.watcher.waitrepo.return_value = {'id': 1, 'name': 'DEFAULT'}
        expected = (
            'Got repo 1\n'
            'Repo info: https://localhost.local/repoinfo?repoID=1\n'
        )
        self.__test_wait_repo(arguments, expected)

    def test_anon_handle_wait_repo_with_target_opt(self):
        """Test anon_handle_wait_repo function with --target option"""
        arguments = [self.tag_name, '--target']
        arguments = [self.tag_name, '--target', '--no-request']

        self.options.quiet = False
        self.session.getBuildTarget.return_value = {'build_tag_name': self.tag_name,
                                                    'build_tag': 1}
        self.session.getRepo.side_effect = [{}, {}, {'id': 1, 'name': 'DEFAULT'}]
        expected = 'Successfully waited 0:03 for a new %s repo' % self.tag_name + '\n'
        self.watcher.waitrepo.return_value = {'id': 1, 'name': 'DEFAULT'}
        expected = (
            'Got repo 1\n'
            'Repo info: https://localhost.local/repoinfo?repoID=1\n'
        )
        self.__test_wait_repo(arguments, expected)

    def test_anon_handle_wait_repo_timeout(self):
        """Test anon_handle_wait_repo function on timeout case"""
        arguments = [self.tag_name]
        arguments = [self.tag_name, '--no-request']

        self.options.quiet = False
        self.session.getRepo.return_value = {}
        self.checkForBuilds.return_value = True
        expected = 'Unsuccessfully waited 1:02 for a new %s repo' % self.tag_name + '\n'
        self.__test_wait_repo_timeout(arguments, expected, ret_code=1)
        self.watcher.waitrepo.side_effect = koji.GenericError('timeout')
        expected = 'Failed to get repo -- timeout\n'
        self.__test_wait_repo(arguments, expected, ret_code=1)

    def test_anon_handle_wait_repo_with_build(self):
        """Test anon_handle_wait_repo function with --build options"""
        builds = ['bash-4.4.12-5.fc26', 'sed-4.4-1.fc26']
        new_ver = 'bash-4.4.12-7.fc26'
        arguments = [self.tag_name]
        arguments = [self.tag_name, '--no-request']
        pkgs = ''
        for b in builds:
            arguments += ['--build', b]

@@ -142,21 +136,22 @@ class TestWaitRepo(utils.CliTestCase):
        self.session.getLatestBuilds.side_effect = [
            [{'nvr': new_ver}], []
        ]
        self.checkForBuilds.return_value = True
        self.session.getRepo.side_effect = [
            {}, {}, {'id': 1, 'name': 'DEFAULT', 'create_event': 1}
        ]
        expected_warn = 'nvr %s is not current in tag %s\n latest build in %s is %s' % \
                        (builds[0], self.tag_name, self.tag_name, new_ver) + "\n"
        self.watcher.waitrepo.return_value = {'id': 1, 'name': 'DEFAULT', 'create_event': 1}

        expected_warn = 'nvr %s is not current in tag %s\n latest build is %s' % \
                        (builds[0], self.tag_name, new_ver) + "\n"
        expected_warn += 'No sed builds in tag %s' % self.tag_name + '\n'
        expected = 'Successfully waited 0:03 for %s to appear in the ' \
                   '%s repo\n' % (pkgs, self.tag_name)
        expected = (
            'Got repo 1\n'
            'Repo info: https://localhost.local/repoinfo?repoID=1\n'
        )
        self.__test_wait_repo(arguments, expected, expected_warn=expected_warn)
        self.RepoWatcher.assert_called_with(self.session, self.TAG['id'], nvrs=builds, min_event=None, logger=self.wait_logger)

    def test_anon_handle_wait_repo_with_build_timeout(self):
        """Test anon_handle_wait_repo function with --build options on timeout cases"""
        builds = ['bash-4.4.12-5.fc26', 'sed-4.4-1.fc26']
        arguments = [self.tag_name]
        arguments = [self.tag_name, '--no-request']
        pkgs = ''
        for b in builds:
            arguments += ['--build', b]

@@ -168,20 +163,18 @@ class TestWaitRepo(utils.CliTestCase):
            [{'nvr': builds[0]}],
            [{'nvr': builds[1]}],
        ]
        self.checkForBuilds.return_value = True
        self.session.getRepo.return_value = {}
        expected = 'Unsuccessfully waited 1:02 for %s to appear in the %s ' \
                   'repo\n' % (pkgs, self.tag_name)
        self.__test_wait_repo_timeout(arguments, expected, ret_code=1)
        self.watcher.waitrepo.side_effect = koji.GenericError('timeout')
        expected = 'Failed to get repo -- timeout\n'
        self.__test_wait_repo(arguments, expected, ret_code=1)

    def test_anon_handle_wait_repo_errors(self):
        """Test anon_handle_wait_repo function errors and exceptions"""
        tests = [
            # [ arguments, error_string ]
            [[], "Please specify a tag name"],
            [['tag1', 'tag2'], "Only one tag may be specified"],
            [[self.tag_name], "No such tag: %s" % self.tag_name],
            [[self.tag_name, '--target'], "No such build target: %s" % self.tag_name],
            [['--no-request'], "Please specify a tag name"],
            [['tag1', 'tag2', '--no-request'], "Only one tag may be specified"],
            [[self.tag_name, '--no-request'], "No such tag: %s" % self.tag_name],
            [[self.tag_name, '--target', '--no-request'], "No such build target: %s" % self.tag_name],
        ]

        self.session.getBuildTarget.return_value = {}

@@ -200,17 +193,20 @@ class TestWaitRepo(utils.CliTestCase):
    @mock.patch('sys.stderr', new_callable=six.StringIO)
    def test_anon_handle_wait_repo_target_not_found(self, stderr):
        """Test anon_handle_wait_repo function on target not found cases"""
        # Should warn, but continue to watch

        # Case 1. both build and dest targets are not found
        self.session.getTag.return_value = self.TAG.copy()
        self.session.getBuildTargets.return_value = []
        with self.assertRaises(SystemExit) as ex:
            anon_handle_wait_repo(self.options, self.session, [self.tag_name])
        self.assertExitCode(ex, 1)

        anon_handle_wait_repo(self.options, self.session, [self.tag_name, '--no-request'])

        expected = "%(name)s is not a build tag for any target" % self.TAG + "\n"
        self.assert_console_message(stderr, expected)
        self.RepoWatcher.assert_called_with(self.session, self.TAG['id'], nvrs=[], min_event=None, logger=self.wait_logger)

        # Case 2. dest is matched, show suggestion
        self.RepoWatcher.reset_mock()
        self.session.getBuildTargets.side_effect = [
            [],
            [

@@ -219,12 +215,11 @@ class TestWaitRepo(utils.CliTestCase):
                {'build_tag_name': 'build-tag-3'},
            ],
        ]
        with self.assertRaises(SystemExit) as ex:
            anon_handle_wait_repo(self.options, self.session, [self.tag_name])
        self.assertExitCode(ex, 1)
        anon_handle_wait_repo(self.options, self.session, [self.tag_name, '--no-request'])
        expected = "%(name)s is not a build tag for any target" % self.TAG + "\n"
        expected += "Suggested tags: build-tag-1, build-tag-2, build-tag-3\n"
        self.assert_console_message(stderr, expected)
        self.RepoWatcher.assert_called_with(self.session, self.TAG['id'], nvrs=[], min_event=None, logger=self.wait_logger)

    def test_anon_handle_wait_repo_help(self):
        """Test anon_handle_wait_repo help message"""

@@ -238,8 +233,11 @@ Options:
  --build=NVR        Check that the given build is in the newly-generated repo
                     (may be used multiple times)
  --target           Interpret the argument as a build target name
  --request          Create a repo request (requires auth)
  --no-request       Do not create a repo request (the default)
  --timeout=TIMEOUT  Amount of time to wait (in minutes) before giving up
                     (default: 120)
  -v, --verbose      Be verbose
  --quiet            Suppress output, success or failure will be indicated by
                     the return value only
""" % self.progname)
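
# --- aside: illustration, not part of this file ---
# The tests above patch koji.util.RepoWatcher, so the client pattern they
# exercise looks roughly like the sketch below. The constructor signature is
# inferred from the assert_called_with lines; treat it as an approximation,
# and the hub URL and nvr are hypothetical:
#
#     import logging
#     import koji
#     import koji.util
#
#     session = koji.ClientSession('https://koji.example.com/kojihub')
#     watcher = koji.util.RepoWatcher(session, 2, nvrs=['bash-4.4.12-5.fc26'],
#                                     min_event=None,
#                                     logger=logging.getLogger('koji.cli'))
#     repo = watcher.waitrepo()  # raises koji.GenericError on timeout
#     print(repo['id'])
# --- end aside ---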

tests/test_hub/test_bulkupdate_processor.py (new file, 91 lines)

@@ -0,0 +1,91 @@
import mock
import unittest

from kojihub import db


class TestUpdateProcessor(unittest.TestCase):

    maxDiff = None

    def setUp(self):
        self.context = mock.patch('kojihub.db.context').start()

    def tearDown(self):
        mock.patch.stopall()

    def test_basic_instantiation(self):
        proc = db.BulkUpdateProcessor('sometable')
        repr(proc)
        # No exception!

    def test_basic_bulk_update(self):
        data = [{'id': n, 'field': f'value {n}'} for n in range(2)]
        proc = db.BulkUpdateProcessor('sometable', data=data, match_keys=('id',))

        # check sql
        actual = str(proc)
        expected_sql = ('UPDATE sometable SET field = __kojibulk_sometable.field\n'
                        'FROM (VALUES (%(val_field_0)s, %(val_id_0)s), (%(val_field_1)s, %(val_id_1)s))\n'
                        'AS __kojibulk_sometable (field, id)\n'
                        'WHERE (sometable.id = __kojibulk_sometable.id)')
        self.assertEqual(actual, expected_sql)

        # check values
        expected_values = {'val_field_0': 'value 0',
                           'val_field_1': 'value 1',
                           'val_id_0': 0,
                           'val_id_1': 1}
        self.assertEqual(proc._values, expected_values)

        # verify execution
        cursor = mock.MagicMock()
        self.context.cnx.cursor.return_value = cursor
        proc.execute()
        cursor.execute.assert_called_once_with(
            expected_sql,
            expected_values,
            log_errors=True,
        )

    def test_incomplete(self):
        proc = db.BulkUpdateProcessor('sometable')
        expected = '-- incomplete bulk update'
        self.assertEqual(str(proc), expected)

        with self.assertRaises(ValueError) as ex:
            proc.get_keys()
        expected = 'no update data'
        self.assertEqual(str(ex.exception), expected)

    def test_bad_key(self):
        data = [{'id': n, 100: f'value {n}'} for n in range(2)]
        proc = db.BulkUpdateProcessor('sometable', data=data, match_keys=('id',))
        with self.assertRaises(TypeError) as ex:
            str(proc)
        expected = 'update data must use string keys'
        self.assertEqual(str(ex.exception), expected)

    def test_key_mismatch(self):
        # extra key in a later row
        data = [
            {'id': 1, 'A': 1},
            {'id': 2, 'A': 1, 'B': 2},
        ]
        proc = db.BulkUpdateProcessor('sometable', data=data, match_keys=('id',))
        with self.assertRaises(ValueError) as ex:
            str(proc)
        expected = 'mismatched update keys'
        self.assertEqual(str(ex.exception), expected)

        # missing key in a later row
        data = [
            {'id': 1, 'A': 1},
            {'id': 2},
        ]
        proc = db.BulkUpdateProcessor('sometable', data=data, match_keys=('id',))
        with self.assertRaises(ValueError) as ex:
            str(proc)
        expected = 'mismatched update keys'
        self.assertEqual(str(ex.exception), expected)
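
# --- aside: illustration, not part of this file ---
# The expected SQL in test_basic_bulk_update shows the single-statement form
# that BulkUpdateProcessor emits: one UPDATE ... FROM (VALUES ...) instead of
# one UPDATE per row. A standalone sketch of the same technique with psycopg2
# (connection details and table/column names hypothetical):
#
#     import psycopg2
#
#     conn = psycopg2.connect(dbname='koji')
#     cur = conn.cursor()
#     cur.execute(
#         'UPDATE sometable SET field = v.field\n'
#         'FROM (VALUES (%(val_field_0)s, %(val_id_0)s), (%(val_field_1)s, %(val_id_1)s))\n'
#         'AS v (field, id)\n'
#         'WHERE sometable.id = v.id',
#         {'val_field_0': 'value 0', 'val_id_0': 0,
#          'val_field_1': 'value 1', 'val_id_1': 1},
#     )
#     conn.commit()
# --- end aside ---
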
@@ -1,40 +1,33 @@
import mock
import unittest
import koji
import kojihub
import kojihub.repos

from koji.context import context


QP = kojihub.QueryProcessor
RQ = kojihub.repos.RepoQuery


class TestGetActiveRepos(unittest.TestCase):

    def setUp(self):
        self.QueryProcessor = mock.patch('kojihub.kojihub.QueryProcessor',
                                         side_effect=self.getQuery).start()
        self.context = mock.patch('kojihub.db.context').start()
        self.RepoQuery = mock.patch('kojihub.kojihub.repos.RepoQuery',
                                    side_effect=self.getQuery).start()
        self.queries = []

    def getQuery(self, *args, **kwargs):
        query = QP(*args, **kwargs)
        query.execute = mock.MagicMock()
        self.queries.append(query)
        return query

    def tearDown(self):
        mock.patch.stopall()

    def getQuery(self, *args, **kwargs):
        query = RQ(*args, **kwargs)
        #query.execute = mock.MagicMock()
        self.queries.append(query)
        return query

    def test_get_active_repos(self):
        # currently not really a lot of parameters to test
        kojihub.get_active_repos()
        self.assertEqual(len(self.queries), 1)
        self.RepoQuery.assert_called_once()
        query = self.queries[0]
        # make sure the following does not error
        str(query)
        self.assertEqual(query.tables, ['repo'])
        columns = ['repo.id', 'repo.state', 'repo.task_id', 'repo.create_event',
                   "date_part('epoch', events.time)", 'repo.tag_id', 'repo.dist', 'tag.name']
        self.assertEqual(set(query.columns), set(columns))
        self.assertEqual(query.clauses, ['repo.state != %(st_deleted)s'])
        self.assertEqual(query.joins, ['tag ON repo.tag_id=tag.id',
                                       'events ON repo.create_event = events.id'])
        self.assertEqual(query.values['st_deleted'], koji.REPO_DELETED)
        self.assertEqual(len(query.clauses), 1)

@@ -9,6 +9,7 @@ class TestGetTagExternalRepos(DBQueryTestCase):
    def setUp(self):
        super(TestGetTagExternalRepos, self).setUp()
        self.maxDiff = None
        self.get_tag_id = mock.patch('kojihub.kojihub.get_tag_id').start()
        self.get_tag = mock.patch('kojihub.kojihub.get_tag').start()
        self.get_external_repo = mock.patch('kojihub.kojihub.get_external_repo').start()
        self.exports = kojihub.RootExports()

@@ -23,6 +24,7 @@ class TestGetTagExternalRepos(DBQueryTestCase):
        mock.patch.stopall()

    def test_valid(self):
        self.get_tag_id.return_value = self.build_tag_info['id']
        self.get_tag.return_value = self.build_tag_info
        self.get_external_repo.return_value = self.repo_info
        kojihub.get_tag_external_repos(tag_info=self.build_tag, repo_info=self.repo)

@@ -407,7 +407,7 @@ class TestQueryHistory(DBQueryTestCase):
    def test_external_repo_key(self):
        self.get_external_repo_id.return_value = 49
        kojihub.query_history(external_repo='test-ext-repo')
        self.assertEqual(len(self.queries), 2)
        self.assertEqual(len(self.queries), 3)
        query = self.queries[0]
        self.assertEqual(query.tables, ['external_repo_config'])
        self.assertEqual(query.clauses, ['external_repo.id = %(external_repo_id)i'])

@@ -430,6 +430,28 @@ class TestQueryHistory(DBQueryTestCase):
        self.assertEqual(query.values, {'external_repo_id': 49})

        query = self.queries[1]
        self.assertEqual(query.tables, ['external_repo_data'])
        self.assertEqual(query.clauses, ['external_repo.id = %(external_repo_id)i'])
        self.assertEqual(query.columns, ['external_repo_data.active',
                                         'external_repo_data.create_event',
                                         "date_part('epoch', ev1.time) AS create_ts",
                                         'external_repo_data.creator_id', 'creator.name',
                                         'external_repo_data.data',
                                         'external_repo.name',
                                         'external_repo_data.external_repo_id',
                                         'external_repo_data.revoke_event',
                                         "date_part('epoch', ev2.time) AS revoke_ts",
                                         'external_repo_data.revoker_id', 'revoker.name',
                                         ])
        self.assertEqual(query.joins,
                         ["events AS ev1 ON ev1.id = create_event",
                          "LEFT OUTER JOIN events AS ev2 ON ev2.id = revoke_event",
                          "users AS creator ON creator.id = creator_id",
                          "LEFT OUTER JOIN users AS revoker ON revoker.id = revoker_id",
                          'LEFT OUTER JOIN external_repo ON external_repo_id = external_repo.id'])
        self.assertEqual(query.values, {'external_repo_id': 49})

        query = self.queries[2]
        self.assertEqual(query.tables, ['tag_external_repos'])
        self.assertEqual(query.clauses, ['external_repo.id = %(external_repo_id)i'])
        self.assertEqual(query.columns, ['tag_external_repos.active',

@@ -62,5 +62,5 @@ class TestQueryView(unittest.TestCase):
        self.assertEqual(set(view.query.aliases), set(self.viewclass.default_fields))

    def test_all_fields(self):
        view = self.viewclass(fields='*')
        view = self.viewclass(fields='**')
        self.assertEqual(set(view.query.aliases), set(self.viewclass.fieldmap.keys()))

@@ -1,14 +1,472 @@
import json
import mock
import os
import shutil
import tempfile
import unittest

from xml.etree import ElementTree

import koji
import kojihub


IP = kojihub.InsertProcessor
QP = kojihub.QueryProcessor


class TestRepoInit(unittest.TestCase):

    def setUp(self):
        self.context = mock.MagicMock()
        self.context.opts = {
            'EnableMaven': False,
        }
        mock.patch('kojihub.repos.context', new=self.context).start()
        mock.patch('kojihub.kojihub.context', new=self.context).start()
        self.tempdir = tempfile.mkdtemp()
        self.pathinfo = koji.PathInfo(self.tempdir)
        mock.patch('koji.pathinfo', new=self.pathinfo).start()
        self.get_tag = mock.patch('kojihub.kojihub.get_tag').start()
        self.taginfo = {'id': 137, 'name': 'TAG', 'arches': 'x86_64 aarch64', 'extra': {}}
        self.get_tag.return_value = self.taginfo
        self.readTaggedRPMS = mock.patch('kojihub.kojihub.readTaggedRPMS').start()
        self.readTaggedRPMS.return_value = [], []
        self.readTagGroups = mock.patch('kojihub.kojihub.readTagGroups').start()
        self.readPackageList = mock.patch('kojihub.kojihub.readPackageList').start()
        self.maven_tag_archives = mock.patch('kojihub.kojihub.maven_tag_archives').start()
        self.tag_first_change_event = mock.patch('kojihub.kojihub.tag_first_change_event').start()
        self.tag_last_change_event = mock.patch('kojihub.kojihub.tag_last_change_event').start()
        self.get_repo_opts = mock.patch('kojihub.repos.get_repo_opts').start()
        self.default_opts = {'src': False, 'debuginfo': False, 'separate_src': False,
                             'maven': False}
        self.get_repo_opts.return_value = self.default_opts, {}

        self.InsertProcessor = mock.patch('kojihub.kojihub.InsertProcessor',
                                          side_effect=self.getInsert).start()
        self.inserts = []
        self.QueryProcessor = mock.patch('kojihub.kojihub.QueryProcessor',
                                         side_effect=self.getQuery).start()
        self.queries = []
        self.query_execute = mock.MagicMock()
        self.query_executeOne = mock.MagicMock()
        self.query_singleValue = mock.MagicMock()

        self.singleValue = mock.patch('kojihub.kojihub._singleValue').start()
        self.singleValue.return_value = 'EVENTID'
        self.nextval = mock.patch('kojihub.kojihub.nextval').start()
        self.nextval.return_value = 'REPOID'

    def tearDown(self):
        mock.patch.stopall()
        shutil.rmtree(self.tempdir)

    def getInsert(self, *args, **kwargs):
        insert = IP(*args, **kwargs)
        insert.execute = mock.MagicMock()
        self.inserts.append(insert)
        return insert

    def getQuery(self, *args, **kwargs):
        query = QP(*args, **kwargs)
        query.execute = self.query_execute
        query.executeOne = self.query_executeOne
        query.singleValue = self.query_singleValue
        self.queries.append(query)
        return query

    def test_repo_init_wrong_type_typeID(self):
        task_id = 'test-task_id'
        with self.assertRaises(koji.ParameterError) as cm:
            kojihub.repo_init('test-tag', task_id)
        self.assertEqual(f"Invalid type for value '{task_id}': {type(task_id)}, "
                         f"expected type <class 'int'>", str(cm.exception))

    def test_maven_disabled(self):
        self.context.opts['EnableMaven'] = False
        opts = dict(self.default_opts, maven=True)
        custom = {'maven': True}
        self.get_repo_opts.return_value = opts, custom
        with self.assertRaises(koji.GenericError) as cm:
            kojihub.repo_init('test-tag', 100, opts=custom)
        self.assertEqual('Maven support not enabled', str(cm.exception))

    def test_empty_repo(self):
        # fairly trivial case of an empty repo
        task_id = 100
        self.readTaggedRPMS.return_value = [], []
        kojihub.repo_init('test-tag', task_id)

        repodir = f'{self.tempdir}/repos/TAG/REPOID'
        expect = ['aarch64', 'groups', 'repo.json', 'x86_64']
        self.assertEqual(sorted(os.listdir(repodir)), expect)

        with open(f'{repodir}/repo.json', 'rt') as fo:
            info = json.load(fo)
        self.assertEqual(info['id'], 'REPOID')
        self.assertEqual(info['tag'], 'TAG')
        self.assertEqual(info['tag_id'], 137)
        self.assertEqual(info['task_id'], 100)
        self.assertEqual(info['event_id'], 'EVENTID')
        self.assertEqual(info['opts'], self.default_opts)
        self.assertEqual(info['custom_opts'], {})

        # basic comps check
        with open(f'{repodir}/groups/comps.xml', 'rt') as fo:
            root = ElementTree.fromstring(fo.read())

        for arch in ['x86_64', 'aarch64']:
            # contents
            expect = ['blocklist', 'pkglist', 'rpmlist.jsonl', 'toplink']
            self.assertEqual(sorted(os.listdir(f'{repodir}/{arch}')), expect)

            # check toplink
            if not os.path.samefile(f'{repodir}/{arch}/toplink', self.tempdir):
                raise Exception('invalid toplink')

            # pkglist should be blank
            with open(f'{repodir}/{arch}/pkglist', 'rt') as fo:
                self.assertEqual(fo.read(), '')

            # blocklist should be blank
            with open(f'{repodir}/{arch}/blocklist', 'rt') as fo:
                self.assertEqual(fo.read(), '')

            # rpmlist should be blank
            with open(f'{repodir}/{arch}/rpmlist.jsonl', 'rt') as fo:
                self.assertEqual(fo.read(), '')

    DATA1 = [
        [
            # srpm
            {
                'arch': 'src',
                'build_id': 575,
                'draft': False,
                'id': 6100,
                'name': 'mypackage',
                'release': '36',
                'version': '1.1',
            },
            # noarch
            {
                'arch': 'noarch',
                'build_id': 575,
                'draft': False,
                'id': 6101,
                'name': 'mypackage',
                'release': '36',
                'version': '1.1',
            },
            # x86_64
            {
                'arch': 'x86_64',
                'build_id': 575,
                'draft': False,
                'id': 6102,
                'name': 'mypackage-binary',
                'release': '36',
                'version': '1.1',
            },
            # alpha -- not in list
            {
                'arch': 'alpha',
                'build_id': 575,
                'draft': False,
                'id': 6103,
                'name': 'mypackage-binary',
                'release': '36',
                'version': '1.1',
            },
            # debuginfo
            {
                'arch': 'x86_64',
                'build_id': 575,
                'draft': False,
                'id': 6104,
                'name': 'mypackage-debuginfo',
                'release': '36',
                'version': '1.1',
            },
        ],
        # builds
        [
            {
                'draft': False,
                'id': 575,
                'name': 'mypackage',
                'nvr': 'mypackage-1.1-36',
                'package_id': 370,
                'package_name': 'mypackage',
                'release': '36',
                'source': 'mypackage-1.1-36.src.rpm',
                'state': 1,
                'version': '1.1',
                'volume_id': 0,
                'volume_name': 'DEFAULT',
            },
        ]
    ]

    def test_repo_with_rpms(self):
        task_id = 100
        rpms, builds = self.DATA1
        self.readTaggedRPMS.return_value = rpms, builds
        kojihub.repo_init('test-tag', task_id)

        repodir = f'{self.tempdir}/repos/TAG/REPOID'
        expect = ['aarch64', 'groups', 'repo.json', 'x86_64']
        self.assertEqual(sorted(os.listdir(repodir)), expect)

        with open(f'{repodir}/repo.json', 'rt') as fo:
            info = json.load(fo)
        self.assertEqual(info['id'], 'REPOID')
        self.assertEqual(info['tag'], 'TAG')
        self.assertEqual(info['tag_id'], 137)
        self.assertEqual(info['task_id'], 100)
        self.assertEqual(info['event_id'], 'EVENTID')
        self.assertEqual(info['opts'], self.default_opts)
        self.assertEqual(info['custom_opts'], {})

        # basic comps check
        with open(f'{repodir}/groups/comps.xml', 'rt') as fo:
            root = ElementTree.fromstring(fo.read())

        for arch in ['x86_64', 'aarch64']:
            # contents
            expect = ['blocklist', 'pkglist', 'rpmlist.jsonl', 'toplink']
            self.assertEqual(sorted(os.listdir(f'{repodir}/{arch}')), expect)

            # check toplink
            if not os.path.samefile(f'{repodir}/{arch}/toplink', self.tempdir):
                raise Exception('invalid toplink')

            # blocklist should be blank
            with open(f'{repodir}/{arch}/blocklist', 'rt') as fo:
                self.assertEqual(fo.read(), '')

            # check rpm contents
            arch_rpms = [r for r in rpms if r['arch'] in ('noarch', arch)
                         and 'debug' not in r['name']]
            with open(f'{repodir}/{arch}/rpmlist.jsonl', 'rt') as fo:
                repo_rpms = [json.loads(line) for line in fo]
            self.assertEqual(repo_rpms, arch_rpms)

            with open(f'{repodir}/{arch}/pkglist', 'rt') as fo:
                lines = fo.readlines()
            self.assertEqual(len(lines), len(arch_rpms))

    def test_separate_source(self):
        task_id = 100
        rpms, builds = self.DATA1
        self.readTaggedRPMS.return_value = rpms, builds
        opts = dict(self.default_opts, separate_src=True)
        custom = {'separate_src': True}
        self.get_repo_opts.return_value = opts, custom
        kojihub.repo_init('test-tag', task_id)

        repodir = f'{self.tempdir}/repos/TAG/REPOID'
        expect = ['aarch64', 'groups', 'repo.json', 'src', 'x86_64']
        self.assertEqual(sorted(os.listdir(repodir)), expect)

        with open(f'{repodir}/repo.json', 'rt') as fo:
            info = json.load(fo)
        self.assertEqual(info['id'], 'REPOID')
        self.assertEqual(info['tag'], 'TAG')
        self.assertEqual(info['tag_id'], 137)
        self.assertEqual(info['task_id'], 100)
        self.assertEqual(info['event_id'], 'EVENTID')
        self.assertEqual(info['opts'], opts)
        self.assertEqual(info['custom_opts'], custom)

        # basic comps check
        with open(f'{repodir}/groups/comps.xml', 'rt') as fo:
            root = ElementTree.fromstring(fo.read())

        for arch in ['x86_64', 'aarch64', 'src']:
            # contents
            expect = ['blocklist', 'pkglist', 'rpmlist.jsonl', 'toplink']
            self.assertEqual(sorted(os.listdir(f'{repodir}/{arch}')), expect)

            # check toplink
            if not os.path.samefile(f'{repodir}/{arch}/toplink', self.tempdir):
                raise Exception('invalid toplink')

            # blocklist should be blank
            with open(f'{repodir}/{arch}/blocklist', 'rt') as fo:
                self.assertEqual(fo.read(), '')

            # check rpm contents
            # srpms go only into the src repo
            if arch == 'src':
                arch_rpms = [r for r in rpms if r['arch'] == 'src']
            else:
                arch_rpms = [r for r in rpms if r['arch'] in ('noarch', arch)
                             and 'debug' not in r['name']]
            with open(f'{repodir}/{arch}/rpmlist.jsonl', 'rt') as fo:
                repo_rpms = [json.loads(line) for line in fo]
|
||||
self.assertEqual(repo_rpms, arch_rpms)
|
||||
|
||||
with open(f'{repodir}/{arch}/pkglist', 'rt') as fo:
|
||||
lines = fo.readlines()
|
||||
self.assertEqual(len(lines), len(arch_rpms))
|
||||
|
||||
def test_with_src(self):
|
||||
task_id = 100
|
||||
rpms, builds = self.DATA1
|
||||
self.readTaggedRPMS.return_value = rpms, builds
|
||||
opts = dict(self.default_opts, src=True)
|
||||
custom = {'src': True}
|
||||
self.get_repo_opts.return_value = opts, custom
|
||||
kojihub.repo_init('test-tag', task_id)
|
||||
|
||||
repodir = f'{self.tempdir}/repos/TAG/REPOID'
|
||||
expect = ['aarch64', 'groups', 'repo.json', 'x86_64']
|
||||
self.assertEqual(sorted(os.listdir(repodir)), expect)
|
||||
|
||||
with open(f'{repodir}/repo.json', 'rt') as fo:
|
||||
info = json.load(fo)
|
||||
self.assertEqual(info['id'], 'REPOID')
|
||||
self.assertEqual(info['tag'], 'TAG')
|
||||
self.assertEqual(info['tag_id'], 137)
|
||||
self.assertEqual(info['task_id'], 100)
|
||||
self.assertEqual(info['event_id'], 'EVENTID')
|
||||
self.assertEqual(info['opts'], opts)
|
||||
self.assertEqual(info['custom_opts'], custom)
|
||||
|
||||
# basic comps check
|
||||
with open(f'{repodir}/groups/comps.xml', 'rt') as fo:
|
||||
root = ElementTree.fromstring(fo.read())
|
||||
|
||||
for arch in ['x86_64', 'aarch64']:
|
||||
# contents
|
||||
expect = ['blocklist', 'pkglist', 'rpmlist.jsonl', 'toplink']
|
||||
self.assertEqual(sorted(os.listdir(f'{repodir}/{arch}')), expect)
|
||||
|
||||
# check toplink
|
||||
if not os.path.samefile(f'{repodir}/{arch}/toplink', self.tempdir):
|
||||
raise Exception('invalid toplink')
|
||||
|
||||
# blocklist should be blank
|
||||
with open(f'{repodir}/{arch}/blocklist', 'rt') as fo:
|
||||
self.assertEqual(fo.read(), '')
|
||||
|
||||
# check rpm contents
|
||||
# all arch repos get noarch AND src
|
||||
arch_rpms = [r for r in rpms if r['arch'] in ('noarch', 'src', arch)
|
||||
and 'debug' not in r['name']]
|
||||
with open(f'{repodir}/{arch}/rpmlist.jsonl', 'rt') as fo:
|
||||
repo_rpms = [json.loads(line) for line in fo]
|
||||
self.assertEqual(repo_rpms, arch_rpms)
|
||||
|
||||
with open(f'{repodir}/{arch}/pkglist', 'rt') as fo:
|
||||
lines = fo.readlines()
|
||||
self.assertEqual(len(lines), len(arch_rpms))
|
||||
|
||||
def test_repo_with_debuginfo(self):
|
||||
task_id = 100
|
||||
rpms, builds = self.DATA1
|
||||
self.readTaggedRPMS.return_value = rpms, builds
|
||||
opts = dict(self.default_opts, debuginfo=True)
|
||||
custom = {'debuginfo': True}
|
||||
self.get_repo_opts.return_value = opts, custom
|
||||
kojihub.repo_init('test-tag', task_id)
|
||||
|
||||
repodir = f'{self.tempdir}/repos/TAG/REPOID'
|
||||
expect = ['aarch64', 'groups', 'repo.json', 'x86_64']
|
||||
self.assertEqual(sorted(os.listdir(repodir)), expect)
|
||||
|
||||
with open(f'{repodir}/repo.json', 'rt') as fo:
|
||||
info = json.load(fo)
|
||||
self.assertEqual(info['id'], 'REPOID')
|
||||
self.assertEqual(info['tag'], 'TAG')
|
||||
self.assertEqual(info['tag_id'], 137)
|
||||
self.assertEqual(info['task_id'], 100)
|
||||
self.assertEqual(info['event_id'], 'EVENTID')
|
||||
self.assertEqual(info['opts'], opts)
|
||||
self.assertEqual(info['custom_opts'], custom)
|
||||
|
||||
# basic comps check
|
||||
with open(f'{repodir}/groups/comps.xml', 'rt') as fo:
|
||||
root = ElementTree.fromstring(fo.read())
|
||||
|
||||
for arch in ['x86_64', 'aarch64']:
|
||||
# contents
|
||||
expect = ['blocklist', 'pkglist', 'rpmlist.jsonl', 'toplink']
|
||||
self.assertEqual(sorted(os.listdir(f'{repodir}/{arch}')), expect)
|
||||
|
||||
# check toplink
|
||||
if not os.path.samefile(f'{repodir}/{arch}/toplink', self.tempdir):
|
||||
raise Exception('invalid toplink')
|
||||
|
||||
# blocklist should be blank
|
||||
with open(f'{repodir}/{arch}/blocklist', 'rt') as fo:
|
||||
self.assertEqual(fo.read(), '')
|
||||
|
||||
# check rpm contents
|
||||
# debuginfo included
|
||||
arch_rpms = [r for r in rpms if r['arch'] in ('noarch', arch)]
|
||||
with open(f'{repodir}/{arch}/rpmlist.jsonl', 'rt') as fo:
|
||||
repo_rpms = [json.loads(line) for line in fo]
|
||||
self.assertEqual(repo_rpms, arch_rpms)
|
||||
|
||||
with open(f'{repodir}/{arch}/pkglist', 'rt') as fo:
|
||||
lines = fo.readlines()
|
||||
self.assertEqual(len(lines), len(arch_rpms))
|
||||
|
||||
    def test_taginfo_filtered_arches(self):
        # noarch and src in the tag arch list should be ignored
        self.taginfo['arches'] = 'x86_64 noarch src'
        task_id = 100
        self.readTaggedRPMS.return_value = [], []
        kojihub.repo_init('test-tag', task_id)

        repodir = f'{self.tempdir}/repos/TAG/REPOID'
        expect = ['groups', 'repo.json', 'x86_64']
        self.assertEqual(sorted(os.listdir(repodir)), expect)

    def test_blocklist(self):
        task_id = 100
        self.readTaggedRPMS.return_value = [], []
        blocked = [{'id': n, 'package_name': f'package-{n}', 'blocked': True} for n in range(20)]
        notblocked = [{'id': n, 'package_name': f'package-{n}', 'blocked': False}
                      for n in range(20, 30)]
        packages = {p['id']: p for p in blocked + notblocked}
        self.readPackageList.return_value = packages
        kojihub.repo_init('test-tag', task_id)

        repodir = f'{self.tempdir}/repos/TAG/REPOID'

        for arch in ['x86_64', 'aarch64']:
            # contents
            expect = ['blocklist', 'pkglist', 'rpmlist.jsonl', 'toplink']
            self.assertEqual(sorted(os.listdir(f'{repodir}/{arch}')), expect)

            # check blocklist
            expected = [p['package_name'] for p in blocked]
            with open(f'{repodir}/{arch}/blocklist', 'rt') as fo:
                self.assertEqual(fo.read().splitlines(), expected)

    def test_repo_at_event(self):
        task_id = 100
        self.readTaggedRPMS.return_value = [], []
        kojihub.repo_init('test-tag', task_id, event=101010)

        self.singleValue.assert_not_called()

        repodir = f'{self.tempdir}/repos/TAG/REPOID'
        expect = ['aarch64', 'groups', 'repo.json', 'x86_64']
        self.assertEqual(sorted(os.listdir(repodir)), expect)

        with open(f'{repodir}/repo.json', 'rt') as fo:
            info = json.load(fo)
        self.assertEqual(info['id'], 'REPOID')
        self.assertEqual(info['tag'], 'TAG')
        self.assertEqual(info['tag_id'], 137)
        self.assertEqual(info['task_id'], 100)
        self.assertEqual(info['event_id'], 101010)
        self.assertEqual(info['opts'], self.default_opts)
        self.assertEqual(info['custom_opts'], {})


# the end
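
A note on the expectations above: every per-arch assertion applies the same selection rule. A minimal standalone sketch of that rule, as these tests encode it (pick_arch_rpms is an illustrative helper, not part of the hub code):

    def pick_arch_rpms(rpms, arch, src=False, debuginfo=False, separate_src=False):
        """Select the rpms these tests expect in an arch subrepo."""
        if separate_src and arch == 'src':
            # with separate_src, the src subrepo holds only source rpms
            return [r for r in rpms if r['arch'] == 'src']
        # arch repos always get their own arch plus noarch; src only when requested
        arches = ('noarch', 'src', arch) if src else ('noarch', arch)
        picked = [r for r in rpms if r['arch'] in arches]
        if not debuginfo:
            # debuginfo rpms are filtered out by name unless requested
            picked = [r for r in picked if 'debug' not in r['name']]
        return picked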

1427 tests/test_hub/test_repo_requests.py Normal file
File diff suppressed because it is too large

@ -3,22 +3,25 @@ import mock
import unittest
import datetime

from koji.context import context

import koji
import kojihub
import kojihub.repos


QP = kojihub.QueryProcessor
IP = kojihub.InsertProcessor
UP = kojihub.UpdateProcessor
RQ = kojihub.repos.RepoQuery


class TestRepoFunctions(unittest.TestCase):

    def setUp(self):
        self.QueryProcessor = mock.patch('kojihub.kojihub.QueryProcessor',
                                         side_effect=self.getQuery).start()
        self.RepoQuery = mock.patch('kojihub.repos.RepoQuery',
                                    side_effect=self.getQuery).start()
        self.queries = []
        self.InsertProcessor = mock.patch('kojihub.kojihub.InsertProcessor',
        self.InsertProcessor = mock.patch('kojihub.InsertProcessor',
                                          side_effect=self.getInsert).start()
        self.inserts = []
        self.UpdateProcessor = mock.patch('kojihub.kojihub.UpdateProcessor',

@ -27,15 +30,16 @@ class TestRepoFunctions(unittest.TestCase):
        self._dml = mock.patch('kojihub.kojihub._dml').start()
        self.exports = kojihub.RootExports()
        self.get_tag = mock.patch('kojihub.kojihub.get_tag').start()
        self.get_tag_id = mock.patch('kojihub.kojihub.get_tag_id').start()
        self.query_executeOne = mock.MagicMock()
        self.context = mock.patch('kojihub.db.context').start()

    def tearDown(self):
        mock.patch.stopall()

    def getQuery(self, *args, **kwargs):
        query = QP(*args, **kwargs)
        query.execute = mock.MagicMock()
        query.executeOne = self.query_executeOne
        query = RQ(*args, **kwargs)
        #query.execute = mock.MagicMock()
        self.queries.append(query)
        return query

@ -81,44 +85,46 @@ class TestRepoFunctions(unittest.TestCase):
            raise Exception('Missing dist condition')

    def test_repo_info(self):
        repo_row = {'id': 10,
                    'state': 0,
                    'task_id': 15,
                    'create_event': 32,
                    'creation_time': datetime.datetime(2021, 3, 30, 12, 34, 5, 204023,
                                                       tzinfo=datetime.timezone.utc),
                    'create_ts': 1617107645.204023,
                    'tag_id': 3,
                    'tag_name': 'test-tag',
                    'dist': False}
        self.query_executeOne.return_value = repo_row
        rv = kojihub.repo_info(3)
        self.assertEqual(rv, repo_row)
        self.RepoQuery.assert_called_once()

    def test_get_repo_default(self):
        self.exports.getRepo(2)
        self.get_tag_id.return_value = 100

        self.exports.getRepo('TAG')

        self.RepoQuery.assert_called_once()
        qv = self.queries[0]
        self.assertEqual(len(self.queries), 1)
        query = self.queries[0]
        # make sure the following does not error
        str(query)
        self.assertEqual(query.tables, ['repo'])
        columns = ['repo.id', 'repo.state', 'repo.task_id', 'repo.create_event',
                   "date_part('epoch', events.time)", 'repo.dist', 'events.time']
        self.assertEqual(set(query.columns), set(columns))
        self.assertEqual(query.joins, ['events ON repo.create_event = events.id'])
        self.assertEqual(query.clauses, ['repo.dist is false', 'repo.state = %(state)s',
                                         'repo.tag_id = %(id)i'])
        self.assertEqual(qv.clauses,
                         [['tag_id', '=', 100], ['dist', 'IS', False], ['state', '=', 1]])

    def test_get_repo_with_dist_and_event(self):
        self.exports.getRepo(2, event=111, dist=True)
        self.get_tag_id.return_value = 100

        self.exports.getRepo('TAG', event=111, dist=True)

        self.RepoQuery.assert_called_once()
        qv = self.queries[0]
        self.assertEqual(len(self.queries), 1)
        query = self.queries[0]
        # make sure the following does not error
        str(query)
        self.assertEqual(query.tables, ['repo'])
        columns = ['repo.id', 'repo.state', 'repo.task_id', 'repo.create_event',
                   "date_part('epoch', events.time)", 'repo.dist', 'events.time']
        self.assertEqual(set(query.columns), set(columns))
        self.assertEqual(query.joins, ['events ON repo.create_event = events.id'])
        self.assertEqual(query.clauses, ['create_event <= %(event)i', 'repo.dist is true',
                                         'repo.tag_id = %(id)i'])
        self.assertEqual(qv.clauses,
                         [['tag_id', '=', 100],
                          ['dist', 'IS', True],
                          ['create_event', '<=', 111]])

    def test_get_repo_with_min_event(self):
        self.get_tag_id.return_value = 100

        self.exports.getRepo('TAG', min_event=101010)

        self.RepoQuery.assert_called_once()
        qv = self.queries[0]
        self.assertEqual(len(self.queries), 1)
        self.assertEqual(qv.clauses,
                         [['tag_id', '=', 100],
                          ['dist', 'IS', False],
                          ['state', '=', 1],
                          ['create_event', '>=', 101010]])


# the end

@ -52,6 +52,7 @@ class BaseTest(unittest.TestCase):

        self.get_task_refusals = mock.patch('kojihub.scheduler.get_task_refusals').start()
        self.get_task_runs = mock.patch('kojihub.scheduler.get_task_runs').start()
        self.check_repo_queue = mock.patch('kojihub.repos.check_repo_queue').start()

    def tearDown(self):
        mock.patch.stopall()

222 tests/test_hub/test_tag_last_change_event.py Normal file
@ -0,0 +1,222 @@
from __future__ import absolute_import
import mock
import os
import re
try:
    import unittest2 as unittest
except ImportError:
    import unittest

import koji
import kojihub


QP = kojihub.QueryProcessor


class TestTagChangeEvent(unittest.TestCase):

    def setUp(self):
        self.QueryProcessor = mock.patch('kojihub.kojihub.QueryProcessor',
                                         side_effect=self.get_query).start()
        self.queries = []
        self.singleValue = mock.MagicMock()
        self.get_tag_id = mock.patch('kojihub.kojihub.get_tag_id').start()
        self.get_tag = mock.patch('kojihub.kojihub.get_tag').start()
        self.readFullInheritance = mock.patch('kojihub.kojihub.readFullInheritance').start()
        self.get_tag_external_repos = mock.patch('kojihub.kojihub.get_tag_external_repos').start()
        self.get_tag_external_repos.return_value = []

    def tearDown(self):
        mock.patch.stopall()

    def get_query(self, *args, **kwargs):
        query = QP(*args, **kwargs)
        query.execute = mock.MagicMock()
        query.singleValue = self.singleValue
        self.queries.append(query)
        return query

    def test_tag_last_change_simple(self):
        tags = [5, 6, 7, 8, 17, 23, 42]
        self.get_tag.return_value = {'id': tags[0], 'revoke_event': None}
        self.readFullInheritance.return_value = [{'parent_id': n} for n in tags[1:]]
        erepos = [101, 201]
        self.get_tag_external_repos.return_value = [{'external_repo_id': n} for n in erepos]
        events = [8, 8, 8, 8, 8, 8, None, 8, 8, 42, 23, 23, 23, 23, 23, None, 23, 23, 23,
                  8, 8, 8, 8]  # len=23
        # called once for tag_updates, twice for versioned tag tables, and twice for erepo tables
        # 1 + 2*9 + 2*2 = 23
        self.singleValue.side_effect = events

        event = kojihub.tag_last_change_event('TAG')

        self.assertEqual(event, 42)  # max(events)
        self.assertEqual(len(self.queries), 23)
        self.readFullInheritance.assert_called_once_with(tags[0], event=None)
        for query in self.queries[:19]:  # tag queries
            self.assertEqual(query.clauses[0], 'tag_id IN %(tags)s')
            self.assertEqual(query.values['tags'], tags)
            # we didn't pass an event, so there should be no second clause
            self.assertEqual(len(query.clauses), 1)
        for query in self.queries[19:]:  # erepo queries
            self.assertEqual(query.clauses[0], 'external_repo_id IN %(repos)s')
            self.assertEqual(query.values['repos'], erepos)
            # we didn't pass an event, so there should be no second clause
            self.assertEqual(len(query.clauses), 1)

    def test_tag_last_change_noinherit(self):
        tags = [5, 6, 7, 8, 17, 23, 42]
        self.get_tag.return_value = {'id': tags[0], 'revoke_event': None}
        self.readFullInheritance.return_value = [{'parent_id': n} for n in tags[1:]]
        events = [8, 8, 8, 8, 8, 8, None, 8, 8, 42, 23, 23, 23, 23, 23, None, 23, 23, 23]  # len=19
        self.singleValue.side_effect = events

        event = kojihub.tag_last_change_event('TAG', inherit=False)

        self.assertEqual(event, 42)  # max(events)
        self.assertEqual(len(self.queries), 19)
        self.readFullInheritance.assert_not_called()
        for query in self.queries:
            self.assertEqual(query.clauses[0], 'tag_id IN %(tags)s')
            # only the tag itself should be in the query condition
            self.assertEqual(query.values['tags'], [tags[0]])
            # we didn't pass an event, so there should be no second clause
            self.assertEqual(len(query.clauses), 1)

    def test_tag_last_change_deleted(self):
        self.get_tag.return_value = {'id': 5, 'revoke_event': 9999}

        event = kojihub.tag_last_change_event('TAG')

        self.assertEqual(event, 9999)
        self.readFullInheritance.assert_not_called()
        self.get_tag_external_repos.assert_not_called()
        self.singleValue.assert_not_called()
        self.assertEqual(len(self.queries), 0)

    def test_tag_last_change_before(self):
        tags = [5, 6, 7, 8, 17, 23, 42]
        before = 123
        self.get_tag.return_value = {'id': tags[0], 'revoke_event': None}
        self.readFullInheritance.return_value = [{'parent_id': n} for n in tags[1:]]
        erepos = [101, 201]
        self.get_tag_external_repos.return_value = [{'external_repo_id': n} for n in erepos]
        events = [8, 8, 8, 8, 8, 8, None, 8, 8, 42, 23, 23, 23, 23, 23, None, 23, 23, 23,
                  8, 8, 8, 8]  # len=23
        # called once for tag_updates, twice for versioned tag tables, and twice for erepo tables
        # 1 + 2*9 + 2*2 = 23
        self.singleValue.side_effect = events

        event = kojihub.tag_last_change_event('TAG', before=before)

        self.assertEqual(event, 42)  # max(events)
        self.assertEqual(len(self.queries), 23)
        self.readFullInheritance.assert_called_once_with(tags[0], event=before)
        for query in self.queries[:19]:
            self.assertEqual(query.values['tags'], tags)
            self.assertEqual(query.values['before'], before)
            # QP sorts the clauses, so they are not in the order the code adds them
            self.assertIn('tag_id IN %(tags)s', query.clauses)
            self.assertEqual(len(query.clauses), 2)
        for query in self.queries[19:]:  # erepo queries
            self.assertIn('external_repo_id IN %(repos)s', query.clauses)
            self.assertEqual(query.values['repos'], erepos)
            self.assertEqual(query.values['before'], before)
            # we passed before, so there should be a second clause
            self.assertEqual(len(query.clauses), 2)

    def test_tag_first_change_simple(self):
        self.get_tag_id.return_value = 99
        events = [88]
        self.singleValue.side_effect = events

        event = kojihub.tag_first_change_event('TAG')

        self.assertEqual(event, 88)
        self.assertEqual(len(self.queries), 1)
        self.readFullInheritance.assert_not_called()
        # first query is for tag_config
        query = self.queries[0]
        self.assertEqual(query.tables, ['tag_config'])
        self.assertEqual(query.clauses[0], 'tag_id = %(tag_id)s')
        self.assertEqual(query.values['tag_id'], 99)
        self.assertEqual(len(query.clauses), 1)

    def test_tag_first_change_noinherit(self):
        self.get_tag_id.return_value = 99
        events = [88]
        self.singleValue.side_effect = events

        event = kojihub.tag_first_change_event('TAG', inherit=False)

        # with no after arg, we should only query tag_config
        self.assertEqual(event, 88)
        self.readFullInheritance.assert_not_called()
        self.assertEqual(len(self.queries), 1)
        query = self.queries[0]
        self.assertEqual(query.tables, ['tag_config'])
        self.assertEqual(query.clauses[0], 'tag_id = %(tag_id)s')
        # only the tag itself should be in the query condition
        self.assertEqual(query.values['tag_id'], 99)
        # we didn't pass an event, so there should be no second clause
        self.assertEqual(len(query.clauses), 1)

    def test_tag_first_change_after(self):
        tags = [5, 6, 7, 8, 17, 23, 42]
        after = 5
        self.get_tag_id.return_value = tags[0]
        self.readFullInheritance.return_value = [{'parent_id': n} for n in tags[1:]]
        erepos = [101, 201]
        self.get_tag_external_repos.return_value = [{'external_repo_id': n} for n in erepos]
        events = [8, 8, 8, 8, 8, 8, 8, None, 8, 8, 42, 23, 23, 23, 23, 23, None, 23, 23, 23,
                  8, 8, 8, 8]  # len=24
        self.assertEqual(len(events), 24)
        # called once for tag_config, once for tag_updates, twice for versioned tag tables,
        # and twice for erepo tables
        # 1 + 1 + 2*9 + 2*2 = 24
        self.singleValue.side_effect = events

        event = kojihub.tag_first_change_event('TAG', after=after)

        self.assertEqual(event, 8)  # min(events)
        self.assertEqual(len(self.queries), 24)
        self.readFullInheritance.assert_called_once_with(tags[0], event=after)
        for query in self.queries[1:20]:
            self.assertEqual(query.values['tags'], tags)
            self.assertEqual(query.values['after'], after)
            # QP sorts the clauses, so they are not in the order the code adds them
            self.assertIn('tag_id IN %(tags)s', query.clauses)
            self.assertEqual(len(query.clauses), 2)
        for query in self.queries[20:]:
            self.assertEqual(query.values['repos'], erepos)
            self.assertEqual(query.values['after'], after)
            # QP sorts the clauses, so they are not in the order the code adds them
            self.assertIn('external_repo_id IN %(repos)s', query.clauses)
            self.assertEqual(len(query.clauses), 2)

    def test_tag_first_change_after_noinherit(self):
        # without inheritance, we'll only query the tag itself
        tag_id = 999
        after = 5
        self.get_tag_id.return_value = tag_id
        events = [8, 8, 8, 8, 8, 8, 8, None, 8, 8, 42, 23, 23, 23, 23, 23, None, 23, 23, 23]
        self.assertEqual(len(events), 20)
        # called once for tag_config, once for tag_updates, and twice for versioned tag tables
        # 2 + 2*9 = 20
        self.singleValue.side_effect = events

        event = kojihub.tag_first_change_event('TAG', after=after, inherit=False)

        self.assertEqual(event, 8)  # min(events)
        self.assertEqual(len(self.queries), 20)
        self.readFullInheritance.assert_not_called()
        for query in self.queries[1:]:
            self.assertEqual(query.values['tags'], [tag_id])
            self.assertEqual(query.values['after'], after)
            # QP sorts the clauses, so they are not in the order the code adds them
            self.assertIn('tag_id IN %(tags)s', query.clauses)
            self.assertEqual(len(query.clauses), 2)


# the end
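
These tests constrain the aggregation rather than the SQL: one singleValue query per table, None results skipped, and the extreme value returned. A minimal sketch of that shape, with query_event standing in for the per-table query (it is not the real hub helper):

    def last_change(query_event, tables, tags, before=None):
        # one singleValue-style query per table; None means no matching rows
        events = [query_event(table, tags, before=before) for table in tables]
        events = [e for e in events if e is not None]
        return max(events) if events else None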

69 tests/test_kojira/data/external-repomd.xml Normal file
@ -0,0 +1,69 @@
<?xml version="1.0" encoding="UTF-8"?>
<repomd xmlns="http://linux.duke.edu/metadata/repo" xmlns:rpm="http://linux.duke.edu/metadata/rpm">
  <revision>1711390493</revision>
  <data type="primary">
    <checksum type="sha256">2d64a128e2b0f527c4ede4203e4c6bfd111a89d4c6cb72d5f503c33759a76075</checksum>
    <open-checksum type="sha256">53cd17e486fd5ddc4b94c41d17492496f39b0f71a1fbd80974e4c72dbe96499c</open-checksum>
    <location href="repodata/2d64a128e2b0f527c4ede4203e4c6bfd111a89d4c6cb72d5f503c33759a76075-primary.xml.gz"/>
    <timestamp>1711390493</timestamp>
    <size>224483</size>
    <open-size>1699242</open-size>
  </data>
  <data type="filelists">
    <checksum type="sha256">f5fd4083abd9c497faddc53093ef596ca50bf9419b523d712762dc6157fc8d5b</checksum>
    <open-checksum type="sha256">dc0e5bfcb4fe2fdf4573f4b3d306dbf3e0c72ef8711961af12f3212585aba0c3</open-checksum>
    <location href="repodata/f5fd4083abd9c497faddc53093ef596ca50bf9419b523d712762dc6157fc8d5b-filelists.xml.gz"/>
    <timestamp>1711390493</timestamp>
    <size>436625</size>
    <open-size>5189688</open-size>
  </data>
  <data type="other">
    <checksum type="sha256">badfeedb0c48fa9f638c5bb64909103b3eafacfd19a9bfc6edb2ed6438c9e029</checksum>
    <open-checksum type="sha256">8f2b3e4179d1c7dde4d06fcdf0e3d287345c65bf410530ee0f55b9a03f1038d8</open-checksum>
    <location href="repodata/badfeedb0c48fa9f638c5bb64909103b3eafacfd19a9bfc6edb2ed6438c9e029-other.xml.gz"/>
    <timestamp>1711390493</timestamp>
    <size>266251</size>
    <open-size>2205259</open-size>
  </data>
  <data type="primary_db">
    <checksum type="sha256">20f4cb13be2c8aa1b1c4c612e6480559c42f835b9683a5fb0f405167533a460e</checksum>
    <open-checksum type="sha256">1700092e52a4c972d684ab8e9a5d7551ed37176c41ac2a20dc77f5576e1d5198</open-checksum>
    <location href="repodata/20f4cb13be2c8aa1b1c4c612e6480559c42f835b9683a5fb0f405167533a460e-primary.sqlite.bz2"/>
    <timestamp>1711390493</timestamp>
    <size>438912</size>
    <open-size>1773568</open-size>
    <database_version>10</database_version>
  </data>
  <data type="filelists_db">
    <checksum type="sha256">2d1ce3b2dd348c484e59cd27a378c8374d5df0e82e332a5899e38e257f219ef5</checksum>
    <open-checksum type="sha256">a94ffb9f906e54b1092d69f2797cdabd52085605f29fa8740e96f1e7cc5952c8</open-checksum>
    <location href="repodata/2d1ce3b2dd348c484e59cd27a378c8374d5df0e82e332a5899e38e257f219ef5-filelists.sqlite.bz2"/>
    <timestamp>1711390493</timestamp>
    <size>501200</size>
    <open-size>2539520</open-size>
    <database_version>10</database_version>
  </data>
  <data type="other_db">
    <checksum type="sha256">0d3b0537fc40cde08cf37cf8cf83a59ab1fab43c27cf5f9b5f8b22ea3ba93020</checksum>
    <open-checksum type="sha256">09e30f1bf9213e629b73a0893ce6172d6e5b8d791d1e7b5e53b684fa44ddc6aa</open-checksum>
    <location href="repodata/0d3b0537fc40cde08cf37cf8cf83a59ab1fab43c27cf5f9b5f8b22ea3ba93020-other.sqlite.bz2"/>
    <timestamp>1711390493</timestamp>
    <size>349440</size>
    <open-size>2142208</open-size>
    <database_version>10</database_version>
  </data>
  <data type="group">
    <checksum type="sha256">2310cdad2843ead6ea646ce091f0f73de5898fcb303acbe20fc26364dbf550fe</checksum>
    <location href="repodata/2310cdad2843ead6ea646ce091f0f73de5898fcb303acbe20fc26364dbf550fe-comps.xml"/>
    <timestamp>1711390492</timestamp>
    <size>5048</size>
  </data>
  <data type="group_gz">
    <checksum type="sha256">f913cb539babfcbe9611f306b2b9b8fe270fe103780ff3f2f64b59cd445ab425</checksum>
    <open-checksum type="sha256">2310cdad2843ead6ea646ce091f0f73de5898fcb303acbe20fc26364dbf550fe</open-checksum>
    <location href="repodata/f913cb539babfcbe9611f306b2b9b8fe270fe103780ff3f2f64b59cd445ab425-comps.xml.gz"/>
    <timestamp>1711390493</timestamp>
    <size>651</size>
    <open-size>5048</open-size>
  </data>
</repomd>

129 tests/test_kojira/test_managed_repo.py Normal file
@ -0,0 +1,129 @@
from __future__ import absolute_import
import json
import mock
import os.path
import shutil
import tempfile
import time
import unittest

import koji

from . import loadkojira
kojira = loadkojira.kojira


class OurException(Exception):
    pass


class ManagedRepoTest(unittest.TestCase):

    def setUp(self):
        self.workdir = tempfile.mkdtemp()
        self.kojidir = self.workdir + '/koji'
        os.mkdir(self.kojidir)
        self.pathinfo = koji.PathInfo(self.kojidir)
        mock.patch.object(kojira, 'pathinfo', new=self.pathinfo, create=True).start()

        self.session = mock.MagicMock()
        self.options = mock.MagicMock()
        self.mgr = mock.MagicMock()
        self.mgr.options = self.options
        self.mgr.session = self.session
        self.unlink = mock.patch('os.unlink').start()
        self.data = {
            'create_event': 497359,
            'create_ts': 1709791593.368943,
            'creation_ts': 1709791593.367575,
            'dist': False,
            'end_event': None,
            'id': 2385,
            'opts': {'debuginfo': False, 'separate_src': False, 'src': False},
            'state': 1,
            'state_ts': 1710705227.166751,
            'tag_id': 50,
            'tag_name': 'some-tag',
            'task_id': 13290,
        }
        self.repo = self.mkrepo(self.data)

    def mkrepo(self, data):
        repodir = self.kojidir + ('/repos/%(tag_name)s/%(id)s' % self.data)
        os.makedirs(repodir)
        with open('%s/repo.json' % repodir, 'wt', encoding='utf-8') as fp:
            # technically not quite the right data, but close enough
            json.dump(data, fp, indent=2)
        for arch in ('x86_64', 'aarch64'):
            os.mkdir(repodir + '/' + arch)
        repo = kojira.ManagedRepo(self.mgr, data.copy())
        return repo

    def tearDown(self):
        mock.patch.stopall()
        shutil.rmtree(self.workdir)

    def test_get_info(self):
        info = self.repo.get_info()
        self.assertEqual(info, self.data)

    def test_get_path(self):
        path = self.repo.get_path()
        repodir = self.kojidir + ('/repos/%(tag_name)s/%(id)s' % self.repo.data)
        self.assertEqual(path, repodir)

    def test_delete_check(self):
        self.options.expired_repo_lifetime = 3600 * 24
        self.options.reference_recheck_period = 3600
        base_ts = 444888888
        now = base_ts + 100

        self.repo.data['state'] = koji.REPO_EXPIRED
        self.repo.data['state_ts'] = base_ts

        with mock.patch('time.time') as _time:
            _time.return_value = now
            self.repo.delete_check()

        # we should have stopped at the age check
        self.session.repo.references.assert_not_called()
        self.mgr.rmtree.assert_not_called()
        path = self.repo.get_path()
        if not os.path.exists(path):
            raise Exception('Missing directory: %s' % path)

        # try again with later time but also references
        now += self.options.expired_repo_lifetime
        self.session.repo.references.return_value = ['REF1', 'REF2']
        with mock.patch('time.time') as _time:
            _time.return_value = now
            self.repo.delete_check()

        self.mgr.rmtree.assert_not_called()
        path = self.repo.get_path()
        if not os.path.exists(path):
            raise Exception('Missing directory: %s' % path)

        self.session.reset_mock()

        # no refs, but same time as last check
        # (now unchanged)
        self.session.repo.references.return_value = []
        with mock.patch('time.time') as _time:
            _time.return_value = now
            self.repo.delete_check()

        # we should have stopped at the recheck_period check
        self.session.repo.references.assert_not_called()
        self.mgr.rmtree.assert_not_called()

        # finally, let's check again with no refs
        now += self.options.reference_recheck_period
        with mock.patch('time.time') as _time:
            _time.return_value = now
            self.repo.delete_check()

        self.session.repo.setState.assert_called_once_with(self.repo.id, koji.REPO_DELETED)
        self.mgr.rmtree.assert_called_once_with(path)

# the end
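
The call sequence above implies a fixed gating order in delete_check: age first, then the recheck throttle, then the hub reference query, and only then deletion. A rough sketch of that order under those assumptions (path_for and last_ref_check are illustrative names, not kojira attributes):

    def delete_check_sketch(repo, session, mgr, options, now, last_ref_check):
        if now - repo['state_ts'] < options.expired_repo_lifetime:
            return  # too young: stop before querying references
        if now - last_ref_check < options.reference_recheck_period:
            return  # rechecked recently: don't hammer the hub
        if session.repo.references(repo['id']):
            return  # still referenced: keep the repo on disk
        session.repo.setState(repo['id'], koji.REPO_DELETED)  # assumes koji is imported
        mgr.rmtree(path_for(repo))  # path_for() stands in for get_path()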
@ -1,5 +1,8 @@
from __future__ import absolute_import
import mock
import os.path
import shutil
import tempfile
import time
import unittest

@ -19,71 +22,24 @@ class RepoManagerTest(unittest.TestCase):
        self.session = mock.MagicMock()
        self.options = mock.MagicMock()
        self.mgr = kojira.RepoManager(self.options, self.session)
        self.rmtree = mock.patch('koji.util.rmtree').start()
        # also mock in kojira namespace
        mock.patch.object(kojira, 'rmtree', new=self.rmtree).start()
        self.workdir = tempfile.mkdtemp()
        self.kill = mock.patch('os.kill').start()
        self.fork = mock.patch('os.fork').start()
        self.unlink = mock.patch('os.unlink').start()
        self.waitpid = mock.patch('os.waitpid', new=self.my_waitpid).start()
        # kojira defines global pathinfo in start block
        self.pathinfo = mock.patch.object(kojira, 'pathinfo', create=True).start()

    def tearDown(self):
        mock.patch.stopall()
        shutil.rmtree(self.workdir)

    def test_check_tasks_none(self):
        self.mgr.tasks = {}
        self.mgr.other_tasks = {}
        self.session.listTasks.return_value = []

        self.mgr.checkTasks()

        self.session.getTaskInfo.assert_not_called()
        self.assertEqual(self.mgr.tasks, {})
        self.assertEqual(self.mgr.other_tasks, {})

    def test_check_tasks_other(self):
        self.mgr.tasks = {}
        self.mgr.other_tasks = {}
        self.session.listTasks.return_value = [
            {
                'id': 1294,
                'method': 'newRepo',
                'state': 0,
                'waiting': None,
            },
        ]
        self.mgr.logger = mock.MagicMock()
        self.mgr.checkTasks()

        self.session.getTaskInfo.assert_not_called()
        self.assertEqual(self.mgr.tasks, {})
        self.assertEqual(len(self.mgr.other_tasks), 1)
        self.mgr.logger.info.assert_called_once()

        # the extra task should not be logged if we run again
        self.mgr.logger.reset_mock()
        self.mgr.checkTasks()
        self.mgr.logger.info.assert_not_called()

    def test_check_tasks_ours(self):
        tasks = [
            {'id': 101, 'state': koji.TASK_STATES['FREE']},
            {'id': 102, 'state': koji.TASK_STATES['OPEN']},
            {'id': 103, 'state': koji.TASK_STATES['CLOSED']},
            {'id': 104, 'state': koji.TASK_STATES['CANCELED']},
            {'id': 105, 'state': koji.TASK_STATES['FAILED']},
        ]
        task_idx = dict([(t['id'], t) for t in tasks])
        order = []

        def getTaskInfo(task_id):
            # record the order of calls in multicall
            order.append(task_id)

        def multiCall(strict):
            return [[task_idx[tid]] for tid in order]

        self.session.getTaskInfo.side_effect = getTaskInfo
        self.session.multiCall.side_effect = multiCall
        self.mgr.tasks = dict([
            (t['id'], {'taskinfo': t, 'tag_id': 'TAG'})
            for t in tasks])
        self.mgr.other_tasks = {}
        self.session.listTasks.return_value = []

        self.mgr.checkTasks()
        # should have removed the closed tasks
        self.assertEqual(list(self.mgr.tasks.keys()), [101, 102])

    def my_waitpid(self, pid, *a):
        # by default, report all processes exit normally
        return pid, 0

    @mock.patch('time.sleep')
    def test_regen_loop(self, sleep):

@ -98,72 +54,202 @@ class RepoManagerTest(unittest.TestCase):
        self.assertEqual(self.mgr.regenRepos.call_count, 11)
        subsession.logout.assert_called_once()

    def test_set_tag_score(self):
        self.mgr.tagUseStats = mock.MagicMock()
        self.mgr.tagUseStats.return_value = {
            'n_recent': 5
        }
        self.mgr.needed_tags = {}
        entry = {
            'taginfo': {
                'id': 'TAGID',
                'name': 'TAGNAME',
            },
            'expire_ts': time.time() - 300
        }
        self.mgr.setTagScore(entry)
        score = entry['score']
        if score < 0.0:
            raise Exception('score too low')
    @mock.patch('time.sleep')
    def test_rmtree_loop(self, sleep):
        subsession = mock.MagicMock()
        self.mgr.checkQueue = mock.MagicMock()
        self.mgr.checkQueue.side_effect = [None] * 10 + [OurException()]
        # we need the exception to terminate the infinite loop

        _entry = entry.copy()
        _entry['expire_ts'] -= 300
        self.mgr.setTagScore(_entry)
        if score > entry['score']:
            raise Exception('score should have increased')
        with self.assertRaises(OurException):
            self.mgr.rmtreeLoop(subsession)

        self.mgr.tagUseStats.return_value = {
            'n_recent': 10
            # higher than before
        }
        self.mgr.setTagScore(entry)
        if score > entry['score']:
            raise Exception('score should have increased')
        self.assertEqual(self.mgr.checkQueue.call_count, 11)
        subsession.logout.assert_called_once()

    def test_check_needed(self):
        self.options.no_repo_effective_age = 999
        self.session.getBuildTargets.return_value = [
            {'build_tag': 1, 'build_tag_name': 'tag 1'},
            {'build_tag': 2, 'build_tag_name': 'tag 2'},
            {'build_tag': 3, 'build_tag_name': 'tag 3'},
        ]
        # make two repo entries
        repo1 = mock.MagicMock()
        repo1.tag_id = 1
        repo1.current = True
        repo2 = mock.MagicMock()
        repo2.tag_id = 2
        repo2.current = False
        repo2.pending.return_value = True
        self.mgr.repos = {1: repo1, 2: repo2}
    @mock.patch('time.sleep')
    def test_currency_loop(self, sleep):
        subsession = mock.MagicMock()
        subsession.repo.updateEndEvents.side_effect = [None] * 10 + [OurException()]
        # we need the exception to terminate the infinite loop

        # more mocks
        def my_get_tag(tag_id):
            return {'id': tag_id, 'name': 'TAG %i' % tag_id}
        self.session.getTag.side_effect = my_get_tag
        self.mgr.logger = mock.MagicMock()
        self.mgr.setTagScore = mock.MagicMock()
        with self.assertRaises(OurException):
            self.mgr.currencyChecker(subsession)

        with mock.patch('time.time') as mytime:
            mytime.side_effect = [1000, 1100]
            self.mgr.checkNeeded()
        self.assertEqual(subsession.repo.updateEndEvents.call_count, 11)
        subsession.logout.assert_called_once()

        # only the third tag should show up as needed
        expected = {3:
                    {'expire_ts': 1,  # time minus effective age
                     'needed_since': 1100,
                     'taginfo': {
                         'id': 3,
                         'name': 'TAG 3'
                     }}}
        self.assertEqual(self.mgr.needed_tags, expected)
    @mock.patch('time.sleep')
    def test_external_loop(self, sleep):
        subsession = mock.MagicMock()
        self.mgr.checkExternalRepos = mock.MagicMock()
        self.mgr.checkExternalRepos.side_effect = [None] * 10 + [OurException()]
        # we need the exception to terminate the infinite loop

        with self.assertRaises(OurException):
            self.mgr.currencyExternalChecker(subsession)

        self.assertEqual(self.mgr.checkExternalRepos.call_count, 11)
        subsession.logout.assert_called_once()

    def test_rmtree(self):
        subsession = mock.MagicMock()
        dir1 = self.workdir + '/one'
        dir2 = self.workdir + '/two'
        self.assertEqual(list(self.mgr.delete_queue), [])

        # add a dir to the queue
        self.mgr.rmtree(dir1)
        self.assertEqual(list(self.mgr.delete_queue), [dir1])

        # duplicate should be ignored
        self.mgr.rmtree(dir1)
        self.assertEqual(list(self.mgr.delete_queue), [dir1])

        # new entry should appear in correct order
        self.mgr.rmtree(dir2)
        self.assertEqual(list(self.mgr.delete_queue), [dir1, dir2])

    def test_check_queue(self):
        self.options.max_delete_processes = 3
        nums = range(1, 11)  # 1 to 10
        # avoiding n=0 because we use it as a fake pid

        # queue up some deletes
        dirs = [self.workdir + '/dir_%02i' % n for n in nums]
        for d in dirs:
            self.mgr.rmtree(d)
        check = mock.MagicMock()
        self.rmtree.side_effect = [(n, check) for n in nums]
        # fake pids match dir number
        self.assertEqual(list(self.mgr.delete_queue), dirs)

        # first pass
        self.mgr.checkQueue()
        self.assertEqual(list(self.mgr.delete_queue), dirs[3:])
        self.assertEqual(set(self.mgr.delete_pids), set([1, 2, 3]))

        # second pass
        self.mgr.checkQueue()
        self.assertEqual(list(self.mgr.delete_queue), dirs[6:])
        self.assertEqual(set(self.mgr.delete_pids), set([4, 5, 6]))

        # third pass
        self.mgr.checkQueue()
        self.assertEqual(list(self.mgr.delete_queue), dirs[9:])
        self.assertEqual(set(self.mgr.delete_pids), set([7, 8, 9]))

        # fourth pass
        self.mgr.checkQueue()
        self.assertEqual(list(self.mgr.delete_queue), [])
        self.assertEqual(set(self.mgr.delete_pids), set([10]))

        # last pass
        self.mgr.checkQueue()
        self.assertEqual(list(self.mgr.delete_queue), [])
        self.assertEqual(set(self.mgr.delete_pids), set([]))

    def test_read_current(self):
        self.assertEqual(set(self.mgr.repos), set())

        # fake repo data
        data = {'create_event': 100, 'create_ts': 101010, 'tag_id': 999, 'state': 1,
                'dist': False, 'tag_name': 'TAG'}
        repo_ids = range(1000, 1015)
        repos = [dict(id=n, **data) for n in repo_ids]

        # pass 1
        self.session.repo.query.return_value = repos
        self.mgr.readCurrentRepos()

        self.assertEqual(set(self.mgr.repos), set([r['id'] for r in repos]))

        # pass 2 - no new repos
        self.mgr.readCurrentRepos()
        self.assertEqual(set(self.mgr.repos), set([r['id'] for r in repos]))

        # pass 3 - repo changes state
        repos[0] = repos[0].copy()  # don't change the data in mgr.repos
        repos[0]['state'] = 2  # expired
        repo_id = repos[0]['id']
        self.mgr.readCurrentRepos()
        self.assertEqual(set(self.mgr.repos), set([r['id'] for r in repos]))
        self.assertEqual(self.mgr.repos[repo_id].state, 2)
        self.assertEqual(self.mgr.repos[repo_id].data['state'], 2)

        # pass 4 - repo disappears from hub
        repos.pop(0)
        self.mgr.readCurrentRepos()
        self.assertEqual(set(self.mgr.repos), set([r['id'] for r in repos]))

    # using autospec so we can grab self from mock_calls
    @mock.patch.object(kojira.ManagedRepo, 'delete_check', autospec=True)
    def test_update_repos(self, delete_check):
        self.options.init_timeout = 3600
        self.options.repo_lifetime = 3600 * 24
        self.options.dist_repo_lifetime = 3600 * 24

        base_ts = 444888888

        # fake repo data
        data = {'tag_id': 999, 'state': koji.REPO_READY, 'tag_name': 'TAG', 'dist': False,
                'create_event': 100, 'end_event': 200, 'opts': {}, 'custom_opts': {},
                'state_ts': base_ts, 'creation_ts': base_ts}
        repo_ids = range(1000, 1015)
        repos = [dict(id=n, **data) for n in repo_ids]
        # make one old enough to expire
        repos[0]['state_ts'] = base_ts - self.options.repo_lifetime
        # make one stale
        repos[1]['state'] = koji.REPO_INIT
        repos[1]['creation_ts'] = base_ts - self.options.init_timeout
        # make one expired
        repos[2]['state'] = koji.REPO_EXPIRED

        # do the run
        self.session.repo.query.return_value = repos
        with mock.patch('time.time') as _time:
            _time.return_value = base_ts + 100  # shorter than all timeouts
            self.mgr.updateRepos()

        # confirm the expiration
        repo_id = repos[0]['id']
        self.session.repoExpire.assert_called_once_with(repo_id)
        self.assertEqual(self.mgr.repos[repo_id].state, koji.REPO_EXPIRED)
        self.assertEqual(self.mgr.repos[repo_id].data['state'], koji.REPO_EXPIRED)

        # confirm action on the stale repo
        repo_id = repos[1]['id']
        self.session.repoProblem.assert_called_once_with(repo_id)
        self.assertEqual(self.mgr.repos[repo_id].state, koji.REPO_PROBLEM)
        self.assertEqual(self.mgr.repos[repo_id].data['state'], koji.REPO_PROBLEM)

        # only repo 2 should have been checked for deletion
        repo_id = repos[2]['id']
        delete_check.assert_called_once()
        mrepo = delete_check.mock_calls[0][1][0]  # self arg
        self.assertEqual(mrepo.repo_id, repo_id)

    @mock.patch('requests.get')
    def test_check_external(self, get):
        # fake ext repo data
        repo1 = {'external_repo_id': 1, 'external_repo_name': 'myrepo',
                 'url': 'https://localhost/NOSUCHPATH'}
        repo2 = {'external_repo_id': 2, 'external_repo_name': 'myotherrepo',
                 'url': 'https://localhost/FAKEPATH/$arch'}
        self.session.getTagExternalRepos.return_value = [repo1, repo2]
        data1 = {}
        data2 = {}
        self.session.repo.getExternalRepoData.side_effect = [data1, data2]
        self.session.getAllArches.return_value = ['i386', 'x86_64', 'riscv']
        repomd_fn = os.path.dirname(__file__) + '/data/external-repomd.xml'
        with open(repomd_fn, 'rt') as fo:
            repomd = fo.read()
        get.return_value.text = repomd

        self.mgr.checkExternalRepos()
        self.session.repo.setExternalRepoData.assert_has_calls([
            mock.call(1, {'max_ts': 1711390493}),
            mock.call(2, {'max_ts': 1711390493}),
        ])

# the end
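
The max_ts asserted here is simply the largest <timestamp> in the external-repomd.xml fixture (1711390493). A minimal sketch of that scrape, assuming the checker just takes the maximum over all data entries (the function name is illustrative, not kojira's code):

    from xml.etree import ElementTree

    def repomd_max_ts(repomd_text):
        ns = '{http://linux.duke.edu/metadata/repo}'
        root = ElementTree.fromstring(repomd_text)
        stamps = [int(e.text) for e in root.iter(ns + 'timestamp')]
        return max(stamps) if stamps else None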

232 tests/test_lib/test_repowatcher.py Normal file
@ -0,0 +1,232 @@
|
|||
import mock
|
||||
|
||||
import unittest
|
||||
|
||||
import koji.tasks
|
||||
from koji.util import RepoWatcher
|
||||
|
||||
|
||||
class TestRepoWatcher(unittest.TestCase):
|
||||
|
||||
TAG = {'id': 137, 'name': 'MY-TAG'}
|
||||
|
||||
def setUp(self):
|
||||
self.session = mock.MagicMock()
|
||||
self.checkForBuilds = mock.patch('koji.util.checkForBuilds').start()
|
||||
self.session.getTag.return_value = self.TAG
|
||||
self.sleep = mock.patch('time.sleep').start()
|
||||
|
||||
def tearDown(self):
|
||||
mock.patch.stopall()
|
||||
|
||||
def test_getRepo_ready(self):
|
||||
repoinfo = {'id': 123, 'tag_id': self.TAG['id']}
|
||||
self.session.repo.request.return_value = {'repo': repoinfo}
|
||||
watcher = RepoWatcher(self.session, 'TAG')
|
||||
result = watcher.getRepo()
|
||||
self.assertEqual(result, repoinfo)
|
||||
|
||||
def test_getRepo_request(self):
|
||||
self.session.repo.request.return_value = {'repo': None, 'request': {'id': 999}}
|
||||
watcher = RepoWatcher(self.session, 'TAG')
|
||||
result = watcher.getRepo()
|
||||
self.assertEqual(result, None)
|
||||
|
||||
def test_getRepo_builds_missing(self):
|
||||
self.session.repo.request.return_value = {'repo': None, 'request': {'id': 999}}
|
||||
self.checkForBuilds.return_value = False
|
||||
watcher = RepoWatcher(self.session, 'TAG', nvrs=['package-1.2-34'])
|
||||
result = watcher.getRepo()
|
||||
self.assertEqual(result, None)
|
||||
self.checkForBuilds.assert_called_once()
|
||||
|
||||
def test_waitrepo_request_gives_repo(self):
|
||||
repoinfo = {'id': 123, 'tag_id': self.TAG['id']}
|
||||
self.session.repo.get.return_value = None
|
||||
self.session.repo.request.return_value = {'repo': repoinfo}
|
||||
watcher = RepoWatcher(self.session, 'TAG')
|
||||
result = watcher.waitrepo()
|
||||
self.assertEqual(result, repoinfo)
|
||||
|
||||
def test_waitrepo_request_wait(self):
|
||||
repoinfo = {'id': 123, 'tag_id': self.TAG['id']}
|
||||
req = {'id': 999, 'min_event': 10001, 'task_id': 'TASK', 'task_state': 0, 'repo_id': None,
|
||||
'active': True, 'tries': 1}
|
||||
self.session.repo.get.return_value = None
|
||||
check = {'repo': None, 'request': req}
|
||||
req2 = req.copy()
|
||||
req2['task_state'] = 1
|
||||
check2 = {'repo': None, 'request': req2}
|
||||
self.session.repo.request.return_value = check
|
||||
done = {'repo': repoinfo, 'request': req}
|
||||
self.session.repo.checkRequest.side_effect = [check, check, check2, done]
|
||||
watcher = RepoWatcher(self.session, 'TAG')
|
||||
result = watcher.waitrepo()
|
||||
self.assertEqual(result, repoinfo)
|
||||
|
||||
def test_waitrepo_anon_wait(self):
|
||||
repoinfo = {'id': 123, 'tag_id': self.TAG['id']}
|
||||
self.session.repo.get.side_effect = [None] * 5 + [repoinfo]
|
||||
watcher = RepoWatcher(self.session, 'TAG')
|
||||
result = watcher.waitrepo(anon=True)
|
||||
self.assertEqual(result, repoinfo)
|
||||
self.session.repo.request.assert_not_called()
|
||||
|
||||
def test_waitrepo_request_timeout(self):
|
||||
req = {'id': 999, 'min_event': 10001, 'task_id': 'TASK', 'task_state': 0, 'repo_id': None,
|
||||
'active': True, 'tries': 1}
|
||||
self.session.repo.get.return_value = None
|
||||
check = {'repo': None, 'request': req}
|
||||
self.session.repo.request.return_value = check
|
||||
self.session.repo.checkRequest.side_effect = [check] * 20
|
||||
watcher = RepoWatcher(self.session, 'TAG')
|
||||
watcher.check_timeout = mock.MagicMock()
|
||||
watcher.check_timeout.side_effect = [False] * 10 + [True]
|
||||
with self.assertRaises(koji.GenericError) as err:
|
||||
watcher.waitrepo()
|
||||
|
||||
def test_taskargs(self):
|
||||
watcher = RepoWatcher(self.session, 'TAG')
|
||||
args = watcher.task_args()
|
||||
params = koji.tasks.parse_task_params('waitrepo', args)
|
||||
|
||||
def test_waitrepo_build_wait(self):
|
||||
self.session.repo.get.return_value = None
|
||||
# we'll pass with nvrs, so we should wait for builds before making request
|
||||
nvrs = ['package-1.2-34']
|
||||
builds = [{'name': 'package', 'version': '1.2', 'release': '34', 'epoch': ''}]
|
||||
self.session.tagLastChangeEvent.return_value = 10000
|
||||
|
||||
def got_builds():
|
||||
# called when we start reporting the builds in the tag
|
||||
self.session.repo.request.assert_not_called()
|
||||
self.session.tagLastChangeEvent.return_value = 10002
|
||||
return True
|
||||
|
||||
self.checkForBuilds.side_effect = [False, False, False, got_builds, True]
|
||||
# once we report the build, checkForBuilds should be called just once more to verify the repo
|
||||
|
||||
req = {'id': 999, 'min_event': 10000, 'task_id': 'TASK', 'task_state': 0, 'repo_id': None,
|
||||
'active': True, 'tries': 1}
|
||||
check = {'repo': None, 'request': req}
|
||||
self.session.repo.request.return_value = check
|
||||
|
||||
repoinfo = {'id': 123, 'tag_id': self.TAG['id'], 'create_event': 10002}
|
||||
done = {'repo': repoinfo, 'request': req}
|
||||
self.session.repo.checkRequest.side_effect = [check, check, check, done]
|
||||
|
||||
watcher = RepoWatcher(self.session, 'TAG', nvrs=nvrs)
|
||||
result = watcher.waitrepo()
|
||||
|
||||
self.assertEqual(result, repoinfo)
|
||||
# checkForBuilds is called several times, the event arg can vary, but the others should not
|
||||
for call in self.checkForBuilds.mock_calls:
|
||||
# name, args, kwargs
|
||||
# session, tag, builds, event, latest
|
||||
self.assertEqual(call[1][0], self.session)
|
||||
self.assertEqual(call[1][1], self.TAG['id'])
|
||||
self.assertEqual(call[1][2], builds)
|
||||
|
||||
def test_waitrepo_build_timeout(self):
|
||||
self.session.repo.get.return_value = None
|
||||
nvrs = ['package-1.2-34']
|
||||
# just keep reporting that the build is not there
|
||||
self.checkForBuilds.side_effect = [False] * 20
|
||||
|
||||
watcher = RepoWatcher(self.session, 'TAG', nvrs=nvrs)
|
||||
watcher.check_timeout = mock.MagicMock()
|
||||
watcher.check_timeout.side_effect = [False] * 10 + [True]
|
||||
with self.assertRaises(koji.GenericError) as err:
|
||||
watcher.waitrepo()
|
||||
|
||||
# we should not have reached the request stage
|
||||
self.session.repo.request.assert_not_called()
|
||||
|
||||
def test_waitrepo_build_not_in_repo(self):
|
||||
self.session.repo.get.return_value = None
|
||||
nvrs = ['package-1.2-34']
|
||||
self.session.tagLastChangeEvent.return_value = 10000
|
||||
|
||||
# replace checkForBuilds
|
||||
def my_check(session, tag, builds, event, latest=False):
|
||||
if event and event < 10002:
|
||||
# called from check_repo with repo event id
|
||||
return False
|
||||
return True
|
||||
|
||||
self.checkForBuilds.side_effect = my_check
|
||||
|
||||
req1 = {'id': 999, 'min_event': 10000, 'task_id': 'TASK', 'task_state': 0, 'repo_id': None}
|
||||
req2 = req1.copy()
|
||||
req2['min_event'] = 10002
|
||||
repo1 = {'id': 123, 'tag_id': self.TAG['id'], 'create_event': 10000}
|
||||
repo2 = {'id': 123, 'tag_id': self.TAG['id'], 'create_event': 10002}
|
||||
check1 = {'repo': None, 'request': req1}
|
||||
check1b = {'repo': repo1, 'request': req1}
|
||||
check2 = {'repo': None, 'request': req2}
|
||||
check2b = {'repo': repo2, 'request': req2}
|
||||
|
||||
# request should be made twice
|
||||
self.session.repo.request.side_effect = [check1, check2]
|
||||
|
||||
# and each checked once
|
||||
self.session.repo.checkRequest.side_effect = [check1b, check2b]
|
||||
|
||||
watcher = RepoWatcher(self.session, 'TAG', nvrs=nvrs)
|
||||
result = watcher.waitrepo()
|
||||
|
||||
self.assertEqual(result, repo2)

    def test_check_repo(self):
        watcher = RepoWatcher(self.session, 'TAG')
        repo = {'tag_id': self.TAG['id'], 'create_event': 10000, 'opts': {'src': True}}
        self.checkForBuilds.return_value = True

        # wrong tag
        _repo = repo.copy()
        _repo['tag_id'] += 1
        result = watcher.check_repo(_repo)
        self.assertEqual(result, False)

        # wrong at_event
        watcher = RepoWatcher(self.session, 'TAG', at_event=5000)
        result = watcher.check_repo(repo)
        self.assertEqual(result, False)

        # wrong min_event
        watcher = RepoWatcher(self.session, 'TAG', min_event=20000)
        result = watcher.check_repo(repo)
        self.assertEqual(result, False)

        # wrong opts
        watcher = RepoWatcher(self.session, 'TAG', opts={'src': False})
        result = watcher.check_repo(repo)
        self.assertEqual(result, False)

        # wrong builds
        nvrs = ['package-1.2-34']
        self.checkForBuilds.return_value = False
        watcher = RepoWatcher(self.session, 'TAG', nvrs=nvrs)
        result = watcher.check_repo(repo)
        self.assertEqual(result, False)

        # good
        self.checkForBuilds.return_value = True
        watcher = RepoWatcher(self.session, 'TAG', nvrs=nvrs, at_event=10000, opts={'src': True})
        result = watcher.check_repo(repo)
        self.assertEqual(result, True)
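
Taken together, these cases pin down check_repo() as a conjunction of per-field tests. A sketch of that predicate, assuming a plain equality comparison for opts (the test only exercises an exact mismatch) and leaving the checkForBuilds part to the caller:

    def repo_matches(repo, tag_id, at_event=None, min_event=None, opts=None):
        """Illustrative restatement of the acceptance rules exercised above."""
        if repo['tag_id'] != tag_id:
            return False
        if at_event is not None and repo['create_event'] != at_event:
            return False
        if min_event is not None and repo['create_event'] < min_event:
            return False
        if opts is not None and repo.get('opts') != opts:
            return False
        return True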

    def test_event_args(self):
        # both min and at
        with self.assertRaises(koji.ParameterError):
            watcher = RepoWatcher(self.session, 'TAG', min_event=100, at_event=99)

        self.session.tagLastChangeEvent.return_value = 101010
        watcher = RepoWatcher(self.session, 'TAG', min_event='last')
        self.assertEqual(watcher.min_event, 101010)
        self.session.tagLastChangeEvent.assert_called_once()
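
The second half shows how the constructor resolves the symbolic value: min_event='last' is translated into the tag's most recent change event via tagLastChangeEvent(). A one-step sketch of that resolution (the placement inside the constructor is assumed):

    def resolve_min_event(session, tag, min_event):
        # 'last' means "the newest change event recorded for this tag"
        if min_event == 'last':
            return session.tagLastChangeEvent(tag)  # e.g. 101010 above
        return min_event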

# the end

@ -1,4 +1,5 @@
from __future__ import absolute_import
import mock
import random
import shutil
import six

@ -575,10 +576,9 @@ class TasksTestCase(unittest.TestCase):
                         ('host test.domain.local (i386) does not support any arches '
                          'of tag some_package-1.2-build (aarch64, x86_64)'))

    def test_getRepo_tied_to_session(self):
        """ Tests that the getRepo function calls session.getRepo(), and returns the result
        when successful.
        """
    @patch('koji.util.RepoWatcher')
    def test_getRepo_no_wait_task(self, RepoWatcher):
        """ Tests that the getRepo method does not wait if repo is available"""
        temp_path = get_tmp_dir_path('TaskTest')
        makedirs(temp_path)

@ -590,89 +590,75 @@ class TasksTestCase(unittest.TestCase):
            'state': 1
        }

        obj = TaskTest(123, 'some_method', ['random_arg'], None, None, temp_path)
        obj.session = Mock()
        obj.session.getRepo.return_value = repo_dict
        handler = TaskTest(123, 'some_method', ['random_arg'], None, None, temp_path)
        handler.session = mock.MagicMock()
        handler.wait = mock.MagicMock()
        watcher = mock.MagicMock()
        watcher.getRepo.return_value = repo_dict
        RepoWatcher.return_value = watcher

        self.assertEqual(obj.getRepo(8472), repo_dict)
        result = handler.getRepo(8472)

    @patch('{0}.TaskTest.wait'.format(__name__))
    def test_getRepo_not_tied_to_session(self, mock_wait):
        """ Tests that the getRepo function waits until the results are available
        for session.getRepo, when it is not available at the start of the function call.
        """
        handler.session.host.subtask.assert_not_called()
        handler.wait.assert_not_called()
        self.assertEqual(result, repo_dict)

    @patch('koji.util.RepoWatcher')
    def test_getRepo_last_event(self, RepoWatcher):
        """ Tests that the getRepo method uses min_event='last' when requested"""
        temp_path = get_tmp_dir_path('TaskTest')
        makedirs(temp_path)

        repo_dict = {
            'create_event': 13413120,
            'create_ts': 1466140834.9119599,
            'creation_time': '2016-06-17 05:20:34.911962',
            'id': 1592850,
            'create_event': 13635166,
            'create_ts': 1469039671.5743899,
            'creation_time': '2016-07-20 18:34:31.574386',
            'id': 1630631,
            'state': 1
        }

        obj = TaskTest(123, 'some_method', ['random_arg'], None, None, temp_path)
        obj.session = Mock()
        obj.session.getRepo.return_value = None
        obj.session.getTag.return_value = {
            'arches': 'i386 ia64 x86_64 ppc s390 s390x ppc64',
            'extra': {},
            'id': 851,
            'locked': True,
            'maven_include_all': False,
            'maven_support': False,
            'name': 'dist-3.0E-build',
            'perm': None,
            'perm_id': None
        }
        obj.session.getBuildTargets.return_value = [{
            'build_tag': 3093,
            'build_tag_name': 'dist-6E-dsrv-9-build',
            'dest_tag': 3092,
            'dest_tag_name': 'dist-6E-dsrv-9-qu-candidate',
            'id': 851,
            'name': 'dist-6E-dsrv-9-qu-candidate'
        }
        ]
        handler = TaskTest(123, 'some_method', ['random_arg'], None, None, temp_path)
        handler.session = mock.MagicMock()
        handler.wait = mock.MagicMock()
        watcher = mock.MagicMock()
        watcher.getRepo.return_value = repo_dict
        RepoWatcher.return_value = watcher

        obj.session.host.subtask.return_value = 123
        mock_wait.return_value = {123: repo_dict}
        result = handler.getRepo(8472, wait=True)

        self.assertEqual(obj.getRepo(851), repo_dict)
        obj.session.getRepo.assert_called_once_with(851)
        obj.session.getTag.assert_called_once_with(851, strict=True)
        RepoWatcher.assert_called_once_with(handler.session, 8472, nvrs=None, min_event='last', logger=handler.logger)
        handler.session.host.subtask.assert_not_called()
        handler.wait.assert_not_called()
        self.assertEqual(result, repo_dict)

    @patch('{0}.TaskTest.wait'.format(__name__))
    def test_getRepo_not_tied_to_session_no_build_targets(self, mock_wait):
        """ Tests that the getRepo function raises an exception
        when session.getBuildTargets returns an empty list
        """
    @patch('koji.util.RepoWatcher')
    def test_getRepo_wait_task(self, RepoWatcher):
        """ Tests that the getRepo function waits for subtask if repo not immediately available"""
        temp_path = get_tmp_dir_path('TaskTest')
        makedirs(temp_path)

        obj = TaskTest(123, 'some_method', ['random_arg'], None, None, temp_path)
        obj.session = Mock()
        obj.session.getRepo.return_value = None
        obj.session.getTag.return_value = {
            'arches': 'i686 x86_64 ppc ppc64 ppc64le s390 s390x aarch64',
            'extra': {},
            'id': 8472,
            'locked': False,
            'maven_include_all': False,
            'maven_support': False,
            'name': 'rhel-7.3-build',
            'perm': 'admin',
            'perm_id': 1
        repo_dict = {
            'create_event': 13635166,
            'create_ts': 1469039671.5743899,
            'creation_time': '2016-07-20 18:34:31.574386',
            'id': 1630631,
            'state': 1
        }
        obj.session.getBuildTargets.return_value = []

        try:
            obj.getRepo(8472)
            raise Exception('The BuildError Exception was not raised')
        except koji.BuildError as e:
            obj.session.getRepo.assert_called_once_with(8472)
            self.assertEqual(e.args[0], 'no repo (and no target) for tag rhel-7.3-build')
        handler = TaskTest(123, 'some_method', ['random_arg'], None, None, temp_path)
        handler.session = mock.MagicMock()
        handler.session.host.subtask.return_value = 'TASKID'
        handler.wait = mock.MagicMock()
        handler.wait.return_value = {'TASKID': repo_dict}
        watcher = mock.MagicMock()
        watcher.getRepo.return_value = None
        RepoWatcher.return_value = watcher

        result = handler.getRepo(8472)

        handler.session.host.subtask.assert_called_once()
        handler.wait.assert_called_once_with('TASKID')
        self.assertEqual(result, repo_dict)
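
Together with test_getRepo_no_wait_task and test_getRepo_last_event above, this pins down the new getRepo() shape: probe via koji.util.RepoWatcher first, and only fall back to a blocking subtask when no repo is available. A sketch of that flow, with the subtask arguments assumed rather than taken from the source:

    import koji.util

    def get_repo_flow(handler, tag, wait=False):
        watcher = koji.util.RepoWatcher(handler.session, tag, nvrs=None,
                                        min_event='last' if wait else None,
                                        logger=handler.logger)
        repo = watcher.getRepo()        # non-blocking probe
        if repo:
            return repo                 # repo already available, no subtask
        # otherwise hand off to a subtask and block on its result
        task_id = handler.session.host.subtask('waitrepo', [tag])  # args assumed
        return handler.wait(task_id)[task_id]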

    def test_FakeTask_handler(self):
        """ Tests that the FakeTest handler can be instantiated and returns 42 when run.

@ -51,7 +51,7 @@ class TestCreateSideTagHub(unittest.TestCase):
        self._create_tag.return_value = 12346

        ret = sidetag_hub.createSideTag('base_tag')
        self.assertEqual(ret, {'name': sidetag_name, 'id': 12346, 'task_id': None})
        self.assertEqual(ret, {'name': sidetag_name, 'id': 12346, 'task_id': None, 'request': None})

        self.get_user.assert_called_once_with(23, strict=True)
        self.get_tag.assert_called_once_with(self.basetag['name'], strict=True)

@ -83,7 +83,7 @@ class TestCreateSideTagHub(unittest.TestCase):
        sidetag_hub.NAME_TEMPLATE = '{basetag}-sidetag-{tag_id}'

        ret = sidetag_hub.createSideTag('base_tag', debuginfo=True, suffix='suffix')
        self.assertEqual(ret, {'name': sidetag_name, 'id': 12346, 'task_id': None})
        self.assertEqual(ret, {'name': sidetag_name, 'id': 12346, 'task_id': None, 'request': None})
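
Both hunks reflect the same API change: createSideTag() now always returns a 'request' key alongside 'name', 'id', and 'task_id'. It is None in these tests; presumably it carries the repo request details when the new on-demand repo machinery files one for the sidetag.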

    def test_createsidetag_template_forbidden_suffix(self):
        sidetag_hub.ALLOWED_SUFFIXES = ['suffix', 'another']

@ -1043,28 +1043,31 @@ class BuildTracker(object):
        results = session.multiCall(strict=True)
        event_id = results[-1][0]['id']
        # TODO - verify / check results ?
        task_id = session.newRepo(our_tag, event=event_id)
        # TODO - upload src
        # [?] use remote SCM url (if avail)?
        src = build.getSource()
        if not src:
            log("Couldn't get source for %s" % build.nvr)
            return None
        # wait for repo task
        log("Waiting on newRepo task %i" % task_id)
        # request our repo
        check = session.repo.request(our_tag, at_event=event_id)
        log("Waiting on repo")
        while True:
            tinfo = session.getTaskInfo(task_id)
            tstate = koji.TASK_STATES[tinfo['state']]
            if tstate == 'CLOSED':
            # TODO use RepoWatcher instead
            repo = check.get('repo')
            if repo:
                # done waiting
                break
            elif tstate in ('CANCELED', 'FAILED'):
            req = check['request']
            if not req or not req['active']:
                log("Error: failed to generate repo")
                return None
            time.sleep(1)
            check = session.repo.checkRequest(req['id'])
            # add a timeout?
        # TODO ...and verify repo
        repo_id, event_id = session.getTaskResult(task_id)

        # kick off build
        task_id = session.build(src, None, opts={'repo_id': repo_id}, priority=options.priority)
        task_id = session.build(src, None, opts={'repo_id': repo['id']}, priority=options.priority)
        return task_id
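
The replacement loop above is the client-side face of the new machinery: instead of spawning a newRepo task and polling its task state, the script files a repo request at a fixed event and polls the request until it is filled. Per the "TODO use RepoWatcher instead" note, the same wait could presumably be collapsed to the watcher exercised in the tests earlier in this PR:

    import koji.util

    watcher = koji.util.RepoWatcher(session, our_tag, at_event=event_id)
    repo = watcher.waitrepo()   # blocks until a matching repo is ready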

    def report(self):

935 util/kojira
File diff suppressed because it is too large