Merge commit 'origin/master' into mead-tmp

Conflicts:
	builder/kojid

Resolved conflict between make_sources option and source_cmd option
This commit is contained in:
Mike McLean 2010-06-18 17:28:37 -04:00
commit 1e3a7f0fef
14 changed files with 312 additions and 147 deletions

View file

@ -545,17 +545,15 @@ class BuildRoot(object):
msg = '; see %s for more information' % logfile
return _parseStatus(rv, 'mock') + msg
def build_srpm(self, specfile, sourcedir, make_sources=True):
def build_srpm(self, specfile, sourcedir, source_cmd, make_sources=True):
session.host.setBuildRootState(self.id,'BUILDING')
if make_sources:
chroot_sourcedir = sourcedir[len(self.rootdir()):]
# call "make sources" in the chroot so any required files not stored in
if source_cmd and make_sources:
# call the command defined by source_cmd in the chroot so any required files not stored in
# the SCM can be retrieved
args = ['--no-clean', '--unpriv', '--cwd', chroot_sourcedir, '--chroot', 'make', 'sources']
chroot_sourcedir = sourcedir[len(self.rootdir()):]
args = ['--no-clean', '--unpriv', '--cwd', chroot_sourcedir, '--chroot']
args.extend(source_cmd)
rv = self.mock(args)
if rv:
self.expire()
raise koji.BuildError, "error retrieving sources, %s" % self._mockResult(rv)
@ -1811,22 +1809,22 @@ class ChainBuildTask(BaseTaskHandler):
if not target_info:
raise koji.GenericError, 'unknown build target: %s' % target
nvrs = []
for n_src, build_level in enumerate(srcs):
for n_level, build_level in enumerate(srcs):
#if there are any nvrs to wait on, do so
if nvrs:
task_id = session.host.subtask(method='waitrepo',
arglist=[target_info['build_tag_name'], None, nvrs],
label="wait %i" % n_src,
label="wait %i" % n_level,
parent=self.id)
self.wait(task_id, all=True, failany=True)
nvrs = []
#kick off the builds for this level
build_tasks = []
for src in build_level:
for n_src, src in enumerate(build_level):
if SCM.is_scm_url(src):
task_id = session.host.subtask(method='build',
arglist=[src, target, opts],
label="build %i" % n_src,
label="build %i,%i" % (n_level, n_src),
parent=self.id)
build_tasks.append(task_id)
else:
@ -2898,9 +2896,9 @@ class ImageTask(BaseTaskHandler):
try:
ks.readKickstart(kspath)
except IOError, (err, msg):
except IOError, e:
raise koji.LiveCDError("Failed to read kickstart file "
"'%s' : %s" % (kspath, msg))
"'%s' : %s" % (kspath, e))
except kserrors.KickstartError, e:
raise koji.LiveCDError("Failed to parse kickstart file "
"'%s' : %s" % (kspath, e))
@ -3337,7 +3335,7 @@ class BuildSRPMFromSCMTask(BaseTaskHandler):
#build srpm
self.logger.debug("Running srpm build")
broot.build_srpm(spec_file, sourcedir)
broot.build_srpm(spec_file, sourcedir, scm.source_cmd)
srpms = glob.glob('%s/*.src.rpm' % broot.resultdir())
if len(srpms) == 0:
@ -3907,6 +3905,7 @@ class SCM(object):
- module
- revision
- use_common (defaults to True, may be set by assert_allowed())
- source_cmd (defaults to ['make', 'sources'], may be set by assert_allowed())
- scmtype
The exact format of each attribute is SCM-specific, but the structure of the url
@ -3927,6 +3926,7 @@ class SCM(object):
self.module = query
self.revision = fragment
self.use_common = True
self.source_cmd = ['make', 'sources']
for scmtype, schemes in SCM.types.items():
if self.scheme in schemes:
@ -3983,22 +3983,33 @@ class SCM(object):
Verify that the host and repository of this SCM is in the provided list of
allowed repositories.
allowed is a space-separated list of host:repository[:use_common] tuples. Incorrectly-formatted
allowed is a space-separated list of host:repository[:use_common[:source_cmd]] tuples. Incorrectly-formatted
tuples will be ignored.
If use_common is not present, kojid will attempt to checkout a common/ directory from the
repository. If use_common is set to no, off, false, or 0, it will not attempt to checkout a common/
directory.
source_cmd is a shell command (args separated with commas instead of spaces) to run before building the srpm.
It is generally used to retrieve source files from a remote location. If no source_cmd is specified,
"make sources" is run by default.
"""
for allowed_scm in allowed.split():
scm_tuple = allowed_scm.split(':')
if len(scm_tuple) in (2, 3):
if len(scm_tuple) >= 2:
if fnmatch(self.host, scm_tuple[0]) and fnmatch(self.repository, scm_tuple[1]):
# SCM host:repository is in the allowed list
# check if we specify a value for use_common
if len(scm_tuple) == 3:
if len(scm_tuple) >= 3:
if scm_tuple[2].lower() in ('no', 'off', 'false', '0'):
self.use_common = False
# check if we specify a custom source_cmd
if len(scm_tuple) >= 4:
if scm_tuple[3]:
self.source_cmd = scm_tuple[3].split(',')
else:
# there was nothing after the trailing :, so they don't want to run a source_cmd at all
self.source_cmd = None
break
else:
self.logger.warn('Ignoring incorrectly formatted SCM host:repository: %s' % allowed_scm)

View file

@ -142,11 +142,11 @@ class RepoMerge(object):
"""
For each package object, check if the srpm name has ever been seen before.
If it has not, keep the package. If it has, check if the srpm name was first seen
in the same repo as the current package. If so, keep the package (it's probably a subpackage
of the same build). If not, delete the package from the package sack.
Note that this does allow an external repo to contain multiple versions of the same package,
and they will all end up in the repo, but the yum client will ensure that only the latest is
installed.
in the same repo as the current package. If so, keep the package from the srpm with the
highest NVR. If not, keep the packages from the first srpm we found, and delete packages from
all other srpms.
Packages with matching NVRs in multiple repos will be taken from the first repo.
If the srpm name appears in the blocked package list, any packages generated from the srpm
will be deleted from the package sack as well.
@ -161,31 +161,49 @@ class RepoMerge(object):
repos = self.yumbase.repos.listEnabled()
repos.sort(key=lambda o: o._merge_rank)
seen_srpms = {}
include_srpms = {}
# calculating what "builds" (srpms) we're allowing into the repo
for repo in repos:
for pkg in repo.sack:
srpm_name, ver, rel, epoch, arch = rpmUtils.miscutils.splitFilename(pkg.sourcerpm)
if include_srpms.has_key(srpm_name):
other_srpm, other_repoid = include_srpms[srpm_name]
if pkg.repoid != other_repoid:
# We found a rpm built from an srpm with the same name in a previous repo.
# The previous repo takes precedence, so ignore the srpm found here.
continue
else:
# We're in the same repo, so compare srpm NVRs
other_srpm_name, other_ver, other_rel, other_epoch, other_arch = \
rpmUtils.miscutils.splitFilename(other_srpm)
cmp = rpmUtils.miscutils.compareEVR((epoch, ver, rel),
(other_epoch, other_ver, other_rel))
if cmp > 0:
# The current package we're processing is from a newer srpm than the
# existing srpm in the dict, so update the dict
include_srpms[srpm_name] = (pkg.sourcerpm, pkg.repoid)
elif self.blocked.has_key(srpm_name):
continue
else:
include_srpms[srpm_name] = (pkg.sourcerpm, pkg.repoid)
pkgorigins = os.path.join(self.yumbase.conf.cachedir, 'pkgorigins')
origins = file(pkgorigins, 'w')
seen_rpms = {}
for repo in repos:
for pkg in repo.sack:
srpm_name, ver, rel, epoch, arch = rpmUtils.miscutils.splitFilename(pkg.sourcerpm)
if seen_srpms.has_key(srpm_name):
# We've seen a package created from this srpm before.
# Check if that package was also from this repo, in
# which case it's likely just another subpackage from
# the same build.
if seen_srpms[srpm_name] != pkg.repoid:
# Package has been superseded by a package
# from a higher priority repo
repo.sack.delPackage(pkg)
continue
elif self.blocked.has_key(srpm_name):
print >> sys.stderr, 'Removing blocked package: %s' % pkg
incl_srpm, incl_repoid = include_srpms.get(srpm_name, (None, None))
pkg_nvra = str(pkg)
if incl_srpm == pkg.sourcerpm and not seen_rpms.has_key(pkg_nvra):
origins.write('%s\t%s\n' % (pkg_nvra, repo.urls[0]))
seen_rpms[pkg_nvra] = 1
else:
# Either the srpm is in the block list, it is not built from the srpm we
# identified above, or it's a duplicate, so exclude it
repo.sack.delPackage(pkg)
continue
seen_srpms[srpm_name] = pkg.repoid
origins.write('%s\t%s\n' % (pkg, repo.urls[0]))
origins.close()
self.mdconf.additional_metadata['origin'] = pkgorigins

View file

@ -26,6 +26,11 @@ try:
import krbV
except ImportError:
pass
try:
import ast
except ImportError:
ast = None
pass
import ConfigParser
import base64
import koji
@ -1003,21 +1008,31 @@ def handle_call(options, session, args):
usage = _("usage: %prog call [options] name [arg...]")
usage += _("\n(Specify the --help global option for a list of other help options)")
parser = OptionParser(usage=usage)
parser.add_option("--python", action="store_true", help=_("Use python syntax for values"))
parser.add_option("--kwargs", help=_("Specify keyword arguments as a dictionary (implies --python)"))
(options, args) = parser.parse_args(args)
if len(args) < 1:
parser.error(_("Please specify the name of the XML-RPC method"))
assert False
if options.kwargs:
options.python = True
if options.python and ast is None:
parser.error(_("The ast module is required to read python syntax"))
activate_session(session)
name = args[0]
non_kw = []
kw = {}
for arg in args[1:]:
if arg.find('=') != -1:
key, value = arg.split('=', 1)
kw[key] = arg_filter(value)
else:
non_kw.append(arg_filter(arg))
if options.python:
non_kw = [ast.literal_eval(a) for a in args[1:]]
if options.kwargs:
kw = ast.literal_eval(options.kwargs)
else:
for arg in args[1:]:
if arg.find('=') != -1:
key, value = arg.split('=', 1)
kw[key] = arg_filter(value)
else:
non_kw.append(arg_filter(arg))
pprint.pprint(getattr(session, name).__call__(*non_kw, **kw))
def anon_handle_mock_config(options, session, args):
@ -1810,7 +1825,7 @@ def handle_disable_user(options, session, args):
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args(args)
if len(args) < 1:
parser.error(_("You must specify the username of the user to enable"))
parser.error(_("You must specify the username of the user to disable"))
elif len(args) > 1:
parser.error(_("This command only accepts one argument (username)"))
username = args[0]
@ -4145,8 +4160,10 @@ def handle_spin_livecd(options, session, args):
usage += _("\n(Specify the --help global option for a list of other " +
"help options)")
parser = OptionParser(usage=usage)
parser.add_option("--nowait", action="store_true",
help=_("Don't wait on livecd creation"))
parser.add_option("--wait", action="store_true",
help=_("Wait on the livecd creation, even if running in the background"))
parser.add_option("--nowait", action="store_false", dest="wait",
help=_("Don't wait on livecd creation"))
parser.add_option("--noprogress", action="store_true",
help=_("Do not display progress of the upload"))
parser.add_option("--background", action="store_true",
@ -4184,8 +4201,10 @@ def handle_spin_appliance(options, session, args):
usage += _("\n(Specify the --help global option for a list of other " +
"help options)")
parser = OptionParser(usage=usage)
parser.add_option("--nowait", action="store_true",
help=_("Do not wait on appliance creation."))
parser.add_option("--wait", action="store_true",
help=_("Wait on the appliance creation, even if running in the background"))
parser.add_option("--nowait", action="store_false", dest="wait",
help=_("Don't wait on appliance creation"))
parser.add_option("--noprogress", action="store_true",
help=_("Do not display progress of the upload."))
parser.add_option("--background", action="store_true",
@ -4271,13 +4290,17 @@ def _build_image(options, task_opts, session, args, img_type):
ksfile = os.path.join(serverdir, os.path.basename(ksfile))
print
# don't send the 'wait' option to the hub
wait = task_opts.wait
del task_opts.wait
# finally, create the task.
task_id = session.buildImage(arch, target, ksfile, img_type,
opts=task_opts, priority=priority)
print "Created task:", task_id
print "Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)
if _running_in_bg() or task_opts.nowait:
if not wait and (_running_in_bg() or wait is False):
return
else:
session.logout()
@ -4354,10 +4377,14 @@ def handle_set_task_priority(options, session, args):
if options.priority is None:
parser.error(_("You must specify --priority"))
assert False
try:
tasks = [int(a) for a in args]
except ValueError:
parser.error(_("Task numbers must be integers"))
activate_session(session)
for task_id in args:
for task_id in tasks:
session.setTaskPriority(task_id, options.priority, options.recurse)
def handle_list_tasks(options, session, args):
@ -4553,7 +4580,7 @@ def handle_make_task(options, session, args):
taskopts[key] = value
task_id = session.makeTask(method=args[0],
arglist=map(arg_filter,args[1:]),
opts=taskopts)
**taskopts)
print "Created task id %d" % task_id
if _running_in_bg() or not options.watch:
return
@ -4780,7 +4807,7 @@ def anon_handle_download_build(options, session, args):
pg = progress.TextMeter()
for rpm in rpms:
if rpm['name'].endswith('-debuginfo') and not suboptions.debuginfo:
if not suboptions.debuginfo and koji.is_debuginfo(rpm['name']):
continue
if suboptions.key:

View file

@ -1,5 +1,11 @@
[hub]
## ConfigParser style config file, similar to ini files
## http://docs.python.org/library/configparser.html
##
## Note that multiline values can be set by indenting subsequent lines
## (which means you should not indent regular lines)
## Basic options ##
DBName = koji
DBUser = koji

View file

@ -175,9 +175,11 @@ class Task(object):
# - assigned to host_id
# - force option is enabled
state = koji.TASK_STATES[newstate]
q = """UPDATE task SET state=%(state)s,host_id=%(host_id)s
WHERE id=%(task_id)s"""
_dml(q,locals())
update = UpdateProcessor('task', clauses=['id=%(task_id)i'], values=locals())
update.set(state=state, host_id=host_id)
if state == koji.TASK_STATES['OPEN']:
update.rawset(start_time='NOW()')
update.execute()
self.runCallbacks('postTaskStateChange', info, 'state', koji.TASK_STATES[newstate])
self.runCallbacks('postTaskStateChange', info, 'host_id', host_id)
return True
@ -193,10 +195,6 @@ class Task(object):
returns task data if successful, None otherwise"""
if self.lock(host_id,'OPEN'):
#set task start time
update = UpdateProcessor('task', clauses=['id=%(id)i'], values=vars(self))
update.rawset(start_time='NOW()')
update.execute()
# get more complete data to return
fields = self.fields + (('task.request', 'request'),)
query = QueryProcessor(tables=['task'], clauses=['id=%(id)i'], values=vars(self),
@ -482,23 +480,64 @@ def make_task(method,arglist,**opts):
raise koji.GenericError, "Parent task (id %(parent)s) is not open" % opts
#default to a higher priority than parent
opts.setdefault('priority', pdata['priority'] - 1)
for f in ('owner','channel_id','arch'):
for f in ('owner', 'arch'):
opts.setdefault(f,pdata[f])
opts.setdefault('label',None)
else:
opts.setdefault('priority',koji.PRIO_DEFAULT)
#calling function should enforce priority limitations, if applicable
opts.setdefault('arch','noarch')
opts.setdefault('channel','default')
#no labels for top-level tasks
#calling function should enforce channel limitations, if applicable
opts['channel_id'] = get_channel_id(opts['channel'],strict=True)
if not context.session.logged_in:
raise koji.GenericError, 'task must have an owner'
else:
opts['owner'] = context.session.user_id
opts['label'] = None
opts['parent'] = None
#determine channel from policy
policy_data = {}
policy_data['method'] = method
for key in 'arch', 'parent', 'label', 'owner':
policy_data[key] = opts[key]
policy_data['user_id'] = opts['owner']
if 'channel' in opts:
policy_data['req_channel'] = opts['channel']
req_channel_id = get_channel_id(opts['channel'], strict=True)
if method == 'build':
# arglist = source, target, [opts]
args = koji.decode_args2(arglist, ('source', 'target', 'opts'))
policy_data['source'] = args['source']
target = get_build_target(args['target'], strict=True)
policy_data['target'] = target['name']
t_opts = args.get('opts', {})
policy_data['scratch'] = t_opts.get('scratch', False)
ruleset = context.policy.get('channel')
result = ruleset.apply(policy_data)
if result is None:
logger.warning('Channel policy returned no result, using default')
opts['channel_id'] = get_channel_id('default', strict=True)
else:
try:
parts = result.split()
if parts[0] == "use":
opts['channel_id'] = get_channel_id(parts[1], strict=True)
elif parts[0] == "parent":
if not opts.get('parent'):
logger.error("Invalid channel policy result (no parent task): %s",
ruleset.last_rule())
raise koji.GenericError, "invalid channel policy"
opts['channel_id'] = pdata['channel_id']
elif parts[0] == "req":
if 'channel' not in opts:
logger.error('Invalid channel policy result (no channel requested): %s',
ruleset.last_rule())
raise koji.GenericError, "invalid channel policy"
opts['channel_id'] = req_channel_id
else:
logger.error("Invalid result from channel policy: %s", ruleset.last_rule())
raise koji.GenericError, "invalid channel policy"
except IndexError:
logger.error("Invalid result from channel policy: %s", ruleset.last_rule())
raise koji.GenericError, "invalid channel policy"
#XXX - temporary workaround
if method in ('buildArch', 'buildSRPMFromSCM') and opts['arch'] == 'noarch':
#not all arches can generate a proper buildroot for all tags
@ -1337,10 +1376,10 @@ def _tag_build(tag,build,user_id=None,force=False):
table = 'tag_listing'
clauses = ('tag_id=%(tag_id)i', 'build_id=%(build_id)i')
query = QueryProcessor(columns=['build_id'], tables=[table],
clauses=('active = TRUE')+clauses,
clauses=('active = TRUE',)+clauses,
values=locals(), opts={'rowlock':True})
#note: tag_listing is unique on (build_id, tag_id, active)
if query.fetchSingle():
if query.executeOne():
#already tagged
if not force:
raise koji.TagError, "build %s already tagged (%s)" % (nvr,tag['name'])
@ -1407,7 +1446,7 @@ def grplist_add(taginfo,grpinfo,block=False,force=False,**opts):
cfg_fields = ('exported','display_name','is_default','uservisible',
'description','langonly','biarchonly',)
#prevent user-provided opts from doing anything strange
opts = dslice(opts, cfg_fields)
opts = dslice(opts, cfg_fields, strict=False)
if previous is not None:
#already there (possibly via inheritance)
if previous['blocked'] and not force:
@ -1431,11 +1470,11 @@ def grplist_add(taginfo,grpinfo,block=False,force=False,**opts):
opts.setdefault('uservisible',True)
# XXX ^^^
opts['tag_id'] = tag['id']
opts['grp_id'] = group['id']
opts['group_id'] = group['id']
opts['blocked'] = block
#revoke old entry (if present)
update = UpdateProcessor('group_config', values=opts,
clauses=['group_id=%(grp_id)s', 'tag_id=%(tag_id)s'])
clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s'])
update.make_revoke()
update.execute()
#add new entry
@ -1479,7 +1518,7 @@ def grplist_unblock(taginfo,grpinfo):
table = 'group_config'
clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s')
query = QueryProcessor(columns=['blocked'], tables=[table],
clauses=('active = TRUE')+clauses,
clauses=('active = TRUE',)+clauses,
values=locals(), opts={'rowlock':True})
blocked = query.singleValue(strict=False)
if not blocked:
@ -1513,7 +1552,7 @@ def grp_pkg_add(taginfo,grpinfo,pkg_name,block=False,force=False,**opts):
previous = grp_cfg['packagelist'].get(pkg_name,None)
cfg_fields = ('type','basearchonly','requires')
#prevent user-provided opts from doing anything strange
opts = dslice(opts, cfg_fields)
opts = dslice(opts, cfg_fields, strict=False)
if previous is not None:
#already there (possibly via inheritance)
if previous['blocked'] and not force:
@ -1582,7 +1621,7 @@ def grp_pkg_unblock(taginfo,grpinfo,pkg_name):
grp_id = get_group_id(grpinfo,strict=True)
clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'package = %(pkg_name)s')
query = QueryProcessor(columns=['blocked'], tables=[table],
clauses=('active = TRUE')+clauses,
clauses=('active = TRUE',)+clauses,
values=locals(), opts={'rowlock':True})
blocked = query.singleValue(strict=False)
if not blocked:
@ -1617,7 +1656,7 @@ def grp_req_add(taginfo,grpinfo,reqinfo,block=False,force=False,**opts):
previous = grp_cfg['grouplist'].get(req['id'],None)
cfg_fields = ('type','is_metapkg')
#prevent user-provided opts from doing anything strange
opts = dslice(opts, cfg_fields)
opts = dslice(opts, cfg_fields, strict=False)
if previous is not None:
#already there (possibly via inheritance)
if previous['blocked'] and not force:
@ -1689,7 +1728,7 @@ def grp_req_unblock(taginfo,grpinfo,reqinfo):
clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'req_id = %(req_id)s')
query = QueryProcessor(columns=['blocked'], tables=[table],
clauses=('active = TRUE')+clauses,
clauses=('active = TRUE',)+clauses,
values=locals(), opts={'rowlock':True})
blocked = query.singleValue(strict=False)
if not blocked:
@ -1988,8 +2027,7 @@ def repo_init(tag, with_src=False, with_debuginfo=False, event=None):
for repoarch in repo_arches:
packages.setdefault(repoarch, [])
for rpminfo in rpms:
if (rpminfo['name'].endswith('-debuginfo') or rpminfo['name'].endswith('-debuginfo-common')) \
and not with_debuginfo:
if not with_debuginfo and koji.is_debuginfo(rpminfo['name']):
continue
arch = rpminfo['arch']
repoarch = koji.canonArch(arch)
@ -2376,6 +2414,17 @@ def get_build_targets(info=None, event=None, buildTagID=None, destTagID=None, qu
values=locals(), opts=queryOpts)
return query.execute()
def get_build_target(info, event=None, strict=False):
"""Return the build target with the given name or ID.
If there is no matching build target, return None."""
targets = get_build_targets(info=info, event=event)
if len(targets) == 1:
return targets[0]
elif strict:
raise koji.GenericError, 'No matching build target found: %s' % info
else:
return None
def lookup_name(table,info,strict=False,create=False):
"""Find the id and name in the table associated with info.
@ -5003,9 +5052,9 @@ def add_group_member(group, user, strict=True):
table = 'user_groups'
clauses = ('user_id = %(user_id)i', 'group_id = %(group_id)s')
query = QueryProcessor(columns=['user_id'], tables=[table],
clauses=('active = TRUE')+clauses,
clauses=('active = TRUE',)+clauses,
values=data, opts={'rowlock':True})
row = query.fetchSingle(strict=False)
row = query.executeOne()
if row:
if not strict:
return
@ -5563,6 +5612,25 @@ class ImportedTest(koji.policy.BaseSimpleTest):
#otherwise...
return False
class ChildTaskTest(koji.policy.BoolTest):
name = 'is_child_task'
field = 'parent'
class MethodTest(koji.policy.MatchTest):
name = 'method'
field = 'method'
class UserTest(koji.policy.MatchTest):
"""Checks username against glob patterns"""
name = 'user'
field = '_username'
def run(self, data):
user = policy_get_user(data)
if not user:
return False
data[self.field] = user['name']
return super(UserTest, self).run(data)
class IsBuildOwnerTest(koji.policy.BaseSimpleTest):
"""Check if user owns the build"""
name = "is_build_owner"
@ -7410,18 +7478,7 @@ class RootExports(object):
editBuildTarget = staticmethod(edit_build_target)
deleteBuildTarget = staticmethod(delete_build_target)
getBuildTargets = staticmethod(get_build_targets)
def getBuildTarget(self, info, event=None, strict=False):
"""Return the build target with the given name or ID.
If there is no matching build target, return None."""
targets = get_build_targets(info=info, event=event)
if len(targets) == 1:
return targets[0]
else:
if strict:
raise koji.GenericError, 'No matching build target found: %s' % info
else:
return None
getBuildTarget = staticmethod(get_build_target)
def taskFinished(self,taskId):
task = Task(taskId)
@ -7484,6 +7541,12 @@ class RootExports(object):
createdAfter[float or str]: limit to tasks whose create_time is after the
given date, in either float (seconds since the epoch)
or str (ISO) format
startedBefore[float or str]: limit to tasks whose start_time is before the
given date, in either float (seconds since the epoch)
or str (ISO) format
startedAfter[float or str]: limit to tasks whose start_time is after the
given date, in either float (seconds since the epoch)
or str (ISO) format
completeBefore[float or str]: limit to tasks whose completion_time is before
the given date, in either float (seconds since the epoch)
or str (ISO) format
@ -7496,12 +7559,14 @@ class RootExports(object):
tables = ['task']
joins = ['users ON task.owner = users.id']
fields = ('task.id','state','create_time','completion_time','channel_id',
'host_id','parent','label','waiting','awaited','owner','method',
'arch','priority','weight','request','result', 'users.name', 'users.usertype')
aliases = ('id','state','create_time','completion_time','channel_id',
'host_id','parent','label','waiting','awaited','owner','method',
'arch','priority','weight','request','result', 'owner_name', 'owner_type')
flist = Task.fields + (
('task.request', 'request'),
('task.result', 'result'),
('users.name', 'owner_name'),
('users.usertype', 'owner_type'),
)
fields = [f[0] for f in flist]
aliases = [f[1] for f in flist]
conditions = []
for f in ['arch','state']:
@ -7515,26 +7580,20 @@ class RootExports(object):
conditions.append('%s = %%(%s)i' % (f, f))
if opts.has_key('method'):
conditions.append('method = %(method)s')
if opts.get('createdBefore') != None:
createdBefore = opts['createdBefore']
if not isinstance(createdBefore, str):
opts['createdBefore'] = datetime.datetime.fromtimestamp(createdBefore).isoformat(' ')
conditions.append('create_time < %(createdBefore)s')
if opts.get('createdAfter') != None:
createdAfter = opts['createdAfter']
if not isinstance(createdAfter, str):
opts['createdAfter'] = datetime.datetime.fromtimestamp(createdAfter).isoformat(' ')
conditions.append('create_time > %(createdAfter)s')
if opts.get('completeBefore') != None:
completeBefore = opts['completeBefore']
if not isinstance(completeBefore, str):
opts['completeBefore'] = datetime.datetime.fromtimestamp(completeBefore).isoformat(' ')
conditions.append('completion_time < %(completeBefore)s')
if opts.get('completeAfter') != None:
completeAfter = opts['completeAfter']
if not isinstance(completeAfter, str):
opts['completeAfter'] = datetime.datetime.fromtimestamp(completeAfter).isoformat(' ')
conditions.append('completion_time > %(completeAfter)s')
time_opts = [
['createdBefore', 'create_time', '<'],
['createdAfter', 'create_time', '>'],
['startedBefore', 'start_time', '<'],
['startedAfter', 'start_time', '>'],
['completeBefore', 'completion_time', '<'],
['completeAfter', 'completion_time', '>'],
]
for key, field, cmp in time_opts:
if opts.get(key) != None:
value = opts[key]
if not isinstance(value, str):
opts[key] = datetime.datetime.fromtimestamp(value).isoformat(' ')
conditions.append('%(field)s %(cmp)s %%(%(key)s)s' % locals())
query = QueryProcessor(columns=fields, aliases=aliases, tables=tables, joins=joins,
clauses=conditions, values=opts, opts=queryOpts)

View file

@ -246,8 +246,8 @@ class ModXMLRPCRequestHandler(object):
self.logger.debug("Handling method %s for session %s (#%s)",
method, context.session.id, context.session.callnum)
if method != 'uploadFile':
self.logger.debug("Params: %s\n", pprint.pformat(params))
self.logger.debug("Opts: %s\n", pprint.pformat(opts))
self.logger.debug("Params: %s", pprint.pformat(params))
self.logger.debug("Opts: %s", pprint.pformat(opts))
start = time.time()
ret = func(*params,**opts)
@ -297,7 +297,7 @@ class ModXMLRPCRequestHandler(object):
req.content_type = "text/xml"
req.set_content_length(len(response))
req.write(response)
self.logger.debug("Returning %d bytes after %f seconds\n", len(response),
self.logger.debug("Returning %d bytes after %f seconds", len(response),
time.time() - start)
@ -367,11 +367,11 @@ def load_config(req):
['AuthPrincipal', 'string', None],
['AuthKeytab', 'string', None],
['ProxyPrincipals', 'string', None],
['ProxyPrincipals', 'string', ''],
['HostPrincipalFormat', 'string', None],
['DNUsernameComponent', 'string', 'CN'],
['ProxyDNs', 'string', None],
['ProxyDNs', 'string', ''],
['LoginCreatesUser', 'boolean', True],
['KojiWebURL', 'string', 'http://localhost.localdomain/koji'],
@ -444,7 +444,7 @@ def load_plugins(opts):
logger = logging.getLogger('koji.plugins')
tracker = koji.plugin.PluginTracker(path=opts['PluginPath'].split(':'))
for name in opts['Plugins'].split():
logger.info('Loading plugin: %s\n', name)
logger.info('Loading plugin: %s', name)
try:
tracker.load(name)
except Exception:
@ -466,6 +466,11 @@ _default_policies = {
'package_list' : '''
has_perm admin :: allow
all :: deny
''',
'channel' : '''
has req_channel :: req
is_child_task :: parent
all :: use default
'''
}

View file

@ -369,6 +369,15 @@ def decode_args(*args):
args = args[:-1]
return args,opts
def decode_args2(args, names, strict=True):
"An alternate form of decode_args, returns a dictionary"
args, opts = decode_args(*args)
if strict and len(names) < len(args):
raise TypeError, "Expecting at most %i arguments" % len(names)
ret = dict(zip(names, args))
ret.update(opts)
return ret
def encode_int(n):
"""If n is too large for a 32bit signed, convert it to a string"""
if n <= 2147483647:
@ -845,6 +854,12 @@ def parse_NVRA(nvra):
ret['location'] = location
return ret
def is_debuginfo(name):
"""Determines if an rpm is a debuginfo rpm, based on name"""
if name.endswith('-debuginfo') or name.endswith('-debuginfo-common'):
return True
return False
def canonArch(arch):
"""Given an arch, return the "canonical" arch"""
#XXX - this could stand to be smarter, and we should probably

View file

@ -94,7 +94,7 @@ class CursorWrapper:
start = time.time()
ret = self.cursor.execute(operation, parameters)
if debug:
self.logger.debug("Execute operation completed in %.4f seconds\n", time.time() - start)
self.logger.debug("Execute operation completed in %.4f seconds", time.time() - start)
return ret

View file

@ -60,6 +60,21 @@ class NoneTest(FalseTest):
#alias for false
class HasTest(BaseSimpleTest):
"""Test if policy data contains a field"""
name = "has"
def __init__(self, str):
try:
self.field = str.split(None, 1)[1]
except IndexError:
raise koji.GenericError, "Empty python expression in policy"
def run(self, data):
return self.field in data
class BoolTest(BaseSimpleTest):
"""Test a field in the data as a boolean value

View file

@ -69,6 +69,7 @@ def get_routing_key(cbtype, *args, **kws):
_token_append(key, kws['tag']['name'])
_token_append(key, kws['package']['name'])
elif cbtype in ('preTaskStateChange', 'postTaskStateChange'):
_token_append(key, kws['info']['method'])
_token_append(key, kws['attribute'])
elif cbtype in ('preBuildStateChange', 'postBuildStateChange'):
info = kws['info']
@ -94,14 +95,16 @@ def get_routing_key(cbtype, *args, **kws):
return key
def get_message_headers(cbtype, *args, **kws):
# We're only registering for post callbacks, so strip
# off the redundant "post" prefix
headers = {'type': cbtype[4:]}
if cbtype.startswith('pre'):
headers = {'type': cbtype[3:]}
else:
headers = {'type': cbtype[4:]}
if cbtype in ('prePackageListChange', 'postPackageListChange'):
headers['tag'] = kws['tag']['name']
headers['package'] = kws['package']['name']
elif cbtype in ('preTaskStateChange', 'postTaskStateChange'):
headers['method'] = kws['info']['method']
headers['attribute'] = kws['attribute']
headers['old'] = kws['old']
headers['new'] = kws['new']

View file

@ -135,13 +135,20 @@ class ManagedRepo(object):
#also check dir age. We do this because a repo can be created from an older event
#and should not be removed based solely on that event's timestamp.
mtime = os.stat(path).st_mtime
except OSError:
self.logger.error("Can't stat repo directory: %s" % path)
return True
age = time.time() - max(self.event_ts, mtime)
if age < options.deleted_repo_lifetime:
#XXX should really be called expired_repo_lifetime
return False
except OSError, e:
if e.errno == 2:
# No such file or directory, so the repo either never existed,
# or has already been deleted, so allow it to be marked deleted.
self.logger.info("Repo directory does not exist: %s" % path)
pass
else:
self.logger.error("Can't stat repo directory: %s, %s" % (path, e.strerror))
return False
else:
age = time.time() - max(self.event_ts, mtime)
if age < options.deleted_repo_lifetime:
#XXX should really be called expired_repo_lifetime
return False
self.logger.debug("Attempting to delete repo %s.." % self.repo_id)
if self.state != koji.REPO_EXPIRED:
raise GenericError, "Repo not expired"
@ -248,10 +255,12 @@ class RepoManager(object):
continue
rinfo = session.repoInfo(repo_id)
if rinfo is None:
age = time.time() - dir_ts
if age > 36000:
if not options.ignore_stray_repos:
self.logger.warn("Unexpected directory (no such repo): %s" % repodir)
if not options.ignore_stray_repos:
age = time.time() - dir_ts
if age > options.deleted_repo_lifetime:
count += 1
self.logger.info("Removing unexpected directory (no such repo): %s" % repodir)
safe_rmtree(repodir, strict=False)
continue
if rinfo['tag_name'] != taginfo['name']:
self.logger.warn("Tag name mismatch: %s" % repodir)
@ -351,9 +360,6 @@ class RepoManager(object):
del self.tasks[tag_id]
#TODO [?] - implement a timeout for active tasks?
self.logger.debug("Current tasks: %r" % self.tasks)
if len(self.tasks) >= options.max_repo_tasks:
self.logger.info("Maximum number of repo tasks reached.")
return
self.logger.debug("Updating repos")
self.readCurrentRepos()
#check for stale repos

View file

@ -1,7 +1,7 @@
</div>
<p id="footer">
Copyright &copy; 2006-2009 Red Hat
Copyright &copy; 2006-2010 Red Hat
<a href="https://fedorahosted.org/koji/"><img src="/koji-static/images/powered-by-koji.png" alt="Powered By Koji" id="PoweredByKojiLogo"/></a>
</p>

View file

@ -1011,7 +1011,7 @@ def buildinfo(req, buildID):
rpmsByArch = {}
debuginfoByArch = {}
for rpm in rpms:
if rpm['name'].endswith('-debuginfo') or rpm['name'].endswith('-debuginfo-common'):
if koji.is_debuginfo(rpm['name']):
debuginfoByArch.setdefault(rpm['arch'], []).append(rpm)
else:
rpmsByArch.setdefault(rpm['arch'], []).append(rpm)

View file

@ -87,7 +87,7 @@ p#footer a {
/* Used only for the powered by koji icon */
float: right;
border: 0px;
height: 20px;
height: 30px;
}
.hide {