Merge branch 'master' into mead

Conflicts:

	hub/kojihub.py
This commit is contained in:
Mike McLean 2008-11-07 17:36:55 -05:00
commit f90a3d85b7
23 changed files with 4168 additions and 373 deletions

View file

@ -1,4 +1,5 @@
SUBDIRS = lib
BINFILES = kojid
PYFILES = $(wildcard *.py)
@ -7,6 +8,8 @@ _default:
clean:
rm -f *.o *.so *.pyc *~
for d in $(SUBDIRS); do make -s -C $$d clean; done
install:
@if [ "$(DESTDIR)" = "" ]; then \
@ -27,3 +30,7 @@ install:
mkdir -p $(DESTDIR)/etc/kojid
install -p -m 644 kojid.conf $(DESTDIR)/etc/kojid/kojid.conf
for d in $(SUBDIRS); do make DESTDIR=`cd $(DESTDIR); pwd` \
-C $$d install; [ $$? = 0 ] || exit 1; done

View file

@ -26,6 +26,7 @@ except ImportError:
pass
import base64
import koji
import koji.plugin
import koji.util
import commands
import errno
@ -57,6 +58,10 @@ from fnmatch import fnmatch
from optparse import OptionParser
from xmlrpclib import Fault
# our private modules
sys.path.insert(0, '/usr/share/koji-builder/lib')
import tasks
class ServerExit(Exception):
"""Raised to shutdown the server"""
pass
@ -67,6 +72,12 @@ def main():
logger = logging.getLogger("koji.build")
logger.info('Starting up')
tm = TaskManager()
if options.plugin:
#load plugins
pt = koji.plugin.PluginTracker(path=options.pluginpath.split(':'))
for name in options.plugin:
logger.info('Loading plugin: %s' % name)
tm.scanPlugin(pt.load(name))
def shutdown(*args):
raise SystemExit
signal.signal(signal.SIGTERM,shutdown)
@ -296,7 +307,7 @@ class BuildRoot(object):
self.name = "%(tag_name)s-%(id)s-%(repoid)s" % vars(self)
self.config = session.getBuildConfig(self.tag_id)
def _new(self, tag, arch, task_id, distribution=None):
def _new(self, tag, arch, task_id, distribution=None, repo_id=None):
"""Create a brand new repo"""
self.task_id = task_id
self.distribution = distribution
@ -309,12 +320,26 @@ class BuildRoot(object):
raise koji.BuildrootError("Could not get config info for tag: %s" % tag)
self.tag_id = self.config['id']
self.tag_name = self.config['name']
while 1:
repo_info = session.getRepo(self.tag_id)
if repo_info and repo_info['state'] == koji.REPO_READY:
break
self.logger.debug("Waiting for repo to be created %s" % self.tag_name)
time.sleep(5)
if repo_id is None:
#use current active repo for tag, waiting if necessary
while 1:
repo_info = session.getRepo(self.tag_id)
if repo_info and repo_info['state'] == koji.REPO_READY:
break
self.logger.debug("Waiting for repo to be created %s" % self.tag_name)
time.sleep(30)
else:
repo_info = session.repoInfo(repo_id)
if self.config['id'] != repo_info['tag_id']:
raise koji.BuildrootError, "tag/repo mismatch: %s vs %s" \
% (self.config['name'], repo_info['tag_name'])
repo_state = koji.REPO_STATES[repo_info['state']]
if repo_state == 'EXPIRED':
# This should be ok. Expired repos are still intact, just not
# up-to-date (which may be the point in some cases).
self.logger.info("Requested repo (%i) is no longer current" % repo_id)
elif repo_state != 'READY':
raise koji.BuildrootError, "Requested repo (%i) is %s" % (repo_id, repo_state)
self.repoid = repo_info['id']
self.br_arch = koji.canonArch(arch)
self.logger.debug("New buildroot: %(tag_name)s/%(br_arch)s/%(repoid)s" % vars(self))
@ -690,6 +715,15 @@ class TaskManager(object):
handlers[method] = v
self.handlers = handlers
def scanPlugin(self, plugin):
    """Find task handlers in a plugin

    Scans the plugin module's namespace for task handler classes
    (subclasses of tasks.BaseTaskHandler) and registers each of the
    methods they declare in self.handlers, overriding any existing
    entry for the same method name.
    """
    # XXX - this is a very simple implementation for now.
    # it should be improved
    for v in vars(plugin).itervalues():
        # the type() comparison guards issubclass() against non-class
        # values in the module namespace (issubclass raises TypeError
        # for non-classes)
        if type(v) == type(tasks.BaseTaskHandler) and issubclass(v,tasks.BaseTaskHandler):
            for method in v.Methods:
                self.handlers[method] = v
def shutdown(self):
"""Attempt to shut down cleanly"""
for task_id in self.pids.keys():
@ -1216,7 +1250,11 @@ class TaskManager(object):
handlerClass = self.handlers['default']
else:
raise koji.GenericError, "No handler found for method '%s'" % method
handler = handlerClass(id,method,params)
if issubclass(handlerClass, tasks.BaseTaskHandler):
#new style handler needs session and options passed
handler = handlerClass(id,method,params,session,options)
else:
handler = handlerClass(id,method,params)
# set weight
session.host.setTaskWeight(task_id,handler.weight())
if handler.Foreground:
@ -1246,6 +1284,8 @@ class TaskManager(object):
os.setpgrp()
#use the subsession
session = newhub
if hasattr(handler, 'session'):
handler.session = session
#set a do-nothing handler for sigusr2
signal.signal(signal.SIGUSR2,lambda *args: None)
self.runTask(handler)
@ -1468,6 +1508,12 @@ class BaseTaskHandler(object):
fn = "%s/%s" % (options.topdir, relpath)
return fn
def subtask(self, method, arglist, **opts):
    """Spawn a subtask of this task on the hub and return its task id.

    NOTE(review): uses the module-global `session` (kojid daemon
    global), not an instance attribute.
    """
    return session.host.subtask(method, arglist, self.id, **opts)
def subtask2(self, __taskopts, __method, *args, **kwargs):
    """Spawn a subtask, passing arbitrary args/kwargs through to the method.

    __taskopts is a dictionary of task options (label, parent, arch, ...);
    the double-underscore names avoid colliding with kwargs meant for the
    subtask method itself.  Uses the module-global `session`.
    """
    return session.host.subtask2(self.id, __taskopts, __method, *args, **kwargs)
class FakeTask(BaseTaskHandler):
Methods = ['someMethod']
Foreground = True
@ -1588,6 +1634,9 @@ class ChainBuildTask(BaseTaskHandler):
def handler(self, srcs, target, opts=None):
if opts.get('scratch'):
raise koji.BuildError, "--scratch is not allowed with chain-builds"
target_info = session.getBuildTarget(target)
if not target_info:
raise koji.GenericError, 'unknown build target: %s' % target
for build_level in srcs:
subtasks = []
build_tasks = []
@ -1604,7 +1653,7 @@ class ChainBuildTask(BaseTaskHandler):
nvrs.append(src)
if nvrs:
task_id = session.host.subtask(method='waitrepo',
arglist=[target, None, nvrs],
arglist=[target['build_tag'], None, nvrs],
label=','.join(nvrs),
parent=self.id)
subtasks.append(task_id)
@ -1620,7 +1669,7 @@ class ChainBuildTask(BaseTaskHandler):
nvrs.append(builds[0]['nvr'])
if nvrs:
task_id = session.host.subtask(method='waitrepo',
arglist=[target, None, nvrs],
arglist=[target['build_tag'], None, nvrs],
label=','.join(nvrs),
parent=self.id)
self.wait(task_id, all=True, failany=True)
@ -1638,16 +1687,60 @@ class BuildTask(BaseTaskHandler):
self.opts = opts
if opts.get('arch_override') and not opts.get('scratch'):
raise koji.BuildError, "arch_override is only allowed for scratch builds"
if opts.get('repo_id') is not None:
repo_info = session.repoInfo(opts['repo_id'])
if not repo_info:
raise koji.BuildError, 'No such repo: %s' % opts['repo_id']
repo_state = koji.REPO_STATES[repo_info['state']]
if repo_state not in ('READY', 'EXPIRED'):
raise koji.BuildError, 'Bad repo: %s (%s)' % (repo_info['id'], repo_state)
else:
repo_info = None
#we'll wait for a repo later (self.getRepo)
task_info = session.getTaskInfo(self.id)
# only allow admins to perform non-scratch builds from srpm
if not SCM.is_scm_url(src) and not opts.get('scratch') \
and not 'admin' in session.getUserPerms(task_info['owner']):
raise koji.BuildError, "only admins may peform non-scratch builds from srpm"
target_info = session.getBuildTarget(target)
if not target_info:
raise koji.GenericError, 'unknown build target: %s' % target
dest_tag = target_info['dest_tag']
build_tag = target_info['build_tag']
target_info = None
if target:
target_info = session.getBuildTarget(target)
if target_info:
dest_tag = target_info['dest_tag']
build_tag = target_info['build_tag']
if repo_info is not None:
#make sure specified repo matches target
if repo_info['tag_id'] != target_info['build_tag']:
raise koji.BuildError, 'Repo/Target mismatch: %s/%s' \
% (repo_info['tag_name'], target_info['build_tag_name'])
else:
# if repo_id is specified, we can allow the 'target' arg to simply specify
# the destination tag (since the repo specifies the build tag).
if repo_info is None:
raise koji.GenericError, 'unknown build target: %s' % target
build_tag = repo_info['tag_id']
if target is None:
#ok, call it skip-tag for the buildroot tag
self.opts['skip_tag'] = True
dest_tag = build_tag
else:
taginfo = session.getTag(target)
if not taginfo:
raise koji.GenericError, 'neither tag nor target: %s' % target
dest_tag = taginfo['id']
#policy checks...
policy_data = {
'user_id' : task_info['owner'],
'source' : src,
'task_id' : self.id,
'build_tag' : build_tag, #id
}
if target_info:
policy_data['target'] = target_info['id'],
if not self.opts.get('skip_tag'):
policy_data['tag'] = dest_tag #id
if not SCM.is_scm_url(src) and not opts.get('scratch'):
#let hub policy decide
session.host.assertPolicy('build_from_srpm', policy_data)
if opts.get('repo_id') is not None:
# use of this option is governed by policy
session.host.assertPolicy('build_from_repo_id', policy_data)
srpm = self.getSRPM(src)
h = self.readSRPMHeader(srpm)
data = koji.get_header_fields(h,['name','version','release','epoch'])
@ -1675,8 +1768,10 @@ class BuildTask(BaseTaskHandler):
build_id = session.host.initBuild(data)
session.host.importChangelog(build_id, srpm)
#(initBuild raises an exception if there is a conflict)
if not repo_info:
repo_info = self.getRepo(build_tag) #(subtask)
try:
srpm,rpms,brmap,logs = self.runBuilds(srpm,build_tag,archlist)
srpm,rpms,brmap,logs = self.runBuilds(srpm,build_tag,archlist,repo_info['id'])
if opts.get('scratch'):
#scratch builds do not get imported
session.host.moveBuildToScratch(self.id,srpm,rpms,logs=logs)
@ -1775,13 +1870,24 @@ class BuildTask(BaseTaskHandler):
raise koji.BuildError, "No matching arches were found"
return archdict.keys()
def runBuilds(self, srpm, build_tag, archlist):
def getRepo(self, tag):
    """Get repo to use for builds

    Returns repo info (a dict, as returned by getRepo/waitrepo) for the
    given build tag.  If the tag has no current repo, a 'waitrepo'
    subtask is spawned and this task blocks until it produces one.
    """
    repo_info = session.getRepo(tag)
    if not repo_info:
        #wait for it
        task_id = session.host.subtask(method='waitrepo',
                                       arglist=[tag, None, None],
                                       parent=self.id)
        # wait() returns {task_id: result}; the waitrepo result is the repo info
        repo_info = self.wait(task_id)[task_id]
    return repo_info
def runBuilds(self, srpm, build_tag, archlist, repo_id):
self.logger.debug("Spawning jobs for arches: %r" % (archlist))
subtasks = {}
keep_srpm = True
for arch in archlist:
subtasks[arch] = session.host.subtask(method='buildArch',
arglist=[srpm,build_tag,arch,keep_srpm],
arglist=[srpm, build_tag, arch, keep_srpm, {'repo_id': repo_id}],
label=arch,
parent=self.id,
arch=koji.canonArch(arch))
@ -1851,11 +1957,13 @@ class BuildArchTask(BaseTaskHandler):
if not header[rpm.RPMTAG_DISTRIBUTION]:
raise koji.BuildError, "The build system failed to set the distribution tag"
def handler(self, pkg, root, arch, keep_srpm, opts={}):
def handler(self, pkg, root, arch, keep_srpm, opts=None):
"""Build a package in a buildroot for one arch"""
global options
ret = {}
if opts is None:
opts = {}
#noarch is funny
if arch == "noarch":
@ -1901,7 +2009,12 @@ class BuildArchTask(BaseTaskHandler):
# if not h[rpm.RPMTAG_DISTRIBUTION]:
# raise koji.BuildError, "the distribution tag is not set in the original srpm"
broot = BuildRoot(root, br_arch, self.id, distribution=h[rpm.RPMTAG_DISTRIBUTION])
rootopts = {
'distribution' : h[rpm.RPMTAG_DISTRIBUTION],
}
if opts.get('repo_id') is not None:
rootopts['repo_id'] = opts['repo_id']
broot = BuildRoot(root, br_arch, self.id, **rootopts)
self.logger.debug("Initializing buildroot")
broot.init()
@ -2696,8 +2809,10 @@ Build Info: %(weburl)s/buildinfo?buildID=%(build_id)i\r
build_nvr = koji.buildLabel(build)
build_id = build['id']
build_owner = build['owner_name']
# target comes from session.py:_get_build_target()
dest_tag = target['dest_tag_name']
# target comes from session.py:_get_build_target()
dest_tag = None
if target is not None:
dest_tag = target['dest_tag_name']
status = koji.BUILD_STATES[build['state']].lower()
creation_time = koji.formatTimeLong(build['creation_time'])
completion_time = koji.formatTimeLong(build['completion_time'])
@ -2800,10 +2915,13 @@ class NewRepoTask(BaseTaskHandler):
Methods = ['newRepo']
_taskWeight = 0.1
def handler(self, tag):
def handler(self, tag, event=None):
self.uploadpath = self.getUploadDir()
tinfo = session.getTag(tag, strict=True)
repo_id, event_id = session.host.repoInit(tinfo['id'])
kwargs = {}
if event is not None:
kwargs['event'] = event
repo_id, event_id = session.host.repoInit(tinfo['id'], **kwargs)
path = koji.pathinfo.repo(repo_id, tinfo['name'])
if not os.path.isdir(path):
raise koji.GenericError, "Repo directory missing: %s" % path
@ -2826,7 +2944,10 @@ class NewRepoTask(BaseTaskHandler):
for (arch, task_id) in subtasks.iteritems():
data[arch] = results[task_id]
self.logger.debug("DEBUG: %r : %r " % (arch,data[arch],))
session.host.repoDone(repo_id, data)
kwargs = {}
if event is not None:
kwargs['expire'] = True
session.host.repoDone(repo_id, data, **kwargs)
return repo_id, event_id
class CreaterepoTask(BaseTaskHandler):
@ -2865,7 +2986,8 @@ class CreaterepoTask(BaseTaskHandler):
koji.ensuredir(datadir)
os.system('cp -a %s/* %s' % (olddatadir, datadir))
cmd.append('--update')
cmd.append('--skip-stat')
if options.createrepo_skip_stat:
cmd.append('--skip-stat')
# note: we can't easily use a cachedir because we do not have write
# permission. The good news is that with --update we won't need to
# be scanning many rpms.
@ -2897,44 +3019,66 @@ class WaitrepoTask(BaseTaskHandler):
# time in minutes before we fail this task
TIMEOUT = 120
def handler(self, build_target_info, newer_then=None, nvrs=None):
def handler(self, tag, newer_than=None, nvrs=None):
"""Wait for a repo for the tag, subject to given conditions
newer_than: create_event timestamp should be newer than this
nvr: repo should contain this nvr (which may not exist at first)
Only one of the options may be specified. If neither is, then
the call will wait for the first ready repo.
Returns the repo info (from getRepo) of the chosen repo
"""
start = time.time()
build_target = session.getBuildTarget(build_target_info)
if not build_target:
raise koji.GenericError, "invalid build target: %s" % build_target_info
taginfo = session.getTag(tag)
targets = session.getBuildTargets(buildTagID=taginfo['id'])
if not targets:
raise koji.GenericError("No build target for tag: %s" % taginfo['name'])
if isinstance(newer_than, basestring) and newer_than.lower() == "now":
newer_than = start
if not isinstance(newer_than, (int, long, float)):
raise koji.GenericError, "Invalid value for newer_than"
if newer_than and nvrs:
raise koji.GenericError, "only one of (newer_than, nvrs) may be specified"
if not nvrs:
nvrs = []
builds = [koji.parse_NVR(nvr) for nvr in nvrs]
if not newer_then and not builds:
newer_then = time.time()
last_repo = None
repo = session.getRepo(build_target['build_tag'])
while True:
if builds and repo and repo != last_repo:
if koji.util.checkForBuilds(session, build_target['build_tag'], builds, repo['create_event']):
return "Successfully waited %s for %s to appear in the %s repo" % \
(koji.util.duration(start), koji.util.printList(nvrs), build_target['build_tag_name'])
elif newer_then:
if repo['create_ts'] > newer_then:
return "Successfully waited %s for a new %s repo" % \
(koji.util.duration(start), build_target['build_tag_name'])
repo = session.getRepo(taginfo['id'])
if repo and repo != last_repo:
if builds:
if koji.util.checkForBuilds(session, taginfo['id'], builds, repo['create_event']):
self.logger.debug("Successfully waited %s for %s to appear in the %s repo" % \
(koji.util.duration(start), koji.util.printList(nvrs), taginfo['name']))
return repo
elif newer_than:
if repo['create_ts'] > newer_than:
self.logger.debug("Successfully waited %s for a new %s repo" % \
(koji.util.duration(start), taginfo['name']))
return repo
else:
#no check requested -- return first ready repo
return repo
if (time.time() - start) > (self.TIMEOUT * 60.0):
if builds:
raise koji.GenericError, "Unsuccessfully waited %s for %s to appear in the %s repo" % \
(koji.util.duration(start), koji.util.printList(nvrs), build_target['build_tag_name'])
(koji.util.duration(start), koji.util.printList(nvrs), taginfo['name'])
else:
raise koji.GenericError, "Unsuccessfully waited %s for a new %s repo" % \
(koji.util.duration(start), build_target['build_tag_name'])
(koji.util.duration(start), taginfo['name'])
time.sleep(self.PAUSE)
last_repo = repo
repo = session.getRepo(build_target['build_tag'])
class SCM(object):
"SCM abstraction class"
@ -3236,6 +3380,8 @@ def get_options():
parser.add_option("--topdir", help="Specify topdir")
parser.add_option("--topurl", help="Specify topurl")
parser.add_option("--workdir", help="Specify workdir")
parser.add_option("--pluginpath", help="Specify plugin search path")
parser.add_option("--plugin", action="append", help="Load specified plugin")
parser.add_option("--mockdir", help="Specify mockdir")
parser.add_option("--mockuser", help="User to run mock as")
parser.add_option("-s", "--server", help="url of XMLRPC server")
@ -3260,6 +3406,7 @@ def get_options():
'topdir': '/mnt/koji',
'topurl': None,
'workdir': '/tmp/koji',
'pluginpath': '/usr/lib/koji-builder-plugins',
'mockdir': '/var/lib/mock',
'mockuser': 'kojibuilder',
'packager': 'Koji',
@ -3277,6 +3424,7 @@ def get_options():
'max_retries': 120,
'offline_retry': True,
'offline_retry_interval': 120,
'createrepo_skip_stat': True,
'pkgurl': None,
'allowed_scms': '',
'cert': '/etc/kojid/client.crt',
@ -3290,6 +3438,10 @@ def get_options():
defaults[name] = int(value)
except ValueError:
quit("value for %s option must be a valid integer" % name)
elif name in ['offline_retry', 'createrepo_skip_stat']:
defaults[name] = config.getboolean('kojid', name)
elif name in ['plugin', 'plugins']:
defaults['plugin'] = value.split()
elif name in defaults.keys():
defaults[name] = value
else:

20
builder/lib/Makefile Normal file
View file

@ -0,0 +1,20 @@
# Install kojid's private python modules under /usr/share/koji-builder/lib
# (kojid adds this directory to sys.path at startup).
PYTHON=python
SHAREDIR = $(DESTDIR)/usr/share/koji-builder
MODDIR = $(SHAREDIR)/lib
PYFILES = $(wildcard *.py)
# NOTE(review): PYVER is computed but never referenced below, and $(PYDIR)
# in the compileall call is never defined here -- confirm intent.
PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')

_default:
	@echo "nothing to make. try make install"

clean:
	rm -f *.o *.so *.pyc *~

install:
	mkdir -p $(MODDIR)
	for p in $(PYFILES) ; do \
		install -p -m 644 $$p $(MODDIR)/$$p; \
	done
	$(PYTHON) -c "import compileall; compileall.compile_dir('$(MODDIR)', 1, '$(PYDIR)', 1)"

224
builder/lib/tasks.py Normal file
View file

@ -0,0 +1,224 @@
# Python module
# tasks handlers for the koji build daemon
# Copyright (c) 2008 Red Hat
#
# Koji is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors:
# Mike McLean <mikem@redhat.com>
import logging
import os
import shutil
import signal
import urllib2
from xmlrpclib import Fault

import koji
class BaseTaskHandler(object):
    """The base class for task handlers

    Each task handler is a class, a new instance of which is created
    to handle each task.

    Subclasses set Methods to the list of task method names they
    implement and override handler().  The task manager instantiates
    the handler with the task data, a hub session, and the daemon
    options, then calls run().
    """

    # list of methods the class can handle
    Methods = []

    # Options:
    # Foreground handlers run in the daemon's main process (see setManager)
    Foreground = False

    def __init__(self, id, method, params, session, options, workdir=None):
        """Create a handler for one task.

        id: the task id
        method: the task method name (must appear in Methods)
        params: raw xmlrpc call parameters (positional args, possibly
            with koji-encoded named args appended)
        session: hub session used for all server calls
        options: daemon options object (workdir, topdir, topurl, ...)
        workdir: override the default per-task work directory

        Raises koji.GenericError if method is not supported.
        """
        self.id = id #task id
        if method not in self.Methods:
            raise koji.GenericError, 'method "%s" is not supported' % method
        self.method = method
        # handle named parameters
        self.params,self.opts = koji.decode_args(*params)
        self.session = session
        self.options = options
        if workdir is None:
            workdir = "%s/%s" % (options.workdir, koji.pathinfo.taskrelpath(id))
        self.workdir = workdir
        self.logger = logging.getLogger("koji.build.BaseTaskHandler")

    def setManager(self,manager):
        """Set the manager attribute

        This is only used for foreground tasks to give them access
        to their task manager.
        """
        if not self.Foreground:
            return
        self.manager = manager

    def handler(self):
        """(abstract) the handler for the task."""
        raise NotImplementedError

    def run(self):
        """Execute the task

        Creates a fresh workdir, dispatches to handler() with the
        decoded positional and named parameters, and removes the
        workdir afterwards, even if the handler raises.
        """
        self.createWorkdir()
        try:
            return self.handler(*self.params,**self.opts)
        finally:
            self.removeWorkdir()

    # default weight; subclasses may set _taskWeight or override weight()
    _taskWeight = 1.0

    def weight(self):
        """Return the weight of the task.

        This is run by the taskmanager before the task is run to determine
        the weight of the task. The weight is an abstract measure of the
        total load the task places on the system while running.

        A task may set _taskWeight for a constant weight different from 1, or
        override this function for more complicated situations.

        Note that task weight is partially ignored while the task is sleeping.
        """
        return getattr(self,'_taskWeight',1.0)

    def createWorkdir(self):
        """Create a clean work directory for the task."""
        if self.workdir is None:
            return
        # remove any leftovers from a previous run first
        self.removeWorkdir()
        os.makedirs(self.workdir)

    def removeWorkdir(self):
        """Remove the task's work directory, if any."""
        if self.workdir is None:
            return
        # safe_rmtree is defined at the bottom of this module
        safe_rmtree(self.workdir, unmount=False, strict=True)
        #os.spawnvp(os.P_WAIT, 'rm', ['rm', '-rf', self.workdir])

    def wait(self, subtasks=None, all=False, failany=False):
        """Wait on subtasks

        subtasks is a list of integers (or an integer). If more than one subtask
        is specified, then the default behavior is to return when any of those
        tasks complete. However, if all is set to True, then it waits for all of
        them to complete.  If all and failany are both set to True, then each
        finished task will be checked for failure, and a failure will cause all
        of the unfinished tasks to be cancelled.

        special values:
            subtasks = None     specify all subtasks

        Returns a dict mapping subtask id -> result (from taskWaitResults).

        Implementation notes:
            The build daemon forks all tasks as separate processes. This function
            uses signal.pause to sleep. The main process watches subtasks in
            the database and will send the subprocess corresponding to the
            subtask a SIGUSR2 to wake it up when subtasks complete.
        """
        if isinstance(subtasks,int):
            # allow single integer w/o enclosing list
            subtasks = [subtasks]
        self.session.host.taskSetWait(self.id,subtasks)
        self.logger.debug("Waiting on %r" % subtasks)
        while True:
            finished, unfinished = self.session.host.taskWait(self.id)
            if len(unfinished) == 0:
                #all done
                break
            elif len(finished) > 0:
                if all:
                    if failany:
                        failed = False
                        for task in finished:
                            try:
                                result = self.session.getTaskResult(task)
                            except (koji.GenericError, Fault), task_error:
                                self.logger.info("task %s failed or was canceled" % task)
                                failed = True
                                break
                        if failed:
                            self.logger.info("at least one task failed or was canceled, cancelling unfinished tasks")
                            self.session.cancelTaskChildren(self.id)
                            # reraise the original error now, rather than waiting for
                            # an error in taskWaitResults()
                            raise task_error
                else:
                    # at least one done
                    break
            # signal handler set by TaskManager.forkTask
            self.logger.debug("Pausing...")
            signal.pause()
            # main process will wake us up with SIGUSR2
            self.logger.debug("...waking up")
        self.logger.debug("Finished waiting")
        return dict(self.session.host.taskWaitResults(self.id,subtasks))

    def getUploadDir(self):
        """Return the hub-relative path of this task's output directory."""
        return koji.pathinfo.taskrelpath(self.id)

    def uploadFile(self, filename, remoteName=None):
        """Upload the file with the given name to the task output directory
        on the hub."""
        # Only upload files with content
        if os.path.isfile(filename) and os.stat(filename).st_size > 0:
            self.session.uploadWrapper(filename, self.getUploadDir(), remoteName)

    def localPath(self, relpath):
        """Return a local path to a remote file.

        If the file is on an nfs mount, use that, otherwise download a copy"""
        if self.options.topurl:
            self.logger.debug("Downloading %s", relpath)
            url = "%s/%s" % (self.options.topurl, relpath)
            fsrc = urllib2.urlopen(url)
            fn = "%s/local/%s" % (self.workdir, relpath)
            os.makedirs(os.path.dirname(fn))
            fdst = file(fn, 'w')
            # stream the download to disk
            shutil.copyfileobj(fsrc, fdst)
            fsrc.close()
            fdst.close()
        else:
            # topdir is assumed to be an nfs (or local) mount of the hub's tree
            fn = "%s/%s" % (self.options.topdir, relpath)
        return fn
#XXX - not the right place for this
#XXX - not as safe as we want
def safe_rmtree(path, unmount=False, strict=True):
logger = logging.getLogger("koji.build")
#safe remove: with -xdev the find cmd will not cross filesystems
# (though it will cross bind mounts from the same filesystem)
if not os.path.exists(path):
logger.debug("No such path: %s" % path)
return
if unmount:
umount_all(path)
#first rm -f non-directories
logger.debug('Scrubbing files in %s' % path)
rv = os.system("find '%s' -xdev \\! -type d -print0 |xargs -0 rm -f" % path)
msg = 'file removal failed (code %r) for %s' % (rv,path)
if rv != 0:
logger.warn(msg)
if strict:
raise koji.GenericError, msg
else:
return rv
#them rmdir directories
#with -depth, we start at the bottom and work up
logger.debug('Scrubbing directories in %s' % path)
rv = os.system("find '%s' -xdev -depth -type d -print0 |xargs -0 rmdir" % path)
msg = 'dir removal failed (code %r) for %s' % (rv,path)
if rv != 0:
logger.warn(msg)
if strict:
raise koji.GenericError, msg
return rv

View file

@ -652,6 +652,7 @@ def handle_build(options, session, args):
parser.add_option("--nowait", action="store_true",
help=_("Don't wait on build"))
parser.add_option("--arch-override", help=_("Override build arches"))
parser.add_option("--repo-id", type="int", help=_("Use a specific repo"))
parser.add_option("--noprogress", action="store_true",
help=_("Do not display progress of the upload"))
parser.add_option("--background", action="store_true",
@ -664,19 +665,23 @@ def handle_build(options, session, args):
parser.error(_("--arch_override is only allowed for --scratch builds"))
activate_session(session)
target = args[0]
build_target = session.getBuildTarget(target)
if not build_target:
parser.error(_("Unknown build target: %s" % target))
dest_tag = session.getTag(build_target['dest_tag'])
if not dest_tag:
parser.error(_("Unknown destination tag: %s" % build_target['dest_tag_name']))
if dest_tag['locked'] and not build_opts.scratch:
parser.error(_("Destination tag %s is locked" % dest_tag['name']))
if target.lower() == "none" and build_opts.repo_id:
target = None
build_opts.skip_tag = True
else:
build_target = session.getBuildTarget(target)
if not build_target:
parser.error(_("Unknown build target: %s" % target))
dest_tag = session.getTag(build_target['dest_tag'])
if not dest_tag:
parser.error(_("Unknown destination tag: %s" % build_target['dest_tag_name']))
if dest_tag['locked'] and not build_opts.scratch:
parser.error(_("Destination tag %s is locked" % dest_tag['name']))
source = args[1]
opts = {}
if build_opts.arch_override:
opts['arch_override'] = ' '.join(build_opts.arch_override.replace(',',' ').split())
for key in ('skip_tag','scratch'):
for key in ('skip_tag', 'scratch', 'repo_id'):
val = getattr(build_opts, key)
if val is not None:
opts[key] = val
@ -687,7 +692,7 @@ def handle_build(options, session, args):
# try to check that source is an SRPM
if '://' not in source:
# only allow admins to perform non-scratch builds from srpm
if not opts['scratch'] and not session.hasPerm('admin'):
if not opts.get('scratch') and not session.hasPerm('admin'):
parser.error(_("builds from srpm must use --scratch"))
assert False
#treat source as an srpm and upload it
@ -2149,7 +2154,7 @@ def anon_handle_list_buildroot(options, session, args):
opts['componentBuildrootID'] = buildrootID
data = session.listRPMs(**opts)
fmt = "%(nvr)s"
fmt = "%(nvr)s.%(arch)s"
output = [ fmt % x for x in data]
output.sort()
for line in output:
@ -4230,6 +4235,31 @@ def anon_handle_wait_repo(options, session, args):
print "Unsuccessfully waited %s for a new %s repo" % (koji.util.duration(start), tag)
return 1
_search_types = ('package', 'build', 'tag', 'target', 'user', 'host', 'rpm', 'file')
def anon_handle_search(options, session, args):
"Search koji"
usage = _("usage: %prog sync [options] search_type pattern")
usage += _('\nAvailable search types: %s') % ', '.join(_search_types)
usage += _("\n(Specify the --help global option for a list of other help options)")
parser = optparse.OptionParser(usage=usage)
parser.add_option("-r", "--regex", action="store_true", help=_("treat pattern as regex"))
parser.add_option("--exact", action="store_true", help=_("exact matches only"))
main_options = options
(options, args) = parser.parse_args(args)
type = args[0]
if type not in _search_types:
parser.error(_("Unknown search type: %s") % type)
pattern = args[1]
matchType = 'glob'
if options.regex:
matchType = 'regexp'
elif options.exact:
matchType = 'exact'
data = session.search(pattern, type, matchType)
for row in data:
print row['name']
def handle_help(options, session, args):
"List available commands"
usage = _("usage: %prog help [options]")

View file

@ -25,6 +25,9 @@ install:
mkdir -p $(DESTDIR)/etc/httpd/conf.d
install -p -m 644 httpd.conf $(DESTDIR)/etc/httpd/conf.d/kojihub.conf
mkdir -p $(DESTDIR)/etc/koji-hub
install -p -m 644 hub.conf $(DESTDIR)/etc/koji-hub/hub.conf
mkdir -p $(DESTDIR)/$(SERVERDIR)
for p in $(PYFILES) ; do \
install -p -m 644 $$p $(DESTDIR)/$(SERVERDIR)/$$p; \

62
hub/hub.conf Normal file
View file

@ -0,0 +1,62 @@
[hub]
## Basic options ##
DBName = koji
DBUser = koji
DBHost = db.example.com
#DBPass = example_password
KojiDir = /mnt/koji
## Kerberos authentication options ##
# AuthPrincipal = kojihub@EXAMPLE.COM
# AuthKeytab = /etc/koji.keytab
# ProxyPrincipals = kojihub@EXAMPLE.COM
## format string for host principals (%s = hostname)
# HostPrincipalFormat = compile/%s@EXAMPLE.COM
## end Kerberos auth configuration
## SSL client certificate auth configuration ##
#note: ssl auth may also require editing the httpd config (conf.d/kojihub.conf)
## the client username is the common name of the subject of their client certificate
# DNUsernameComponent = CN
## separate multiple DNs with |
# ProxyDNs = /C=US/ST=Massachusetts/O=Example Org/OU=Example User/CN=example/emailAddress=example@example.com
## end SSL client certificate auth configuration
## Other options ##
LoginCreatesUser = On
KojiWebURL = http://kojiweb.example.com/koji
# The domain name that will be appended to Koji usernames
# when creating email notifications
#EmailDomain = example.com
## Whether or not to report exception details for anticipated errors (i.e.
## koji's own exceptions -- subclasses of koji.GenericError).
# KojiDebug = On
## Determines how much detail about exceptions is reported to the client (via faults)
## Meaningful values:
## normal - a basic traceback (format_exception)
## extended - an extended traceback (format_exc_plus)
## anything else - no traceback, just the error message
## The extended traceback is intended for debugging only and should NOT be
## used in production, since it may contain sensitive information.
# KojiTraceback = normal
## These options are intended for planned outages
# ServerOffline = False
# OfflineMessage = temporary outage
# LockOut = False
## If ServerOffline is True, the server will always report a ServerOffline fault (with
## OfflineMessage as the fault string).
## If LockOut is True, the server will report a ServerOffline fault for all non-admin
## requests.

View file

@ -25,11 +25,13 @@ import base64
import koji
import koji.auth
import koji.db
import koji.policy
import datetime
import errno
import logging
import logging.handlers
import fcntl
import fnmatch
import md5
import os
import pgdb
@ -323,6 +325,9 @@ class Task(object):
id = self.id
query = """SELECT request FROM task WHERE id = %(id)i"""
xml_request = _singleValue(query, locals())
if xml_request.find('<?xml', 0, 10) == -1:
#handle older base64 encoded data
xml_request = base64.decodestring(xml_request)
params, method = xmlrpclib.loads(xml_request)
return params
@ -343,6 +348,9 @@ class Task(object):
# If you try to return a fault as a value, it gets reduced to
# a mere struct.
# f = Fault(1,"hello"); print dumps((f,))
if xml_result.find('<?xml', 0, 10) == -1:
#handle older base64 encoded data
xml_result = base64.decodestring(xml_result)
result, method = xmlrpclib.loads(xml_result)
return result[0]
@ -365,6 +373,9 @@ class Task(object):
results = _multiRow(query, vars(self), [f[1] for f in fields])
if request:
for task in results:
if task['request'].find('<?xml', 0, 10) == -1:
#handle older base64 encoded data
task['request'] = base64.decodestring(task['request'])
task['request'] = xmlrpclib.loads(task['request'])[0]
return results
@ -1771,7 +1782,7 @@ def get_task_descendents(task, childMap=None, request=False):
get_task_descendents(Task(child['id']), childMap, request)
return childMap
def repo_init(tag, with_src=False, with_debuginfo=False):
def repo_init(tag, with_src=False, with_debuginfo=False, event=None):
"""Create a new repo entry in the INIT state, return full repo data
Returns a dictionary containing
@ -1786,7 +1797,13 @@ def repo_init(tag, with_src=False, with_debuginfo=False):
for arch in tinfo['arches'].split():
repo_arches[koji.canonArch(arch)] = 1
repo_id = _singleValue("SELECT nextval('repo_id_seq')")
event_id = _singleValue("SELECT get_event()")
if event is None:
event_id = _singleValue("SELECT get_event()")
else:
#make sure event is valid
q = "SELECT time FROM events WHERE id=%(event)s"
event_time = _singleValue(q, locals(), strict=True)
event_id = event
q = """INSERT INTO repo(id, create_event, tag_id, state)
VALUES(%(repo_id)s, %(event_id)s, %(tag_id)s, %(state)s)"""
_dml(q,locals())
@ -1952,6 +1969,7 @@ def repo_info(repo_id, strict=False):
('repo.id', 'id'),
('repo.state', 'state'),
('repo.create_event', 'create_event'),
('events.time','creation_time'), #for compatibility with getRepo
('EXTRACT(EPOCH FROM events.time)','create_ts'),
('repo.tag_id', 'tag_id'),
('tag.name', 'tag_name'),
@ -4269,7 +4287,7 @@ def reset_build(build):
WARNING: this function is potentially destructive. use with care.
nulls task_id
sets state to FAILED
sets state to CANCELED
clears data in rpminfo, rpmdeps, rpmfiles
removes rpminfo entries from any buildroot_listings [!]
clears data in archiveinfo, archivefiles, maven_info
@ -4314,7 +4332,7 @@ def reset_build(build):
_dml(delete, binfo)
delete = """DELETE FROM maven_builds WHERE build_id = %(id)i"""
_dml(delete, binfo)
binfo['state'] = koji.BUILD_STATES['FAILED']
binfo['state'] = koji.BUILD_STATES['CANCELED']
update = """UPDATE build SET state=%(state)i, task_id=NULL WHERE id=%(id)i"""
_dml(update, binfo)
#now clear the build dirs
@ -4370,7 +4388,11 @@ def _get_build_target(task_id):
task = Task(task_id)
request = task.getRequest()
# request is (path-to-srpm, build-target-name, map-of-other-options)
return get_build_targets(request[1])[0]
ret = get_build_targets(request[1])
if ret:
return ret[0]
else:
return None
def get_notification_recipients(build, tag_id, state):
"""
@ -4384,33 +4406,46 @@ def get_notification_recipients(build, tag_id, state):
for this tag and the user who submitted the build. The list will not contain
duplicates.
"""
package_id = build['package_id']
query = """SELECT email FROM build_notifications
WHERE ((package_id = %(package_id)i OR package_id IS NULL)
AND (tag_id = %(tag_id)i OR tag_id IS NULL))
"""
if state != koji.BUILD_STATES['COMPLETE']:
query += """AND success_only = FALSE
"""
clauses = []
emails = [result[0] for result in _fetchMulti(query, locals())]
if build:
package_id = build['package_id']
clauses.append('package_id = %(package_id)i OR package_id IS NULL')
else:
clauses.append('package_id IS NULL')
if tag_id:
clauses.append('tag_id = %(tag_id)i OR tag_id IS NULL')
else:
clauses.append('tag_id IS NULL')
if state != koji.BUILD_STATES['COMPLETE']:
clauses.append('success_only = FALSE')
query = QueryProcessor(columns=('email',), tables=['build_notifications'],
clauses=clauses, values=locals(),
opts={'asList':True})
emails = [result[0] for result in query.execute()]
email_domain = context.opts['EmailDomain']
# user who submitted the build
emails.append('%s@%s' % (build['owner_name'], email_domain))
packages = readPackageList(pkgID=package_id, tagID=tag_id, inherit=True)
# owner of the package in this tag, following inheritance
package_info = packages.get(package_id)
if package_info:
emails.append('%s@%s' % (package_info['owner_name'], email_domain))
if tag_id:
packages = readPackageList(pkgID=package_id, tagID=tag_id, inherit=True)
# owner of the package in this tag, following inheritance
package_info = packages.get(package_id)
if package_info:
emails.append('%s@%s' % (package_info['owner_name'], email_domain))
#FIXME - if tag_id is None, we don't have a good way to get the package owner.
# using all package owners from all tags would be way overkill.
emails_uniq = dict(zip(emails, [1] * len(emails))).keys()
return emails_uniq
def tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_success=False, failure_msg=''):
if context.opts.get('DisableNotifications'):
return
if is_successful:
state = koji.BUILD_STATES['COMPLETE']
else:
@ -4435,15 +4470,21 @@ def tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_s
return None
def build_notification(task_id, build_id):
if context.opts.get('DisableNotifications'):
return
build = get_build(build_id)
target = _get_build_target(task_id)
dest_tag = None
if target:
dest_tag = target['dest_tag']
if build['state'] == koji.BUILD_STATES['BUILDING']:
raise koji.GenericError, 'never send notifications for incomplete builds'
web_url = context.opts.get('KojiWebURL', 'http://localhost/koji')
recipients = get_notification_recipients(build, target['dest_tag'], build['state'])
recipients = get_notification_recipients(build, dest_tag, build['state'])
if len(recipients) > 0:
make_task('buildNotification', [recipients, build, target, web_url])
@ -4686,6 +4727,258 @@ SELECT %(col_str)s
return results
#
# Policy Test Handlers
class OperationTest(koji.policy.MatchTest):
    """Checks operation against glob patterns"""
    # MatchTest does all the work; we only declare which policy-data key
    # ('operation') is matched against the test's glob arguments.
    name = 'operation'
    field = 'operation'
class PackageTest(koji.policy.MatchTest):
    """Checks package against glob patterns"""
    name = 'package'
    field = '_package'

    def run(self, data):
        # the package name is not in the policy data directly; look it up
        # from the build record before delegating to the glob matcher
        build_info = get_build(data['build'])
        data[self.field] = build_info['name']
        return super(PackageTest, self).run(data)
class TagTest(koji.policy.MatchTest):
    """Checks tag name against glob patterns"""
    name = 'tag'
    field = '_tagname'

    def run(self, data):
        tag = data['tag']
        if not tag:
            # no tag in the data (e.g. an untag operation) never matches
            return False
        # resolve to the tag name before delegating to the glob matcher
        data[self.field] = get_tag(tag)['name']
        return super(TagTest, self).run(data)
class HasTagTest(koji.policy.BaseSimpleTest):
    """Check to see if build (currently) has a given tag"""
    name = 'hastag'

    def run(self, data):
        # True if any of the build's current tags match any of the
        # glob patterns given as test arguments
        patterns = self.str.split()[1:]
        for tag in context.handlers.call('listTags', build=data['build']):
            for pattern in patterns:
                if fnmatch.fnmatch(tag['name'], pattern):
                    return True
        #otherwise...
        return False
class BuildTagTest(koji.policy.BaseSimpleTest):
    """Check the build tag of the build

    If build_tag is not provided in policy data, it is determined by the
    buildroots of the component rpms
    """
    name = 'buildtag'

    def run(self, data):
        # hoisted: the original only assigned args in the 'build' branch,
        # so the 'build_tag' branch raised NameError
        args = self.str.split()[1:]
        if data.has_key('build_tag'):
            # fix: match against the tag *name*; the original compared the
            # whole tag dict returned by get_tag against the glob patterns
            tagname = get_tag(data['build_tag'])['name']
            for pattern in args:
                if fnmatch.fnmatch(tagname, pattern):
                    return True
            #else
            return False
        elif data.has_key('build'):
            #determine build tag from buildroots
            #in theory, we should find only one unique build tag
            #it is possible that some rpms could have been imported later and hence
            #not have a buildroot.
            #or if the entire build was imported, there will be no buildroots
            rpms = context.handlers.call('listRPMs', buildID=data['build'])
            for rpminfo in rpms:
                if rpminfo['buildroot_id'] is None:
                    continue
                tagname = get_buildroot(rpminfo['buildroot_id'])['tag_name']
                for pattern in args:
                    if fnmatch.fnmatch(tagname, pattern):
                        return True
            #otherwise...
            return False
        else:
            return False
class ImportedTest(koji.policy.BaseSimpleTest):
    """Check if any part of a build was imported

    This is determined by checking the buildroots of the rpms
    True if any rpm lacks a buildroot (strict)"""
    name = 'imported'

    def run(self, data):
        # an rpm with no buildroot record was imported rather than built
        # (this test takes no arguments)
        for rpminfo in context.handlers.call('listRPMs', buildID=data['build']):
            if rpminfo['buildroot_id'] is None:
                return True
        #otherwise...
        return False
class IsBuildOwnerTest(koji.policy.BaseSimpleTest):
    """Check if user owns the build"""
    name = "is_build_owner"

    def run(self, data):
        owner = get_user(get_build(data['build'])['owner_id'])
        user = get_user(data['user_id'])
        if owner['id'] == user['id']:
            # user owns the build directly
            return True
        if owner['usertype'] == koji.USERTYPES['GROUP']:
            # owner is a group; membership counts as ownership
            return owner['id'] in koji.auth.get_user_groups(user['id'])
        #otherwise...
        return False
class UserInGroupTest(koji.policy.BaseSimpleTest):
    """Check if user is in group(s)

    args are treated as patterns and matched against group name
    True if user is in /any/ matching group
    """
    name = "user_in_group"

    def run(self, data):
        user = get_user(data['user_id'])
        patterns = self.str.split()[1:]
        # get_user_groups maps group id -> group name; only names matter here
        for group_name in koji.auth.get_user_groups(user['id']).itervalues():
            for pattern in patterns:
                if fnmatch.fnmatch(group_name, pattern):
                    return True
        #otherwise...
        return False
class HasPermTest(koji.policy.BaseSimpleTest):
    """Check if user has permission(s)

    args are treated as patterns and matched against permission name
    True if user has /any/ matching permission
    """
    name = "has_perm"

    def run(self, data):
        perms = koji.auth.get_user_perms(get_user(data['user_id'])['id'])
        patterns = self.str.split()[1:]
        for perm in perms:
            for pattern in patterns:
                if fnmatch.fnmatch(perm, pattern):
                    return True
        #otherwise...
        return False
class SourceTest(koji.policy.MatchTest):
    """Match build source

    This is not the cleanest, since we have to crack open the task parameters
    True if build source matches any of the supplied patterns
    """
    name = "source"
    field = '_source'

    def run(self, data):
        if data.has_key('source'):
            # source supplied directly in the policy data
            data[self.field] = data['source']
        elif data.has_key('build'):
            #crack open the build task
            build = get_build(data['build'])
            if build['task_id'] is None:
                #imported, no source to match against
                return False
            #build task signature is (src, target, opts=None)
            request = Task(build['task_id']).getRequest()
            data[self.field] = request[0]
        else:
            return False
        return super(SourceTest, self).run(data)
class PolicyTest(koji.policy.BaseSimpleTest):
"""Test named policy
The named policy must exist
Returns True is the policy results in an action of:
yes, true, allow
Otherwise returns False
(Also returns False if there are no matches in the policy)
Watch out for loops
"""
name = 'policy'
def __init__(self, str):
super(PolicyTest, self).__init__(str)
self.depth = 0
# this is used to detect loops. Note that each test in a ruleset is
# a distinct instance of its test class. So this value is particular
# to a given appearance of a policy check in a ruleset.
def run(self, data):
args = self.str.split()[1:]
if self.depth != 0:
#LOOP!
raise koji.GenericError, "encountered policy loop at %s" % self.str
ruleset = context.policy.get(args[0])
if not ruleset:
raise koji.GenericError, "no such policy: %s" % args[0]
self.depth += 1
result = ruleset.apply(data)
self.depth -= 1
if result is None:
return False
else:
return result.lower() in ('yes', 'true', 'allow')
def check_policy(name, data, default='deny', strict=False):
"""Check data against the named policy
This assumes the policy actions consist of:
allow
deny
Returns a pair (access, reason)
access: True if the policy result is allow, false otherwise
reason: reason for the access
If strict is True, will raise ActionNotAllowed if the action is not 'allow'
"""
ruleset = context.policy.get(name)
if not ruleset:
if context.opts.get('MissingPolicyOk'):
# for backwards compatibility, this is the default
result = "allow"
else:
result = "deny"
reason = "missing policy"
else:
result = ruleset.apply(data)
if result is None:
result = default
reason = ruleset.last_rule()
if context.opts.get('KojiDebug', False):
log_error("policy %(name)s gave %(result)s, reason: %(reason)s" % locals())
if result.lower() == 'allow':
return True, reason
if not strict:
return False, reason
err_str = "policy violation"
if context.opts.get('KojiDebug', False):
err_str += " -- %s" % reason
raise koji.ActionNotAllowed, err_str
def assert_policy(name, data, default='deny'):
    """Enforce the named policy

    This assumes the policy actions consist of:
        allow
        deny
    Raises ActionNotAllowed if policy result is not allow
    """
    # simply delegate to check_policy in strict mode; the return
    # value is irrelevant since strict mode raises on denial
    check_policy(name, data, default=default, strict=True)
#
# XMLRPC Methods
#
@ -5096,6 +5389,16 @@ class RootExports(object):
pkg_error = "Package %s not in list for %s" % (build['name'], tag['name'])
elif pkgs[pkg_id]['blocked']:
pkg_error = "Package %s blocked in %s" % (build['name'], tag['name'])
policy_data = {'tag' : tag_id, 'build' : build_id, 'fromtag' : fromtag_id}
policy_data['user_id'] = context.session.user_id
if fromtag is None:
policy_data['operation'] = 'tag'
else:
policy_data['operation'] = 'move'
#don't check policy for admins using force
if not force or not context.session.hasPerm('admin'):
assert_policy('tag', policy_data)
#XXX - we're running this check twice, here and in host.tagBuild (called by the task)
if pkg_error:
if force and context.session.hasPerm('admin'):
pkglist_add(tag_id,pkg_id,force=True,block=False)
@ -5115,13 +5418,21 @@ class RootExports(object):
No return value"""
#we can't staticmethod this one -- we're limiting the options
user_id = context.session.user_id
tag_id = get_tag(tag, strict=True)['id']
build_id = get_build(build, strict=True)['id']
policy_data = {'tag' : None, 'build' : build_id, 'fromtag' : tag_id}
policy_data['user_id'] = context.session.user_id
policy_data['operation'] = 'untag'
try:
#don't check policy for admins using force
if not force or not context.session.hasPerm('admin'):
assert_policy('tag', policy_data)
_untag_build(tag,build,strict=strict,force=force)
tag_notification(True, None, tag, build, user_id)
except Exception, e:
exctype, value = sys.exc_info()[:2]
tag_notification(False, None, tag, build, user_id, False, "%s: %s" % (exctype, value))
raise e
raise
def untagBuildBypass(self, tag, build, strict=True, force=False):
"""Untag a build without any checks or notifications
@ -5172,6 +5483,16 @@ class RootExports(object):
# we want 'ORDER BY tag_listing.create_event ASC' not DESC so reverse
build_list.reverse()
#policy check
policy_data = {'tag' : tag2, 'fromtag' : tag1, 'operation' : 'move'}
policy_data['user_id'] = context.session.user_id
#don't check policy for admins using force
if not force or not context.session.hasPerm('admin'):
for build in build_list:
policy_data['build'] = build
assert_policy('tag', policy_data)
#XXX - we're running this check twice, here and in host.tagBuild (called by the task)
wait_on = []
tasklist = []
for build in build_list:
@ -5786,10 +6107,14 @@ class RootExports(object):
repoInfo = staticmethod(repo_info)
getActiveRepos = staticmethod(get_active_repos)
def newRepo(self, tag):
def newRepo(self, tag, event=None):
"""Create a newRepo task. returns task id"""
context.session.assertPerm('repo')
return make_task('newRepo', [tag], priority=15, channel='createrepo')
if event:
args = koji.encode_args(tag, event=None)
else:
args = [tag]
return make_task('newRepo', args, priority=15, channel='createrepo')
def repoExpire(self, repo_id):
"""mark repo expired"""
@ -5898,7 +6223,7 @@ class RootExports(object):
the given date, in either float (seconds since the epoch)
or str (ISO) format
"""
if opts is None:
if not opts:
opts = {}
tables = ['task']
@ -5947,6 +6272,9 @@ class RootExports(object):
for f in ('request','result'):
if task[f]:
try:
if task[f].find('<?xml', 0, 10) == -1:
#handle older base64 encoded data
task[f] = base64.decodestring(task[f])
data, method = xmlrpclib.loads(task[f])
except xmlrpclib.Fault, fault:
data = fault
@ -6879,6 +7207,9 @@ class HostExports(object):
if row:
#return task id
return row[0]
if opts.has_key('kwargs'):
arglist = koji.encode_args(*arglist, **opts['kwargs'])
del opts['kwargs']
return make_task(method,arglist,**opts)
def subtask2(self,__parent,__taskopts,__method,*args,**opts):
@ -6891,6 +7222,7 @@ class HostExports(object):
Remaining args are passed on to the subtask
"""
#self.subtask will verify the host
args = koji.encode_args(*args,**opts)
return self.subtask(__method,args,__parent,**__taskopts)
@ -7104,6 +7436,16 @@ class HostExports(object):
task = Task(task_id)
task.assertHost(host.id)
user_id = task.getOwner()
policy_data = {'tag' : tag, 'build' : build, 'fromtag' : fromtag}
policy_data['user_id'] = user_id
if fromtag is None:
policy_data['operation'] = 'tag'
else:
policy_data['operation'] = 'move'
#don't check policy for admins using force
perms = koji.auth.get_user_perms(user_id)
if not force or 'admin' not in perms:
assert_policy('tag', policy_data)
if fromtag:
_untag_build(fromtag,build,user_id=user_id,force=force,strict=True)
_tag_build(tag,build,user_id=user_id,force=force)
@ -7111,6 +7453,8 @@ class HostExports(object):
def tagNotification(self, is_successful, tag_id, from_id, build_id, user_id, ignore_success=False, failure_msg=''):
"""Create a tag notification message.
Handles creation of tagNotification tasks for hosts."""
host = Host()
host.verify()
tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_success, failure_msg)
def importChangelog(self, buildID, rpmfile):
@ -7135,6 +7479,16 @@ class HostExports(object):
rpmfile = '%s/%s' % (koji.pathinfo.work(), rpmfile)
import_changelog(build, rpmfile, replace=True)
def checkPolicy(self, name, data, default='deny', strict=False):
host = Host()
host.verify()
return check_policy(name, data, default=default, strict=strict)
def assertPolicy(self, name, data, default='deny'):
host = Host()
host.verify()
check_policy(name, data, default=default, strict=True)
def newBuildRoot(self, repo, arch, task_id=None):
host = Host()
host.verify()
@ -7241,11 +7595,11 @@ class HostExports(object):
return br.updateArchiveList(archives, project)
def repoInit(self, tag, with_src=False):
def repoInit(self, tag, with_src=False, event=None):
"""Initialize a new repo for tag"""
host = Host()
host.verify()
return repo_init(tag, with_src=with_src)
return repo_init(tag, with_src=with_src, event=event)
def repoAddRPM(self, repo_id, path):
"""Add an uploaded rpm to a repo"""
@ -7290,11 +7644,14 @@ class HostExports(object):
else:
os.link(filepath, dst)
def repoDone(self, repo_id, data):
def repoDone(self, repo_id, data, expire=False):
"""Move repo data into place, mark as ready, and expire earlier repos
repo_id: the id of the repo
data: a dictionary of the form { arch: (uploadpath, files), ...}
expire(optional): if set to true, mark the repo expired immediately*
* This is used when a repo from an older event is generated
"""
host = Host()
host.verify()
@ -7316,8 +7673,23 @@ class HostExports(object):
raise koji.GenericError, "uploaded file missing: %s" % src
os.link(src, dst)
os.unlink(src)
if expire:
repo_expire(repo_id)
return
#else:
repo_ready(repo_id)
repo_expire_older(rinfo['tag_id'], rinfo['create_event'])
#make a latest link
latestrepolink = koji.pathinfo.repo('latest', rinfo['tag_name'])
#XXX - this is a slight abuse of pathinfo
try:
if os.path.lexists(latestrepolink):
os.unlink(latestrepolink)
os.symlink(repodir, latestrepolink)
except OSError:
#making this link is nonessential
log_error("Unable to create latest link for repo: %s" % repodir)
def isEnabled(self):
host = Host()

View file

@ -20,9 +20,11 @@
# Authors:
# Mike McLean <mikem@redhat.com>
from ConfigParser import ConfigParser
import sys
import time
import traceback
import types
import pprint
from xmlrpclib import loads,dumps,Fault
from mod_python import apache
@ -30,38 +32,38 @@ from mod_python import apache
import koji
import koji.auth
import koji.db
import koji.plugin
import koji.policy
import kojihub
from kojihub import RootExports
from kojihub import HostExports
from koji.context import context
def _opt_bool(opts, name):
"""Convert a string option into a boolean
True or False value. The following values
will be considered True (case-insensitive):
yes, on, true, 1
Anything else will be considered False."""
val = opts.get(name, 'no')
if val is None:
val = ''
if val.lower() in ('yes', 'on', 'true', '1'):
return True
else:
return False
"""Convert option into a boolean if necessary
class ModXMLRPCRequestHandler(object):
"""Simple XML-RPC handler for mod_python environment"""
For strings, the following values
will be considered True (case-insensitive):
yes, on, true, 1
Any other strings will be considered False."""
val = opts.get(name, False)
if isinstance(val, bool):
return val
elif isinstance(val, basestring):
if val.lower() in ('yes', 'on', 'true', '1'):
return True
return False
class HandlerRegistry(object):
"""Track handlers for RPC calls"""
def __init__(self):
self.funcs = {}
self.traceback = False
#introspection functions
self.register_function(self.list_api, name="_listapi")
self.register_function(self.system_listMethods, name="system.listMethods")
self.register_function(self.system_methodSignature, name="system.methodSignature")
self.register_function(self.system_methodHelp, name="system.methodHelp")
self.register_function(self.multiCall)
# Also register it as system.multicall for standards compliance
self.register_function(self.multiCall, name="system.multicall")
def register_function(self, function, name = None):
if name is None:
@ -91,117 +93,21 @@ class ModXMLRPCRequestHandler(object):
def register_instance(self,instance):
self.register_module(instance)
def _marshaled_dispatch(self, data):
"""Dispatches an XML-RPC method from marshalled (XML) data."""
def register_plugin(self, plugin):
"""Scan a given plugin for handlers
params, method = loads(data)
start = time.time()
# generate response
try:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
response = dumps(response, methodresponse=1, allow_none=1)
except Fault, fault:
self.traceback = True
response = dumps(fault)
except:
self.traceback = True
# report exception back to server
e_class, e = sys.exc_info()[:2]
faultCode = getattr(e_class,'faultCode',1)
tb_type = context.opts.get('KojiTraceback',None)
tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
if issubclass(e_class, koji.GenericError):
if _opt_bool(context.opts, 'KojiDebug'):
if tb_type == "extended":
faultString = koji.format_exc_plus()
else:
faultString = tb_str
Handlers are functions marked with one of the decorators defined in koji.plugin
"""
for v in vars(plugin).itervalues():
if isinstance(v, (types.ClassType, types.TypeType)):
#skip classes
continue
if callable(v) and getattr(v, 'exported', False):
if hasattr(v, 'export_alias'):
name = getattr(v, 'export_alias')
else:
faultString = str(e)
else:
if tb_type == "normal":
faultString = tb_str
elif tb_type == "extended":
faultString = koji.format_exc_plus()
else:
faultString = "%s: %s" % (e_class,e)
sys.stderr.write(tb_str)
sys.stderr.write('\n')
response = dumps(Fault(faultCode, faultString))
if _opt_bool(context.opts, 'KojiDebug'):
sys.stderr.write("Returning %d bytes after %f seconds\n" %
(len(response),time.time() - start))
sys.stderr.flush()
return response
def _dispatch(self,method,params):
func = self.funcs.get(method,None)
if func is None:
raise koji.GenericError, "Invalid method: %s" % method
context.method = method
if not hasattr(context,"session"):
#we may be called again by one of our meta-calls (like multiCall)
#so we should only create a session if one does not already exist
context.session = koji.auth.Session()
try:
context.session.validate()
except koji.AuthLockError:
#might be ok, depending on method
if method not in ('exclusiveSession','login', 'krbLogin', 'logout'):
raise
if _opt_bool(context.opts, 'LockOut') and \
method not in ('login', 'krbLogin', 'sslLogin', 'logout'):
if not context.session.hasPerm('admin'):
raise koji.GenericError, "Server disabled for maintenance"
# handle named parameters
params,opts = koji.decode_args(*params)
if _opt_bool(context.opts, 'KojiDebug'):
sys.stderr.write("Handling method %s for session %s (#%s)\n" \
% (method, context.session.id, context.session.callnum))
if method != 'uploadFile':
sys.stderr.write("Params: %s\n" % pprint.pformat(params))
sys.stderr.write("Opts: %s\n" % pprint.pformat(opts))
start = time.time()
ret = func(*params,**opts)
if _opt_bool(context.opts, 'KojiDebug'):
sys.stderr.write("Completed method %s for session %s (#%s): %f seconds\n"
% (method, context.session.id, context.session.callnum,
time.time()-start))
sys.stderr.flush()
return ret
def multiCall(self, calls):
"""Execute a multicall. Execute each method call in the calls list, collecting
results and errors, and return those as a list."""
results = []
for call in calls:
try:
result = self._dispatch(call['methodName'], call['params'])
except Fault, fault:
results.append({'faultCode': fault.faultCode, 'faultString': fault.faultString})
except:
# transform unknown exceptions into XML-RPC Faults
# don't create a reference to full traceback since this creates
# a circular reference.
exc_type, exc_value = sys.exc_info()[:2]
faultCode = getattr(exc_type, 'faultCode', 1)
faultString = ', '.join(exc_value.args)
trace = traceback.format_exception(*sys.exc_info())
# traceback is not part of the multicall spec, but we include it for debugging purposes
results.append({'faultCode': faultCode, 'faultString': faultString, 'traceback': trace})
else:
results.append([result])
return results
name = v.__name__
self.register_function(v, name=name)
def list_api(self):
funcs = []
@ -247,6 +153,152 @@ class ModXMLRPCRequestHandler(object):
ret += "\ndescription: %s" % func.__doc__
return ret
def get(self, name):
func = self.funcs.get(name, None)
if func is None:
raise koji.GenericError, "Invalid method: %s" % name
return func
class HandlerAccess(object):
    """This class is used to grant access to the rpc handlers"""

    def __init__(self, registry):
        # registry is expected to provide a get(name) -> callable lookup
        self.__reg = registry

    def call(self, __name, *args, **kwargs):
        # __name is name-mangled to avoid colliding with a handler kwarg
        return self.__reg.get(__name)(*args, **kwargs)

    def get(self, name):
        # fix: was self.__Reg (wrong capitalization), which name-mangles to
        # a nonexistent attribute and raised AttributeError on every call
        return self.__reg.get(name)
class ModXMLRPCRequestHandler(object):
"""Simple XML-RPC handler for mod_python environment"""
def __init__(self, handlers):
self.traceback = False
self.handlers = handlers #expecting HandlerRegistry instance
def _get_handler(self, name):
# just a wrapper so we can handle multicall ourselves
# we don't register multicall since the registry will outlive our instance
if name in ('multiCall', 'system.multicall'):
return self.multiCall
else:
return self.handlers.get(name)
def _marshaled_dispatch(self, data):
"""Dispatches an XML-RPC method from marshalled (XML) data."""
params, method = loads(data)
start = time.time()
# generate response
try:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
response = dumps(response, methodresponse=1, allow_none=1)
except Fault, fault:
self.traceback = True
response = dumps(fault)
except:
self.traceback = True
# report exception back to server
e_class, e = sys.exc_info()[:2]
faultCode = getattr(e_class,'faultCode',1)
tb_type = context.opts.get('KojiTraceback',None)
tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
if issubclass(e_class, koji.GenericError):
if _opt_bool(context.opts, 'KojiDebug'):
if tb_type == "extended":
faultString = koji.format_exc_plus()
else:
faultString = tb_str
else:
faultString = str(e)
else:
if tb_type == "normal":
faultString = tb_str
elif tb_type == "extended":
faultString = koji.format_exc_plus()
else:
faultString = "%s: %s" % (e_class,e)
sys.stderr.write(tb_str)
sys.stderr.write('\n')
sys.stderr.flush()
response = dumps(Fault(faultCode, faultString))
if _opt_bool(context.opts, 'KojiDebug'):
sys.stderr.write("Returning %d bytes after %f seconds\n" %
(len(response),time.time() - start))
sys.stderr.flush()
return response
def _dispatch(self,method,params):
func = self._get_handler(method)
context.method = method
if not hasattr(context,"session"):
#we may be called again by one of our meta-calls (like multiCall)
#so we should only create a session if one does not already exist
context.session = koji.auth.Session()
try:
context.session.validate()
except koji.AuthLockError:
#might be ok, depending on method
if method not in ('exclusiveSession','login', 'krbLogin', 'logout'):
raise
if _opt_bool(context.opts, 'LockOut') and \
method not in ('login', 'krbLogin', 'sslLogin', 'logout'):
if not context.session.hasPerm('admin'):
raise koji.ServerOffline, "Server disabled for maintenance"
# handle named parameters
params,opts = koji.decode_args(*params)
if _opt_bool(context.opts, 'KojiDebug'):
sys.stderr.write("Handling method %s for session %s (#%s)\n" \
% (method, context.session.id, context.session.callnum))
if method != 'uploadFile':
sys.stderr.write("Params: %s\n" % pprint.pformat(params))
sys.stderr.write("Opts: %s\n" % pprint.pformat(opts))
start = time.time()
ret = func(*params,**opts)
if _opt_bool(context.opts, 'KojiDebug'):
sys.stderr.write("Completed method %s for session %s (#%s): %f seconds\n"
% (method, context.session.id, context.session.callnum,
time.time()-start))
sys.stderr.flush()
return ret
def multiCall(self, calls):
"""Execute a multicall. Execute each method call in the calls list, collecting
results and errors, and return those as a list."""
results = []
for call in calls:
try:
result = self._dispatch(call['methodName'], call['params'])
except Fault, fault:
results.append({'faultCode': fault.faultCode, 'faultString': fault.faultString})
except:
# transform unknown exceptions into XML-RPC Faults
# don't create a reference to full traceback since this creates
# a circular reference.
exc_type, exc_value = sys.exc_info()[:2]
faultCode = getattr(exc_type, 'faultCode', 1)
faultString = ', '.join(exc_value.args)
trace = traceback.format_exception(*sys.exc_info())
# traceback is not part of the multicall spec, but we include it for debugging purposes
results.append({'faultCode': faultCode, 'faultString': faultString, 'traceback': trace})
else:
results.append([result])
return results
def handle_request(self,req):
"""Handle a single XML-RPC request"""
@ -274,11 +326,181 @@ def offline_reply(req, msg=None):
req.set_content_length(len(response))
req.write(response)
def load_config(req):
    """Load configuration options

    Options are read from a config file. The config file location is
    controlled by the PythonOption ConfigFile in the httpd config.

    Backwards compatibility:
        - if ConfigFile is not set, opts are loaded from http config
        - if ConfigFile is set, then the http config must not provide Koji options
        - In a future version we will load the default hub config regardless
        - all PythonOptions (except ConfigFile) are now deprecated and support for them
          will disappear in a future version of Koji
    """
    #get our config file
    modpy_opts = req.get_options()
    #cf = modpy_opts.get('ConfigFile', '/etc/koji-hub/hub.conf')
    cf = modpy_opts.get('ConfigFile', None)
    if cf:
        # to aid in the transition from PythonOptions to hub.conf, we only load
        # the configfile if it is explicitly configured
        config = ConfigParser()
        config.read(cf)
    else:
        sys.stderr.write('Warning: configuring Koji via PythonOptions is deprecated. Use hub.conf\n')
        sys.stderr.flush()
    cfgmap = [
        #option, type, default
        ['DBName', 'string', None],
        ['DBUser', 'string', None],
        ['DBHost', 'string', None],
        ['DBPass', 'string', None],
        ['KojiDir', 'string', None],
        ['AuthPrincipal', 'string', None],
        ['AuthKeytab', 'string', None],
        ['ProxyPrincipals', 'string', None],
        ['HostPrincipalFormat', 'string', None],
        ['DNUsernameComponent', 'string', None],
        ['ProxyDNs', 'string', None],
        ['LoginCreatesUser', 'boolean', True],
        ['KojiWebURL', 'string', 'http://localhost.localdomain/koji'],
        ['EmailDomain', 'string', None],
        ['DisableNotifications', 'boolean', False],
        ['Plugins', 'string', None],
        ['PluginPath', 'string', '/usr/lib/koji-hub-plugins'],
        ['KojiDebug', 'boolean', False],
        ['KojiTraceback', 'string', None],
        ['EnableFunctionDebug', 'boolean', False],
        # fix: key was 'MissingPolicyOK', but check_policy reads
        # context.opts.get('MissingPolicyOk') -- the mismatched case meant
        # the intended default of True was never seen
        ['MissingPolicyOk', 'boolean', True],
        ['LockOut', 'boolean', False],
        # fix: ServerOffline is consumed as a boolean flag; it was
        # declared as type 'string' with a bool default
        ['ServerOffline', 'boolean', False],
        ['OfflineMessage', 'string', None],
        ]
    opts = {}
    for name, dtype, default in cfgmap:
        if cf:
            key = ('hub', name)
            if config.has_option(*key):
                if dtype == 'integer':
                    opts[name] = config.getint(*key)
                elif dtype == 'boolean':
                    opts[name] = config.getboolean(*key)
                else:
                    opts[name] = config.get(*key)
            else:
                opts[name] = default
        else:
            if modpy_opts.get(name, None) is not None:
                if dtype == 'integer':
                    opts[name] = int(modpy_opts.get(name))
                elif dtype == 'boolean':
                    opts[name] = modpy_opts.get(name).lower() in ('yes', 'on', 'true', '1')
                else:
                    opts[name] = modpy_opts.get(name)
            else:
                opts[name] = default
    # load policies
    # (only from config file)
    if cf and config.has_section('policy'):
        #for the moment, we simply transfer the policy conf to opts
        opts['policy'] = dict(config.items('policy'))
    else:
        opts['policy'] = {}
    # fill in defaults for any policies the config did not define
    for pname, text in _default_policies.iteritems():
        opts['policy'].setdefault(pname, text)
    # use configured KojiDir
    if opts.get('KojiDir') is not None:
        koji.BASEDIR = opts['KojiDir']
        koji.pathinfo.topdir = opts['KojiDir']
    return opts
def load_plugins(opts):
    """Load and track the plugins named in the 'Plugins' config option.

    Returns a PluginTracker on success, or None when no plugins are
    configured.  A plugin that fails to load is deliberately non-fatal,
    but it flips the hub into offline mode so the misconfiguration is
    noticed rather than silently ignored.
    """
    names = opts['Plugins']
    if not names:
        return
    search = opts['PluginPath'].split(':')
    tracker = koji.plugin.PluginTracker(path=search)
    for plugin_name in names.split():
        sys.stderr.write('Loading plugin: %s\n' % plugin_name)
        try:
            tracker.load(plugin_name)
        except Exception:
            tb_text = ''.join(traceback.format_exception(*sys.exc_info()))
            sys.stderr.write(tb_text)
            # a bad plugin is not fatal, but take the server offline
            opts['ServerOffline'] = True
            opts['OfflineMessage'] = 'configuration error'
    sys.stderr.flush()
    return tracker
_default_policies = {
'build_from_srpm' : '''
has_perm admin :: allow
all :: deny
''',
'build_from_repo_id' : '''
has_perm admin :: allow
all :: deny
''',
}
def get_policy(opts, plugins):
    # Build the policy rule sets from configuration.
    # Returns {policy name: SimpleRuleSet}, or None when no policy is
    # configured.  Tests come from kojihub, koji.policy, and any loaded
    # plugins; plugin tests are appended last so they win on name overlap.
    if not opts.get('policy'):
        return
    #first find available policy tests
    alltests = [koji.policy.findSimpleTests([vars(kojihub), vars(koji.policy)])]
    # we delay merging these to allow a test to be overridden for a specific policy
    for plugin_name in opts.get('Plugins', '').split():
        alltests.append(koji.policy.findSimpleTests(vars(plugins.get(plugin_name))))
    policy = {}
    for pname, text in opts['policy'].iteritems():
        #filter/merge tests
        merged = {}
        for tests in alltests:
            # tests can be limited to certain policies by setting a class variable
            for name, test in tests.iteritems():
                if hasattr(test, 'policy'):
                    # test.policy may be a single policy name or a list of names
                    if isinstance(test.policy, basestring):
                        if pname != test.policy:
                            continue
                    elif pname not in test.policy:
                        continue
                # in case of name overlap, last one wins
                # hence plugins can override builtin tests
                merged[name] = test
        policy[pname] = koji.policy.SimpleRuleSet(text.splitlines(), merged)
    return policy
#
# mod_python handler
#

# one-time initialization state for the request handler:
# firstcall guards the lazy setup; ready is set only after config,
# plugins, registry, and policy all load successfully; opts holds the
# server configuration populated by load_config on the first request.
firstcall = True
ready = False
opts = {}
def handler(req, profiling=False):
global firstcall, ready, registry, opts, plugins, policy
if firstcall:
firstcall = False
opts = load_config(req)
plugins = load_plugins(opts)
registry = get_registry(opts, plugins)
policy = get_policy(opts, plugins)
ready = True
if not ready:
#this will happen on subsequent passes if an error happens in the firstcall code
opts['ServerOffline'] = True
opts['OfflineMessage'] = 'server startup error'
if profiling:
import profile, pstats, StringIO, tempfile
global _profiling_req
@ -293,7 +515,6 @@ def handler(req, profiling=False):
req.write("<pre>" + strstream.getvalue() + "</pre>")
_profiling_req = None
else:
opts = req.get_options()
try:
if _opt_bool(opts, 'ServerOffline'):
offline_reply(req, msg=opts.get("OfflineMessage", None))
@ -301,7 +522,9 @@ def handler(req, profiling=False):
context._threadclear()
context.commit_pending = False
context.opts = opts
context.handlers = HandlerAccess(registry)
context.req = req
context.policy = policy
koji.db.provideDBopts(database = opts["DBName"],
user = opts["DBUser"],
password = opts.get("DBPass",None),
@ -311,19 +534,7 @@ def handler(req, profiling=False):
except Exception:
offline_reply(req, msg="database outage")
return apache.OK
functions = RootExports()
hostFunctions = HostExports()
h = ModXMLRPCRequestHandler()
h.register_instance(functions)
h.register_module(hostFunctions,"host")
h.register_function(koji.auth.login)
h.register_function(koji.auth.krbLogin)
h.register_function(koji.auth.sslLogin)
h.register_function(koji.auth.logout)
h.register_function(koji.auth.subsession)
h.register_function(koji.auth.logoutChild)
h.register_function(koji.auth.exclusiveSession)
h.register_function(koji.auth.sharedSession)
h = ModXMLRPCRequestHandler(registry)
h.handle_request(req)
if h.traceback:
#rollback
@ -336,3 +547,23 @@ def handler(req, profiling=False):
context.cnx.close()
context._threadclear()
return apache.OK
def get_registry(opts, plugins):
    """Create and populate the XMLRPC handler registry.

    Registers the root exports, the host exports (under the 'host'
    namespace), the session/auth functions, and the exports of every
    configured plugin.
    """
    registry = HandlerRegistry()
    registry.register_instance(RootExports())
    registry.register_module(HostExports(), "host")
    # session management entry points
    auth_funcs = (
        koji.auth.login,
        koji.auth.krbLogin,
        koji.auth.sslLogin,
        koji.auth.logout,
        koji.auth.subsession,
        koji.auth.logoutChild,
        koji.auth.exclusiveSession,
        koji.auth.sharedSession,
    )
    for func in auth_funcs:
        registry.register_function(func)
    for plugin_name in opts.get('Plugins', '').split():
        registry.register_plugin(plugins.get(plugin_name))
    return registry

View file

@ -111,17 +111,21 @@ rm -rf $RPM_BUILD_ROOT
%defattr(-,root,root)
%{_datadir}/koji-hub
%config(noreplace) /etc/httpd/conf.d/kojihub.conf
%config(noreplace) /etc/koji-hub/hub.conf
%files utils
%defattr(-,root,root)
%{_sbindir}/kojira
%{_initrddir}/kojira
%config(noreplace) %{_sysconfdir}/sysconfig/kojira
%{_sysconfdir}/kojira
%dir %{_sysconfdir}/kojira
%config(noreplace) %{_sysconfdir}/kojira/kojira.conf
%{_sbindir}/koji-gc
%dir %{_sysconfdir}/koji-gc
%config(noreplace) %{_sysconfdir}/koji-gc/koji-gc.conf
%{_sbindir}/koji-shadow
%dir %{_sysconfdir}/koji-shadow
%config(noreplace) %{_sysconfdir}/koji-shadow/koji-shadow.conf
%files web
%defattr(-,root,root)
@ -134,8 +138,9 @@ rm -rf $RPM_BUILD_ROOT
%{_sbindir}/kojid
%{_initrddir}/kojid
%config(noreplace) %{_sysconfdir}/sysconfig/kojid
%{_sysconfdir}/kojid
%dir %{_sysconfdir}/kojid
%config(noreplace) %{_sysconfdir}/kojid/kojid.conf
%{_datadir}/koji-builder
%attr(-,kojibuilder,kojibuilder) /etc/mock/koji
%pre builder

View file

@ -1544,7 +1544,7 @@ class ClientSession(object):
time.sleep(interval)
#not reached
def multiCall(self):
def multiCall(self, strict=False):
"""Execute a multicall (multiple function calls passed to the server
and executed at the same time, with results being returned in a batch).
Before calling this method, the self.multicall field must have
@ -1565,7 +1565,15 @@ class ClientSession(object):
self.multicall = False
calls = self._calls
self._calls = []
return self._callMethod('multiCall', (calls,), {})
ret = self._callMethod('multiCall', (calls,), {})
if strict:
#check for faults and raise first one
for entry in ret:
if isinstance(entry, dict):
fault = Fault(entry['faultCode'], entry['faultString'])
err = convertFault(fault)
raise err
return ret
def __getattr__(self,name):
#if name[:1] == '_':

103
koji/plugin.py Normal file
View file

@ -0,0 +1,103 @@
# koji plugin module
# Copyright (c) 2008 Red Hat
#
# Koji is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors:
# Mike McLean <mikem@redhat.com>
import imp
import koji
import sys
class PluginTracker(object):
    """Load and track plugin modules by name.

    Plugins are plain python modules found on a configurable search
    path.  Loaded modules are registered in sys.modules under a
    prefixed name to avoid clashing with regular modules.
    """

    def __init__(self, path=None, prefix='_koji_plugin__'):
        # default search path used when load() is not given one
        self.searchpath = path
        #prefix should not have a '.' in it, this can cause problems.
        self.prefix = prefix
        # name -> loaded module object
        self.plugins = {}

    def load(self, name, path=None, reload=False):
        """Load the named plugin and return the module.

        Returns the cached module unless reload is true.  Raises
        koji.GenericError on a sys.modules name conflict or an empty
        search path.
        """
        if name in self.plugins and not reload:
            return self.plugins[name]
        mod_name = name
        if self.prefix:
            #mod_name determines how the module is named in sys.modules
            #Using a prefix helps prevent overlap with other modules
            #(no '.' -- it causes problems)
            mod_name = self.prefix + name
        if mod_name in sys.modules and not reload:
            raise koji.GenericError('module name conflict: %s' % mod_name)
        if path is None:
            path = self.searchpath
        if path is None:
            raise koji.GenericError("empty module search path")
        fo, pathname, description = imp.find_module(name, self.pathlist(path))
        try:
            plugin = imp.load_module(mod_name, fo, pathname, description)
        finally:
            # imp.find_module returns None for the file object when the
            # plugin is a package directory; only close a real file
            if fo is not None:
                fo.close()
        self.plugins[name] = plugin
        return plugin

    def get(self, name):
        """Return the previously loaded plugin, or None if not loaded"""
        return self.plugins.get(name)

    def pathlist(self, path):
        """Normalize a search path (string or list) to a list"""
        if isinstance(path, basestring):
            return [path]
        else:
            return path
# some decorators used by plugins
def export(f):
    """Mark a function as exported and return it unchanged.

    Intended to be used by plugins: the HandlerRegistry exports any
    function carrying the 'exported' attribute under its own name.
    """
    f.exported = True
    return f
def export_as(alias):
    """Return a decorator that exports a function under *alias*.

    Intended to be used by plugins when the exported name should
    differ from the function's own name.
    """
    def dec(f):
        f.exported = True
        f.export_alias = alias
        return f
    return dec
def export_in(module, alias=None):
    """Return a decorator exporting a function within a pseudo-module.

    The function is exported as "<module>.<alias>", where alias
    defaults to the function's own name.  Intended to be used by
    plugins.
    """
    def dec(f):
        # Compute the full alias in a fresh local.  The original code
        # rebound 'alias' here, which made it a local of dec() and
        # raised UnboundLocalError whenever alias was left at None.
        if alias is None:
            full_alias = "%s.%s" % (module, f.__name__)
        else:
            full_alias = "%s.%s" % (module, alias)
        setattr(f, 'exported', True)
        setattr(f, 'export_module', module)
        setattr(f, 'export_alias', full_alias)
        return f
    return dec

332
koji/policy.py Normal file
View file

@ -0,0 +1,332 @@
# Copyright (c) 2008 Red Hat
#
# Koji is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import fnmatch
import koji
class BaseSimpleTest(object):
    """Abstract base class for simple policy tests.

    Subclasses provide ``name`` (the keyword the test answers to in
    policy files) and implement run().
    """
    # keyword identifying this test in policy text
    name = None

    def __init__(self, str):
        """Store the raw test string; parsing is left to subclasses"""
        self.str = str

    def run(self, data):
        """Evaluate the test against a data dict; subclasses must override"""
        raise NotImplementedError

    def __str__(self):
        return self.str


# Generic tests that apply to any policy
class TrueTest(BaseSimpleTest):
    name = 'true'
    def run(self, data):
        return True


class FalseTest(BaseSimpleTest):
    name = 'false'
    def run(self, data):
        return False


class AllTest(TrueTest):
    #alias for true
    name = 'all'


class NoneTest(FalseTest):
    #alias for false
    name = 'none'
class MatchTest(BaseSimpleTest):
    """Glob-match a field of the data dict against patterns.

    True if any of the patterns match, else False.

    This test can be used as-is, or it can be subclassed with a pinned
    ``field`` to test a specific field.

    Syntax:
        name [field] pattern1 [pattern2 ...]
    """
    name = 'match'
    field = None

    def run(self, data):
        terms = self.str.split()[1:]
        if self.field is not None:
            # expected when we are subclassed
            field = self.field
        else:
            # field name comes from the policy line itself
            field = terms[0]
            terms = terms[1:]
        for pattern in terms:
            if fnmatch.fnmatch(data[field], pattern):
                return True
        return False
class CompareTest(BaseSimpleTest):
    """Numeric comparison of a data field against a constant.

    The right operand must be a valid number; integers are preferred,
    floats accepted unless a subclass clears allow_float.

    This test can be used as-is, or subclassed with a pinned ``field``.

    Syntax:
        name [field] OP number
    """
    name = 'compare'
    field = None
    allow_float = True
    # supported comparison operators
    operators = {
        '<' : lambda a, b: a < b,
        '>' : lambda a, b: a > b,
        '<=' : lambda a, b: a <= b,
        '>=' : lambda a, b: a >= b,
        '=' : lambda a, b: a == b,
        '!=' : lambda a, b: a != b,
    }

    def __init__(self, str):
        """Parse field (unless pinned by a subclass), operator, and operand"""
        super(CompareTest, self).__init__(str)
        if self.field is not None:
            # OP number
            cmp, value = str.split(None, 2)[1:]
        else:
            # field OP number
            self.field, cmp, value = str.split(None, 3)[1:]
        self.func = self.operators.get(cmp, None)
        if self.func is None:
            raise koji.GenericError("Invalid comparison in test.")
        try:
            self.value = int(value)
        except ValueError:
            if not self.allow_float:
                raise
            self.value = float(value)

    def run(self, data):
        return self.func(data[self.field], self.value)
class SimpleRuleSet(object):
    """A parsed policy: a (possibly nested) list of rules.

    rules is an iterable of policy lines; tests maps test names to
    BaseSimpleTest subclasses (see findSimpleTests).
    """

    def __init__(self, rules, tests):
        self.tests = tests
        # NOTE(review): parse_rules returns None; the parsed structure is
        # stored in self.ruleset, so self.rules ends up None here.
        self.rules = self.parse_rules(rules)
        # last_rule()/apply() bookkeeping
        self.lastrule = None
        self.lastaction = None

    def parse_rules(self, lines):
        """Parse rules into a ruleset data structure

        At the top level, the structure is a set of rules
            [rule1, rule2, ...]
        Each rule is a pair
            [tests, negate, action ]
        Tests is a list of test handlers:
            [handler1, handler2, ...]
        Action can either be a string or a chained ruleset
            "action"
            or
            [subrule1, subrule2, ...]
        Putting it all together, you get something like this:
            [[[test1, test2], negate, "action"],
             [[test], negate,
              [[[test1, test2], negate, "action"],
               [[test1, test2, test3], negate
                [[[test1, test2], negate, "action"]]]]]]
        """
        # cursor points at the rule list currently being filled;
        # stack holds the enclosing lists while inside nested braces
        cursor = []
        self.ruleset = cursor
        stack = []
        for line in lines:
            rule = self.parse_line(line)
            if rule is None:
                #blank/etc
                continue
            tests, negate, action = rule
            if action == '{':
                #nested rules
                child = []
                cursor.append([tests, negate, child])
                stack.append(cursor)
                cursor = child
            elif action == '}':
                if not stack:
                    raise koji.GenericError, "nesting error in rule set"
                cursor = stack.pop()
            else:
                cursor.append(rule)
        if stack:
            # unclosed {
            raise koji.GenericError, "nesting error in rule set"

    def parse_line(self, line):
        """Parse line as a rule

        Expected format is:
        test [params] [&& test [params] ...] :: action-if-true
        test [params] [&& test [params] ...] !! action-if-false

        (syntax is !! instead of ||, because otherwise folks might think
        they can mix && and ||, which is /not/ supported)

        For complex rules:
        test [params [&& ...]] :: {
            test [params [&& ...]] :: action
            test [params [&& ...]] :: {
                ...
                }
        }

        Each closing brace must be on a line by itself

        Returns (tests, negate, action), or None for blank/comment lines.
        """
        # strip trailing comment and surrounding whitespace
        line = line.split('#', 1)[0].strip()
        if not line:
            #blank or all comment
            return None
        if line == '}':
            return None, False, '}'
            #?? allow }} ??
        negate = False
        # '::' means action-if-true; '!!' means action-if-false (negated)
        pos = line.rfind('::')
        if pos == -1:
            pos = line.rfind('!!')
            if pos == -1:
                raise Exception, "bad policy line: %s" % line
            negate = True
        tests = line[:pos]
        action = line[pos+2:]
        # instantiate a handler for each &&-joined test expression
        tests = [self.get_test_handler(x) for x in tests.split('&&')]
        action = action.strip()
        # just return action = { for nested rules
        return tests, negate, action

    def get_test_handler(self, str):
        # first word of the expression selects the test class;
        # the full string is handed to its constructor for parsing
        name = str.split(None,1)[0]
        try:
            return self.tests[name](str)
        except KeyError:
            raise koji.GenericError, "missing test handler: %s" % name

    def all_actions(self):
        """report a list of all actions in the ruleset

        (only the first word of the action is considered)
        """
        def _recurse(rules, index):
            # walk nested rulesets, recording each action's first word
            for tests, negate, action in rules:
                if isinstance(action, list):
                    _recurse(action, index)
                else:
                    name = action.split(None,1)[0]
                    index[name] = 1
        index = {}
        _recurse(self.ruleset, index)
        return index.keys()

    def _apply(self, rules, data, top=False):
        # Walk the rules in order; the first matching rule (or matching
        # leaf of a nested ruleset) determines the action.  Matched
        # rules are recorded in self.lastrule for last_rule().
        for tests, negate, action in rules:
            if top:
                self.lastrule = []
            value = False
            for test in tests:
                if not test.run(data):
                    break
            else:
                #all tests in current rule passed
                value = True
            if negate:
                value = not value
            if value:
                self.lastrule.append([tests, negate])
                if isinstance(action, list):
                    # action is a list of subrules
                    ret = self._apply(action, data)
                    if ret is not None:
                        return ret
                    # if ret is None, then none of the subrules matched,
                    # so we keep going
                else:
                    return action
        return None

    def apply(self, data):
        # Evaluate the ruleset against data; returns the matched action
        # string, or None when no rule matched.
        self.lastrule = []
        self.lastaction = self._apply(self.ruleset, data, top=True)
        return self.lastaction

    def last_rule(self):
        # Render the chain of rules matched by the last apply() call as
        # a human-readable string (for debugging/logging).
        if self.lastrule is None:
            return None
        ret = []
        for (tests, negate) in self.lastrule:
            line = '&&'.join([str(t) for t in tests])
            if negate:
                line += ' !! '
            else:
                line += ' :: '
            ret.append(line)
        ret = '\n... '.join(ret)
        if self.lastaction is None:
            ret += "(no match)"
        else:
            ret += self.lastaction
        return ret
def findSimpleTests(namespace):
    """Collect BaseSimpleTest subclasses from one or more namespaces.

    This is a convenience function for initializing a SimpleRuleSet
    instance.  namespace can be a dict (e.g. globals()), or a list of
    dicts.  Returns a dictionary of the found subclasses, indexed by
    test name.  On a name overlap the first test found wins.
    """
    if not isinstance(namespace, (list, tuple)):
        namespace = (namespace,)
    found = {}
    for ns in namespace:
        for key, value in ns.items():
            if value is BaseSimpleTest:
                # skip this abstract base class if we encounter it
                # this module contains generic tests, so it is valid to
                # include it in the namespace list
                continue
            is_class = type(value) == type(BaseSimpleTest)
            if not (is_class and issubclass(value, BaseSimpleTest)):
                continue
            name = getattr(value, 'name', None)
            if not name:
                # fall back to the class name, trimming a 'Test' suffix
                name = key
                if name.endswith('Test') and len(name) > 4:
                    name = name[:-4]
            # setdefault: first test found wins in case of name overlap
            found.setdefault(name, value)
    return found

View file

@ -1,4 +1,4 @@
BINFILES = kojira koji-gc
BINFILES = kojira koji-gc koji-shadow
_default:
@echo "nothing to make. try make install"
@ -26,3 +26,6 @@ install:
mkdir -p $(DESTDIR)/etc/koji-gc
install -p -m 644 koji-gc.conf $(DESTDIR)/etc/koji-gc/koji-gc.conf
mkdir -p $(DESTDIR)/etc/koji-shadow
install -p -m 644 koji-shadow.conf $(DESTDIR)/etc/koji-shadow/koji-shadow.conf

View file

@ -1,7 +1,7 @@
#!/usr/bin/python
# koji-gc: a garbage collection tool for Koji
# Copyright (c) 2007 Red Hat
# Copyright (c) 2007, 2008 Red Hat
#
# Authors:
# Mike McLean <mikem@redhat.com>
@ -11,6 +11,7 @@ try:
except ImportError:
pass
import koji
import koji.policy
import ConfigParser
from email.MIMEText import MIMEText
import fnmatch
@ -89,7 +90,7 @@ def get_options():
help=_("SSL certification file for authentication"))
parser.add_option("--ca", default='/etc/koji-gc/clientca.crt',
help=_("SSL certification file for authentication"))
parser.add_option("--serverca", default='etc/koji-gc/serverca.crt',
parser.add_option("--serverca", default='/etc/koji-gc/serverca.crt',
help=_("SSL certification file for authentication"))
parser.add_option("-n", "--test", action="store_true", default=False,
help=_("test mode"))
@ -661,66 +662,39 @@ def handle_delete(just_salvage=False):
#TODO - log details for delete failures
class BasePruneTest(object):
"""Abstract base class for pruning tests"""
#Provide the name of the test
name = None
def __init__(self, str):
"""Read the test parameters from string"""
raise NotImplementedError
def run(self, data):
"""Run the test against data provided"""
raise NotImplementedError
def __str__(self):
return "%s test handler" % self.name
class TagPruneTest(BasePruneTest):
class TagPruneTest(koji.policy.BaseSimpleTest):
name = 'tag'
def __init__(self, str):
"""Read the test parameters from string"""
self.patterns = str.split()[1:]
def run(self, data):
for pat in self.patterns:
patterns = self.str.split()[1:]
for pat in patterns:
if fnmatch.fnmatch(data['tagname'], pat):
return True
return False
class PackagePruneTest(BasePruneTest):
class PackagePruneTest(koji.policy.BaseSimpleTest):
name = 'package'
def __init__(self, str):
"""Read the test parameters from string"""
self.patterns = str.split()[1:]
def run(self, data):
for pat in self.patterns:
patterns = self.str.split()[1:]
for pat in patterns:
if fnmatch.fnmatch(data['pkgname'], pat):
return True
return False
class SigPruneTest(BasePruneTest):
class SigPruneTest(koji.policy.BaseSimpleTest):
name = 'sig'
def __init__(self, str):
"""Read the test parameters from string"""
self.patterns = str.split()[1:]
def run(self, data):
# true if any of the keys match any of the patterns
patterns = self.str.split()[1:]
for key in data['keys']:
if sigmatch(key, self.patterns):
if sigmatch(key, patterns):
return True
return False
@ -741,7 +715,7 @@ def sigmatch(key, patterns):
return False
class OrderPruneTest(BasePruneTest):
class OrderPruneTest(koji.policy.BaseSimpleTest):
name = 'order'
@ -756,6 +730,7 @@ class OrderPruneTest(BasePruneTest):
def __init__(self, str):
"""Read the test parameters from string"""
super(OrderPruneTest, self).__init__(str)
self.cmp, value = str.split(None, 3)[1:]
self.func = self.cmp_idx.get(self.cmp, None)
if self.func is None:
@ -768,7 +743,7 @@ class OrderPruneTest(BasePruneTest):
return self.func(data['order'], self.value)
class AgePruneTest(BasePruneTest):
class AgePruneTest(koji.policy.BaseSimpleTest):
name = 'age'
@ -783,6 +758,7 @@ class AgePruneTest(BasePruneTest):
def __init__(self, str):
"""Read the test parameters from string"""
super(AgePruneTest, self).__init__(str)
self.cmp, value = str.split(None, 2)[1:]
self.func = self.cmp_idx.get(self.cmp, None)
if self.func is None:
@ -793,58 +769,6 @@ class AgePruneTest(BasePruneTest):
return self.func(time.time() - data['ts'], self.span)
class PruneRule(object):
def __init__(self, line=None):
self.line = line
self.tests = []
self.action = None
#find available tests
self.test_handlers = {}
for v in globals().values():
if type(v) == type(BasePruneTest) and issubclass(v, BasePruneTest):
self.test_handlers[v.name] = v
if line is not None:
self.parse_line(line)
def parse_line(self, line):
"""Parse line as a pruning rule
Expected format is:
test [params] [&& test [params] ...] :: (keep|untag|skip)
"""
line = line.split('#', 1)[0].strip()
if not line:
#blank or all comment
return
split1 = line.split('::')
if len(split1) != 2:
raise Exception, "bad policy line: %s" % line
tests, action = split1
tests = [x.strip() for x in tests.split('&&')]
action = action.strip().lower()
self.tests = []
for str in tests:
tname = str.split(None,1)[0]
handler = self.test_handlers[tname]
self.tests.append(handler(str))
valid_actions = ("keep", "untag", "skip")
#skip means to keep, but to ignore for the sake of order number
if action not in valid_actions:
raise Exception, "Invalid action: %s" % str
self.action = action
def apply(self, data):
for test in self.tests:
if not test.run(data):
return None
#else
return self.action
def __str__(self):
return "prune rule: %s" % self.line.rstrip()
def read_policies(fn=None):
"""Read tag gc policies from file
@ -852,13 +776,9 @@ def read_policies(fn=None):
test [params] [&& test [params] ...] :: (keep|untag|skip)
"""
fo = file(fn, 'r')
ret = []
for line in fo:
rule = PruneRule(line)
if rule.action:
ret.append(rule)
if options.debug:
print rule
tests = koji.policy.findSimpleTests(globals())
ret = koji.policy.SimpleRuleSet(fo, tests)
fo.close()
return ret
def scan_policies(str):
@ -867,14 +787,8 @@ def scan_policies(str):
The expected format as follows
test [params] [&& test [params] ...] :: (keep|untag|skip)
"""
ret = []
for line in str.splitlines():
rule = PruneRule(line)
if rule.action:
ret.append(rule)
if options.debug:
print rule
return ret
tests = koji.policy.findSimpleTests(globals())
return koji.policy.SimpleRuleSet(str.splitlines(), tests)
def get_build_sigs(build):
rpms = session.listRPMs(buildID=build)
@ -901,6 +815,11 @@ def handle_prune():
return
#policies = read_policies(options.policy_file)
policies = scan_policies(options.config.get('prune', 'policy'))
for action in policies.all_actions():
if action not in ("keep", "untag", "skip"):
raise Exception, "Invalid action: %s" % action
if options.debug:
pprint.pprint(policies.ruleset)
#get tags
tags = [(t['name'], t) for t in session.listTags()]
tags.sort()
@ -910,8 +829,8 @@ def handle_prune():
print "Skipping trashcan tag: %s" % tagname
continue
if not check_tag(tagname):
if options.debug:
print "skipping tag due to filter: %s" % tagname
#if options.debug:
# print "skipping tag due to filter: %s" % tagname
continue
bypass = False
if taginfo['locked']:
@ -928,7 +847,6 @@ def handle_prune():
continue
if options.debug:
print "Pruning tag: %s" % tagname
mypolicies = list(policies) # copy
#get builds
history = session.tagHistory(tag=tagname)
if not history:
@ -966,28 +884,26 @@ def handle_prune():
'keys' : keys,
'nvr' : nvr,
}
for policy in mypolicies:
action = policy.apply(data)
if not action:
continue
elif action == 'skip':
skipped += 1
if options.debug:
print "%s: %s (%s, %s)" % (action, nvr, tagname, policy)
if action == 'untag':
if options.test:
print "Would have untagged %s from %s" % (nvr, tagname)
else:
print "Untagging build %s from %s" % (nvr, tagname)
try:
session.untagBuildBypass(taginfo['id'], entry['build_id'], force=bypass)
except (xmlrpclib.Fault, koji.GenericError), e:
print "Warning: untag operation failed: %s" % e
pass
break
else:
action = policies.apply(data)
if action is None:
if options.debug:
print "No policy for %s (%s)" % (nvr, tagname)
if action == 'skip':
skipped += 1
if options.debug:
print policies.last_rule()
print "%s: %s (%s, %i)" % (action, nvr, tagname, order)
if action == 'untag':
if options.test:
print "Would have untagged %s from %s" % (nvr, tagname)
else:
print "Untagging build %s from %s" % (nvr, tagname)
try:
session.untagBuildBypass(taginfo['id'], entry['build_id'], force=bypass)
except (xmlrpclib.Fault, koji.GenericError), e:
print "Warning: untag operation failed: %s" % e
pass
# if action == 'keep' do nothing
if __name__ == "__main__":

View file

@ -26,8 +26,10 @@ policy =
sig fedora-test && age < 12 weeks :: keep
#stuff to chuck semi-rapidly
tag *-testing *-candidate && order >= 2 :: untag
tag *-testing *-candidate && order > 0 && age > 6 weeks :: untag
tag *-testing *-candidate :: { # nested rules
order >= 2 :: untag
order > 0 && age > 6 weeks :: untag
} #closing braces must be on a line by themselves (modulo comments/whitespace)
tag *-candidate && age > 60 weeks :: untag
#default: keep the last 3

1157
util/koji-shadow Executable file

File diff suppressed because it is too large Load diff

7
util/koji-shadow.conf Normal file
View file

@ -0,0 +1,7 @@
# koji-shadow example config file
# (still working out all the config options)
[main]
server=http://localhost/kojihub/
remote=http://koji.fedoraproject.org/kojihub

View file

@ -119,8 +119,10 @@ class ManagedRepo(object):
def tryDelete(self):
"""Remove the repo from disk, if possible"""
#we check just the event age first since it is faster
age = time.time() - self.event_ts
if age < options.deleted_repo_lifetime:
#XXX should really be called expired_repo_lifetime
return False
self.logger.debug("Attempting to delete repo %s.." % self.repo_id)
if self.state != koji.REPO_EXPIRED:
@ -138,6 +140,16 @@ class ManagedRepo(object):
return False
tag_name = tag_info['name']
path = pathinfo.repo(self.repo_id, tag_name)
#also check dir age. We do this because a repo can be created from an older event
#and should not be removed based solely on that event's timestamp.
try:
age = time.time() - os.stat(path).st_mtime
except OSError:
self.logger.error("Can't stat repo directory: %s" % path)
return True
if age < options.deleted_repo_lifetime:
#XXX should really be called expired_repo_lifetime
return False
safe_rmtree(path, strict=False)
return True
@ -227,22 +239,25 @@ class RepoManager(object):
if self.repos.has_key(repo_id):
#we're already managing it, no need to deal with it here
continue
try:
dir_ts = os.stat(repodir).st_mtime
except OSError:
#just in case something deletes the repo out from under us
continue
rinfo = session.repoInfo(repo_id)
if rinfo is None:
try:
age = time.time() - os.stat(repodir).st_mtime
except OSError:
#just in case something deletes the repo out from under us
continue
age = time.time() - dir_ts
if age > 36000:
self.logger.warn("Unexpected directory (no such repo): %s" % repodir)
if not options.ignore_stray_repos:
self.logger.warn("Unexpected directory (no such repo): %s" % repodir)
continue
if rinfo['tag_name'] != taginfo['name']:
self.logger.warn("Tag name mismatch: %s" % repodir)
continue
if rinfo['state'] in (koji.REPO_DELETED, koji.REPO_PROBLEM):
age = time.time() - rinfo['create_ts']
age = time.time() - max(rinfo['create_ts'], dir_ts)
if age > options.deleted_repo_lifetime:
#XXX should really be called expired_repo_lifetime
count += 1
logger.info("Removing stray repo (state=%s): %s" % (koji.REPO_STATES[rinfo['state']], repodir))
safe_rmtree(repodir, strict=False)
@ -389,6 +404,8 @@ def get_options():
help="run in foreground")
parser.add_option("-d", "--debug", action="store_true",
help="show debug output")
parser.add_option("-q", "--quiet", action="store_true",
help="don't show warnings")
parser.add_option("-v", "--verbose", action="store_true",
help="show verbose output")
parser.add_option("--with-src", action="store_true",
@ -415,6 +432,7 @@ def get_options():
defaults = {'with_src': False,
'verbose': False,
'debug': False,
'ignore_stray_repos': False,
'topdir': '/mnt/koji',
'server': None,
'logfile': '/var/log/kojira.log',
@ -428,6 +446,7 @@ def get_options():
'delete_batch_size': 3,
'max_repo_tasks' : 10,
'deleted_repo_lifetime': 7*24*3600,
#XXX should really be called expired_repo_lifetime
'cert': '/etc/kojira/client.crt',
'ca': '/etc/kojira/clientca.crt',
'serverca': '/etc/kojira/serverca.crt'
@ -436,7 +455,7 @@ def get_options():
int_opts = ('prune_batch_size', 'deleted_repo_lifetime', 'max_repo_tasks',
'delete_batch_size', 'retry_interval', 'max_retries', 'offline_retry_interval')
str_opts = ('topdir','server','user','password','logfile', 'principal', 'keytab', 'cert', 'ca', 'serverca')
bool_opts = ('with_src','verbose','debug', 'offline_retry')
bool_opts = ('with_src','verbose','debug','ignore_stray_repos', 'offline_retry')
for name in config.options(section):
if name in int_opts:
defaults[name] = config.getint(section, name)
@ -486,6 +505,8 @@ if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
elif options.verbose:
logger.setLevel(logging.INFO)
elif options.quiet:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.WARNING)
session_opts = {}

996
util/kojisd Executable file
View file

@ -0,0 +1,996 @@
#!/usr/bin/python
# kojisd: a tool to subscribe to builds between koji instances
# Copyright (c) 2007-2008 Red Hat
# Copyright (c) 2007-2008 Dennis Gilmore
#
# Koji is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors:
# Mike McLean <mikem@redhat.com>
# Dennis Gilmore <dennis@ausil.us>
try:
import krbV
except ImportError:
pass
import koji
import ConfigParser
from email.MIMEText import MIMEText
import fnmatch
import optparse
import os
import pprint
import smtplib
import socket # for socket.error and socket.setdefaulttimeout
import sys
import time
import xmlrpclib # for ProtocolError and Fault
import urlgrabber.grabber as grabber
import rpm
# koji.fp.o keeps stalling, probably network errors...
# better to time out than to stall
socket.setdefaulttimeout(180) #XXX - too short?
OptionParser = optparse.OptionParser
if optparse.__version__ == "1.4.1+":
def _op_error(self, msg):
self.print_usage(sys.stderr)
msg = "%s: error: %s\n" % (self._get_prog_name(), msg)
if msg:
sys.stderr.write(msg)
sys.exit(2)
OptionParser.error = _op_error
def _(args):
"""Stub function for translation"""
return args
def get_options():
    """Process options from the command line and the config file.

    The config file (``--config-file`` or /etc/kojisd/kojisd.conf) only
    updates the parser *defaults*; explicit command-line options always
    win because the command line is re-parsed afterwards.

    Returns (options, args).
    """
    usage = _("%prog [options]")
    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--config-file", metavar="FILE",
                      help=_("use alternate configuration file"))
    parser.add_option("--keytab", help=_("specify a Kerberos keytab to use"))
    parser.add_option("--principal", help=_("specify a Kerberos principal to use"))
    parser.add_option("--runas", metavar="USER",
                      help=_("run as the specified user (requires special privileges)"))
    parser.add_option("--user", help=_("specify user"))
    parser.add_option("--password", help=_("specify password"))
    parser.add_option("--noauth", action="store_true", default=False,
                      help=_("do not authenticate"))
    parser.add_option("-n", "--test", action="store_true", default=False,
                      help=_("test mode"))
    parser.add_option("-d", "--debug", action="store_true", default=False,
                      help=_("show debug output"))
    parser.add_option("--first-one", action="store_true", default=False,
                      help=_("stop after scanning first build -- debugging"))
    parser.add_option("--debug-xmlrpc", action="store_true", default=False,
                      help=_("show xmlrpc debug output"))
    parser.add_option("--skip-main", action="store_true", default=False,
                      help=_("don't actually run main"))
    parser.add_option("--build",
                      help=_("scan just this build"))
    parser.add_option("-s", "--server",
                      help=_("url of local XMLRPC server"))
    parser.add_option("-r", "--remote",
                      help=_("url of remote XMLRPC server"))
    parser.add_option("--validtags", action="append", default=[],
                      help=_("List of valid tags to build for"))
    parser.add_option("--invalidtags", action="append", default=[],
                      help=_("List of tags to not build for"))
    parser.add_option("--logfile", default="/var/log/kojisd.log",
                      help=_("location of log file"))
    parser.add_option("--topdir", default="/mnt/koji",
                      help=_(""))
    parser.add_option("--workpath", default="/mnt/koji/work/kojisd",
                      help=_("location to save import files"))
    parser.add_option("--importarches", default="",
                      help=_("arches to import"))
    parser.add_option("--buildarches", action="store", default="",
                      help=_("arches to build"))
    parser.add_option("--pkgurl", action="store", default="",
                      help=_("url to base packages on remote server"))
    #parse once to get the config file
    (options, args) = parser.parse_args()
    defaults = parser.get_default_values()
    config = ConfigParser.ConfigParser()
    cf = getattr(options, 'config_file', None)
    if cf:
        if not os.access(cf, os.F_OK):
            # parser.error() terminates the process (exit code 2)
            parser.error(_("No such file: %s") % cf)
    else:
        cf = '/etc/kojisd/kojisd.conf'
        if not os.access(cf, os.F_OK):
            cf = None
    if not cf:
        print("no config file")
        config = None
    else:
        config.read(cf)
    #allow config file to update defaults for certain options
    cfgmap = [
        ['keytab', None, 'string'],
        ['principal', None, 'string'],
        ['runas', None, 'string'],
        ['user', None, 'string'],
        ['password', None, 'string'],
        ['noauth', None, 'boolean'],
        ['server', None, 'string'],
        ['remote', None, 'string'],
        ['importarches', None, 'list'],
        ['serverca', None, 'string'],
        ['cert', None, 'string'],
        ['ca', None, 'string'],
        ['validtags', None, 'list'],
        ['invalidtags', None, 'list'],
        ['logfile', None, 'string'],
        ['topdir', None, 'string'],
        ['workpath', None, 'string'],
        ['buildarches', None, 'string'],
        ['pkgurl', None, 'string'],
        ]
    # Fix: only consult the config object when a config file was actually
    # read.  Previously config was set to None above and the loop below
    # crashed with AttributeError (None.has_option) on any host without
    # /etc/kojisd/kojisd.conf.
    if config is not None:
        for name, alias, opt_type in cfgmap:
            print("Checking %s" % name)
            if alias is None:
                alias = ('kojisd', name)
            if config.has_option(*alias):
                print("Using option %s from config file" % (alias,))
                if opt_type == 'integer':
                    setattr(defaults, name, config.getint(*alias))
                elif opt_type == 'boolean':
                    setattr(defaults, name, config.getboolean(*alias))
                elif opt_type == 'list':
                    line = config.get(*alias)
                    line = line.split()
                    setattr(defaults, name, line)
                else:
                    setattr(defaults, name, config.get(*alias))
    #parse again with updated defaults
    (options, args) = parser.parse_args(values=defaults)
    return options, args
#: seconds per canonical unit name
time_units = {
    'second' : 1,
    'minute' : 60,
    'hour' : 3600,
    'day' : 86400,
    'week' : 604800,
}

#: accepted spellings for each unit; the first entry is the canonical name
time_unit_aliases = [
    #[unit, alias, alias, ...]
    ['week', 'weeks', 'wk', 'wks'],
    ['hour', 'hours', 'hr', 'hrs'],
    ['day', 'days'],
    ['minute', 'minutes', 'min', 'mins'],
    ['second', 'seconds', 'sec', 'secs', 's'],
]

def parse_duration(str):
    """Parse time duration from string, returns duration in seconds.

    Accepts whitespace-separated <number> <unit> pairs ("2 hours 30 min")
    as well as glued forms ("10min").  Raises ValueError on anything that
    is neither a number nor a recognized unit.
    """
    def _to_number(text):
        # ints first so "2" stays an int; fall back to float; None if neither
        for convert in (int, float):
            try:
                return convert(text)
            except ValueError:
                pass
        return None

    total = 0
    amount = None
    unit = None
    for token in str.split():
        if amount is None:
            amount = _to_number(token)
            if amount is not None:
                continue
            # not a bare number: the unit may be glued on ("10min")
            matched = False
            for aliases in time_unit_aliases:
                for name in aliases:
                    if token.endswith(name):
                        amount = _to_number(token[:-len(name)])
                        if amount is None:
                            continue
                        unit = aliases[0]
                        matched = True
                        break
                if matched:
                    break
            if not matched:
                raise ValueError("Invalid time interval: %s" % str)
        if unit is None:
            lowered = token.lower()
            matched = False
            for aliases in time_unit_aliases:
                if lowered in aliases:
                    unit = aliases[0]
                    matched = True
                    break
            if not matched:
                raise ValueError("Invalid time interval: %s" % str)
        total += amount * time_units[unit]
        amount = None
        unit = None
    return total
def error(msg=None, code=1):
    """Write msg (when non-empty) to stderr, then exit with status code."""
    if msg:
        sys.stderr.write("%s\n" % msg)
        sys.stderr.flush()
    sys.exit(code)
def warn(msg):
    """Emit msg on stderr (with trailing newline) without terminating."""
    sys.stderr.write("%s\n" % msg)
    sys.stderr.flush()
def ensure_connection(session):
    """Sanity-check connectivity to the hub behind *session*.

    Exits the process (via error()) when the server is unreachable, and
    warns when the server and client API versions differ.
    """
    try:
        ret = session.getAPIVersion()
    except xmlrpclib.ProtocolError:
        # error() calls sys.exit(), so ret is always bound below
        error(_("Error: Unable to connect to server"))
    if ret != koji.API_VERSION:
        warn(_("WARNING: The server is at API version %d and the client is at %d" % (ret, koji.API_VERSION)))
def activate_session(session):
    """Test and login the session is applicable.

    Tries, in order: no-auth, SSL client cert, user/password, Kerberos.
    Exits the process (via error()) if no method succeeds.
    """
    global options
    if options.noauth:
        #skip authentication
        pass
    elif os.path.isfile(options.cert):
        # authenticate using SSL client cert
        session.ssl_login(options.cert, options.ca, options.serverca, proxyuser=options.runas)
    elif options.user:
        # authenticate using user/password
        # (credentials come from the session_opts the session was built with)
        session.login()
    elif has_krb_creds():
        # NOTE(review): has_krb_creds() is not defined anywhere in this
        # file -- confirm it is provided elsewhere or this branch raises
        # NameError when reached.
        try:
            if options.keytab and options.principal:
                session.krb_login(principal=options.principal, keytab=options.keytab, proxyuser=options.runas)
            else:
                session.krb_login(proxyuser=options.runas)
        except krbV.Krb5Error, e:
            error(_("Kerberos authentication failed: %s (%s)") % (e.args[1], e.args[0]))
        except socket.error, e:
            warn(_("Could not connect to Kerberos authentication service: %s") % e.args[1])
    if not options.noauth and not session.logged_in:
        error(_("Unable to log in, no authentication methods available"))
    ensure_connection(session)
    if options.debug:
        print "successfully connected to hub"
def getHubTags(session):
    '''Return the list of tag names defined on the given hub.

    Fix: the original accumulated the names in ``tags`` but then printed
    and returned the undefined name ``buildTags``, raising NameError the
    first time it was called.
    '''
    tags = []
    allTags = session.listTags()
    for remoteTag in allTags:
        tags.append(remoteTag['name'])
    print("tags : %s" % tags)
    return tags
def syncTags():
    ''' sync the tags from the master to the slave. due to inheritance
        its easier to sync tags completely between the hubs

    NOTE(review): relies on module-level buildTags, localTags, remote,
    session and buildarches, none of which are defined at module scope in
    this file -- confirm before enabling (the call site in main() is
    commented out).
    '''
    toAddTags = []
    for tag in buildTags:
        if tag not in localTags:
            toAddTags.append(tag)
    orderToAddTags = []
    for tag in toAddTags:
        rawParents = remote.getFullInheritance(tag)
        for rawParent in rawParents:
            if rawParent['currdepth'] == 1:
                print(tag)
                print(rawParent['name'])
                # Fix: the original appended ["%s", "%s"] % (...), which
                # raises TypeError (a list cannot be %-formatted).  Record
                # the (tag, parent) pair directly instead.
                orderToAddTags.append((tag, rawParent['name']))
    for tag, parent in orderToAddTags:
        session.createTag(tag, parent, arches=buildarches)
    # TODO: handle errors gracefully, order tag creation. handle targets
    return
def main(args):
    """Scan every configured build tag and kick off any needed rebuilds."""
    #XXX get tags
    buildTags = []
    # Fix: the original tested ``options.validtags != None``, but the
    # option defaults to [] (never None), so the getHubTags() fallback was
    # unreachable and an unconfigured run scanned nothing.  Test
    # truthiness instead, and copy the list so the remove() calls below do
    # not mutate options.validtags.
    if options.validtags:
        buildTags = list(options.validtags)
    else:
        buildTags = getHubTags(remote)
    for tag in options.invalidtags:
        if tag in buildTags:
            buildTags.remove(tag)
    print("BuildTags: %s" % buildTags)
    #syncTags()
    tracker = BuildTracker()
    # go through each tag and see what needs building
    for buildTag in buildTags:
        print("BuildTag: %s" % buildTag)
        tracker.scanTag(buildTag)
    tracker.report()
    tracker.showOrder()
    tracker.runRebuilds()
def remote_buildroots(build_id):
    """Return a list of buildroots for remote build"""
    #XXX - only used in old test code (foo)
    buildroots = {}
    for rinfo in remote.listRPMs(build_id):
        broot = rinfo.get('buildroot_id')
        if broot:
            buildroots[broot] = 1
        else:
            print("Warning: no buildroot for: %s" % rinfo)
    return buildroots.keys()
def remote_br_builds(brlist):
    """Given a list of buildroots, return build data of contents"""
    #XXX - only used in old test code (foo)
    visited = {}
    build_ids = {}
    for broot in brlist:
        if broot in visited:
            continue
        visited[broot] = 1
        #print "."
        for rinfo in remote.listRPMs(componentBuildrootID=broot):
            build_ids[rinfo['build_id']] = 1
    return dict([(b, remote.getBuild(b)) for b in build_ids])
def foo():
    """just experimenting....

    Compares the buildroot contents of one remote build (args[0]) against
    the local hub and prints which component builds are common vs missing.
    Uses module globals: remote, session, args.
    """
    binfo = remote.getBuild(args[0])
    buildroots = remote_buildroots(binfo['id'])
    if not buildroots:
        #nothing we can do
        return
    build_idx = remote_br_builds(buildroots)
    name_idx = {}
    for binfo2 in build_idx.itervalues():
        name_idx.setdefault(binfo2['name'], []).append(binfo2)
    # NOTE(review): this first 'names' binding is never used -- it is
    # rebound below before any read
    names = name_idx.keys()
    missing = {}
    found = {}
    for name, builds in name_idx.iteritems():
        if len(builds) > 1:
            print "Warning: found multiple versions of %s: %s" % (name, builds)
            #pick latest (by completion time)
            order = [(b['completion_ts'], b) for b in builds]
            order.sort()
            build = order[-1][1]
        else:
            build = builds[0]
        nvr = "%(name)s-%(version)s-%(release)s" % build
        build.setdefault('nvr', nvr)
        #see if our server has it
        ours = session.getBuild(nvr)
        if ours:
            ours.setdefault('nvr', nvr)
            found[name] = ours
        else:
            missing[name] = build
    names = found.keys()
    names.sort()
    for name in names:
        print "Found common build: %(nvr)s" % found[name]
    names = missing.keys()
    names.sort()
    for name in names:
        print "Missing remote build: %(nvr)s" % missing[name]
class TrackedBuild(object):
    """State for one remote build being tracked for shadow rebuilding.

    On construction the build is looked up on both hubs and classified
    into one of the states indexed by BuildTracker.state_idx:
      common  -- present and COMPLETE on the local hub
      broken  -- present locally but DELETED/BUILDING
      missing -- absent locally (its buildroot deps are then scanned)
      noroot  -- upstream has no buildroot data for it

    Uses module globals: remote, session, koji.
    """

    def __init__(self, build_id, child=None, tracker=None):
        self.id = build_id
        self.tracker = tracker
        self.info = remote.getBuild(build_id)
        self.nvr = "%(name)s-%(version)s-%(release)s" % self.info
        self.children = {}
        self.state = None
        self.order = 0
        if child is not None:
            #children tracks the builds that were built using this one
            self.children[child] = 1
        #see if we have it
        ours = session.getBuild(self.nvr)
        self.rebuilt = False
        if ours is not None:
            state = koji.BUILD_STATES[ours['state']]
            if state == 'COMPLETE':
                self.setState("common")
                if ours['task_id']:
                    # a task id means we built it locally rather than importing it
                    self.rebuilt = True
                return
            elif state in ('FAILED', 'CANCELED'):
                #treat these as having no build
                pass
            else:
                # DELETED, BUILDING
                self.setState("broken")
                return
        self.setState("missing")
        self.getDeps() #sets deps, br_tag, base, order, (maybe state)

    def setState(self, state):
        """Move this build to a new state, keeping tracker.state_idx in sync."""
        #print "%s -> %s" % (self.nvr, state)
        if state == self.state:
            return
        if self.state is not None and self.tracker:
            del self.tracker.state_idx[self.state][self.id]
        self.state = state
        if self.tracker:
            self.tracker.state_idx.setdefault(self.state, {})[self.id] = 1

    def addChild(self, child):
        """Record that build id *child* was built using this build."""
        self.children[child] = 1

    def setExtraArchesFromRPMs(self, rpms=None):
        """Record the non-canonical arches this build produced rpms for."""
        if rpms is None:
            rpms = remote.listRPMs(self.id)
        arches = {}
        for rpminfo in rpms:
            arches.setdefault(rpminfo['arch'], 1)
        self.extraArches = [a for a in arches if koji.canonArch(a) != a]

    def getBuildroots(self):
        """Return a list of buildroots for remote build"""
        rpms = remote.listRPMs(self.id)
        #while we've got the rpm list, let's note the extra arches
        #XXX - really should reorganize this a bit
        self.setExtraArchesFromRPMs(rpms)
        brs = {}
        bad = []
        for rinfo in rpms:
            br_id = rinfo.get('buildroot_id')
            if not br_id:
                bad.append(rinfo)
                continue
            brs[br_id] = 1
        if brs and bad:
            print("Warning: some rpms for %s lacked buildroots:" % self.nvr)
            for rinfo in bad:
                # Fix: the original format string lacked the 's' conversion
                # characters ("%(name)-%(version)-...") and raised
                # ValueError whenever this branch was reached.
                print("  %(name)s-%(version)s-%(release)s.%(arch)s" % rinfo)
        return list(brs)

    def getDeps(self):
        """Scan this build's buildroots; set deps, br_tag, base and order."""
        buildroots = self.getBuildroots()
        if not buildroots:
            self.setState("noroot")
            return
        buildroots.sort()
        # highest buildroot id approximates creation order for scheduling
        self.order = buildroots[-1]
        seen = {} #used to avoid scanning the same buildroot twice
        builds = {} #track which builds we need for a rebuild
        bases = {} #track base install for buildroots
        tags = {} #track buildroot tag(s)
        for br_id in buildroots:
            if br_id in seen:
                continue
            seen[br_id] = 1
            br_info = remote.getBuildroot(br_id, strict=True)
            tags.setdefault(br_info['tag_name'], 0)
            tags[br_info['tag_name']] += 1
            #print "."
            for rinfo in remote.listRPMs(componentBuildrootID=br_id):
                builds[rinfo['build_id']] = 1
                if not rinfo['is_update']:
                    bases.setdefault(rinfo['name'], {})[br_id] = 1
        # we want to record the intersection of the base sets
        # XXX - this makes some assumptions about homogeneity that, while reasonable,
        #       are not strictly required of the db.
        #       The only way I can think of to break this is if some significant tag/target
        #       changes happened during the build startup and some subtasks got the old
        #       repo and others the new one.
        base = []
        for name, brlist in bases.items():
            for br_id in buildroots:
                if br_id not in brlist:
                    break
            else:
                #each buildroot had this as a base package
                base.append(name)
        if len(tags) > 1:
            print("Warning: found multiple buildroot tags for %s: %s" % (self.nvr, tags.keys()))
            counts = [(n, tag) for tag, n in tags.items()]
            # Fix: the original called the undefined name sort(counts)
            # (NameError); use the list's sort method, then keep the most
            # frequently used tag.
            counts.sort()
            tag = counts[-1][1]
        else:
            tag = list(tags)[0]
        self.deps = builds
        self.br_tag = tag
        self.base = base
class BuildTracker(object):
    """Tracks remote builds and drives shadow rebuilds on the local hub.

    Uses module globals: remote, session, options, rpm, koji, socket.

    NOTE(review): builds and state_idx are *class* attributes, so every
    BuildTracker instance shares the same dictionaries.  Only one tracker
    is created per run in this script, but confirm this is intentional.
    """
    builds = {}     # build_id -> TrackedBuild
    state_idx = {}  # state name -> {build_id: 1} (maintained by TrackedBuild.setState)
    def rpmvercmp (self, (e1, v1, r1), (e2, v2, r2)):
        """find out which build is newer

        Returns "first", "same" or "second" per rpm label comparison of the
        two (epoch, version, release) triples.
        NOTE: tuple parameters are Python 2-only syntax.
        """
        rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
        if rc == 1:
            return "first"
        elif rc == 0:
            return "same"
        else:
            return "second"
    def scanBuild(self, build_id, tag, from_build=None, depth=0):
        """Recursively scan a build and its dependencies.

        Classifies the remote build against the local hub (newer / same /
        missing), imports trivially-importable builds, and recurses into
        buildroot dependencies of missing builds.  Returns the TrackedBuild.
        """
        #print build_id
        build = self.builds.get(build_id)
        if build:
            #already scanned
            if from_build:
                build.addChild(from_build.id)
            return build
        #otherwise...
        child_id = None
        if from_build:
            child_id = from_build.id
        build = TrackedBuild(build_id, child=child_id, tracker=self)
        #print build.id, build.nvr
        self.builds[build_id] = build
        if len(self.builds) % 50 == 0:
            # periodic progress report during long scans
            self.report()
        if from_build:
            tail = " (from %s)" % from_build.nvr
        else:
            tail = ""
        head = " " * depth
        parentTask = remote.getBuild(int(build.id))
        latestBuild = session.getLatestBuilds(tag, package=parentTask['package_name'])
        if latestBuild:
            parentevr = (str(parentTask['epoch']), parentTask['version'], parentTask['release'])
            latestevr = (str(latestBuild[0]['epoch']), latestBuild[0]['version'], latestBuild[0]['release'])
            newestRPM = self.rpmvercmp( parentevr, latestevr)
            newBuild = remote.getBuild(latestBuild[0]['nvr'])
        else:
            # We get here when there is no build on the local hub
            # NOTE(review): newBuild is left unbound on this path; it is
            # only read in the "second" branch below, which requires
            # latestBuild -- confirm that invariant holds.
            newestRPM = "first"
        if newestRPM == "first":
            if build.state == "common":
                #we're good
                if build.rebuilt:
                    print "%sCommon build (rebuilt) %s%s" % (head, build.nvr, tail)
                else:
                    print "%sCommon build %s%s" % (head, build.nvr, tail)
            elif build.state == "noroot":
                #we're fucked, so build with latest build root we have
                #TODO: build with the latest buildroot
                print "%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail)
                #get src url
                if parentTask['task_id'] is not None:
                    parentRequest = remote.getTaskRequest(parentTask['task_id'])
                    session.build(parentRequest[0], tag, parentRequest[2])
                    print "%sInfo: building %s%s"%(head, parentRequest[0], tail)
                    build.state = "broken"
                else:
                    # NOTE(review): parentTask comes from getBuild(); confirm
                    # the result carries an 'nvr' key (elsewhere this script
                    # assembles nvr by hand), else this print raises KeyError.
                    print "Error: unable to queue %s to build it was imported upstream" % parentTask['nvr']
            elif build.state == "broken":
                #also fucked
                #TODO: find replacement package version
                print "%sWarning: build exists, but is invalid: %s%s" % (head, build.nvr, tail)
            elif build.state == "missing":
                # check to see if we just import this from the remote host.
                rpmfiles = remote.listRPMs(buildID=build.id, arches=options.importarches)
                rpmname = remote.getBuild(build.id)['package_name']
                # kernel is funky we should never just import it
                # XXX: should this be a config of packages we dont import?
                if rpmfiles and rpmname != "kernel":
                    print "%sInfo: Importing build %s%s" %(head, build.nvr, tail)
                    self.importBuild(build.id, tag, rpmfiles, rpmname, build.nvr )
                    build.state = "imported"
                    return build
                # lets see if we have a newer build
                #scan its deps
                print "%sMissing build %s%s. Scanning deps..." % (head, build.nvr, tail)
                for dep_id in build.deps:
                    # retry loop guards against transient network failures
                    for retry in xrange(10):
                        try:
                            self.scanBuild(dep_id, tag, from_build=build, depth=depth+1)
                        except (socket.timeout, socket.error):
                            print "retry"
                            continue
                        break
                    else:
                        print "Error: unable to scan dep: %i for %s" % (dep_id, build.nvr)
                        continue
        elif newestRPM == "second":
            # newBuild will be None when the build does not exist on the remote hub
            if newBuild == None:
                #if the newer build does not exist on the remote hub fill in the info from the localhub
                # this should only ever happen during bootstrapping or if we build something on the
                # local hub to make sure a fix works before building on the remote system
                newBuild = session.getBuild(latestBuild[0]['nvr'])
                build.id = newBuild['id']
                build.tracker = None
                build.info = newBuild
                build.nvr = "%s" % newBuild['nvr']
                build.children = {}
                build.state = "common"
            else:
                build = TrackedBuild(newBuild['id'], child=None, tracker=self)
                build.state = "common"
            #self.builds.get(newBuild['task_id'])
            print "%sNewer build %s%s" % (head, build.nvr, tail)
        elif newestRPM == "same":
            #we're good
            if build.rebuilt:
                print "%sCommon/Latest build (rebuilt) %s%s" % (head, build.nvr, tail)
            else:
                print "%sCommon/Latest build %s%s" % (head, build.nvr, tail)
        return build
    def importBuild(self, build_id, tag, rpmfiles, pkg, nvr):
        '''import and tag a build from remote hub

        Downloads the src.rpm plus each binary rpm from options.pkgurl into
        options.workpath, imports them via session.importRPM, and finally
        tags the build into *tag*.
        NOTE(review): the loop variable ``rpm`` below shadows the imported
        rpm module within this method (harmless here but fragile), and
        ``file`` shadows the builtin.
        '''
        fname = "%s-%s-%s.src.rpm" % (pkg, rpmfiles[0]['version'], rpmfiles[0]['release'])
        url = "%s/%s/%s/%s/src/%s" % (options.pkgurl, pkg, rpmfiles[0]['version'], rpmfiles[0]['release'], fname)
        print url
        file = grabber.urlopen(url, text = "%s.%s" % (pkg, 'src'))
        out = os.open(os.path.join(options.workpath, fname), os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0666)
        try:
            while 1:
                buf = file.read(4096)
                if not buf:
                    break
                os.write(out, buf)
        finally:
            os.close(out)
        file.close()
        print 'Downloaded: %s' % fname
        session.importRPM('kojisd', fname)
        print 'Imported: %s' % fname
        for rpm in rpmfiles:
            fname = "%s-%s-%s.%s.rpm" % (rpm['name'], rpm['version'], rpm['release'], rpm['arch'])
            url = "%s/%s/%s/%s/%s/%s" % (options.pkgurl, pkg, rpm['version'], rpm['release'], rpm['arch'], fname)
            print url
            file = grabber.urlopen(url, text = "%s.%s" % (rpm['name'], rpm['arch']))
            out = os.open(os.path.join(options.workpath, fname), os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0666)
            try:
                while 1:
                    buf = file.read(4096)
                    if not buf:
                        break
                    os.write(out, buf)
            finally:
                os.close(out)
            file.close()
            print 'Downloaded: %s' % fname
            session.importRPM('kojisd', fname)
            print 'Imported: %s' % fname
        session.tagBuildBypass(tag, nvr)
        print 'Tagged: %s' % nvr
    def scanTag(self, tag):
        """Scan the latest builds in a remote tag"""
        taginfo = remote.getTag(tag)
        builds = remote.listTagged(taginfo['id'], latest=True)
        for build in builds:
            # retry loop guards against transient network failures
            for retry in xrange(10):
                try:
                    self.scanBuild(build['id'], tag)
                    if options.first_one:
                        return
                except (socket.timeout, socket.error):
                    print "retry"
                    continue
                break
            else:
                print "Error: unable to scan %(name)s-%(version)s-%(release)s" % build
                continue
    def scan(self):
        """Scan based on config file

        NOTE(review): unfinished stub -- to_scan and alltags are computed
        but never used; main() calls scanTag() directly instead.
        """
        to_scan = []
        alltags = remote.listTags()
    def rebuild(self, build):
        """Rebuild a remote build using closest possible buildroot

        Verifies all deps are 'common', mirrors the remote buildroot tag
        into a local SHADOWBUILD-* tag (package list, tagged builds, build
        group) inside one multicall, then (TODO) kicks off the build.
        """
        #first check that we can
        deps = []
        for build_id in build.deps:
            dep = self.builds.get(build_id)
            if not dep:
                print "Missing dependency %i for %s. Not scanned?" % (build_id, build.nvr)
                return
            if dep.state != 'common':
                print "Dependency missing for %s: %s (%s)" % (build.nvr, dep.nvr, dep.state)
                return
            deps.append(dep)
        #check/create tag
        our_tag = "SHADOWBUILD-%s" % build.br_tag
        taginfo = session.getTag(our_tag)
        parents = None
        if not taginfo:
            #XXX - not sure what is best here
            #how do we pick arches? for now just assume all....
            # config option for
            #XXX this call for perms is stupid, but it's all we've got
            perm_id = None
            for data in session.getAllPerms():
                if data['name'] == 'admin':
                    perm_id = data['id']
                    break
            # use config option for arches
            # NOTE(review): buildarches is not defined at module scope in
            # this file -- confirm it is set elsewhere before this runs
            session.createTag(our_tag, perm=perm_id, arches='%s' % buildarches)
            taginfo = session.getTag(our_tag, strict=True)
            session.createBuildTarget(taginfo['name'], taginfo['id'], taginfo['id'])
        else:
            parents = session.getInheritanceData(taginfo['id'])
        if parents:
            print "Warning: shadow build tag has inheritance"
        #check package list
        pkgs = {}
        for pkg in session.listPackages(tagID=taginfo['id']):
            pkgs[pkg['package_name']] = pkg
        missing_pkgs = []
        for dep in deps:
            name = dep.info['name']
            if not pkgs.has_key(name):
                #guess owner
                owners = {}
                for pkg in session.listPackages(pkgID=name):
                    owners.setdefault(pkg['owner_id'], []).append(pkg)
                if owners:
                    # pick the owner with the most package entries
                    order = [(len(v), k) for k, v in owners.iteritems()]
                    order.sort()
                    owner = order[-1][1]
                else:
                    #just use ourselves
                    owner=session.getLoggedInUser()['id']
                missing_pkgs.append((name, owner))
        #check build list
        cur_builds = {}
        for binfo in session.listTagged(taginfo['id']):
            #index by name in tagging order (latest first)
            cur_builds.setdefault(binfo['name'], []).append(binfo)
        to_untag = []
        to_tag = []
        for dep in deps:
            #XXX - assuming here that there is only one dep per 'name'
            #      may want to check that this is true
            cur_order = cur_builds.get(dep.info['name'], [])
            tagged = False
            for binfo in cur_order:
                if binfo['nvr'] == dep.nvr:
                    tagged = True
                    #may not be latest now, but it will be after we do all the untagging
                else:
                    # note that the untagging keeps older builds from piling up. In a sense
                    # we're gc-pruning this tag ourselves every pass.
                    to_untag.append(binfo)
            if not tagged:
                to_tag.append(dep)
        # NOTE(review): drop_groups is collected but never acted on below
        drop_groups = []
        build_group = None
        for group in session.getTagGroups(taginfo['id']):
            if group['name'] == 'build':
                build_group = group
            else:
                # we should have no other groups but build
                print "Warning: found stray group: %s" % group
                drop_groups.append(group['name'])
        if build_group:
            #TODO - fix build group package list based on base of build to shadow
            needed = dict([(n,1) for n in build.base])
            current = dict([(p['package'],1) for p in build_group['packagelist']])
            add_pkgs = [n for n in needed if not current.has_key(n)]
            drop_pkgs = [n for n in current if not needed.has_key(n)]
            #no group deps needed/allowed
            drop_deps = [(g['name'], 1) for g in build_group['grouplist']]
            if drop_deps:
                print "Warning: build group had deps: %r" % build_group
        else:
            add_pkgs = build.base
            drop_pkgs = []
            drop_deps = []
        #update package list, tagged packages, and groups in one multicall/transaction
        #(avoid useless repo regens)
        session.multicall = True
        for name, owner in missing_pkgs:
            session.packageListAdd(taginfo['id'], name, owner=owner)
        for binfo in to_untag:
            session.untagBuildBypass(taginfo['id'], binfo['id'])
        for dep in to_tag:
            session.tagBuildBypass(taginfo['id'], dep.nvr)
            #shouldn't need force here
        #set groups data
        if not build_group:
            # build group not present. add it
            session.groupListAdd(taginfo['id'], 'build', force=True)
            #using force in case group is blocked. This shouldn't be the case, but...
        for pkg_name in drop_pkgs:
            #in principal, our tag should not have inheritance, so the remove call is the right thing
            session.groupPackageListRemove(taginfo['id'], 'build', pkg_name)
        for pkg_name in add_pkgs:
            session.groupPackageListAdd(taginfo['id'], 'build', pkg_name)
            #we never add any blocks, so forcing shouldn't be required
        #TODO - adjust extra_arches for package to build
        #TODO - get event id to facilitate waiting on repo
        #  not sure if getLastEvent is good enough
        #  short of adding a new call, perhaps use getLastEvent together with event of
        #  current latest repo for tag
        session.getLastEvent()
        results = session.multiCall()
        # NOTE(review): multiCall wraps each result in a singleton list, and
        # getLastEvent returns one dict -- confirm this 2-element unpack of
        # results[-1] actually works rather than raising ValueError.
        [event_id, event_ts] = results[-1]
        #TODO - verify / check results ?
        #TODO - call newRepo
        #TODO - upload src
        src = "" #XXX
        #TODO - wait for repo
        #TODO - kick off build
        #task_id = session.build(src, taginfo['name'], ... ) #XXX
        #TODO - add task/build to some sort of watch list
        #TODO - post-build validation
    def report(self):
        """Print a timestamped summary of tracked builds per state."""
        print time.asctime()
        print "%i builds" % len(self.builds)
        states = self.state_idx.keys()
        states.sort()
        for s in states:
            print "%s: %i" % (s, len(self.state_idx[s]))
    def runRebuilds(self):
        """Rebuild missing builds

        Walks builds in buildroot order and rebuilds the first missing one
        whose deps are all available (see the break below: currently stops
        after a single rebuild per pass).
        """
        print "Determining rebuild order"
        builds = [(b.order, b.id, b) for b in self.builds.itervalues()]
        builds.sort()
        b_avail = {}
        ok = 0
        bad = 0
        for order, build_id, build in builds:
            if build.state == 'common':
                b_avail[build_id] = 1
            elif build.state == 'missing':
                #check deps
                not_avail = [x for x in build.deps.iterkeys() if not b_avail.get(x)]
                if not_avail:
                    print "Can't rebuild %s, missing %i deps" % (build.nvr, len(not_avail))
                    b_avail[build_id] = 0
                    bad += 1
                    for dep_id in not_avail:
                        dep = self.builds[dep_id]
                        avail = b_avail.get(dep_id)
                        if avail is None:
                            print "  %s (out of order?)" % dep.nvr
                        elif not avail:
                            print "  %s (%s)" % (dep.nvr, dep.state)
                else:
                    ok += 1
                    print "rebuild: %s" % build.nvr
                    self.rebuild(build)
                    break #XXX
                    b_avail[build_id] = 1
            else:
                print "build: %s, state: %s, #children: %i" \
                        % (build.nvr, build.state, len(build.children))
                b_avail[build_id] = 0
        print "ok: %i, bad: %i" % (ok, bad)
    def showOrder(self):
        """Show order of rebuilds (for debugging)
        This is sort of a dress rehearsal for the rebuild scheduler
        """
        print "Determining rebuild order"
        builds = [(b.order, b.id, b) for b in self.builds.itervalues()]
        #builds = self.builds.items() # (id, build)
        builds.sort()
        b_avail = {}
        ok = 0
        bad = 0
        #for build_id, build in builds:
        for order, build_id, build in builds:
            if build.state == 'common':
                b_avail[build_id] = 1
            elif build.state == 'missing':
                #for sanity, check deps
                for dep_id in build.deps.iterkeys():
                    dep = self.builds[dep_id]
                    avail = b_avail.get(dep_id)
                    if avail is None:
                        print "Can't rebuild %s, missing %s (out of order?)" % (build.nvr, dep.nvr)
                        b_avail[build_id] = 0
                        bad += 1
                        break
                    elif not avail:
                        print "Can't rebuild %s, missing %s (%s)" % (build.nvr, dep.nvr, dep.state)
                        b_avail[build_id] = 0
                        bad += 1
                        break
                else:
                    ok += 1
                    print "rebuild: %s" % build.nvr
                    b_avail[build_id] = 1
            else:
                print "build: %s, state: %s, #children: %i" \
                        % (build.nvr, build.state, len(build.children))
                #show_children(build_id)
                b_avail[build_id] = 0
        print "ok: %i, bad: %i" % (ok, bad)
def bar():
    """Debug helper: scan one build (or a whole tag) then run the rebuild pass."""
    tracker = BuildTracker()
    #binfo = remote.getBuild(args[0], strict=True)
    #tracker.scanBuild(binfo['id'])
    if not options.build:
        tracker.scanTag(args[0])
    else:
        binfo = remote.getBuild(options.build, strict=True)
        tracker.scanBuild(binfo['id'])
    tracker.report()
    tracker.showOrder()
    tracker.runRebuilds()
if __name__ == "__main__":
    options, args = get_options()
    print options
    # build the session options shared by both hub connections
    session_opts = {}
    for k in ('user', 'password', 'debug_xmlrpc', 'debug'):
        session_opts[k] = getattr(options,k)
    print options.server
    # 'session' talks to the local (secondary) hub, 'remote' to the master
    session = koji.ClientSession(options.server, session_opts)
    if not options.noauth:
        activate_session(session)
    #XXX - sane auth
    #XXX - config!
    # NOTE(review): the remote session is never authenticated -- confirm
    # the master hub permits the anonymous calls this script makes
    remote = koji.ClientSession(options.remote, session_opts)
    rv = 0
    try:
        rv = main(args)
        if not rv:
            rv = 0
    except KeyboardInterrupt:
        pass
    except SystemExit:
        rv = 1
    #except:
    #    if options.debug:
    #        raise
    #    else:
    #        exctype, value = sys.exc_info()[:2]
    #        rv = 1
    #        print "%s: %s" % (exctype, value)
    # best-effort logout; ignore any failure since we are exiting anyway
    try:
        session.logout()
    except:
        pass
    sys.exit(rv)

55
util/kojisd.conf Normal file
View file

@ -0,0 +1,55 @@
[kojisd]
; For user/pass authentication
; user=kojisd
; password=kojisd
; For Kerberos authentication
; the principal to connect with
;principal=koji/repo@EXAMPLE.COM
; The location of the keytab for the principal above
;keytab=/etc/kojisd.keytab
; The URL for the building koji hub server
server = http://sparc.koji.fedoraproject.org/kojihub
; The URL for the master koji hub server
remote = http://koji.fedoraproject.org/kojihub
; The directory containing the repos/ directory
;topdir = /mnt/koji
; Logfile
;logfile = /var/log/kojisd.log
; kojisd daemon user cert for secondary hub
clientcert = /etc/kojisd/fedora.cert
clientca = /etc/kojisd/fedora-upload-ca.cert
serverca = /etc/kojisd/fedora-server-ca.cert
; tags on primary we want to build
validtags = dist-f8 dist-f9 dist-f8-updates dist-f8-updates-candidate dist-f8-updates-testing
;validtags = ['dist-f8', 'dist-f9', 'dist-f8-updates', 'dist-f8-updates-candidate', 'dist-f8-updates-testing']
; tags from primary we do not want to build
;invalidtags =
; Arches we import directly from the master hub
importarches = noarch
; Arches we will build for
buildarches = sparcv9 sparc64
; Work directory, where we download files to
;workpath = /mnt/koji/work/kojisd
pkgurl = http://koji.fedoraproject.org/packages
; user to run build as
;buildas =
;configuration for SSL authentication
;client certificate
cert = /etc/kojisd/kojisd_key_and_cert.pem
;certificate of the CA that issued the client certificate
ca = /etc/pki/tls/certs/extras_cacert.pem
;certificate of the CA that issued the HTTP server certificate
serverca = /etc/pki/tls/certs/extras_cacert.pem

85
util/kojisd.init Normal file
View file

@ -0,0 +1,85 @@
#! /bin/sh
#
# kojisd Start/Stop kojisd
#
# chkconfig: 345 99 99
# description: koji subscriber daemon
# processname: kojisd
# This is an interactive program, we need the current locale
# Source function library.
. /etc/init.d/functions

# Check that we're a privileged user
# NOTE(review): exits 0 (success) for non-root and missing binary below --
# confirm this silent-success behavior is intended for this init script
[ `id -u` = 0 ] || exit 0

# Optional per-host overrides (RUNAS, debug flags, ...)
[ -f /etc/sysconfig/kojisd ] && . /etc/sysconfig/kojisd

prog="kojisd"

# Check that networking is up.
if [ "$NETWORKING" = "no" ]
then
    exit 0
fi

[ -f /usr/sbin/kojisd ] || exit 0

RETVAL=0
# Launch the daemon, optionally as $RUNAS, and record the subsys lock.
# NOTE(review): FORCE_LOCK/--force-lock and the KOJIRA_* variable names
# look copied from kojira's init script -- confirm kojisd actually accepts
# --force-lock/--verbose and whether these should be KOJISD_* variables.
start() {
    echo -n $"Starting $prog: "
    cd /
    ARGS=""
    [ "$FORCE_LOCK" == "Y" ] && ARGS="$ARGS --force-lock"
    [ "$KOJIRA_DEBUG" == "Y" ] && ARGS="$ARGS --debug"
    [ "$KOJIRA_VERBOSE" == "Y" ] && ARGS="$ARGS --verbose"
    if [ -n "$RUNAS" -a "$RUNAS" != "root" ]; then
        daemon --user "$RUNAS" /usr/sbin/kojisd $ARGS
    else
        daemon /usr/sbin/kojisd $ARGS
    fi
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/kojisd
    return $RETVAL
}
# Terminate the daemon and clear the subsys lock on success.
stop() {
    echo -n $"Stopping $prog: "
    killproc kojisd
    RETVAL=$?
    echo
    if [ $RETVAL -eq 0 ]; then
        rm -f /var/lock/subsys/kojisd
    fi
    return $RETVAL
}
# Full bounce: stop the daemon, then start it again.
restart() {
    stop
    start
}
# See how we were called.
# Standard SysV action dispatch; condrestart only restarts when the
# subsys lock shows the service was running.
case "$1" in
start)
    start
    ;;
stop)
    stop
    ;;
status)
    status $prog
    ;;
restart|reload)
    restart
    ;;
condrestart)
    [ -f /var/lock/subsys/kojisd ] && restart || :
    ;;
*)
    echo $"Usage: $0 {start|stop|status|restart|reload|condrestart}"
    exit 1
esac

# propagate the status of the action that ran
exit $?

View file

@ -89,7 +89,11 @@ $value
#set $buildTarget = $params[2]
<strong>Recipients:</strong>&nbsp;$printValue('', $params[0])<br/>
<strong>Build:</strong> <a href="buildinfo?buildID=$build.id">$koji.buildLabel($build)</a><br/>
#if $buildTarget
<strong>Build Target:</strong> <a href="buildtargetinfo?targetID=$buildTarget.id">$buildTarget.name</a><br/>
#else
<strong>Build Target:</strong> (no build target)<br/>
#end if
<strong>Web URL:</strong> <a href="$params[3]">$params[3]</a>
#elif $task.method == 'tagNotification'
<strong>Recipients:</strong>&nbsp;$printValue('', $params[0])<br/>