Merge branch 'shadow'

This commit is contained in:
Mike McLean 2008-11-06 15:12:51 -05:00
commit 4336e34fc4
13 changed files with 2584 additions and 95 deletions

View file

@ -305,7 +305,7 @@ class BuildRoot(object):
self.name = "%(tag_name)s-%(id)s-%(repoid)s" % vars(self)
self.config = session.getBuildConfig(self.tag_id)
def _new(self, tag, arch, task_id, distribution=None):
def _new(self, tag, arch, task_id, distribution=None, repo_id=None):
"""Create a brand new repo"""
self.task_id = task_id
self.distribution = distribution
@ -318,12 +318,26 @@ class BuildRoot(object):
raise koji.BuildrootError("Could not get config info for tag: %s" % tag)
self.tag_id = self.config['id']
self.tag_name = self.config['name']
while 1:
repo_info = session.getRepo(self.tag_id)
if repo_info and repo_info['state'] == koji.REPO_READY:
break
self.logger.debug("Waiting for repo to be created %s" % self.tag_name)
time.sleep(5)
if repo_id is None:
#use current active repo for tag, waiting if necessary
while 1:
repo_info = session.getRepo(self.tag_id)
if repo_info and repo_info['state'] == koji.REPO_READY:
break
self.logger.debug("Waiting for repo to be created %s" % self.tag_name)
time.sleep(30)
else:
repo_info = session.repoInfo(repo_id)
if self.config['id'] != repo_info['tag_id']:
raise koji.BuildrootError, "tag/repo mismatch: %s vs %s" \
% (self.config['name'], repo_info['tag_name'])
repo_state = koji.REPO_STATES[repo_info['state']]
if repo_state == 'EXPIRED':
# This should be ok. Expired repos are still intact, just not
# up-to-date (which may be the point in some cases).
self.logger.info("Requested repo (%i) is no longer current" % repo_id)
elif repo_state != 'READY':
raise koji.BuildrootError, "Requested repo (%i) is %s" % (repo_id, repo_state)
self.repoid = repo_info['id']
self.br_arch = koji.canonArch(arch)
self.logger.debug("New buildroot: %(tag_name)s/%(br_arch)s/%(repoid)s" % vars(self))
@ -1330,6 +1344,12 @@ class BaseTaskHandler(object):
fn = "%s/%s" % (options.topdir, relpath)
return fn
def subtask(self, method, arglist, **opts):
return session.host.subtask(method, arglist, self.id, **opts)
def subtask2(self, __taskopts, __method, *args, **kwargs):
return session.host.subtask2(self.id, __taskopts, __method, *args, **kwargs)
class FakeTask(BaseTaskHandler):
Methods = ['someMethod']
Foreground = True
@ -1450,6 +1470,9 @@ class ChainBuildTask(BaseTaskHandler):
def handler(self, srcs, target, opts=None):
if opts.get('scratch'):
raise koji.BuildError, "--scratch is not allowed with chain-builds"
target_info = session.getBuildTarget(target)
if not target_info:
raise koji.GenericError, 'unknown build target: %s' % target
for build_level in srcs:
subtasks = []
build_tasks = []
@ -1466,7 +1489,7 @@ class ChainBuildTask(BaseTaskHandler):
nvrs.append(src)
if nvrs:
task_id = session.host.subtask(method='waitrepo',
arglist=[target, None, nvrs],
arglist=[target['build_tag'], None, nvrs],
label=','.join(nvrs),
parent=self.id)
subtasks.append(task_id)
@ -1482,7 +1505,7 @@ class ChainBuildTask(BaseTaskHandler):
nvrs.append(builds[0]['nvr'])
if nvrs:
task_id = session.host.subtask(method='waitrepo',
arglist=[target, None, nvrs],
arglist=[target['build_tag'], None, nvrs],
label=','.join(nvrs),
parent=self.id)
self.wait(task_id, all=True, failany=True)
@ -1500,24 +1523,60 @@ class BuildTask(BaseTaskHandler):
self.opts = opts
if opts.get('arch_override') and not opts.get('scratch'):
raise koji.BuildError, "arch_override is only allowed for scratch builds"
if opts.get('repo_id') is not None:
repo_info = session.repoInfo(opts['repo_id'])
if not repo_info:
raise koji.BuildError, 'No such repo: %s' % opts['repo_id']
repo_state = koji.REPO_STATES[repo_info['state']]
if repo_state not in ('READY', 'EXPIRED'):
raise koji.BuildError, 'Bad repo: %s (%s)' % (repo_info['id'], repo_state)
else:
repo_info = None
#we'll wait for a repo later (self.getRepo)
task_info = session.getTaskInfo(self.id)
target_info = session.getBuildTarget(target)
if not target_info:
raise koji.GenericError, 'unknown build target: %s' % target
dest_tag = target_info['dest_tag']
build_tag = target_info['build_tag']
target_info = None
if target:
target_info = session.getBuildTarget(target)
if target_info:
dest_tag = target_info['dest_tag']
build_tag = target_info['build_tag']
if repo_info is not None:
#make sure specified repo matches target
if repo_info['tag_id'] != target_info['build_tag']:
raise koji.BuildError, 'Repo/Target mismatch: %s/%s' \
% (repo_info['tag_name'], target_info['build_tag_name'])
else:
# if repo_id is specified, we can allow the 'target' arg to simply specify
# the destination tag (since the repo specifies the build tag).
if repo_info is None:
raise koji.GenericError, 'unknown build target: %s' % target
build_tag = repo_info['tag_id']
if target is None:
#ok, call it skip-tag for the buildroot tag
self.opts['skip_tag'] = True
dest_tag = build_tag
else:
taginfo = session.getTag(target)
if not taginfo:
raise koji.GenericError, 'neither tag nor target: %s' % target
dest_tag = taginfo['id']
#policy checks...
policy_data = {
'user_id' : task_info['owner'],
'source' : src,
'task_id' : self.id,
'build_tag' : build_tag, #id
}
if target_info:
policy_data['target'] = target_info['id'],
if not self.opts.get('skip_tag'):
policy_data['tag'] = dest_tag #id
if not SCM.is_scm_url(src) and not opts.get('scratch'):
#let hub policy decide
policy_data = {
'user_id' : task_info['owner'],
'source' : src,
'task_id' : self.id,
'target' : target['id'],
'build_tag' : build_tag, #id
}
if not self.opts.get('skip_tag'):
policy_data['tag'] = dest_tag #id
session.assertPolicy('build_from_srpm', policy_data)
session.host.assertPolicy('build_from_srpm', policy_data)
if opts.get('repo_id') is not None:
# use of this option is governed by policy
session.host.assertPolicy('build_from_repo_id', policy_data)
srpm = self.getSRPM(src)
h = self.readSRPMHeader(srpm)
data = koji.get_header_fields(h,['name','version','release','epoch'])
@ -1545,8 +1604,10 @@ class BuildTask(BaseTaskHandler):
build_id = session.host.initBuild(data)
session.host.importChangelog(build_id, srpm)
#(initBuild raises an exception if there is a conflict)
if not repo_info:
repo_info = self.getRepo(build_tag) #(subtask)
try:
srpm,rpms,brmap,logs = self.runBuilds(srpm,build_tag,archlist)
srpm,rpms,brmap,logs = self.runBuilds(srpm,build_tag,archlist,repo_info['id'])
if opts.get('scratch'):
#scratch builds do not get imported
session.host.moveBuildToScratch(self.id,srpm,rpms,logs=logs)
@ -1645,13 +1706,24 @@ class BuildTask(BaseTaskHandler):
raise koji.BuildError, "No matching arches were found"
return archdict.keys()
def runBuilds(self, srpm, build_tag, archlist):
def getRepo(self, tag):
"""Get repo to use for builds"""
repo_info = session.getRepo(tag)
if not repo_info:
#wait for it
task_id = session.host.subtask(method='waitrepo',
arglist=[tag, None, None],
parent=self.id)
repo_info = self.wait(task_id)[task_id]
return repo_info
def runBuilds(self, srpm, build_tag, archlist, repo_id):
self.logger.debug("Spawning jobs for arches: %r" % (archlist))
subtasks = {}
keep_srpm = True
for arch in archlist:
subtasks[arch] = session.host.subtask(method='buildArch',
arglist=[srpm,build_tag,arch,keep_srpm],
arglist=[srpm, build_tag, arch, keep_srpm, {'repo_id': repo_id}],
label=arch,
parent=self.id,
arch=koji.canonArch(arch))
@ -1721,11 +1793,13 @@ class BuildArchTask(BaseTaskHandler):
if not header[rpm.RPMTAG_DISTRIBUTION]:
raise koji.BuildError, "The build system failed to set the distribution tag"
def handler(self, pkg, root, arch, keep_srpm, opts={}):
def handler(self, pkg, root, arch, keep_srpm, opts=None):
"""Build a package in a buildroot for one arch"""
global options
ret = {}
if opts is None:
opts = {}
#noarch is funny
if arch == "noarch":
@ -1771,7 +1845,12 @@ class BuildArchTask(BaseTaskHandler):
# if not h[rpm.RPMTAG_DISTRIBUTION]:
# raise koji.BuildError, "the distribution tag is not set in the original srpm"
broot = BuildRoot(root, br_arch, self.id, distribution=h[rpm.RPMTAG_DISTRIBUTION])
rootopts = {
'distribution' : h[rpm.RPMTAG_DISTRIBUTION],
}
if opts.get('repo_id') is not None:
rootopts['repo_id'] = opts['repo_id']
broot = BuildRoot(root, br_arch, self.id, **rootopts)
self.logger.debug("Initializing buildroot")
broot.init()
@ -2109,8 +2188,10 @@ Build Info: %(weburl)s/buildinfo?buildID=%(build_id)i\r
build_nvr = koji.buildLabel(build)
build_id = build['id']
build_owner = build['owner_name']
# target comes from session.py:_get_build_target()
dest_tag = target['dest_tag_name']
# target comes from session.py:_get_build_target()
dest_tag = None
if target is not None:
dest_tag = target['dest_tag_name']
status = koji.BUILD_STATES[build['state']].lower()
creation_time = koji.formatTimeLong(build['creation_time'])
completion_time = koji.formatTimeLong(build['completion_time'])
@ -2213,10 +2294,13 @@ class NewRepoTask(BaseTaskHandler):
Methods = ['newRepo']
_taskWeight = 0.1
def handler(self, tag):
def handler(self, tag, event=None):
self.uploadpath = self.getUploadDir()
tinfo = session.getTag(tag, strict=True)
repo_id, event_id = session.host.repoInit(tinfo['id'])
kwargs = {}
if event is not None:
kwargs['event'] = event
repo_id, event_id = session.host.repoInit(tinfo['id'], **kwargs)
path = koji.pathinfo.repo(repo_id, tinfo['name'])
if not os.path.isdir(path):
raise koji.GenericError, "Repo directory missing: %s" % path
@ -2239,7 +2323,10 @@ class NewRepoTask(BaseTaskHandler):
for (arch, task_id) in subtasks.iteritems():
data[arch] = results[task_id]
self.logger.debug("DEBUG: %r : %r " % (arch,data[arch],))
session.host.repoDone(repo_id, data)
kwargs = {}
if event is not None:
kwargs['expire'] = True
session.host.repoDone(repo_id, data, **kwargs)
return repo_id, event_id
class CreaterepoTask(BaseTaskHandler):
@ -2278,7 +2365,8 @@ class CreaterepoTask(BaseTaskHandler):
koji.ensuredir(datadir)
os.system('cp -a %s/* %s' % (olddatadir, datadir))
cmd.append('--update')
cmd.append('--skip-stat')
if options.createrepo_skip_stat:
cmd.append('--skip-stat')
# note: we can't easily use a cachedir because we do not have write
# permission. The good news is that with --update we won't need to
# be scanning many rpms.
@ -2310,44 +2398,66 @@ class WaitrepoTask(BaseTaskHandler):
# time in minutes before we fail this task
TIMEOUT = 120
def handler(self, build_target_info, newer_then=None, nvrs=None):
def handler(self, tag, newer_than=None, nvrs=None):
"""Wait for a repo for the tag, subject to given conditions
newer_than: create_event timestamp should be newer than this
nvr: repo should contain this nvr (which may not exist at first)
Only one of the options may be specified. If neither is, then
the call will wait for the first ready repo.
Returns the repo info (from getRepo) of the chosen repo
"""
start = time.time()
build_target = session.getBuildTarget(build_target_info)
if not build_target:
raise koji.GenericError, "invalid build target: %s" % build_target_info
taginfo = session.getTag(tag)
targets = session.getBuildTargets(buildTagID=taginfo['id'])
if not targets:
raise koji.GenericError("No build target for tag: %s" % taginfo['name'])
if isinstance(newer_than, basestring) and newer_than.lower() == "now":
newer_than = start
if not isinstance(newer_than, (int, long, float)):
raise koji.GenericError, "Invalid value for newer_than"
if newer_than and nvrs:
raise koji.GenericError, "only one of (newer_than, nvrs) may be specified"
if not nvrs:
nvrs = []
builds = [koji.parse_NVR(nvr) for nvr in nvrs]
if not newer_then and not builds:
newer_then = time.time()
last_repo = None
repo = session.getRepo(build_target['build_tag'])
while True:
if builds and repo and repo != last_repo:
if koji.util.checkForBuilds(session, build_target['build_tag'], builds, repo['create_event']):
return "Successfully waited %s for %s to appear in the %s repo" % \
(koji.util.duration(start), koji.util.printList(nvrs), build_target['build_tag_name'])
elif newer_then:
if repo['create_ts'] > newer_then:
return "Successfully waited %s for a new %s repo" % \
(koji.util.duration(start), build_target['build_tag_name'])
repo = session.getRepo(taginfo['id'])
if repo and repo != last_repo:
if builds:
if koji.util.checkForBuilds(session, taginfo['id'], builds, repo['create_event']):
self.logger.debug("Successfully waited %s for %s to appear in the %s repo" % \
(koji.util.duration(start), koji.util.printList(nvrs), taginfo['name']))
return repo
elif newer_than:
if repo['create_ts'] > newer_than:
self.logger.debug("Successfully waited %s for a new %s repo" % \
(koji.util.duration(start), taginfo['name']))
return repo
else:
#no check requested -- return first ready repo
return repo
if (time.time() - start) > (self.TIMEOUT * 60.0):
if builds:
raise koji.GenericError, "Unsuccessfully waited %s for %s to appear in the %s repo" % \
(koji.util.duration(start), koji.util.printList(nvrs), build_target['build_tag_name'])
(koji.util.duration(start), koji.util.printList(nvrs), taginfo['name'])
else:
raise koji.GenericError, "Unsuccessfully waited %s for a new %s repo" % \
(koji.util.duration(start), build_target['build_tag_name'])
(koji.util.duration(start), taginfo['name'])
time.sleep(self.PAUSE)
last_repo = repo
repo = session.getRepo(build_target['build_tag'])
class SCM(object):
"SCM abstraction class"
@ -2691,6 +2801,7 @@ def get_options():
'max_retries': 120,
'offline_retry': True,
'offline_retry_interval': 120,
'createrepo_skip_stat': True,
'pkgurl': None,
'allowed_scms': '',
'cert': '/etc/kojid/client.crt',
@ -2704,6 +2815,8 @@ def get_options():
defaults[name] = int(value)
except ValueError:
quit("value for %s option must be a valid integer" % name)
elif name in ['offline_retry', 'createrepo_skip_stat']:
defaults[name] = config.getboolean('kojid', name)
elif name in ['plugin', 'plugins']:
defaults['plugin'] = value.split()
elif name in defaults.keys():

View file

@ -645,6 +645,7 @@ def handle_build(options, session, args):
parser.add_option("--nowait", action="store_true",
help=_("Don't wait on build"))
parser.add_option("--arch-override", help=_("Override build arches"))
parser.add_option("--repo-id", type="int", help=_("Use a specific repo"))
parser.add_option("--noprogress", action="store_true",
help=_("Do not display progress of the upload"))
parser.add_option("--background", action="store_true",
@ -657,19 +658,23 @@ def handle_build(options, session, args):
parser.error(_("--arch_override is only allowed for --scratch builds"))
activate_session(session)
target = args[0]
build_target = session.getBuildTarget(target)
if not build_target:
parser.error(_("Unknown build target: %s" % target))
dest_tag = session.getTag(build_target['dest_tag'])
if not dest_tag:
parser.error(_("Unknown destination tag: %s" % build_target['dest_tag_name']))
if dest_tag['locked'] and not build_opts.scratch:
parser.error(_("Destination tag %s is locked" % dest_tag['name']))
if target.lower() == "none" and build_opts.repo_id:
target = None
build_opts.skip_tag = True
else:
build_target = session.getBuildTarget(target)
if not build_target:
parser.error(_("Unknown build target: %s" % target))
dest_tag = session.getTag(build_target['dest_tag'])
if not dest_tag:
parser.error(_("Unknown destination tag: %s" % build_target['dest_tag_name']))
if dest_tag['locked'] and not build_opts.scratch:
parser.error(_("Destination tag %s is locked" % dest_tag['name']))
source = args[1]
opts = {}
if build_opts.arch_override:
opts['arch_override'] = ' '.join(build_opts.arch_override.replace(',',' ').split())
for key in ('skip_tag','scratch'):
for key in ('skip_tag', 'scratch', 'repo_id'):
val = getattr(build_opts, key)
if val is not None:
opts[key] = val

View file

@ -1684,7 +1684,7 @@ def get_task_descendents(task, childMap=None, request=False):
get_task_descendents(Task(child['id']), childMap, request)
return childMap
def repo_init(tag, with_src=False, with_debuginfo=False):
def repo_init(tag, with_src=False, with_debuginfo=False, event=None):
"""Create a new repo entry in the INIT state, return full repo data
Returns a dictionary containing
@ -1699,7 +1699,13 @@ def repo_init(tag, with_src=False, with_debuginfo=False):
for arch in tinfo['arches'].split():
repo_arches[koji.canonArch(arch)] = 1
repo_id = _singleValue("SELECT nextval('repo_id_seq')")
event_id = _singleValue("SELECT get_event()")
if event is None:
event_id = _singleValue("SELECT get_event()")
else:
#make sure event is valid
q = "SELECT time FROM events WHERE id=%(event)s"
event_time = _singleValue(q, locals(), strict=True)
event_id = event
q = """INSERT INTO repo(id, create_event, tag_id, state)
VALUES(%(repo_id)s, %(event_id)s, %(tag_id)s, %(state)s)"""
_dml(q,locals())
@ -3542,7 +3548,11 @@ def _get_build_target(task_id):
task = Task(task_id)
request = task.getRequest()
# request is (path-to-srpm, build-target-name, map-of-other-options)
return get_build_targets(request[1])[0]
ret = get_build_targets(request[1])
if ret:
return ret[0]
else:
return None
def get_notification_recipients(build, tag_id, state):
"""
@ -3556,26 +3566,37 @@ def get_notification_recipients(build, tag_id, state):
for this tag and the user who submitted the build. The list will not contain
duplicates.
"""
package_id = build['package_id']
query = """SELECT email FROM build_notifications
WHERE ((package_id = %(package_id)i OR package_id IS NULL)
AND (tag_id = %(tag_id)i OR tag_id IS NULL))
"""
if state != koji.BUILD_STATES['COMPLETE']:
query += """AND success_only = FALSE
"""
clauses = []
emails = [result[0] for result in _fetchMulti(query, locals())]
if build:
package_id = build['package_id']
clauses.append('package_id = %(package_id)i OR package_id IS NULL')
else:
clauses.append('package_id IS NULL')
if tag_id:
clauses.append('tag_id = %(tag_id)i OR tag_id IS NULL')
else:
clauses.append('tag_id IS NULL')
if state != koji.BUILD_STATES['COMPLETE']:
clauses.append('success_only = FALSE')
query = QueryProcessor(columns=('email',), tables=['build_notifications'],
clauses=clauses, values=locals(),
opts={'asList':True})
emails = [result[0] for result in query.execute()]
email_domain = context.opts['EmailDomain']
# user who submitted the build
emails.append('%s@%s' % (build['owner_name'], email_domain))
packages = readPackageList(pkgID=package_id, tagID=tag_id, inherit=True)
# owner of the package in this tag, following inheritance
emails.append('%s@%s' % (packages[package_id]['owner_name'], email_domain))
if tag_id:
packages = readPackageList(pkgID=package_id, tagID=tag_id, inherit=True)
# owner of the package in this tag, following inheritance
emails.append('%s@%s' % (packages[package_id]['owner_name'], email_domain))
#FIXME - if tag_id is None, we don't have a good way to get the package owner.
# using all package owners from all tags would be way overkill.
emails_uniq = dict(zip(emails, [1] * len(emails))).keys()
return emails_uniq
@ -3612,12 +3633,16 @@ def build_notification(task_id, build_id):
build = get_build(build_id)
target = _get_build_target(task_id)
dest_tag = None
if target:
dest_tag = target['dest_tag']
if build['state'] == koji.BUILD_STATES['BUILDING']:
raise koji.GenericError, 'never send notifications for incomplete builds'
web_url = context.opts.get('KojiWebURL', 'http://localhost/koji')
recipients = get_notification_recipients(build, target['dest_tag'], build['state'])
recipients = get_notification_recipients(build, dest_tag, build['state'])
if len(recipients) > 0:
make_task('buildNotification', [recipients, build, target, web_url])
@ -5189,10 +5214,14 @@ class RootExports(object):
repoInfo = staticmethod(repo_info)
getActiveRepos = staticmethod(get_active_repos)
def newRepo(self, tag):
def newRepo(self, tag, event=None):
"""Create a newRepo task. returns task id"""
context.session.assertPerm('repo')
return make_task('newRepo', [tag], priority=15, channel='createrepo')
if event:
args = koji.encode_args(tag, event=None)
else:
args = [tag]
return make_task('newRepo', args, priority=15, channel='createrepo')
def repoExpire(self, repo_id):
"""mark repo expired"""
@ -6476,11 +6505,11 @@ class HostExports(object):
br.assertTask(task_id)
return br.updateList(rpmlist)
def repoInit(self, tag, with_src=False):
def repoInit(self, tag, with_src=False, event=None):
"""Initialize a new repo for tag"""
host = Host()
host.verify()
return repo_init(tag, with_src=with_src)
return repo_init(tag, with_src=with_src, event=event)
def repoAddRPM(self, repo_id, path):
"""Add an uploaded rpm to a repo"""
@ -6525,11 +6554,14 @@ class HostExports(object):
else:
os.link(filepath, dst)
def repoDone(self, repo_id, data):
def repoDone(self, repo_id, data, expire=False):
"""Move repo data into place, mark as ready, and expire earlier repos
repo_id: the id of the repo
data: a dictionary of the form { arch: (uploadpath, files), ...}
expire(optional): if set to true, mark the repo expired immediately*
* This is used when a repo from an older event is generated
"""
host = Host()
host.verify()
@ -6551,6 +6583,10 @@ class HostExports(object):
raise koji.GenericError, "uploaded file missing: %s" % src
os.link(src, dst)
os.unlink(src)
if expire:
repo_expire(repo_id)
return
#else:
repo_ready(repo_id)
repo_expire_older(rinfo['tag_id'], rinfo['create_event'])
#make a latest link

View file

@ -446,6 +446,10 @@ _default_policies = {
has_perm admin :: allow
all :: deny
''',
'build_from_repo_id' : '''
has_perm admin :: allow
all :: deny
''',
}
def get_policy(opts, plugins):

View file

@ -117,11 +117,14 @@ rm -rf $RPM_BUILD_ROOT
%{_sbindir}/kojira
%{_initrddir}/kojira
%config(noreplace) %{_sysconfdir}/sysconfig/kojira
%{_sysconfdir}/kojira
%dir %{_sysconfdir}/kojira
%config(noreplace) %{_sysconfdir}/kojira/kojira.conf
%{_sbindir}/koji-gc
%dir %{_sysconfdir}/koji-gc
%config(noreplace) %{_sysconfdir}/koji-gc/koji-gc.conf
%{_sbindir}/koji-shadow
%dir %{_sysconfdir}/koji-shadow
%config(noreplace) %{_sysconfdir}/koji-shadow/koji-shadow.conf
%files web
%defattr(-,root,root)
@ -134,7 +137,7 @@ rm -rf $RPM_BUILD_ROOT
%{_sbindir}/kojid
%{_initrddir}/kojid
%config(noreplace) %{_sysconfdir}/sysconfig/kojid
%{_sysconfdir}/kojid
%dir %{_sysconfdir}/kojid
%config(noreplace) %{_sysconfdir}/kojid/kojid.conf
%{_datadir}/koji-builder
%attr(-,kojibuilder,kojibuilder) /etc/mock/koji

View file

@ -1,4 +1,4 @@
BINFILES = kojira koji-gc
BINFILES = kojira koji-gc koji-shadow
_default:
@echo "nothing to make. try make install"
@ -26,3 +26,6 @@ install:
mkdir -p $(DESTDIR)/etc/koji-gc
install -p -m 644 koji-gc.conf $(DESTDIR)/etc/koji-gc/koji-gc.conf
mkdir -p $(DESTDIR)/etc/koji-shadow
install -p -m 644 koji-shadow.conf $(DESTDIR)/etc/koji-shadow/koji-shadow.conf

1157
util/koji-shadow Executable file

File diff suppressed because it is too large Load diff

7
util/koji-shadow.conf Normal file
View file

@ -0,0 +1,7 @@
# koji-shadow example config file
# (still working out all the config options)
[main]
server=http://localhost/kojihub/
remote=http://koji.fedoraproject.org/kojihub

View file

@ -119,8 +119,10 @@ class ManagedRepo(object):
def tryDelete(self):
"""Remove the repo from disk, if possible"""
#we check just the event age first since it is faster
age = time.time() - self.event_ts
if age < options.deleted_repo_lifetime:
#XXX should really be called expired_repo_lifetime
return False
self.logger.debug("Attempting to delete repo %s.." % self.repo_id)
if self.state != koji.REPO_EXPIRED:
@ -138,6 +140,16 @@ class ManagedRepo(object):
return False
tag_name = tag_info['name']
path = pathinfo.repo(self.repo_id, tag_name)
#also check dir age. We do this because a repo can be created from an older event
#and should not be removed based solely on that event's timestamp.
try:
age = time.time() - os.stat(path).st_mtime
except OSError:
self.logger.error("Can't stat repo directory: %s" % path)
return True
if age < options.deleted_repo_lifetime:
#XXX should really be called expired_repo_lifetime
return False
safe_rmtree(path, strict=False)
return True
@ -227,22 +239,25 @@ class RepoManager(object):
if self.repos.has_key(repo_id):
#we're already managing it, no need to deal with it here
continue
try:
dir_ts = os.stat(repodir).st_mtime
except OSError:
#just in case something deletes the repo out from under us
continue
rinfo = session.repoInfo(repo_id)
if rinfo is None:
try:
age = time.time() - os.stat(repodir).st_mtime
except OSError:
#just in case something deletes the repo out from under us
continue
age = time.time() - dir_ts
if age > 36000:
self.logger.warn("Unexpected directory (no such repo): %s" % repodir)
if not options.ignore_stray_repos:
self.logger.warn("Unexpected directory (no such repo): %s" % repodir)
continue
if rinfo['tag_name'] != taginfo['name']:
self.logger.warn("Tag name mismatch: %s" % repodir)
continue
if rinfo['state'] in (koji.REPO_DELETED, koji.REPO_PROBLEM):
age = time.time() - rinfo['create_ts']
age = time.time() - max(rinfo['create_ts'], dir_ts)
if age > options.deleted_repo_lifetime:
#XXX should really be called expired_repo_lifetime
count += 1
logger.info("Removing stray repo (state=%s): %s" % (koji.REPO_STATES[rinfo['state']], repodir))
safe_rmtree(repodir, strict=False)
@ -389,6 +404,8 @@ def get_options():
help="run in foreground")
parser.add_option("-d", "--debug", action="store_true",
help="show debug output")
parser.add_option("-q", "--quiet", action="store_true",
help="don't show warnings")
parser.add_option("-v", "--verbose", action="store_true",
help="show verbose output")
parser.add_option("--with-src", action="store_true",
@ -415,6 +432,7 @@ def get_options():
defaults = {'with_src': False,
'verbose': False,
'debug': False,
'ignore_stray_repos': False,
'topdir': '/mnt/koji',
'server': None,
'logfile': '/var/log/kojira.log',
@ -428,6 +446,7 @@ def get_options():
'delete_batch_size': 3,
'max_repo_tasks' : 10,
'deleted_repo_lifetime': 7*24*3600,
#XXX should really be called expired_repo_lifetime
'cert': '/etc/kojira/client.crt',
'ca': '/etc/kojira/clientca.crt',
'serverca': '/etc/kojira/serverca.crt'
@ -436,7 +455,7 @@ def get_options():
int_opts = ('prune_batch_size', 'deleted_repo_lifetime', 'max_repo_tasks',
'delete_batch_size', 'retry_interval', 'max_retries', 'offline_retry_interval')
str_opts = ('topdir','server','user','password','logfile', 'principal', 'keytab', 'cert', 'ca', 'serverca')
bool_opts = ('with_src','verbose','debug', 'offline_retry')
bool_opts = ('with_src','verbose','debug','ignore_stray_repos', 'offline_retry')
for name in config.options(section):
if name in int_opts:
defaults[name] = config.getint(section, name)
@ -486,6 +505,8 @@ if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
elif options.verbose:
logger.setLevel(logging.INFO)
elif options.quiet:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.WARNING)
session_opts = {}

996
util/kojisd Executable file
View file

@ -0,0 +1,996 @@
#!/usr/bin/python
# kojisd: a tool to subscribe to builds between koji instances
# Copyright (c) 2007-2008 Red Hat
# Copyright (c) 2007-2008 Dennis Gilmore
#
# Koji is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors:
# Mike McLean <mikem@redhat.com>
# Dennis Gilmore <dennis@ausil.us>
try:
import krbV
except ImportError:
pass
import koji
import ConfigParser
from email.MIMEText import MIMEText
import fnmatch
import optparse
import os
import pprint
import smtplib
import socket # for socket.error and socket.setdefaulttimeout
import sys
import time
import xmlrpclib # for ProtocolError and Fault
import urlgrabber.grabber as grabber
import rpm
# koji.fp.o keeps stalling, probably network errors...
# better to time out than to stall
socket.setdefaulttimeout(180) #XXX - too short?
OptionParser = optparse.OptionParser
if optparse.__version__ == "1.4.1+":
def _op_error(self, msg):
self.print_usage(sys.stderr)
msg = "%s: error: %s\n" % (self._get_prog_name(), msg)
if msg:
sys.stderr.write(msg)
sys.exit(2)
OptionParser.error = _op_error
def _(args):
"""Stub function for translation"""
return args
def get_options():
"""process options from command line and config file"""
usage = _("%prog [options]")
parser = OptionParser(usage=usage)
parser.add_option("-c", "--config-file", metavar="FILE",
help=_("use alternate configuration file"))
parser.add_option("--keytab", help=_("specify a Kerberos keytab to use"))
parser.add_option("--principal", help=_("specify a Kerberos principal to use"))
parser.add_option("--runas", metavar="USER",
help=_("run as the specified user (requires special privileges)"))
parser.add_option("--user", help=_("specify user"))
parser.add_option("--password", help=_("specify password"))
parser.add_option("--noauth", action="store_true", default=False,
help=_("do not authenticate"))
parser.add_option("-n", "--test", action="store_true", default=False,
help=_("test mode"))
parser.add_option("-d", "--debug", action="store_true", default=False,
help=_("show debug output"))
parser.add_option("--first-one", action="store_true", default=False,
help=_("stop after scanning first build -- debugging"))
parser.add_option("--debug-xmlrpc", action="store_true", default=False,
help=_("show xmlrpc debug output"))
parser.add_option("--skip-main", action="store_true", default=False,
help=_("don't actually run main"))
parser.add_option("--build",
help=_("scan just this build"))
parser.add_option("-s", "--server",
help=_("url of local XMLRPC server"))
parser.add_option("-r", "--remote",
help=_("url of remote XMLRPC server"))
parser.add_option("--validtags", action="append", default=[],
help=_("List of valid tags to build for"))
parser.add_option("--invalidtags", action="append", default=[],
help=_("List of tags to not build for"))
parser.add_option("--logfile", default="/var/log/kojisd.log",
help=_("location of log file"))
parser.add_option("--topdir", default="/mnt/koji",
help=_(""))
parser.add_option("--workpath", default="/mnt/koji/work/kojisd",
help=_("location to save import files"))
parser.add_option("--importarches", default="",
help=_("arches to import"))
parser.add_option("--buildarches", action="store", default="",
help=_("arches to build"))
parser.add_option("--pkgurl", action="store", default="",
help=_("url to base packages on remote server"))
#parse once to get the config file
(options, args) = parser.parse_args()
defaults = parser.get_default_values()
config = ConfigParser.ConfigParser()
cf = getattr(options, 'config_file', None)
if cf:
if not os.access(cf, os.F_OK):
parser.error(_("No such file: %s") % cf)
assert False
else:
cf = '/etc/kojisd/kojisd.conf'
if not os.access(cf, os.F_OK):
cf = None
if not cf:
print "no config file"
config = None
else:
config.read(cf)
#allow config file to update defaults for certain options
cfgmap = [
['keytab', None, 'string'],
['principal', None, 'string'],
['runas', None, 'string'],
['user', None, 'string'],
['password', None, 'string'],
['noauth', None, 'boolean'],
['server', None, 'string'],
['remote', None, 'string'],
['importarches', None, 'list'],
['serverca', None, 'string'],
['cert', None, 'string'],
['ca', None, 'string'],
['validtags', None, 'list'],
['invalidtags', None, 'list'],
['logfile', None, 'string'],
['topdir', None, 'string'],
['workpath', None, 'string'],
['buildarches', None, 'string'],
['pkgurl', None, 'string'],
]
for name, alias, type in cfgmap:
print "Checking %s" % name
if alias is None:
alias = ('kojisd', name)
if config.has_option(*alias):
print "Using option %s from config file" % (alias,)
if type == 'integer':
setattr(defaults, name, config.getint(*alias))
elif type == 'boolean':
setattr(defaults, name, config.getboolean(*alias))
elif type == 'list':
line = config.get(*alias)
line = line.split()
setattr(defaults, name, line)
else:
setattr(defaults, name, config.get(*alias))
#parse again with updated defaults
(options, args) = parser.parse_args(values=defaults)
return options, args
# Seconds per unit, keyed by the canonical unit name.
time_units = {
    'second' : 1,
    'minute' : 60,
    'hour' : 3600,
    'day' : 86400,
    'week' : 604800,
}
# Accepted spellings for each unit; the first entry of each row is the
# canonical name used to key time_units.
time_unit_aliases = [
    #[unit, alias, alias, ...]
    ['week', 'weeks', 'wk', 'wks'],
    ['hour', 'hours', 'hr', 'hrs'],
    ['day', 'days'],
    ['minute', 'minutes', 'min', 'mins'],
    ['second', 'seconds', 'sec', 'secs', 's'],
]

def parse_duration(interval):
    """Parse a time duration from a string, returning seconds.

    Accepts any sequence of "<number> <unit>" pairs (e.g. "1 day 4 hrs").
    The unit may also be appended directly to the number ("90s", "2weeks").
    Numbers may be ints or floats.

    Raises ValueError if a token is neither a number nor a known unit.
    (Note: the parameter was renamed from `str`, which shadowed the builtin;
    callers pass it positionally.)
    """
    ret = 0
    n = None
    unit = None
    def parse_num(s):
        # return s as int, else float, else None if not numeric
        try:
            return int(s)
        except ValueError:
            pass
        try:
            return float(s)
        except ValueError:
            pass
        return None
    for x in interval.split():
        if n is None:
            n = parse_num(x)
            if n is not None:
                continue
            #perhaps the unit is appended w/o a space
            for names in time_unit_aliases:
                for name in names:
                    if x.endswith(name):
                        n = parse_num(x[:-len(name)])
                        if n is None:
                            continue
                        unit = names[0]
                        # combined at end
                        break
                if unit:
                    break
            else:
                raise ValueError("Invalid time interval: %s" % interval)
        if unit is None:
            x = x.lower()
            for names in time_unit_aliases:
                for name in names:
                    if x == name:
                        unit = names[0]
                        break
                if unit:
                    break
            else:
                raise ValueError("Invalid time interval: %s" % interval)
        ret += n * time_units[unit]
        n = None
        unit = None
    return ret
def error(msg=None, code=1):
    """Write msg (when non-empty) to stderr, then exit with the given status."""
    if msg:
        sys.stderr.write("%s\n" % msg)
        sys.stderr.flush()
    sys.exit(code)
def warn(msg):
    """Write msg to stderr as a warning; does not exit."""
    sys.stderr.write("%s\n" % msg)
    sys.stderr.flush()
def ensure_connection(session):
    """Verify we can reach the hub; warn if API versions differ.

    Exits the process (via error()) if the server cannot be contacted.
    """
    try:
        ver = session.getAPIVersion()
    except xmlrpclib.ProtocolError:
        error(_("Error: Unable to connect to server"))
    if ver != koji.API_VERSION:
        warn(_("WARNING: The server is at API version %d and the client is at %d" % (ver, koji.API_VERSION)))
def activate_session(session):
    """Test and login the session is applicable"""
    # Authentication methods are tried in order: explicit --noauth, SSL
    # client certificate, user/password, then Kerberos credentials.
    global options
    if options.noauth:
        #skip authentication
        pass
    elif os.path.isfile(options.cert):
        # authenticate using SSL client cert
        session.ssl_login(options.cert, options.ca, options.serverca, proxyuser=options.runas)
    elif options.user:
        # authenticate using user/password
        # NOTE(review): login() is called with no arguments -- presumably the
        # user/password from session_opts are used; confirm.
        session.login()
    elif has_krb_creds():
        try:
            if options.keytab and options.principal:
                # explicit keytab-based login
                session.krb_login(principal=options.principal, keytab=options.keytab, proxyuser=options.runas)
            else:
                # fall back to the current credential cache
                session.krb_login(proxyuser=options.runas)
        except krbV.Krb5Error, e:
            error(_("Kerberos authentication failed: %s (%s)") % (e.args[1], e.args[0]))
        except socket.error, e:
            # non-fatal: we may still be logged in via another path below
            warn(_("Could not connect to Kerberos authentication service: %s") % e.args[1])
    if not options.noauth and not session.logged_in:
        error(_("Unable to log in, no authentication methods available"))
    # verify connectivity and API version even when unauthenticated
    ensure_connection(session)
    if options.debug:
        print "successfully connected to hub"
def getHubTags(session):
    '''Return the list of tag names defined on the given hub.

    session: a koji ClientSession (local or remote hub).
    '''
    tags = []
    for remoteTag in session.listTags():
        tags.append(remoteTag['name'])
    # bugfix: previously printed/returned the undefined name buildTags,
    # which raised NameError at runtime
    print("tags : %s" % tags)
    return tags
def syncTags():
    ''' sync the tags from the master to the slave. due to inheritance
        its easier to sync tags completely between the hubs

        NOTE(review): relies on the globals buildTags, localTags, remote,
        session and buildarches being set up by the caller -- confirm.
    '''
    # tags present on the master but not locally
    toAddTags = []
    for tag in buildTags:
        if tag not in localTags:
            toAddTags.append(tag)
    orderToAddTags = []
    for tag in toAddTags:
        rawParents = remote.getFullInheritance(tag)
        for rawParent in rawParents:
            # only direct (depth 1) parents; deeper ancestry follows implicitly
            if rawParent['currdepth'] == 1:
                print(tag)
                print(rawParent['name'])
                # bugfix: was ["%s", "%s"] % (...), which applied % to a
                # list and raised TypeError
                orderToAddTags.append((tag, rawParent['name']))
    for tag, parent in orderToAddTags:
        session.createTag(tag, parent, arches=buildarches)
    # TODO: handle errors gracefully, order tag creation. handle targets
    return
def main(args):
    """Scan the configured tags and rebuild/import whatever is missing."""
    #XXX get tags
    if options.validtags is not None:
        buildTags = options.validtags
    else:
        buildTags = getHubTags(remote)
    # drop explicitly excluded tags; tolerate the option being unset/None
    for tag in (getattr(options, 'invalidtags', None) or []):
        if tag in buildTags:
            buildTags.remove(tag)
    print("BuildTags: %s" % buildTags)
    #syncTags()
    tracker = BuildTracker()
    # go through each tag and see what needs building
    for buildTag in buildTags:
        print("BuildTag: %s" % buildTag)
        tracker.scanTag(buildTag)
        tracker.report()
    tracker.showOrder()
    tracker.runRebuilds()
def remote_buildroots(build_id):
    """Return the buildroot ids used to produce the given remote build."""
    #XXX - only used in old test code (foo)
    buildroots = {}
    for rinfo in remote.listRPMs(build_id):
        br_id = rinfo.get('buildroot_id')
        if br_id:
            buildroots[br_id] = 1
        else:
            print("Warning: no buildroot for: %s" % rinfo)
    return buildroots.keys()
def remote_br_builds(brlist):
    """Given a list of buildroots, return build data of their contents."""
    #XXX - only used in old test code (foo)
    seen = {}
    builds = {}
    for br_id in brlist:
        if br_id in seen:
            continue
        seen[br_id] = 1
        #print "."
        for rinfo in remote.listRPMs(componentBuildrootID=br_id):
            builds[rinfo['build_id']] = 1
    ret = {}
    for b in builds:
        ret[b] = remote.getBuild(b)
    return ret
def foo():
    """just experimenting...."""
    # Fetch the remote build named in args[0] and the buildroots it was
    # produced in, then report which of its components exist locally.
    binfo = remote.getBuild(args[0])
    buildroots = remote_buildroots(binfo['id'])
    if not buildroots:
        #nothing we can do
        return
    build_idx = remote_br_builds(buildroots)
    # index component builds by package name
    name_idx = {}
    for binfo2 in build_idx.itervalues():
        name_idx.setdefault(binfo2['name'], []).append(binfo2)
    names = name_idx.keys()
    missing = {}
    found = {}
    for name, builds in name_idx.iteritems():
        if len(builds) > 1:
            print "Warning: found multiple versions of %s: %s" % (name, builds)
            #pick latest (by completion time)
            order = [(b['completion_ts'], b) for b in builds]
            order.sort()
            build = order[-1][1]
        else:
            build = builds[0]
        nvr = "%(name)s-%(version)s-%(release)s" % build
        build.setdefault('nvr', nvr)
        #see if our server has it
        ours = session.getBuild(nvr)
        if ours:
            ours.setdefault('nvr', nvr)
            found[name] = ours
        else:
            missing[name] = build
    # report builds both hubs have, then the ones we lack, sorted by name
    names = found.keys()
    names.sort()
    for name in names:
        print "Found common build: %(nvr)s" % found[name]
    names = missing.keys()
    names.sort()
    for name in names:
        print "Missing remote build: %(nvr)s" % missing[name]
class TrackedBuild(object):
    """A single remote build being tracked for shadow rebuilding.

    On construction, classifies the build relative to the local hub:
    "common" (we already have it), "broken" (local copy is DELETED/BUILDING),
    "missing" (no usable local build; deps are scanned), or "noroot" (no
    buildroot data available, set by getDeps).
    """

    def __init__(self, build_id, child=None, tracker=None):
        # uses the module-level `remote`, `session` and `koji` globals
        self.id = build_id
        self.tracker = tracker
        self.info = remote.getBuild(build_id)
        self.nvr = "%(name)s-%(version)s-%(release)s" % self.info
        self.children = {}
        self.state = None
        self.order = 0
        if child is not None:
            #children tracks the builds that were built using this one
            self.children[child] = 1
        #see if we have it
        ours = session.getBuild(self.nvr)
        self.rebuilt = False
        if ours is not None:
            state = koji.BUILD_STATES[ours['state']]
            if state == 'COMPLETE':
                self.setState("common")
                if ours['task_id']:
                    # a task id means it was actually built here, not imported
                    self.rebuilt = True
                return
            elif state in ('FAILED', 'CANCELED'):
                #treat these as having no build
                pass
            else:
                # DELETED, BUILDING
                self.setState("broken")
                return
        self.setState("missing")
        self.getDeps() #sets deps, br_tag, base, order, (maybe state)

    def setState(self, state):
        """Set the build state, keeping the tracker's state index in sync."""
        #print "%s -> %s" % (self.nvr, state)
        if state == self.state:
            return
        if self.state is not None and self.tracker:
            del self.tracker.state_idx[self.state][self.id]
        self.state = state
        if self.tracker:
            self.tracker.state_idx.setdefault(self.state, {})[self.id] = 1

    def addChild(self, child):
        """Record that build id `child` was built using this build."""
        self.children[child] = 1

    def setExtraArchesFromRPMs(self, rpms=None):
        """Record non-canonical arches present in this build's rpms."""
        if rpms is None:
            rpms = remote.listRPMs(self.id)
        arches = {}
        for rpminfo in rpms:
            arches.setdefault(rpminfo['arch'], 1)
        self.extraArches = [a for a in arches if koji.canonArch(a) != a]

    def getBuildroots(self):
        """Return a list of buildroots for remote build"""
        rpms = remote.listRPMs(self.id)
        #while we've got the rpm list, let's note the extra arches
        #XXX - really should reorganize this a bit
        self.setExtraArchesFromRPMs(rpms)
        brs = {}
        bad = []
        for rinfo in rpms:
            br_id = rinfo.get('buildroot_id')
            if not br_id:
                bad.append(rinfo)
                continue
            brs[br_id] = 1
        if brs and bad:
            print("Warning: some rpms for %s lacked buildroots:" % self.nvr)
            for rinfo in bad:
                # bugfix: format string lacked 's' conversions and raised
                # ValueError; was " %(name)-%(version)-%(release).%(arch)"
                print("  %(name)s-%(version)s-%(release)s.%(arch)s" % rinfo)
        return brs.keys()

    def getDeps(self):
        """Determine deps, buildroot tag, base package set and build order."""
        buildroots = self.getBuildroots()
        if not buildroots:
            self.setState("noroot")
            return
        buildroots.sort()
        # use the newest buildroot id as a rough ordering key
        self.order = buildroots[-1]
        seen = {} #used to avoid scanning the same buildroot twice
        builds = {} #track which builds we need for a rebuild
        bases = {} #track base install for buildroots
        tags = {} #track buildroot tag(s)
        for br_id in buildroots:
            if seen.has_key(br_id):
                continue
            seen[br_id] = 1
            br_info = remote.getBuildroot(br_id, strict=True)
            tags.setdefault(br_info['tag_name'], 0)
            tags[br_info['tag_name']] += 1
            #print "."
            for rinfo in remote.listRPMs(componentBuildrootID=br_id):
                builds[rinfo['build_id']] = 1
                if not rinfo['is_update']:
                    bases.setdefault(rinfo['name'], {})[br_id] = 1
        # we want to record the intersection of the base sets
        # XXX - this makes some assumptions about homogeneity that, while reasonable,
        # are not strictly required of the db.
        # The only way I can think of to break this is if some significant tag/target
        # changes happened during the build startup and some subtasks got the old
        # repo and others the new one.
        base = []
        for name, brlist in bases.iteritems():
            for br_id in buildroots:
                if br_id not in brlist:
                    break
            else:
                #each buildroot had this as a base package
                base.append(name)
        if len(tags) > 1:
            print("Warning: found multiple buildroot tags for %s: %s" % (self.nvr, tags.keys()))
            # pick the most frequently used tag
            counts = [(n, tag) for tag, n in tags.iteritems()]
            # bugfix: was sort(counts), a NameError (no builtin sort function)
            counts.sort()
            tag = counts[-1][1]
        else:
            tag = tags.keys()[0]
        self.deps = builds
        self.br_tag = tag
        self.base = base
class BuildTracker(object):
    """Track remote builds and drive the import/rebuild process.

    NOTE(review): builds and state_idx are class-level mutable attributes,
    so all BuildTracker instances share the same dicts -- confirm this is
    intended (only one tracker is created per run in practice).
    """
    builds = {}     # build_id -> TrackedBuild
    state_idx = {}  # state name -> {build_id: 1}, maintained by TrackedBuild.setState
    def rpmvercmp (self, (e1, v1, r1), (e2, v2, r2)):
        """find out which build is newer"""
        # returns "first", "same" or "second" per rpm EVR comparison
        rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
        if rc == 1:
            return "first"
        elif rc == 0:
            return "same"
        else:
            return "second"
    def scanBuild(self, build_id, tag, from_build=None, depth=0):
        """Recursively scan a build and its dependencies"""
        #print build_id
        build = self.builds.get(build_id)
        if build:
            #already scanned
            if from_build:
                build.addChild(from_build.id)
            return build
        #otherwise...
        child_id = None
        if from_build:
            child_id = from_build.id
        build = TrackedBuild(build_id, child=child_id, tracker=self)
        #print build.id, build.nvr
        self.builds[build_id] = build
        # periodic progress report
        if len(self.builds) % 50 == 0:
            self.report()
        if from_build:
            tail = " (from %s)" % from_build.nvr
        else:
            tail = ""
        head = " " * depth
        # compare the remote build against the latest local build of the
        # same package to decide whether it is newer, older, or the same
        parentTask = remote.getBuild(int(build.id))
        latestBuild = session.getLatestBuilds(tag, package=parentTask['package_name'])
        if latestBuild:
            parentevr = (str(parentTask['epoch']), parentTask['version'], parentTask['release'])
            latestevr = (str(latestBuild[0]['epoch']), latestBuild[0]['version'], latestBuild[0]['release'])
            newestRPM = self.rpmvercmp( parentevr, latestevr)
            newBuild = remote.getBuild(latestBuild[0]['nvr'])
        else:
            # We get here when there is no build on the local hub
            newestRPM = "first"
        if newestRPM == "first":
            # the remote build is newer than anything we have locally
            if build.state == "common":
                #we're good
                if build.rebuilt:
                    print "%sCommon build (rebuilt) %s%s" % (head, build.nvr, tail)
                else:
                    print "%sCommon build %s%s" % (head, build.nvr, tail)
            elif build.state == "noroot":
                #we're fucked, so build with latest build root we have
                #TODO: build with the latest buildroot
                print "%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail)
                #get src url
                if parentTask['task_id'] is not None:
                    parentRequest = remote.getTaskRequest(parentTask['task_id'])
                    # queue a build from the original source url/target
                    session.build(parentRequest[0], tag, parentRequest[2])
                    print "%sInfo: building %s%s"%(head, parentRequest[0], tail)
                    build.state = "broken"
                else:
                    print "Error: unable to queue %s to build it was imported upstream" % parentTask['nvr']
            elif build.state == "broken":
                #also fucked
                #TODO: find replacement package version
                print "%sWarning: build exists, but is invalid: %s%s" % (head, build.nvr, tail)
            elif build.state == "missing":
                # check to see if we just import this from the remote host.
                rpmfiles = remote.listRPMs(buildID=build.id, arches=options.importarches)
                rpmname = remote.getBuild(build.id)['package_name']
                # kernel is funky we should never just import it
                # XXX: should this be a config of packages we dont import?
                if rpmfiles and rpmname != "kernel":
                    print "%sInfo: Importing build %s%s" %(head, build.nvr, tail)
                    self.importBuild(build.id, tag, rpmfiles, rpmname, build.nvr )
                    build.state = "imported"
                    return build
                # lets see if we have a newer build
                #scan its deps
                print "%sMissing build %s%s. Scanning deps..." % (head, build.nvr, tail)
                for dep_id in build.deps:
                    # retry network hiccups up to 10 times per dep
                    for retry in xrange(10):
                        try:
                            self.scanBuild(dep_id, tag, from_build=build, depth=depth+1)
                        except (socket.timeout, socket.error):
                            print "retry"
                            continue
                        break
                    else:
                        print "Error: unable to scan dep: %i for %s" % (dep_id, build.nvr)
                        continue
        elif newestRPM == "second":
            # the local hub already has a newer build than the remote one
            # newBuild will be None when the build does not exist on the remote hub
            if newBuild == None:
                #if the newer build does not exist on the remote hub fill in the info from the localhub
                # this should only ever haaaaappen during bootstrapping or if we build something on the
                # local hub to make sure a fix works before building on the remote system
                newBuild = session.getBuild(latestBuild[0]['nvr'])
                build.id = newBuild['id']
                build.tracker = None
                build.info = newBuild
                build.nvr = "%s" % newBuild['nvr']
                build.children = {}
                build.state = "common"
            else:
                build = TrackedBuild(newBuild['id'], child=None, tracker=self)
                build.state = "common"
            #self.builds.get(newBuild['task_id'])
            print "%sNewer build %s%s" % (head, build.nvr, tail)
        elif newestRPM == "same":
            #we're good
            if build.rebuilt:
                print "%sCommon/Latest build (rebuilt) %s%s" % (head, build.nvr, tail)
            else:
                print "%sCommon/Latest build %s%s" % (head, build.nvr, tail)
        return build
    def importBuild(self, build_id, tag, rpmfiles, pkg, nvr):
        '''import and tag a build from remote hub'''
        # download the src.rpm from the master package tree, import, then
        # repeat for each binary rpm, and finally tag the build
        fname = "%s-%s-%s.src.rpm" % (pkg, rpmfiles[0]['version'], rpmfiles[0]['release'])
        url = "%s/%s/%s/%s/src/%s" % (options.pkgurl, pkg, rpmfiles[0]['version'], rpmfiles[0]['release'], fname)
        print url
        file = grabber.urlopen(url, text = "%s.%s" % (pkg, 'src'))
        out = os.open(os.path.join(options.workpath, fname), os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0666)
        try:
            while 1:
                buf = file.read(4096)
                if not buf:
                    break
                os.write(out, buf)
        finally:
            os.close(out)
            file.close()
        print 'Downloaded: %s' % fname
        session.importRPM('kojisd', fname)
        print 'Imported: %s' % fname
        for rpm in rpmfiles:
            fname = "%s-%s-%s.%s.rpm" % (rpm['name'], rpm['version'], rpm['release'], rpm['arch'])
            url = "%s/%s/%s/%s/%s/%s" % (options.pkgurl, pkg, rpm['version'], rpm['release'], rpm['arch'], fname)
            print url
            file = grabber.urlopen(url, text = "%s.%s" % (rpm['name'], rpm['arch']))
            out = os.open(os.path.join(options.workpath, fname), os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0666)
            try:
                while 1:
                    buf = file.read(4096)
                    if not buf:
                        break
                    os.write(out, buf)
            finally:
                os.close(out)
                file.close()
            print 'Downloaded: %s' % fname
            session.importRPM('kojisd', fname)
            print 'Imported: %s' % fname
        session.tagBuildBypass(tag, nvr)
        print 'Tagged: %s' % nvr
    def scanTag(self, tag):
        """Scan the latest builds in a remote tag"""
        taginfo = remote.getTag(tag)
        builds = remote.listTagged(taginfo['id'], latest=True)
        for build in builds:
            # retry network hiccups up to 10 times per build
            for retry in xrange(10):
                try:
                    self.scanBuild(build['id'], tag)
                    if options.first_one:
                        return
                except (socket.timeout, socket.error):
                    print "retry"
                    continue
                break
            else:
                print "Error: unable to scan %(name)s-%(version)s-%(release)s" % build
                continue
    def scan(self):
        """Scan based on config file"""
        # NOTE(review): stub -- gathers the tag list but does nothing yet
        to_scan = []
        alltags = remote.listTags()
    def rebuild(self, build):
        """Rebuild a remote build using closest possible buildroot"""
        #first check that we can
        deps = []
        for build_id in build.deps:
            dep = self.builds.get(build_id)
            if not dep:
                print "Missing dependency %i for %s. Not scanned?" % (build_id, build.nvr)
                return
            if dep.state != 'common':
                print "Dependency missing for %s: %s (%s)" % (build.nvr, dep.nvr, dep.state)
                return
            deps.append(dep)
        #check/create tag
        our_tag = "SHADOWBUILD-%s" % build.br_tag
        taginfo = session.getTag(our_tag)
        parents = None
        if not taginfo:
            #XXX - not sure what is best here
            #how do we pick arches? for now just assume all....
            # config option for
            #XXX this call for perms is stupid, but it's all we've got
            perm_id = None
            for data in session.getAllPerms():
                if data['name'] == 'admin':
                    perm_id = data['id']
                    break
            # use config option for arches
            session.createTag(our_tag, perm=perm_id, arches='%s' % buildarches)
            taginfo = session.getTag(our_tag, strict=True)
            session.createBuildTarget(taginfo['name'], taginfo['id'], taginfo['id'])
        else:
            parents = session.getInheritanceData(taginfo['id'])
            if parents:
                print "Warning: shadow build tag has inheritance"
        #check package list
        pkgs = {}
        for pkg in session.listPackages(tagID=taginfo['id']):
            pkgs[pkg['package_name']] = pkg
        missing_pkgs = []
        for dep in deps:
            name = dep.info['name']
            if not pkgs.has_key(name):
                #guess owner
                owners = {}
                for pkg in session.listPackages(pkgID=name):
                    owners.setdefault(pkg['owner_id'], []).append(pkg)
                if owners:
                    # pick the owner with the most package entries
                    order = [(len(v), k) for k, v in owners.iteritems()]
                    order.sort()
                    owner = order[-1][1]
                else:
                    #just use ourselves
                    owner=session.getLoggedInUser()['id']
                missing_pkgs.append((name, owner))
        #check build list
        cur_builds = {}
        for binfo in session.listTagged(taginfo['id']):
            #index by name in tagging order (latest first)
            cur_builds.setdefault(binfo['name'], []).append(binfo)
        to_untag = []
        to_tag = []
        for dep in deps:
            #XXX - assuming here that there is only one dep per 'name'
            #      may want to check that this is true
            cur_order = cur_builds.get(dep.info['name'], [])
            tagged = False
            for binfo in cur_order:
                if binfo['nvr'] == dep.nvr:
                    tagged = True
                    #may not be latest now, but it will be after we do all the untagging
                else:
                    # note that the untagging keeps older builds from piling up. In a sense
                    # we're gc-pruning this tag ourselves every pass.
                    to_untag.append(binfo)
            if not tagged:
                to_tag.append(dep)
        # examine groups: only a 'build' group should exist on our shadow tag
        drop_groups = []
        build_group = None
        for group in session.getTagGroups(taginfo['id']):
            if group['name'] == 'build':
                build_group = group
            else:
                # we should have no other groups but build
                print "Warning: found stray group: %s" % group
                drop_groups.append(group['name'])
        if build_group:
            #TODO - fix build group package list based on base of build to shadow
            needed = dict([(n,1) for n in build.base])
            current = dict([(p['package'],1) for p in build_group['packagelist']])
            add_pkgs = [n for n in needed if not current.has_key(n)]
            drop_pkgs = [n for n in current if not needed.has_key(n)]
            #no group deps needed/allowed
            drop_deps = [(g['name'], 1) for g in build_group['grouplist']]
            if drop_deps:
                print "Warning: build group had deps: %r" % build_group
        else:
            add_pkgs = build.base
            drop_pkgs = []
            drop_deps = []
        #update package list, tagged packages, and groups in one multicall/transaction
        #(avoid useless repo regens)
        session.multicall = True
        for name, owner in missing_pkgs:
            session.packageListAdd(taginfo['id'], name, owner=owner)
        for binfo in to_untag:
            session.untagBuildBypass(taginfo['id'], binfo['id'])
        for dep in to_tag:
            session.tagBuildBypass(taginfo['id'], dep.nvr)
            #shouldn't need force here
        #set groups data
        if not build_group:
            # build group not present. add it
            session.groupListAdd(taginfo['id'], 'build', force=True)
            #using force in case group is blocked. This shouldn't be the case, but...
        for pkg_name in drop_pkgs:
            #in principal, our tag should not have inheritance, so the remove call is the right thing
            session.groupPackageListRemove(taginfo['id'], 'build', pkg_name)
        for pkg_name in add_pkgs:
            session.groupPackageListAdd(taginfo['id'], 'build', pkg_name)
            #we never add any blocks, so forcing shouldn't be required
        #TODO - adjust extra_arches for package to build
        #TODO - get event id to facilitate waiting on repo
        #       not sure if getLastEvent is good enough
        #       short of adding a new call, perhaps use getLastEvent together with event of
        #       current latest repo for tag
        session.getLastEvent()
        results = session.multiCall()
        [event_id, event_ts] = results[-1]
        #TODO - verify / check results ?
        #TODO - call newRepo
        #TODO - upload src
        src = "" #XXX
        #TODO - wait for repo
        #TODO - kick off build
        #task_id = session.build(src, taginfo['name'], ... ) #XXX
        #TODO - add task/build to some sort of watch list
        #TODO - post-build validation
    def report(self):
        """Print a short summary of tracked builds grouped by state."""
        print time.asctime()
        print "%i builds" % len(self.builds)
        states = self.state_idx.keys()
        states.sort()
        for s in states:
            print "%s: %i" % (s, len(self.state_idx[s]))
    def runRebuilds(self):
        """Rebuild missing builds"""
        print "Determining rebuild order"
        # order by the (buildroot-derived) order key, then id for stability
        builds = [(b.order, b.id, b) for b in self.builds.itervalues()]
        builds.sort()
        b_avail = {}
        ok = 0
        bad = 0
        for order, build_id, build in builds:
            if build.state == 'common':
                b_avail[build_id] = 1
            elif build.state == 'missing':
                #check deps
                not_avail = [x for x in build.deps.iterkeys() if not b_avail.get(x)]
                if not_avail:
                    print "Can't rebuild %s, missing %i deps" % (build.nvr, len(not_avail))
                    b_avail[build_id] = 0
                    bad += 1
                    for dep_id in not_avail:
                        dep = self.builds[dep_id]
                        avail = b_avail.get(dep_id)
                        if avail is None:
                            print " %s (out of order?)" % dep.nvr
                        elif not avail:
                            print " %s (%s)" % (dep.nvr, dep.state)
                else:
                    ok += 1
                    print "rebuild: %s" % build.nvr
                    self.rebuild(build)
                    break #XXX
                    # NOTE(review): unreachable due to the break above
                    b_avail[build_id] = 1
            else:
                print "build: %s, state: %s, #children: %i" \
                        % (build.nvr, build.state, len(build.children))
                b_avail[build_id] = 0
        print "ok: %i, bad: %i" % (ok, bad)
    def showOrder(self):
        """Show order of rebuilds (for debugging)
        This is sort of a dress rehearsal for the rebuild scheduler
        """
        print "Determining rebuild order"
        builds = [(b.order, b.id, b) for b in self.builds.itervalues()]
        #builds = self.builds.items() # (id, build)
        builds.sort()
        b_avail = {}
        ok = 0
        bad = 0
        #for build_id, build in builds:
        for order, build_id, build in builds:
            if build.state == 'common':
                b_avail[build_id] = 1
            elif build.state == 'missing':
                #for sanity, check deps
                for dep_id in build.deps.iterkeys():
                    dep = self.builds[dep_id]
                    avail = b_avail.get(dep_id)
                    if avail is None:
                        print "Can't rebuild %s, missing %s (out of order?)" % (build.nvr, dep.nvr)
                        b_avail[build_id] = 0
                        bad += 1
                        break
                    elif not avail:
                        print "Can't rebuild %s, missing %s (%s)" % (build.nvr, dep.nvr, dep.state)
                        b_avail[build_id] = 0
                        bad += 1
                        break
                else:
                    ok += 1
                    print "rebuild: %s" % build.nvr
                    b_avail[build_id] = 1
            else:
                print "build: %s, state: %s, #children: %i" \
                        % (build.nvr, build.state, len(build.children))
                #show_children(build_id)
                b_avail[build_id] = 0
        print "ok: %i, bad: %i" % (ok, bad)
def bar():
    """Scan a single build (options.build) or a whole tag (args[0]),
    then report, show the rebuild order and run the rebuild pass."""
    tracker = BuildTracker()
    if options.build:
        info = remote.getBuild(options.build, strict=True)
        # NOTE(review): scanBuild normally takes a tag argument as well --
        # confirm this single-argument call path is exercised/correct
        tracker.scanBuild(info['id'])
    else:
        tracker.scanTag(args[0])
    tracker.report()
    tracker.showOrder()
    tracker.runRebuilds()
if __name__ == "__main__":
    options, args = get_options()
    print options
    # pass through the session options the ClientSession understands
    session_opts = {}
    for k in ('user', 'password', 'debug_xmlrpc', 'debug'):
        session_opts[k] = getattr(options,k)
    print options.server
    # local (building) hub
    session = koji.ClientSession(options.server, session_opts)
    if not options.noauth:
        activate_session(session)
    #XXX - sane auth
    #XXX - config!
    # master hub we shadow from (read-only, unauthenticated)
    remote = koji.ClientSession(options.remote, session_opts)
    rv = 0
    try:
        rv = main(args)
        if not rv:
            rv = 0
    except KeyboardInterrupt:
        pass
    except SystemExit:
        rv = 1
    #except:
    #    if options.debug:
    #        raise
    #    else:
    #        exctype, value = sys.exc_info()[:2]
    #        rv = 1
    #        print "%s: %s" % (exctype, value)
    # best-effort logout; ignore failures on the way out
    try:
        session.logout()
    except:
        pass
    sys.exit(rv)

55
util/kojisd.conf Normal file
View file

@ -0,0 +1,55 @@
[kojisd]
; For user/pass authentication
; user=kojisd
; password=kojisd
; For Kerberos authentication
; the principal to connect with
;principal=koji/repo@EXAMPLE.COM
; The location of the keytab for the principal above
;keytab=/etc/kojira.keytab
; The URL for the building koji hub server
server = http://sparc.koji.fedoraproject.org/kojihub
; The URL for the master koji hub server
remote = http://koji.fedoraproject.org/kojihub
; The directory containing the repos/ directory
;topdir = /mnt/koji
; Logfile
;logfile = /var/log/kojisd.log
; kojisd daemon user cert for secondary hub
clientcert = /etc/kojisd/fedora.cert
clientca = /etc/kojisd/fedora-upload-ca.cert
serverca = /etc/kojisd/fedora-server-ca.cert
; tags on primary we want to build
validtags = dist-f8 dist-f9 dist-f8-updates dist-f8-updates-candidate dist-f8-updates-testing
;validtags = ['dist-f8', 'dist-f9', 'dist-f8-updates', 'dist-f8-updates-candidate', 'dist-f8-updates-testing']
; tags from primary we do not want to build
;invalidtags =
; Arches we import directly from the master hub
importarches = noarch
; Arches we will build for
buildarches = 'sparcv9 sparc64'
; Work directory, where we download files to
;workpath = /mnt/koji/work/kojisd
pkgurl = http://koji.fedoraproject.org/packages
; user to run build as
;buildas =
;configuration for SSL authentication
;client certificate
cert = /etc/kojisd/kojisd_key_and_cert.pem
;certificate of the CA that issued the client certificate
ca = /etc/pki/tls/certs/extras_cacert.pem
;certificate of the CA that issued the HTTP server certificate
serverca = /etc/pki/tls/certs/extras_cacert.pem

85
util/kojisd.init Normal file
View file

@ -0,0 +1,85 @@
#! /bin/sh
#
# kojisd        Start/Stop kojisd
#
# chkconfig: 345 99 99
# description: koji subscriber daemon
# processname: kojisd

# This is an interactive program, we need the current locale

# Source function library.
. /etc/init.d/functions

# Check that we're a privileged user
[ `id -u` = 0 ] || exit 0

# Pull in daemon settings (RUNAS, FORCE_LOCK, debug flags)
[ -f /etc/sysconfig/kojisd ] && . /etc/sysconfig/kojisd

prog="kojisd"

# Check that networking is up.
if [ "$NETWORKING" = "no" ]
then
    exit 0
fi

[ -f /usr/sbin/kojisd ] || exit 0

RETVAL=0

start() {
    echo -n $"Starting $prog: "
    cd /
    ARGS=""
    # NOTE(review): the KOJIRA_* variable names look copied from the kojira
    # init script -- confirm /etc/sysconfig/kojisd actually sets these
    [ "$FORCE_LOCK" == "Y" ] && ARGS="$ARGS --force-lock"
    [ "$KOJIRA_DEBUG" == "Y" ] && ARGS="$ARGS --debug"
    [ "$KOJIRA_VERBOSE" == "Y" ] && ARGS="$ARGS --verbose"
    if [ -n "$RUNAS" -a "$RUNAS" != "root" ]; then
        daemon --user "$RUNAS" /usr/sbin/kojisd $ARGS
    else
        daemon /usr/sbin/kojisd $ARGS
    fi
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/kojisd
    return $RETVAL
}

stop() {
    echo -n $"Stopping $prog: "
    killproc kojisd
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/kojisd
    return $RETVAL
}

restart() {
    stop
    start
}

# See how we were called.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status $prog
        ;;
    restart|reload)
        restart
        ;;
    condrestart)
        # only restart if the service was already running
        [ -f /var/lock/subsys/kojisd ] && restart || :
        ;;
    *)
        echo $"Usage: $0 {start|stop|status|restart|reload|condrestart}"
        exit 1
esac

exit $?

View file

@ -85,7 +85,11 @@
#set $buildTarget = $params[2]
<strong>Recipients:</strong>&nbsp;$printValue('', $params[0])<br/>
<strong>Build:</strong> <a href="buildinfo?buildID=$build.id">$koji.buildLabel($build)</a><br/>
#if $buildTarget
<strong>Build Target:</strong> <a href="buildtargetinfo?targetID=$buildTarget.id">$buildTarget.name</a><br/>
#else
<strong>Build Target:</strong> (no build target)<br/>
#end if
<strong>Web URL:</strong> <a href="$params[3]">$params[3]</a>
#elif $task.method == 'tagNotification'
<strong>Recipients:</strong>&nbsp;$printValue('', $params[0])<br/>