debian-koji/util/kojira
#!/usr/bin/python2
# Koji Repository Administrator (kojira)
# Copyright (c) 2005-2014 Red Hat, Inc.
#
# Koji is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors:
# Mike McLean <mikem@redhat.com>
from __future__ import absolute_import
from __future__ import division
import sys
import os
import koji
from koji.util import rmtree, parseStatus, to_list
from optparse import OptionParser
from six.moves.configparser import ConfigParser
import errno
import logging
import logging.handlers
import pprint
import signal
import time
import threading
import traceback
import six
tag_cache = {}
def getTag(session, tag, event=None):
"""A caching version of the hub call"""
cache = tag_cache
now = time.time()
if (tag, event) in cache:
ts, info = cache[(tag,event)]
if now - ts < 600:
#use the cache
return info
info = session.getTag(tag, event=event)
if info:
cache[(info['id'], event)] = (now, info)
cache[(info['name'], event)] = (now, info)
return info
class ManagedRepo(object):
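    # Wraps a single repo entry from getActiveRepos and tracks its lifecycle.
    # A repo starts out "current"; the currency checker marks it non-current
    # once any tag in its inheritance chain changes, and once it is EXPIRED it
    # becomes a candidate for on-disk removal via tryDelete().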
def __init__(self, manager, data):
self.manager = manager
self.session = manager.session
self.options = manager.options
self.logger = logging.getLogger("koji.repo")
self.current = True
self.repo_id = data['id']
self.event_id = data['create_event']
self.event_ts = data['create_ts']
self.tag_id = data['tag_id']
self.state = data['state']
if 'dist' in data:
self._dist = data['dist']
self.tag_name = data['tag_name']
self.expire_ts = None
if koji.REPO_STATES[self.state] in ['EXPIRED', 'DELETED', 'PROBLEM']:
self.current = False
self.expire_ts = time.time()
# TODO use hub data to find the actual expiration time
self.first_seen = time.time()
if self.current:
order = self.session.getFullInheritance(self.tag_id, event=self.event_id)
#order may contain same tag more than once
tags = {self.tag_id : 1}
for x in order:
tags[x['parent_id']] = 1
self.taglist = to_list(tags.keys())
@property
def dist(self):
# TODO: remove this indirection once we can rely on the hub to return
# dist field in getActiveRepos
if hasattr(self, '_dist'):
return self._dist
rinfo = self.session.repoInfo(self.repo_id)
        self._dist = rinfo['dist']
        return self._dist
def expire(self):
"""Mark the repo expired"""
if self.state == koji.REPO_EXPIRED:
return
elif self.state == koji.REPO_DELETED:
raise koji.GenericError("Repo already deleted")
self.logger.info("Expiring repo %s.." % self.repo_id)
self.session.repoExpire(self.repo_id)
self.state = koji.REPO_EXPIRED
def expired(self):
return self.state == koji.REPO_EXPIRED
def pending(self, timeout=180):
"""Determine if repo generation appears to be in progress and not already obsolete"""
if self.state != koji.REPO_INIT:
return False
age = time.time() - self.event_ts
return self.current and age < timeout
def stale(self):
"""Determine if repo seems stale
By stale, we mean:
- state=INIT
- timestamp really, really old
"""
timeout = 36000
#XXX - config
if self.state != koji.REPO_INIT:
return False
age = time.time() - max(self.event_ts, self.first_seen)
#the first_seen timestamp is also factored in because a repo can be
#created from an older event and should not be expired based solely on
#that event's timestamp.
return age > timeout
def tryDelete(self):
"""Remove the repo from disk, if possible"""
tag_info = getTag(self.session, self.tag_id)
if not tag_info:
tag_info = getTag(self.session, self.tag_id, self.event_id)
if not tag_info:
self.logger.warn('Could not get info for tag %i, skipping delete of repo %i' %
(self.tag_id, self.repo_id))
return False
tag_name = tag_info['name']
if self.dist:
path = pathinfo.distrepo(self.repo_id, tag_name)
lifetime = self.options.dist_repo_lifetime
else:
path = pathinfo.repo(self.repo_id, tag_name)
lifetime = self.options.deleted_repo_lifetime
# (should really be called expired_repo_lifetime)
try:
#also check dir age. We do this because a repo can be created from an older event
#and should not be removed based solely on that event's timestamp.
mtime = os.stat(path).st_mtime
except OSError as e:
            if e.errno == errno.ENOENT:
# No such file or directory, so the repo either never existed,
# or has already been deleted, so allow it to be marked deleted.
self.logger.info("Repo directory does not exist: %s" % path)
pass
else:
self.logger.error("Can't stat repo directory: %s, %s" % (path, e.strerror))
return False
else:
times = [self.event_ts, mtime, self.first_seen, self.expire_ts]
times = [ts for ts in times if ts is not None]
age = time.time() - max(times)
self.logger.debug("Repo %s (%s) age: %i sec", self.repo_id, path, age)
if age < lifetime:
return False
self.logger.debug("Attempting to delete repo %s.." % self.repo_id)
if self.state != koji.REPO_EXPIRED:
raise koji.GenericError("Repo not expired")
if self.session.repoDelete(self.repo_id) > 0:
#cannot delete, we are referenced by a buildroot
self.logger.debug("Cannot delete repo %s, still referenced" % self.repo_id)
return False
self.logger.info("Deleted repo %s" % self.repo_id)
self.state = koji.REPO_DELETED
self.manager.rmtree(path)
return True
def ready(self):
return self.state == koji.REPO_READY
def deleted(self):
return self.state == koji.REPO_DELETED
def problem(self):
return self.state == koji.REPO_PROBLEM
class RepoManager(object):
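    # Coordinates the whole cycle: tracks active repos (self.repos), newRepo
    # tasks we created (self.tasks) and ones we did not (self.other_tasks),
    # tags that need a fresh repo (self.needed_tags), and forked rmtree
    # children (self.delete_pids / self.delete_queue). The main loop drives
    # updateRepos()/checkQueue(), while currencyChecker() and regenLoop() run
    # in their own threads with separate hub sessions.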
def __init__(self, options, session):
self.options = options
self._local = threading.local()
self._local.session = session
self.repos = {}
self.tasks = {}
self.other_tasks = {}
self.needed_tags = {}
self.tag_use_stats = {}
self.delete_pids = {}
self.delete_queue = []
self.logger = logging.getLogger("koji.repo.manager")
@property
def session(self):
# session is stored in our threadlocal instance
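        # Each worker thread (main, currencyChecker, regenLoop) assigns its
        # own subsession via the setter below, so concurrent hub calls never
        # share one connection.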
return self._local.session
@session.setter
def session(self, value):
self._local.session = value
def printState(self):
self.logger.debug('Tracking %i repos, %i child processes', len(self.repos), len(self.delete_pids))
for tag_id, task_id in six.iteritems(self.tasks):
self.logger.debug("Tracking task %s for tag %s", task_id, tag_id)
for pid, desc in six.iteritems(self.delete_pids):
self.logger.debug("Delete job %s: %r", pid, desc)
def rmtree(self, path):
"""Spawn (or queue) and rmtree job"""
self.logger.info("Queuing rmtree job for %s", path)
self.delete_queue.append(path)
self.checkQueue()
def checkQueue(self):
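        # Reap any finished rmtree children, then start queued deletions as
        # long as we are within max_delete_processes.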
finished = [pid for pid in self.delete_pids if self.waitPid(pid)]
for pid in finished:
path = self.delete_pids[pid]
self.logger.info("Completed rmtree job for %s", path)
del self.delete_pids[pid]
while self.delete_queue and len(self.delete_pids) <= self.options.max_delete_processes:
path = self.delete_queue.pop(0)
pid = self._rmtree(path)
self.logger.info("Started rmtree (pid %i) for %s", pid, path)
self.delete_pids[pid] = path
def waitPid(self, pid):
# XXX - can we unify with TaskManager?
prefix = "pid %i (%s)" % (pid, self.delete_pids.get(pid))
try:
(childpid, status) = os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.errno != errno.ECHILD:
#should not happen
raise
#otherwise assume the process is gone
self.logger.info("%s: %s" % (prefix, e))
return True
if childpid != 0:
self.logger.info(parseStatus(status, prefix))
return True
return False
def _rmtree(self, path):
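        # Fork a child to do the (potentially slow) tree removal. The child
        # drops its reference to the shared session with _forget(), removes
        # the tree, and exits via os._exit() so it never runs the parent's
        # cleanup; the parent reaps it later through waitPid()/checkQueue().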
pid = os.fork()
if pid:
return pid
# no return
try:
status = 1
self.session._forget()
try:
rmtree(path)
status = 0
except Exception:
logger.error(''.join(traceback.format_exception(*sys.exc_info())))
logging.shutdown()
finally:
os._exit(status)
def killChildren(self):
# XXX - unify with TaskManager?
sig = signal.SIGTERM
for pid in self.delete_pids:
try:
os.kill(pid, sig)
except OSError as e:
if e.errno != errno.ESRCH:
logger.error("Unable to kill process %s", pid)
def readCurrentRepos(self):
self.logger.debug("Reading current repo data")
repodata = self.session.getActiveRepos()
self.logger.debug("Repo data: %r" % repodata)
for data in repodata:
repo_id = data['id']
repo = self.repos.get(repo_id)
if repo:
#we're already tracking it
if repo.state != data['state']:
self.logger.info('State changed for repo %s: %s -> %s'
%(repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']]))
repo.state = data['state']
else:
self.logger.info('Found repo %s, state=%s'
%(repo_id, koji.REPO_STATES[data['state']]))
self.repos[repo_id] = ManagedRepo(self, data)
if len(self.repos) > len(repodata):
# This shouldn't normally happen, but might if someone else calls
# repoDelete or similar
active = set([r['id'] for r in repodata])
for repo_id in self.repos.keys():
if repo_id not in active:
self.logger.info('Dropping entry for inactive repo: %s', repo_id)
del self.repos[repo_id]
def checkCurrentRepos(self):
"""Determine which repos are current"""
to_check = []
repo_ids = to_list(self.repos.keys())
for repo_id in repo_ids:
repo = self.repos.get(repo_id)
if repo is None:
# removed by main thread
continue
if not repo.current:
# no point in checking again
continue
if repo.state not in (koji.REPO_READY, koji.REPO_INIT):
repo.current = False
if repo.expire_ts is None:
repo.expire_ts = time.time()
#also no point in further checking
continue
to_check.append(repo)
if self.logger.isEnabledFor(logging.DEBUG):
skipped = set(repo_ids).difference([r.repo_id for r in to_check])
self.logger.debug("Skipped check for repos: %r", skipped)
if not to_check:
return
for repo in to_check:
changed = self.session.tagChangedSinceEvent(repo.event_id, repo.taglist)
if changed:
self.logger.info("Repo %i no longer current", repo.repo_id)
repo.current = False
repo.expire_ts = time.time()
def currencyChecker(self, session):
"""Continually checks repos for currency. Runs as a separate thread"""
self.session = session
self.logger = logging.getLogger("koji.repo.currency")
self.logger.info('currencyChecker starting')
try:
while True:
self.checkCurrentRepos()
time.sleep(self.options.sleeptime)
except:
self.logger.exception('Error in currency checker thread')
raise
finally:
session.logout()
def regenLoop(self, session):
"""Triggers regens as needed/possible. Runs in a separate thread"""
self.session = session
self.logger = logging.getLogger("koji.repo.regen")
self.logger.info('regenLoop starting')
try:
while True:
self.regenRepos()
time.sleep(self.options.sleeptime)
except:
self.logger.exception('Error in regen thread')
raise
finally:
session.logout()
def pruneLocalRepos(self, topdir, timername):
"""Scan filesystem for repos and remove any deleted ones
Also, warn about any oddities"""
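        # The layout assumed here is <topdir>/<tag_name>/<repo_id>/.
        # Directories are only removed when the hub reports the repo DELETED
        # or PROBLEM (or does not know it at all and ignore_stray_repos is
        # off) and the directory is older than the lifetime named by
        # `timername`.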
if self.delete_pids:
#skip
return
if not os.path.exists(topdir):
self.logger.warn("%s doesn't exist, skipping", topdir)
return
if os.path.isfile(topdir):
self.logger.warn("%s is not directory, skipping", topdir)
return
self.logger.debug("Scanning %s for repos", topdir)
self.logger.debug('max age allowed: %s seconds (from %s)',
getattr(self.options, timername), timername)
for tag in os.listdir(topdir):
tagdir = "%s/%s" % (topdir, tag)
if not os.path.isdir(tagdir):
self.logger.debug("%s is not a directory, skipping", tagdir)
continue
for repo_id in os.listdir(tagdir):
if repo_id == 'latest':
# ignore latest symlinks
continue
try:
repo_id = int(repo_id)
except ValueError:
self.logger.debug("%s/%s not an int, skipping", tagdir, repo_id)
continue
repodir = "%s/%s" % (tagdir, repo_id)
if not os.path.isdir(repodir):
self.logger.debug("%s not a directory, skipping", repodir)
continue
if repo_id in self.repos:
#we're already managing it, no need to deal with it here
continue
try:
dir_ts = os.stat(repodir).st_mtime
except OSError:
#just in case something deletes the repo out from under us
self.logger.debug("%s deleted already?!", repodir)
continue
rinfo = self.session.repoInfo(repo_id)
if rinfo is None:
if not self.options.ignore_stray_repos:
age = time.time() - dir_ts
self.logger.debug("did not expect %s; age: %s",
repodir, age)
if age > getattr(self.options, timername):
self.logger.info("Removing unexpected directory (no such repo): %s", repodir)
self.rmtree(repodir)
continue
if rinfo['tag_name'] != tag:
self.logger.warn("Tag name mismatch (rename?): %s vs %s", tag, rinfo['tag_name'])
continue
if rinfo['state'] in (koji.REPO_DELETED, koji.REPO_PROBLEM):
age = time.time() - max(rinfo['create_ts'], dir_ts)
self.logger.debug("potential removal candidate: %s; age: %s" % (repodir, age))
if age > getattr(self.options, timername):
logger.info("Removing stray repo (state=%s): %s" % (koji.REPO_STATES[rinfo['state']], repodir))
self.rmtree(repodir)
def tagUseStats(self, tag_id):
stats = self.tag_use_stats.get(tag_id)
now = time.time()
if stats and now - stats['ts'] < 3600:
#use the cache
return stats
data = self.session.listBuildroots(tagID=tag_id,
queryOpts={'order': '-create_event_id', 'limit' : 100})
#XXX magic number (limit)
if data:
tag_name = data[0]['tag_name']
else:
tag_name = "#%i" % tag_id
stats = {'data': data, 'ts': now, 'tag_name': tag_name}
recent = [x for x in data if now - x['create_ts'] < 3600 * 24]
#XXX magic number
        stats['n_recent'] = len(recent)
self.tag_use_stats[tag_id] = stats
self.logger.debug("tag %s recent use count: %i" % (tag_name, len(recent)))
return stats
def setTagScore(self, entry):
"""Set score for needed_tag entry
We score the tags by two factors
- age of current repo
- last use in a buildroot
Having an older repo or a higher use count gives the tag a higher
priority for regen. The formula attempts to keep the last use factor
from overpowering, so that tags with very old repos still get priority
"""
stats = self.tagUseStats(entry['taginfo']['id'])
# normalize use count
max_n = max([t.get('n_recent', 0) for t in self.needed_tags.values()]
or [1])
if max_n == 0:
# no recent use or missing data
max_n = 1
        adj = stats['n_recent'] * 9.0 / max_n + 1  # 1.0 to 10.0
ts = entry['expire_ts']
age = time.time() - ts
# XXX - need to make sure our times aren't far off, otherwise this
# scoring could have the opposite of the desired effect
if age < 0:
self.logger.warning("Needed tag has future expire_ts: %r", entry)
age = 0
entry['score'] = age * adj
self.logger.debug("Needed tag %s got score %.2f",
entry['taginfo']['name'], entry['score'])
        # so a day-old unused repo gets about the same regen score as a
        # 2.4-hour-old, very popular repo
def updateTagScores(self):
for entry in self.needed_tags.values():
self.setTagScore(entry)
def updateRepos(self):
self.checkTasks()
self.logger.debug("Current tasks: %r" % self.tasks)
if self.other_tasks:
self.logger.debug("Found %i untracked newRepo tasks",
len(self.other_tasks))
self.logger.debug("Updating repos")
self.readCurrentRepos()
# check for stale repos
for repo in self.repos.values():
if repo.stale():
repo.expire()
# find out which tags require repos
self.checkNeeded()
self.updateTagScores()
# trigger deletes
n_deletes = 0
for repo in self.repos.values():
if n_deletes >= self.options.delete_batch_size:
break
if repo.expired():
#try to delete
if repo.tryDelete():
n_deletes += 1
del self.repos[repo.repo_id]
def checkTasks(self):
"""Check on newRepo tasks
- update taskinfo
- remove finished tasks
- check for other newRepo tasks (not generated by us)
"""
task_ids = list(self.tasks)
self.session.multicall = True
for task_id in task_ids:
self.session.getTaskInfo(task_id)
for task_id, [tinfo] in zip(task_ids, self.session.multiCall(strict=True)):
tstate = koji.TASK_STATES[tinfo['state']]
tag_id = self.tasks[task_id]['tag_id']
if tstate == 'CLOSED':
self.logger.info("Finished: newRepo task %s for tag %s" % (task_id, tag_id))
del self.tasks[task_id]
elif tstate in ('CANCELED', 'FAILED'):
self.logger.info("Problem: newRepo task %s for tag %s is %s" % (task_id, tag_id, tstate))
del self.tasks[task_id]
else:
self.tasks[task_id]['taskinfo'] = tinfo
# TODO: implement a timeout
# also check other newRepo tasks
repo_tasks = self.session.listTasks(opts={'method':'newRepo',
'state':([koji.TASK_STATES[s] for s in ('FREE', 'OPEN')])})
others = [t for t in repo_tasks if t['id'] not in self.tasks]
for tinfo in others:
if tinfo['id'] not in self.other_tasks:
self.logger.info("Untracked newRepo task: %(id)i", tinfo)
# note: possible race here, but only a log message
# TODO - determine tag and maven support
self.other_tasks = dict([(t['id'], t) for t in others])
def checkNeeded(self):
"""Determine which tags currently need regeneration"""
n_need = len(self.needed_tags)
ignore = self.options.ignore_tags.split()
self.build_tags = set([
t['build_tag'] for t in self.session.getBuildTargets()
if not koji.util.multi_fnmatch(t['build_tag_name'], ignore)
])
#index repos by tag
tag_repos = {}
for repo in self.repos.values():
tag_repos.setdefault(repo.tag_id, []).append(repo)
for tag_id in self.build_tags:
covered = False
for repo in tag_repos.get(tag_id, []):
if repo.current:
covered = True
break
elif repo.pending():
# one on the way
covered = True
break
if tag_id in self.needed_tags:
entry = self.needed_tags[tag_id]
if covered:
# no longer needed
self.logger.info("Tag %(name)s has a current or in "
"progress repo", entry['taginfo'])
del self.needed_tags[tag_id]
# if not covered, we already know
continue
if covered:
continue
# we haven't noted this need yet
taginfo = self.session.getTag(tag_id)
# (not using the caching version since we only call upon discovery)
if not taginfo:
self.logger.warning('Tag disappeared: %i', tag_id)
continue
self.logger.info('Tag needs regen: %(name)s', taginfo)
# how expired are we?
ts = 0
for repo in tag_repos.get(tag_id, []):
if repo.expire_ts:
if repo.expire_ts > ts:
ts = repo.expire_ts
else:
self.logger.warning("No expire timestamp for repo: %s", repo.repo_id)
if ts == 0:
ts = time.time()
entry = {
'taginfo': taginfo,
'expire_ts': ts,
'needed_since' : time.time(),
}
self.setTagScore(entry)
self.needed_tags[tag_id] = entry
# some cleanup
for tag_id in list(self.needed_tags):
entry = self.needed_tags.get(tag_id)
if tag_id not in self.build_tags:
self.logger.info("Tag %(name)s is no longer a build tag",
entry['taginfo'])
del self.needed_tags[tag_id]
for tag_id, repolist in tag_repos.items():
if tag_id not in self.build_tags:
# repos for these tags are no longer required
for repo in repolist:
if repo.ready():
repo.expire()
if n_need != len(self.needed_tags):
self.logger.info('Needed tags count went from %i to %i', n_need,
len(self.needed_tags))
def regenRepos(self):
"""Trigger newRepo tasks for needed tags"""
# first note currently running tasks
running_tasks = 0
running_tasks_maven = 0
for task in self.tasks.values():
if task['taskinfo']['waiting']:
self.logger.debug("Task %(id)i is waiting", task)
else:
# The largest hub impact is from the first part of the newRepo
# task. Once it is waiting on subtasks, that part is over
running_tasks += 1
if task['maven']:
running_tasks_maven += 1
debuginfo_pat = self.options.debuginfo_tags.split()
src_pat = self.options.source_tags.split()
        order = sorted(self.needed_tags.values(), key=lambda t: t['score'],
                       reverse=True)
for tag in order:
if running_tasks >= self.options.max_repo_tasks:
self.logger.info("Maximum number of repo tasks reached")
return
elif (len(self.tasks) + len(self.other_tasks)
>= self.options.repo_tasks_limit):
self.logger.info("Repo task limit reached")
return
tagname = tag['taginfo']['name']
task_id = tag.get('task_id')
if task_id:
if task_id in self.tasks:
# we already have a task
continue
else:
# should not happen
logger.warning('Needed tag refers to unknown task. '
'%s -> %i', tagname, task_id)
taskopts = {}
if koji.util.multi_fnmatch(tagname, debuginfo_pat):
taskopts['debuginfo'] = True
if koji.util.multi_fnmatch(tagname, src_pat):
taskopts['src'] = True
maven = tag['taginfo']['maven_support']
if maven:
if running_tasks_maven >= self.options.max_repo_tasks_maven:
continue
task_id = self.session.newRepo(tagname, **taskopts)
running_tasks += 1
if maven:
running_tasks_maven += 1
expire_ts = tag['expire_ts']
if expire_ts == 0: # can this still happen?
time_expired = '???'
else:
time_expired = "%.1f" % (time.time() - expire_ts)
self.logger.info("Created newRepo task %s for tag %s (%s), "
"expired for %s sec", task_id, tag['taginfo']['id'],
tag['taginfo']['name'], time_expired)
self.tasks[task_id] = {
'taskinfo': self.session.getTaskInfo(task_id),
'tag_id': tag['taginfo']['id'],
'maven': maven,
}
tag['task_id'] = task_id
if running_tasks_maven >= self.options.max_repo_tasks_maven:
self.logger.info("Maximum number of maven repo tasks reached")
def start_currency_checker(session, repomgr):
subsession = session.subsession()
thread = threading.Thread(name='currencyChecker',
target=repomgr.currencyChecker, args=(subsession,))
thread.setDaemon(True)
thread.start()
return thread
def start_regen_loop(session, repomgr):
subsession = session.subsession()
thread = threading.Thread(name='regenLoop',
target=repomgr.regenLoop, args=(subsession,))
thread.setDaemon(True)
thread.start()
return thread
def main(options, session):
repomgr = RepoManager(options, session)
repomgr.readCurrentRepos()
def shutdown(*args):
raise SystemExit
signal.signal(signal.SIGTERM,shutdown)
curr_chk_thread = start_currency_checker(session, repomgr)
regen_thread = start_regen_loop(session, repomgr)
# TODO also move rmtree jobs to threads
logger.info("Entering main loop")
repodir = "%s/repos" % pathinfo.topdir
distrepodir = "%s/repos-dist" % pathinfo.topdir
while True:
try:
repomgr.updateRepos()
repomgr.checkQueue()
repomgr.printState()
repomgr.pruneLocalRepos(repodir, 'deleted_repo_lifetime')
repomgr.pruneLocalRepos(distrepodir, 'dist_repo_lifetime')
if not curr_chk_thread.isAlive():
logger.error("Currency checker thread died. Restarting it.")
curr_chk_thread = start_currency_checker(session, repomgr)
if not regen_thread.isAlive():
logger.error("Currency checker thread died. Restarting it.")
regen_thread = start_regen_loop(session, repomgr)
except KeyboardInterrupt:
logger.warn("User exit")
break
except koji.AuthExpired:
logger.warn("Session expired")
break
except SystemExit:
logger.warn("Shutting down")
break
except:
# log the exception and continue
logger.error(''.join(traceback.format_exception(*sys.exc_info())))
try:
time.sleep(options.sleeptime)
except KeyboardInterrupt:
logger.warn("User exit")
break
try:
repomgr.checkQueue()
repomgr.killChildren()
finally:
session.logout()
def get_options():
"""process options from command line and config file"""
# parse command line args
parser = OptionParser("usage: %prog [opts]")
parser.add_option("-c", "--config", dest="configFile",
help="use alternate configuration file", metavar="FILE",
default="/etc/kojira/kojira.conf")
parser.add_option("--user", help="specify user")
parser.add_option("--password", help="specify password")
parser.add_option("--principal", help="Kerberos principal")
parser.add_option("--keytab", help="Kerberos keytab")
parser.add_option("-f", "--fg", dest="daemon",
action="store_false", default=True,
help="run in foreground")
parser.add_option("-d", "--debug", action="store_true",
help="show debug output")
parser.add_option("-q", "--quiet", action="store_true",
help="don't show warnings")
parser.add_option("-v", "--verbose", action="store_true",
help="show verbose output")
parser.add_option("--with-src", action="store_true",
help="include srpms in repos")
parser.add_option("--force-lock", action="store_true", default=False,
help="force lock for exclusive session")
parser.add_option("--debug-xmlrpc", action="store_true", default=False,
help="show xmlrpc debug output")
parser.add_option("--skip-main", action="store_true", default=False,
help="don't actually run main")
parser.add_option("--show-config", action="store_true", default=False,
help="Show config and exit")
parser.add_option("--sleeptime", type='int', help="Specify the polling interval")
parser.add_option("-s", "--server", help="URL of XMLRPC server")
parser.add_option("--topdir", help="Specify topdir")
parser.add_option("--logfile", help="Specify logfile")
(options, args) = parser.parse_args()
config = ConfigParser()
config.read(options.configFile)
section = 'kojira'
for x in config.sections():
if x != section:
quit('invalid section found in config file: %s' % x)
defaults = {'with_src': False,
'debuginfo_tags': '',
'source_tags': '',
'ignore_tags': '',
'verbose': False,
'debug': False,
'ignore_stray_repos': False,
'topdir': '/mnt/koji',
'server': None,
'logfile': '/var/log/kojira.log',
'principal': None,
'keytab': None,
'ccache': '/var/tmp/kojira.ccache',
'krbservice': 'host',
'krb_rdns': True,
'krb_canon_host': False,
'retry_interval': 60,
'max_retries': 120,
'offline_retry': True,
'offline_retry_interval': 120,
'no_ssl_verify': False,
'max_delete_processes': 4,
'max_repo_tasks' : 4,
'max_repo_tasks_maven' : 2,
'repo_tasks_limit' : 10,
'delete_batch_size' : 3,
'deleted_repo_lifetime': 7*24*3600,
#XXX should really be called expired_repo_lifetime
'dist_repo_lifetime': 7*24*3600,
'sleeptime' : 15,
'cert': None,
'ca': '', # FIXME: unused, remove in next major release
'serverca': None,
}
if config.has_section(section):
int_opts = ('deleted_repo_lifetime', 'max_repo_tasks', 'repo_tasks_limit',
'retry_interval', 'max_retries', 'offline_retry_interval',
'max_delete_processes', 'max_repo_tasks_maven',
'delete_batch_size', 'dist_repo_lifetime', 'sleeptime')
str_opts = ('topdir', 'server', 'user', 'password', 'logfile', 'principal', 'keytab', 'krbservice',
'cert', 'ca', 'serverca', 'debuginfo_tags',
'source_tags', 'ignore_tags') # FIXME: remove ca here
bool_opts = ('with_src','verbose','debug','ignore_stray_repos', 'offline_retry',
'krb_rdns', 'krb_canon_host', 'no_ssl_verify')
for name in config.options(section):
if name in int_opts:
defaults[name] = config.getint(section, name)
elif name in str_opts:
defaults[name] = config.get(section, name)
elif name in bool_opts:
defaults[name] = config.getboolean(section, name)
else:
quit("unknown config option: %s" % name)
for name, value in defaults.items():
if getattr(options, name, None) is None:
setattr(options, name, value)
if options.logfile in ('','None','none'):
options.logfile = None
# special handling for cert defaults
cert_defaults = {
'cert': '/etc/kojira/client.crt',
'serverca': '/etc/kojira/serverca.crt',
}
for name in cert_defaults:
if getattr(options, name, None) is None:
fn = cert_defaults[name]
if os.path.exists(fn):
setattr(options, name, fn)
return options
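# A minimal illustrative kojira.conf (option names come from the defaults
# above; the values shown are only examples, not recommendations):
#
#   [kojira]
#   server = http://hub.example.com/kojihub
#   topdir = /mnt/koji
#   logfile = /var/log/kojira.log
#   cert = /etc/kojira/client.crt
#   serverca = /etc/kojira/serverca.crt
#   sleeptime = 15
#   max_repo_tasks = 4
#   deleted_repo_lifetime = 604800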
def quit(msg=None, code=1):
if msg:
logging.getLogger("koji.repo").error(msg)
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
sys.exit(code)
if __name__ == "__main__":
options = get_options()
topdir = getattr(options,'topdir',None)
pathinfo = koji.PathInfo(topdir)
if options.show_config:
pprint.pprint(options.__dict__)
sys.exit()
if options.logfile:
if not os.path.exists(options.logfile):
try:
logfile = open(options.logfile, "w")
logfile.close()
except:
sys.stderr.write("Cannot create logfile: %s\n" % options.logfile)
sys.exit(1)
if not os.access(options.logfile,os.W_OK):
sys.stderr.write("Cannot write to logfile: %s\n" % options.logfile)
sys.exit(1)
koji.add_file_logger("koji", options.logfile)
#note we're setting logging for koji.*
logger = logging.getLogger("koji")
if options.debug:
logger.setLevel(logging.DEBUG)
elif options.verbose:
logger.setLevel(logging.INFO)
elif options.quiet:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.WARNING)
session_opts = koji.grab_session_options(options)
session = koji.ClientSession(options.server,session_opts)
if options.cert is not None and os.path.isfile(options.cert):
# authenticate using SSL client certificates
session.ssl_login(options.cert, None, options.serverca)
elif options.user:
# authenticate using user/password
session.login()
elif koji.krbV and options.principal and options.keytab:
session.krb_login(options.principal, options.keytab, options.ccache)
#get an exclusive session
try:
session.exclusiveSession(force=options.force_lock)
except koji.AuthLockError:
quit("Error: Unable to get lock. Trying using --force-lock")
    if not session.logged_in:
        quit("Error: Unknown login error")
if options.skip_main:
sys.exit()
elif options.daemon:
koji.daemonize()
else:
koji.add_stderr_logger("koji")
main(options, session)