flake8: apply E2 rules except E266

Yuming Zhu 2020-02-26 02:25:24 +08:00
parent ce1f9928af
commit 05340b146b
28 changed files with 707 additions and 706 deletions

View file

@@ -1,5 +1,6 @@
 [flake8]
-select = I,C,F,E1,E265
+select = I,C,F,E1,E2
+ignore = E266
 exclude =
     .git,
     __pycache__,
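
A note on scope: pycodestyle's E2xx series covers whitespace and comment spacing, for example E201/E202 (whitespace just inside brackets), E225/E226 (whitespace around operators), E231 (missing whitespace after a comma), and E261/E262/E265 (comment formatting). E266 ("too many leading '#' for block comment") stays excluded via the ignore line. A representative before/after, taken from the kojid changes below:

    signal.signal(signal.SIGTERM,shutdown)      # before: E231, missing whitespace after ','
    signal.signal(signal.SIGTERM, shutdown)     # after: clean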

View file

@@ -105,7 +105,7 @@ try:
     import pykickstart.parser as ksparser
     import pykickstart.handlers.control as kscontrol
     import pykickstart.errors as kserrors
-    import iso9660 # from pycdio
+    import iso9660  # from pycdio
     image_enabled = True
 except ImportError:  # pragma: no cover
     image_enabled = False
@@ -145,15 +145,15 @@ def main(options, session):
     def restart(*args):
         logger.warn("Initiating graceful restart")
         tm.restart_pending = True
-    signal.signal(signal.SIGTERM,shutdown)
-    signal.signal(signal.SIGUSR1,restart)
+    signal.signal(signal.SIGTERM, shutdown)
+    signal.signal(signal.SIGUSR1, restart)
     while 1:
         try:
             taken = False
             tm.updateBuildroots()
             tm.updateTasks()
             taken = tm.getNextTask()
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             logger.warn("Exiting")
             break
         except ServerRestart:
@@ -174,7 +174,7 @@ def main(options, session):
             # The load-balancing code in getNextTask() will prevent a single builder
             # from getting overloaded.
             time.sleep(options.sleeptime)
-        except (SystemExit,KeyboardInterrupt):
+        except (SystemExit, KeyboardInterrupt):
             logger.warn("Exiting")
             break
     logger.warn("Shutting down, please wait...")
@@ -185,15 +185,15 @@ def main(options, session):

 class BuildRoot(object):
-    def __init__(self,session,options,*args,**kwargs):
+    def __init__(self, session, options, *args, **kwargs):
         self.logger = logging.getLogger("koji.build.buildroot")
         self.session = session
         self.options = options
         if len(args) + len(kwargs) == 1:
             # manage an existing mock buildroot
-            self._load(*args,**kwargs)
+            self._load(*args, **kwargs)
         else:
-            self._new(*args,**kwargs)
+            self._new(*args, **kwargs)

     def _load(self, data):
         # manage an existing buildroot
@@ -263,7 +263,7 @@ class BuildRoot(object):
     def _writeMockConfig(self):
         # mock config
         configdir = '/etc/mock/koji'
-        configfile = "%s/%s.cfg" % (configdir,self.name)
+        configfile = "%s/%s.cfg" % (configdir, self.name)
         self.mockcfg = "koji/%s" % self.name

         opts = {}
@@ -296,7 +296,7 @@ class BuildRoot(object):
         output = koji.genMockConfig(self.name, self.br_arch, managed=True, **opts)

         # write config
-        with open(configfile,'w') as fo:
+        with open(configfile, 'w') as fo:
             fo.write(output)

     def _repositoryEntries(self, pi, plugin=False):
@@ -400,7 +400,7 @@ class BuildRoot(object):
     def mock(self, args):
         """Run mock"""
-        mockpath = getattr(self.options,"mockpath","/usr/bin/mock")
+        mockpath = getattr(self.options, "mockpath", "/usr/bin/mock")
         cmd = [mockpath, "-r", self.mockcfg]
         # if self.options.debug_mock:
         #     cmd.append('--debug')
@@ -520,13 +520,13 @@ class BuildRoot(object):
                 fd = os.open(outfile, flags, 0o666)
                 os.dup2(fd, 1)
                 os.dup2(fd, 2)
-                if os.getuid() == 0 and hasattr(self.options,"mockuser"):
+                if os.getuid() == 0 and hasattr(self.options, "mockuser"):
                     self.logger.info('Running mock as %s' % self.options.mockuser)
-                    uid,gid = pwd.getpwnam(self.options.mockuser)[2:4]
+                    uid, gid = pwd.getpwnam(self.options.mockuser)[2:4]
                     os.setgroups([grp.getgrnam('mock')[2]])
-                    os.setregid(gid,gid)
-                    os.setreuid(uid,uid)
-                os.execvp(cmd[0],cmd)
+                    os.setregid(gid, gid)
+                    os.setreuid(uid, uid)
+                os.execvp(cmd[0], cmd)
             except:
                 # diediedie
                 print("Failed to exec mock")
@@ -546,7 +546,7 @@ class BuildRoot(object):
             raise koji.BuildrootError("could not init mock buildroot, %s" % self._mockResult(rv))
         # log kernel version
         self.mock(['--chroot', 'uname -r'])
-        self.session.host.setBuildRootList(self.id,self.getPackageList())
+        self.session.host.setBuildRootList(self.id, self.getPackageList())

     def _mockResult(self, rv, logfile=None):
         if logfile:
@@ -559,7 +559,7 @@ class BuildRoot(object):
         return parseStatus(rv, 'mock') + msg

     def rebuild_srpm(self, srpm):
-        self.session.host.setBuildRootState(self.id,'BUILDING')
+        self.session.host.setBuildRootState(self.id, 'BUILDING')
         # unpack SRPM to tempdir
         srpm_dir = os.path.join(self.tmpdir(), 'srpm_unpacked')
@@ -592,7 +592,7 @@ class BuildRoot(object):
     def build_srpm(self, specfile, sourcedir, source_cmd):
-        self.session.host.setBuildRootState(self.id,'BUILDING')
+        self.session.host.setBuildRootState(self.id, 'BUILDING')
         if source_cmd:
             # call the command defined by source_cmd in the chroot so any required files not stored in
             # the SCM can be retrieved
@@ -619,16 +619,16 @@ class BuildRoot(object):
             self.expire()
             raise koji.BuildError("error building srpm, %s" % self._mockResult(rv))

-    def build(self,srpm,arch=None):
+    def build(self, srpm, arch=None):
         # run build
-        self.session.host.setBuildRootState(self.id,'BUILDING')
+        self.session.host.setBuildRootState(self.id, 'BUILDING')
         args = ['--no-clean']
         if arch:
             args.extend(['--target', arch])
         args.extend(['--rebuild', srpm])
         rv = self.mock(args)
-        self.session.host.updateBuildRootList(self.id,self.getPackageList())
+        self.session.host.updateBuildRootList(self.id, self.getPackageList())
         if rv:
             self.expire()
             raise koji.BuildError("error building package (arch %s), %s" % (arch, self._mockResult(rv)))
@@ -759,7 +759,7 @@ class BuildRoot(object):
         pathinfo = koji.PathInfo(topdir='')
         repodir = pathinfo.repo(self.repo_info['id'], self.repo_info['tag_name'])

-        opts = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])
+        opts = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])
         opts['tempdir'] = self.options.workdir

         # prefer librepo
@@ -792,8 +792,8 @@ class BuildRoot(object):
                 repodata = repoMDObject.RepoMD('ourrepo', fo)
             except:
                 raise koji.BuildError("Unable to parse repomd.xml file for %s" % os.path.join(repodir, self.br_arch))
             data = repodata.getData('origin')
             pkgorigins = data.location[1]
         else:
             # shouldn't occur
             raise koji.GenericError("install librepo or yum")
@@ -808,11 +808,11 @@ class BuildRoot(object):
         if six.PY3:
             fo2 = io.TextIOWrapper(fo2, encoding='utf-8')
         for line in fo2:
-            parts=line.split(None, 2)
+            parts = line.split(None, 2)
             if len(parts) < 2:
                 continue
             # first field is formated by yum as [e:]n-v-r.a
             nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % koji.parse_NVRA(parts[0])
             origin_idx[nvra] = parts[1]
         fo2.close()

         # mergerepo starts from a local repo in the task workdir, so internal
@@ -872,7 +872,7 @@ class BuildRoot(object):
         return "%s%s" % (self.rootdir(), base)

     def expire(self):
-        self.session.host.setBuildRootState(self.id,'EXPIRED')
+        self.session.host.setBuildRootState(self.id, 'EXPIRED')


 class ChainBuildTask(BaseTaskHandler):
@@ -984,16 +984,16 @@ class BuildTask(BaseTaskHandler):
         dest_tag = taginfo['id']

         # policy checks...
         policy_data = {
-            'user_id' : task_info['owner'],
-            'source' : src,
-            'task_id' : self.id,
-            'build_tag' : build_tag, #id
-            'skip_tag' : bool(self.opts.get('skip_tag')),
+            'user_id': task_info['owner'],
+            'source': src,
+            'task_id': self.id,
+            'build_tag': build_tag,  # id
+            'skip_tag': bool(self.opts.get('skip_tag')),
         }
         if target_info:
             policy_data['target'] = target_info['id'],
         if not self.opts.get('skip_tag'):
-            policy_data['tag'] = dest_tag #id
+            policy_data['tag'] = dest_tag  # id
         if not SCM.is_scm_url(src) and not opts.get('scratch'):
             # let hub policy decide
             self.session.host.assertPolicy('build_from_srpm', policy_data)
@@ -1002,11 +1002,11 @@ class BuildTask(BaseTaskHandler):
             self.session.host.assertPolicy('build_from_repo_id', policy_data)
         if not repo_info:
             repo_info = self.getRepo(build_tag, builds=opts.get('wait_builds'),
-                                     wait=opts.get('wait_repo')) #(subtask)
+                                     wait=opts.get('wait_repo'))  # (subtask)
         self.event_id = self.session.getLastEvent()['id']
         srpm = self.getSRPM(src, build_tag, repo_info['id'])
         h = self.readSRPMHeader(srpm)
-        data = koji.get_header_fields(h, ['name','version','release','epoch'])
+        data = koji.get_header_fields(h, ['name', 'version', 'release', 'epoch'])
         data['task_id'] = self.id
         if getattr(self, 'source', False):
             data['source'] = self.source['source']
@@ -1014,7 +1014,7 @@ class BuildTask(BaseTaskHandler):
         extra_arches = None
         self.logger.info("Reading package config for %(name)s" % data)
-        pkg_cfg = self.session.getPackageConfig(dest_tag,data['name'],event=self.event_id)
+        pkg_cfg = self.session.getPackageConfig(dest_tag, data['name'], event=self.event_id)
         self.logger.debug("%r" % pkg_cfg)
         if pkg_cfg is not None:
             extra_arches = pkg_cfg.get('extra_arches')
@@ -1036,16 +1036,16 @@ class BuildTask(BaseTaskHandler):
         failany = (self.opts.get('fail_fast', False)
                    or not getattr(self.options, 'build_arch_can_fail', False))
         try:
-            self.extra_information = { "src": src, "data": data, "target": target }
-            srpm,rpms,brmap,logs = self.runBuilds(srpm, build_tag, archlist,
-                                                  repo_info['id'], failany=failany)
+            self.extra_information = {"src": src, "data": data, "target": target}
+            srpm, rpms, brmap, logs = self.runBuilds(srpm, build_tag, archlist,
+                                                     repo_info['id'], failany=failany)
             if opts.get('scratch'):
                 # scratch builds do not get imported
-                self.session.host.moveBuildToScratch(self.id,srpm,rpms,logs=logs)
+                self.session.host.moveBuildToScratch(self.id, srpm, rpms, logs=logs)
             else:
-                self.session.host.completeBuild(self.id,build_id,srpm,rpms,brmap,logs=logs)
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+                self.session.host.completeBuild(self.id, build_id, srpm, rpms, brmap, logs=logs)
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             # we do not trap these
             raise
         except:
@@ -1055,11 +1055,11 @@ class BuildTask(BaseTaskHandler):
             # reraise the exception
             raise

         if not self.opts.get('skip_tag') and not self.opts.get('scratch'):
-            self.tagBuild(build_id,dest_tag)
+            self.tagBuild(build_id, dest_tag)

     def getSRPM(self, src, build_tag, repo_id):
         """Get srpm from src"""
-        if isinstance(src,str):
+        if isinstance(src, str):
             if SCM.is_scm_url(src):
                 return self.getSRPMFromSCM(src, build_tag, repo_id)
             else:
@@ -1107,7 +1107,7 @@ class BuildTask(BaseTaskHandler):
         # srpm arg should be a path relative to <BASEDIR>/work
         self.logger.debug("Reading SRPM")
         relpath = "work/%s" % srpm
-        opts = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])
+        opts = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])
         opts['tempdir'] = self.workdir
         with koji.openRemoteFile(relpath, **opts) as fo:
             koji.check_rpm_file(fo)
@@ -1127,7 +1127,7 @@ class BuildTask(BaseTaskHandler):
         self.logger.debug('arches: %s' % arches)
         if extra:
             self.logger.debug('Got extra arches: %s' % extra)
-            arches = "%s %s" % (arches,extra)
+            arches = "%s %s" % (arches, extra)
         archlist = arches.split()
         self.logger.debug('base archlist: %r' % archlist)
         # - adjust arch list based on srpm macros
@@ -1138,14 +1138,14 @@ class BuildTask(BaseTaskHandler):
             archlist = buildarchs
             self.logger.debug('archlist after buildarchs: %r' % archlist)
         if exclusivearch:
-            archlist = [ a for a in archlist if a in exclusivearch ]
+            archlist = [a for a in archlist if a in exclusivearch]
             self.logger.debug('archlist after exclusivearch: %r' % archlist)
         if excludearch:
-            archlist = [ a for a in archlist if a not in excludearch ]
+            archlist = [a for a in archlist if a not in excludearch]
             self.logger.debug('archlist after excludearch: %r' % archlist)
         # noarch is funny
         if 'noarch' not in excludearch and \
-                ( 'noarch' in buildarchs or 'noarch' in exclusivearch ):
+                ('noarch' in buildarchs or 'noarch' in exclusivearch):
             archlist.append('noarch')
         override = self.opts.get('arch_override')
         if self.opts.get('scratch') and override:
@@ -1187,9 +1187,9 @@ class BuildTask(BaseTaskHandler):
         excludearch = [koji.canonArch(a) for a in excludearch]
         archlist = list(tag_arches)
         if exclusivearch:
-            archlist = [ a for a in archlist if a in exclusivearch ]
+            archlist = [a for a in archlist if a in exclusivearch]
         if excludearch:
-            archlist = [ a for a in archlist if a not in excludearch ]
+            archlist = [a for a in archlist if a not in excludearch]
         if not archlist:
             raise koji.BuildError("No valid arches were found. tag %r, "
                                   "exclusive %r, exclude %r" % (tag_arches,
@@ -1231,13 +1231,13 @@ class BuildTask(BaseTaskHandler):
         built_srpm = None
         for (arch, task_id) in six.iteritems(subtasks):
             result = results[task_id]
-            self.logger.debug("DEBUG: %r : %r " % (arch,result,))
+            self.logger.debug("DEBUG: %r : %r " % (arch, result,))
             brootid = result['brootid']
             for fn in result['rpms']:
                 rpms.append(fn)
                 brmap[fn] = brootid
             for fn in result['logs']:
-                logs.setdefault(arch,[]).append(fn)
+                logs.setdefault(arch, []).append(fn)
             if result['srpms']:
                 if built_srpm:
                     raise koji.BuildError("multiple builds returned a srpm. task %i" % self.id)
@@ -1249,14 +1249,14 @@ class BuildTask(BaseTaskHandler):
         else:
             raise koji.BuildError("could not find a built srpm")

-        return srpm,rpms,brmap,logs
+        return srpm, rpms, brmap, logs

-    def tagBuild(self,build_id,dest_tag):
+    def tagBuild(self, build_id, dest_tag):
         # XXX - need options to skip tagging and to force tagging
         # create the tagBuild subtask
         # this will handle the "post tests"
         task_id = self.session.host.subtask(method='tagBuild',
-                                            arglist=[dest_tag,build_id,False,None,True],
+                                            arglist=[dest_tag, build_id, False, None, True],
                                             label='tag',
                                             parent=self.id,
                                             arch='noarch')
@@ -1377,7 +1377,7 @@ class BuildArchTask(BaseBuildTask):
         # run build
         self.logger.debug("Running build")
-        broot.build(fn,arch)
+        broot.build(fn, arch)

         # extract results
         resultdir = broot.resultdir()
@@ -1424,7 +1424,7 @@ class BuildArchTask(BaseBuildTask):
         # upload files to storage server
         uploadpath = broot.getUploadPath()
         for f in rpm_files:
-            self.uploadFile("%s/%s" % (resultdir,f))
+            self.uploadFile("%s/%s" % (resultdir, f))
         self.logger.debug("keep srpm %i %s %s" % (self.id, keep_srpm, opts))
         if keep_srpm:
             if len(srpm_files) == 0:
@@ -1433,19 +1433,19 @@ class BuildArchTask(BaseBuildTask):
                 raise koji.BuildError("multiple srpm files found for task %i: %s" % (self.id, srpm_files))
             # Run sanity checks. Any failures will throw a BuildError
-            self.srpm_sanity_checks("%s/%s" % (resultdir,srpm_files[0]))
-            self.logger.debug("uploading %s/%s to %s" % (resultdir,srpm_files[0], uploadpath))
-            self.uploadFile("%s/%s" % (resultdir,srpm_files[0]))
+            self.srpm_sanity_checks("%s/%s" % (resultdir, srpm_files[0]))
+            self.logger.debug("uploading %s/%s to %s" % (resultdir, srpm_files[0], uploadpath))
+            self.uploadFile("%s/%s" % (resultdir, srpm_files[0]))

         if rpm_files:
-            ret['rpms'] = [ "%s/%s" % (uploadpath,f) for f in rpm_files ]
+            ret['rpms'] = ["%s/%s" % (uploadpath, f) for f in rpm_files]
         else:
             ret['rpms'] = []
         if keep_srpm:
-            ret['srpms'] = [ "%s/%s" % (uploadpath,f) for f in srpm_files ]
+            ret['srpms'] = ["%s/%s" % (uploadpath, f) for f in srpm_files]
         else:
             ret['srpms'] = []
-        ret['logs'] = [ "%s/%s" % (uploadpath,f) for f in log_files ]
+        ret['logs'] = ["%s/%s" % (uploadpath, f) for f in log_files]

         if rpmdiff_hash[self.id]:
             self.uploadFile(noarch_hash_path)
@@ -1563,7 +1563,7 @@ class BuildMavenTask(BaseBuildTask):
                 st = os.lstat(filepath)
                 mtime = time.localtime(st.st_mtime)
                 info = zipfile.ZipInfo(filepath[roottrim:])
-                info.external_attr |= 0o120000 << 16 # symlink file type
+                info.external_attr |= 0o120000 << 16  # symlink file type
                 info.compress_type = zipfile.ZIP_STORED
                 info.date_time = mtime[:6]
                 zfo.writestr(info, content)
@@ -2345,7 +2345,7 @@ class TagBuildTask(BaseTaskHandler):
             # computationally expensive 'post' tests.
             # XXX - add more post tests
-            self.session.host.tagBuild(self.id,tag_id,build_id,force=force,fromtag=fromtag)
+            self.session.host.tagBuild(self.id, tag_id, build_id, force=force, fromtag=fromtag)
             self.session.host.tagNotification(True, tag_id, fromtag, build_id, user_id, ignore_success)
         except Exception as e:
             exctype, value = sys.exc_info()[:2]
@@ -2478,7 +2478,7 @@ class BuildBaseImageTask(BuildImageTask):
                 self.session.host.completeImageBuild(self.id, bld_info['id'],
                                                      results)
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             # we do not trap these
             raise
         except:
@@ -2564,7 +2564,7 @@ class BuildApplianceTask(BuildImageTask):
             else:
                 self.session.host.moveImageBuildToScratch(self.id, results)
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             # we do not trap these
             raise
         except:
@@ -2648,7 +2648,7 @@ class BuildLiveCDTask(BuildImageTask):
             else:
                 self.session.host.moveImageBuildToScratch(self.id, results)
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             # we do not trap these
             raise
         except:
@@ -2822,7 +2822,7 @@ class ImageTask(BaseTaskHandler):
     Methods = []
     # default to bind mounting /dev, but allow subclasses to change
     # this
-    bind_opts = {'dirs' : {'/dev' : '/dev',}}
+    bind_opts = {'dirs': {'/dev': '/dev', }}

     def makeImgBuildRoot(self, buildtag, repoinfo, arch, inst_group):
         """
@@ -2887,8 +2887,8 @@
         else:
             kspath = self.localPath("work/%s" % ksfile)

-        self.uploadFile(kspath) # upload the original ks file
-        return kspath # full absolute path to the file in the chroot
+        self.uploadFile(kspath)  # upload the original ks file
+        return kspath  # full absolute path to the file in the chroot

     def readKickstart(self, kspath, opts):
         """
@@ -2941,7 +2941,7 @@
         # in the kickstart file. If --repo wasn't specified, then we use the
         # repo associated with the target passed in initially.
         repo_class = kscontrol.dataMap[self.ks.version]['RepoData']
-        self.ks.handler.repo.repoList = [] # delete whatever the ks file told us
+        self.ks.handler.repo.repoList = []  # delete whatever the ks file told us
         if opts.get('repo'):
             user_repos = opts['repo']
             if isinstance(user_repos, six.string_types):
@@ -2973,7 +2973,7 @@
         if not os.path.exists(kskoji):
             raise koji.LiveCDError("KS file missing: %s" % kskoji)
         self.uploadFile(kskoji)

-        return broot.path_without_to_within(kskoji) # absolute path within chroot
+        return broot.path_without_to_within(kskoji)  # absolute path within chroot

     def getImagePackages(self, cachepath):
         """
@@ -3281,7 +3281,7 @@ class LiveCDTask(ImageTask):
         if not opts.get('scratch'):
             hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),
                                                          cachedir[1:]))
-            imgdata ['rpmlist'] = hdrlist
+            imgdata['rpmlist'] = hdrlist
             broot.markExternalRPMs(hdrlist)
         broot.expire()
@@ -3432,7 +3432,7 @@ class LiveMediaTask(ImageTask):
                 '--iso-only',
             ])
-            isoname='%s-%s-%s-%s.iso' % (name, arch, version, release)
+            isoname = '%s-%s-%s-%s.iso' % (name, arch, version, release)
             cmd.extend(['--iso-name', isoname,
                         '--releasever', version,
                         ])
@@ -3514,7 +3514,7 @@ class LiveMediaTask(ImageTask):
             # (getImagePackages doesn't work here)
             # hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),
             #                                              cachedir[1:]))
-            imgdata ['rpmlist'] = []
+            imgdata['rpmlist'] = []
             # broot.markExternalRPMs(hdrlist)
         broot.expire()
@@ -3557,15 +3557,15 @@ class OzImageTask(BaseTaskHandler):
                                    srcdir=scmsrcdir)
             kspath = os.path.join(scmsrcdir, os.path.basename(ksfile))
         else:
-            tops = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])
+            tops = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])
             tops['tempdir'] = self.workdir
             with koji.openRemoteFile(ksfile, **tops) as ks_src:
                 kspath = os.path.join(self.workdir, os.path.basename(ksfile))
                 with open(kspath, 'wb') as ks_dest:
                     ks_dest.write(ks_src.read())
         self.logger.debug('uploading kickstart from here: %s' % kspath)
-        self.uploadFile(kspath) # upload the original ks file
-        return kspath # absolute path to the ks file
+        self.uploadFile(kspath)  # upload the original ks file
+        return kspath  # absolute path to the ks file

     def readKickstart(self, kspath):
         """
@@ -3611,7 +3611,7 @@ class OzImageTask(BaseTaskHandler):
         # url with --repo, then we substitute that in for the repo(s) specified
         # in the kickstart file. If --repo wasn't specified, then we use the
         # repo associated with the target passed in initially.
-        ks.handler.repo.repoList = [] # delete whatever the ks file told us
+        ks.handler.repo.repoList = []  # delete whatever the ks file told us
         repo_class = kscontrol.dataMap[ks.version]['RepoData']
         # TODO: sensibly use "url" and "repo" commands in kickstart
         if self.opts.get('repo'):
@@ -3654,7 +3654,7 @@ class OzImageTask(BaseTaskHandler):
         # put the new ksfile in the output directory
         if not os.path.exists(kspath):
             raise koji.BuildError("KS file missing: %s" % kspath)
-        self.uploadFile(kspath) # upload the modified ks file
+        self.uploadFile(kspath)  # upload the modified ks file
         return kspath

     def makeConfig(self):
@@ -3704,7 +3704,7 @@ class OzImageTask(BaseTaskHandler):
         # image and attempt to ssh in. This breaks docker image creation.
         # TODO: intelligently guess the distro based on the install tree URL
         distname, distver = self.parseDistro(self.opts.get('distro'))
-        if self.arch in ['armhfp','armv7hnl','armv7hl']:
+        if self.arch in ['armhfp', 'armv7hnl', 'armv7hl']:
             arch = 'armv7l'
         else:
             arch = self.arch
@@ -3841,22 +3841,22 @@ class BaseImageTask(OzImageTask):
         Call out to ImageFactory to build the image(s) we want. Returns a dict
         of details for each image type we had to ask ImageFactory to build
         """
         fcalls = {'raw': self._buildBase,
                   'raw-xz': self._buildXZ,
                   'tar-gz': self._buildTarGZ,
                   'liveimg-squashfs': self._buildSquashfs,
                   'vmdk': self._buildConvert,
                   'vdi': self._buildConvert,
                   'qcow': self._buildConvert,
                   'qcow2': self._buildConvert,
                   'vpc': self._buildConvert,
                   'rhevm-ova': self._buildOVA,
                   'vsphere-ova': self._buildOVA,
                   'vagrant-virtualbox': self._buildOVA,
                   'vagrant-libvirt': self._buildOVA,
                   'vagrant-vmware-fusion': self._buildOVA,
                   'vagrant-hyperv': self._buildOVA,
                   'docker': self._buildDocker
                   }
         # add a handler to the logger so that we capture ImageFactory's logging
         self.fhandler = logging.FileHandler(self.ozlog)
@@ -3865,7 +3865,7 @@ class BaseImageTask(OzImageTask):
         self.tlog.setLevel(logging.DEBUG)
         self.tlog.addHandler(self.fhandler)
         images = {}
-        random.seed() # necessary to ensure a unique mac address
+        random.seed()  # necessary to ensure a unique mac address
         params = {'install_script': str(ks.handler),
                   'offline_icicle': True}
         # build the base (raw) image
@@ -3925,7 +3925,7 @@ class BaseImageTask(OzImageTask):
             if scrnshot:
                 ext = scrnshot[-3:]
                 self.uploadFile(scrnshot, remoteName='screenshot.%s' % ext)
-            image.os_plugin.abort() # forcibly tear down the VM
+            image.os_plugin.abort()  # forcibly tear down the VM
         # TODO abort when a task is CANCELLED
         if not self.session.checkUpload('', os.path.basename(self.ozlog)):
             self.tlog.removeHandler(self.fhandler)
@@ -3972,7 +3972,7 @@ class BaseImageTask(OzImageTask):
         self.logger.debug('templates: %s' % template)
         self.logger.debug('pre-merge params: %s' % params)
         # We enforce various things related to the ks file - do not allow override
-        self._mergeFactoryParams(params, [ 'install_script' ])
+        self._mergeFactoryParams(params, ['install_script'])
         self.logger.debug('post-merge params: %s' % params)
         base = self.bd.builder_for_base_image(template, parameters=params)
         if wait:
@@ -4088,22 +4088,22 @@ class BaseImageTask(OzImageTask):
         if format == 'vagrant-virtualbox':
             format = 'vsphere-ova'
             img_opts['vsphere_ova_format'] = 'vagrant-virtualbox'
-            fixed_params = [ 'vsphere_ova_format' ]
+            fixed_params = ['vsphere_ova_format']
         if format == 'vagrant-libvirt':
             format = 'rhevm-ova'
             img_opts['rhevm_ova_format'] = 'vagrant-libvirt'
-            fixed_params = [ 'rhevm_ova_format' ]
+            fixed_params = ['rhevm_ova_format']
         if format == 'vagrant-vmware-fusion':
             format = 'vsphere-ova'
             img_opts['vsphere_ova_format'] = 'vagrant-vmware-fusion'
             # The initial disk image transform for VMWare Fusion/Workstation requires a "standard" VMDK
             # not the stream oriented format used for VirtualBox or regular VMWare OVAs
             img_opts['vsphere_vmdk_format'] = 'standard'
-            fixed_params = [ 'vsphere_ova_format', 'vsphere_vmdk_format' ]
+            fixed_params = ['vsphere_ova_format', 'vsphere_vmdk_format']
         if format == 'vagrant-hyperv':
             format = 'hyperv-ova'
             img_opts['hyperv_ova_format'] = 'hyperv-vagrant'
-            fixed_params = [ 'hyperv_ova_format' ]
+            fixed_params = ['hyperv_ova_format']
         targ = self._do_target_image(self.base_img.base_image.identifier,
                                      format.replace('-ova', ''), img_opts=img_opts, fixed_params=fixed_params)
         targ2 = self._do_target_image(targ.target_image.identifier, 'OVA',
@@ -4177,7 +4177,7 @@ class BaseImageTask(OzImageTask):
         cmd = ['/usr/bin/qemu-img', 'convert', '-f', 'raw', '-O',
                format, self.base_img.base_image.data, newimg]
         if format == 'qcow':
-            cmd.insert(2, '-c') # enable compression for qcow images
+            cmd.insert(2, '-c')  # enable compression for qcow images
         if format == 'qcow2':
             # qemu-img changed its default behavior at some point to generate a
             # v3 image when the requested output format is qcow2. We don't
@@ -4262,14 +4262,14 @@ class BaseImageTask(OzImageTask):
         for p in icicle.getElementsByTagName('extra'):
             bits = p.firstChild.nodeValue.split(',')
             rpm = {
                 'name': bits[0],
                 'version': bits[1],
                 'release': bits[2],
                 'arch': bits[3],
                 # epoch is a special case, as usual
                 'size': int(bits[5]),
                 'payloadhash': bits[6],
                 'buildtime': int(bits[7])
             }
             if rpm['name'] in ['buildsys-build', 'gpg-pubkey']:
                 continue
@@ -4369,15 +4369,15 @@ class BuildIndirectionImageTask(OzImageTask):
                                    srcdir=scmsrcdir)
             final_path = os.path.join(scmsrcdir, os.path.basename(filepath))
         else:
-            tops = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])
+            tops = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])
             tops['tempdir'] = self.workdir
             final_path = os.path.join(self.workdir, os.path.basename(filepath))
             with koji.openRemoteFile(filepath, **tops) as remote_fileobj:
                 with open(final_path, 'w') as final_fileobj:
                     shutil.copyfileobj(remote_fileobj, final_fileobj)
         self.logger.debug('uploading retrieved file from here: %s' % final_path)
-        self.uploadFile(final_path) # upload the original ks file
-        return final_path # absolute path to the ks file
+        self.uploadFile(final_path)  # upload the original ks file
+        return final_path  # absolute path to the ks file

     def handler(self, opts):
         """Governing task for building an image with two other images using Factory Indirection"""
@@ -4406,7 +4406,7 @@ class BuildIndirectionImageTask(OzImageTask):
             task_diskimage = _match_name(result['files'], ".*qcow2$")
             task_tdl = _match_name(result['files'], "tdl.*xml")
-            task_dir = os.path.join(koji.pathinfo.work(),koji.pathinfo.taskrelpath(task_id))
+            task_dir = os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(task_id))
             diskimage_full = os.path.join(task_dir, task_diskimage)
             tdl_full = os.path.join(task_dir, task_tdl)
@@ -4443,7 +4443,7 @@ class BuildIndirectionImageTask(OzImageTask):
                 raise koji.BuildError("Could not retrieve archives for build (%s) from NVR (%s)" %
                                       (build['id'], nvr))
-            buildfiles = [ x['filename'] for x in buildarchives ]
+            buildfiles = [x['filename'] for x in buildarchives]
             builddir = koji.pathinfo.imagebuild(build)

         def _match_name(inlist, namere):
@@ -4509,7 +4509,7 @@ class BuildIndirectionImageTask(OzImageTask):
         release = self.getRelease(name, version)
         if '-' in version:
             raise koji.ApplianceError('The Version may not have a hyphen')
         if '-' in release:
             raise koji.ApplianceError('The Release may not have a hyphen')

         indirection_template = self.fetchHubOrSCM(opts.get('indirection_template'),
@@ -4570,8 +4570,8 @@ class BuildIndirectionImageTask(OzImageTask):
             results_loc = "/" + results_loc
         params = {'utility_image': str(utility_factory_image.identifier),
                   'utility_customizations': utility_customizations,
-                  'results_location': results_loc }
-        random.seed() # necessary to ensure a unique mac address
+                  'results_location': results_loc}
+        random.seed()  # necessary to ensure a unique mac address
         try:
             try:
                 # Embedded deep debug option - if template is just the string MOCK
@@ -4584,7 +4584,7 @@ class BuildIndirectionImageTask(OzImageTask):
                     target.target_image = target_image
                     with open(target_image.data, "w") as f:
                         f.write("Mock build from task ID: %s" % self.id)
-                    target_image.status='COMPLETE'
+                    target_image.status = 'COMPLETE'
                 else:
                     target = bd.builder_for_target_image('indirection',
                                                          image_id=base_factory_image.identifier,
@@ -4609,18 +4609,18 @@ class BuildIndirectionImageTask(OzImageTask):
             self.uploadFile(target.target_image.data, remoteName=os.path.basename(results_loc))

-        myresults = { }
+        myresults = {}
         myresults['task_id'] = self.id
-        myresults['files'] = [ os.path.basename(results_loc) ]
-        myresults['logs'] = [ os.path.basename(ozlog) ]
+        myresults['files'] = [os.path.basename(results_loc)]
+        myresults['logs'] = [os.path.basename(ozlog)]
         myresults['arch'] = opts['arch']
         # TODO: This should instead track the two input images: base and utility
-        myresults['rpmlist'] = [ ]
+        myresults['rpmlist'] = []
         # This is compatible with some helper methods originally implemented for the base
         # image build. In the original usage, the dict contains an entry per build arch
         # TODO: If adding multiarch support, keep this in mind
-        results = { str(self.id): myresults }
+        results = {str(self.id): myresults}
         self.logger.debug('Image Results for hub: %s' % results)

         if opts['scratch']:
@@ -4640,7 +4640,7 @@ class BuildIndirectionImageTask(OzImageTask):
         report = ''
         if opts.get('scratch'):
             respath = ', '.join(
-                [os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(tid)) for tid in [self.id] ])
+                [os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(tid)) for tid in [self.id]])
             report += 'Scratch '
         else:
             respath = koji.pathinfo.imagebuild(bld_info)
@@ -4773,7 +4773,7 @@ class BuildSRPMFromSCMTask(BaseBuildTask):
                     'setup_dns': True,
                     'repo_id': repo_id}
         if self.options.scm_credentials_dir is not None and os.path.isdir(self.options.scm_credentials_dir):
-            rootopts['bind_opts'] = {'dirs' : {self.options.scm_credentials_dir : '/credentials',}}
+            rootopts['bind_opts'] = {'dirs': {self.options.scm_credentials_dir: '/credentials', }}
             # Force internal_dev_setup back to true because bind_opts is used to turn it off
             rootopts['internal_dev_setup'] = True
         br_arch = self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id))
@@ -5092,9 +5092,9 @@ Build Info: %(weburl)s/buildinfo?buildID=%(build_id)i\r
         failure = failure_info or cancel_info or ''

-        tasks = {'failed' : [task for task in task_data.values() if task['state'] == 'failed'],
-                 'canceled' : [task for task in task_data.values() if task['state'] == 'canceled'],
-                 'closed' : [task for task in task_data.values() if task['state'] == 'closed']}
+        tasks = {'failed': [task for task in task_data.values() if task['state'] == 'failed'],
+                 'canceled': [task for task in task_data.values() if task['state'] == 'canceled'],
+                 'closed': [task for task in task_data.values() if task['state'] == 'closed']}

         srpms = []
         for taskinfo in task_data.values():
@@ -5143,7 +5143,7 @@ Build Info: %(weburl)s/buildinfo?buildID=%(build_id)i\r
         output += "\r\n"
         output += "\r\n"
-        changelog = koji.util.formatChangelog(self.session.getChangelogEntries(build_id, queryOpts={'limit': 3})).replace("\n","\r\n")
+        changelog = koji.util.formatChangelog(self.session.getChangelogEntries(build_id, queryOpts={'limit': 3})).replace("\n", "\r\n")
         if changelog:
             changelog = "Changelog:\r\n%s" % changelog
@@ -5230,7 +5230,7 @@ class NewRepoTask(BaseTaskHandler):
             results = self.wait(to_list(subtasks.values()), all=True, failany=True)
             for (arch, task_id) in six.iteritems(subtasks):
                 data[arch] = results[task_id]
-                self.logger.debug("DEBUG: %r : %r " % (arch,data[arch],))
+                self.logger.debug("DEBUG: %r : %r " % (arch, data[arch],))

         # finalize
         kwargs = {}
@@ -5448,7 +5448,7 @@ class createDistRepoTask(BaseTaskHandler):
         "ppc64": ("ppc64p7", "ppc64pseries", "ppc64iseries", "ppc64", "noarch"),
         "ppc64le": ("ppc64le", "noarch"),
         "s390": ("s390", "noarch"),
         "s390x": ("s390x", "noarch"),
         "sparc": ("sparcv9v", "sparcv9", "sparcv8", "sparc", "noarch"),
         "sparc64": ("sparc64v", "sparc64", "noarch"),
         "alpha": ("alphaev6", "alphaev56", "alphaev5", "alpha", "noarch"),
@@ -5703,7 +5703,7 @@ enabled=1
         self.session.uploadWrapper(yconfig_path, self.uploadpath,
                                    os.path.basename(yconfig_path))
         conf = dnf.conf.Conf()
-        conf.reposdir = [] # don't use system repos at all
+        conf.reposdir = []  # don't use system repos at all
         conf.read(yconfig_path)
         dnfbase = dnf.Base(conf)
         if hasattr(koji.arch, 'ArchStorage'):
@@ -6243,7 +6243,7 @@ def get_options():
                 defaults[name] = int(value)
             except ValueError:
                 quit("value for %s option must be a valid integer" % name)
         elif name in ['offline_retry', 'use_createrepo_c', 'createrepo_skip_stat',
                       'createrepo_update', 'use_fast_upload', 'support_rpm_source_layout',
                       'krb_rdns', 'krb_canon_host', 'build_arch_can_fail', 'no_ssl_verify',
                       'log_timestamps']:

View file

@@ -47,7 +47,7 @@ EXPAND_ARCHES = {
     'alpha': ['alphaev4', 'alphaev45', 'alphaev5', 'alphaev56',
               'alphapca56', 'alphaev6', 'alphaev67', 'alphaev68', 'alphaev7'],
     'armhfp': ['armv7hl', 'armv7hnl', 'armv6hl', 'armv6hnl'],
-    'arm': ['armv5tel', 'armv5tejl', 'armv6l','armv7l'],
+    'arm': ['armv5tel', 'armv5tejl', 'armv6l', 'armv7l'],
     'sh4': ['sh4a']
 }
@@ -64,7 +64,7 @@ def parse_args(args):
     mergerepos --repo=url --repo=url --outputdir=/some/path"""

-    parser = OptionParser(version = "mergerepos 0.1", usage=usage)
+    parser = OptionParser(version="mergerepos 0.1", usage=usage)
     # query options
     parser.add_option("-r", "--repo", dest="repos", default=[], action="append",
                       help="repo url")
@@ -158,7 +158,7 @@ class RepoMerge(object):
         # in the repolist
         count = 0
         for r in self.repolist:
-            count +=1
+            count += 1
             rid = 'repo%s' % count
             sys.stderr.write('Adding repo: %s\n' % r)
             n = self.yumbase.add_enable_repo(rid, baseurls=[r])

View file

@@ -193,9 +193,9 @@ def get_options():
         return options, '_list_commands', [0, '']

     aliases = {
-        'cancel-task' : 'cancel',
-        'cxl' : 'cancel',
-        'list-commands' : 'help',
+        'cancel-task': 'cancel',
+        'cxl': 'cancel',
+        'list-commands': 'help',
         'move-pkg': 'move-build',
         'move': 'move-build',
         'latest-pkg': 'latest-build',
@@ -252,7 +252,7 @@ def fix_pyver(options, logger):
     pyver = getattr(options, 'pyver', None)
     if not pyver:
         return
-    if pyver not in [2,3]:
+    if pyver not in [2, 3]:
         logger.warning('Invalid python version requested: %s', pyver)
     if sys.version_info[0] == pyver:
         return
@@ -278,20 +278,20 @@ def list_commands(categories_chosen=None):
         categories_chosen = list(categories_chosen)
     categories_chosen.sort()
     handlers = []
-    for name,value in globals().items():
+    for name, value in globals().items():
         if name.startswith('handle_'):
-            alias = name.replace('handle_','')
-            alias = alias.replace('_','-')
-            handlers.append((alias,value))
+            alias = name.replace('handle_', '')
+            alias = alias.replace('_', '-')
+            handlers.append((alias, value))
         elif name.startswith('anon_handle_'):
-            alias = name.replace('anon_handle_','')
-            alias = alias.replace('_','-')
-            handlers.append((alias,value))
+            alias = name.replace('anon_handle_', '')
+            alias = alias.replace('_', '-')
+            handlers.append((alias, value))
     handlers.sort()
     print(_("Available commands:"))
     for category in categories_chosen:
         print(_("\n%s:" % categories[category]))
-        for alias,handler in handlers:
+        for alias, handler in handlers:
             desc = handler.__doc__ or ''
             if desc.startswith('[%s] ' % category):
                 desc = desc[len('[%s] ' % category):]

View file

@@ -58,7 +58,7 @@ except ImportError:  # pragma: no cover

 def _printable_unicode(s):
     if six.PY2:
         return s.encode('utf-8')
-    else: # no cover: 2.x
+    else:  # no cover: 2.x
         return s
@@ -354,7 +354,7 @@ def handle_add_pkg(goptions, session, args):
     opts['force'] = options.force
     opts['block'] = False
     # check if list of packages exists for that tag already
-    dsttag=session.getTag(tag)
+    dsttag = session.getTag(tag)
     if dsttag is None:
         print("No such tag: %s" % tag)
         sys.exit(1)
@@ -388,7 +388,7 @@ def handle_block_pkg(goptions, session, args):
     activate_session(session, goptions)
     tag = args[0]
     # check if list of packages exists for that tag already
-    dsttag=session.getTag(tag)
+    dsttag = session.getTag(tag)
     if dsttag is None:
         print("No such tag: %s" % tag)
         return 1
@@ -425,7 +425,7 @@ def handle_remove_pkg(goptions, session, args):
     opts = {}
     opts['force'] = options.force
     # check if list of packages exists for that tag already
-    dsttag=session.getTag(tag)
+    dsttag = session.getTag(tag)
     if dsttag is None:
         print("No such tag: %s" % tag)
         return 1
@@ -717,7 +717,7 @@ def handle_wrapper_rpm(options, session, args):
     (build_opts, args) = parser.parse_args(args)

     if build_opts.inis:
-        if len(args)!= 1:
+        if len(args) != 1:
             parser.error(_("Exactly one argument (a build target) is required"))
     else:
         if len(args) < 3:
@@ -1153,14 +1153,14 @@ def handle_import(goptions, session, args):
     activate_session(session, goptions)
     to_import = {}
     for path in args:
-        data = koji.get_header_fields(path, ('name','version','release','epoch',
-                                             'arch','sigmd5','sourcepackage','sourcerpm'))
+        data = koji.get_header_fields(path, ('name', 'version', 'release', 'epoch',
+                                             'arch', 'sigmd5', 'sourcepackage', 'sourcerpm'))
         if data['sourcepackage']:
             data['arch'] = 'src'
             nvr = "%(name)s-%(version)s-%(release)s" % data
         else:
             nvr = "%(name)s-%(version)s-%(release)s" % koji.parse_NVRA(data['sourcerpm'])
-        to_import.setdefault(nvr,[]).append((path,data))
+        to_import.setdefault(nvr, []).append((path, data))
     builds_missing = False
     nvrs = to_list(to_import.keys())
     nvrs.sort()
@@ -1181,7 +1181,7 @@ def handle_import(goptions, session, args):
     # local function to help us out below
     def do_import(path, data):
-        rinfo = dict([(k,data[k]) for k in ('name','version','release','arch')])
+        rinfo = dict([(k, data[k]) for k in ('name', 'version', 'release', 'arch')])
         prev = session.getRPM(rinfo)
         if prev and not prev.get('external_repo_id', 0):
             if prev['payloadhash'] == koji.hex_string(data['sigmd5']):
@@ -1247,7 +1247,7 @@ def handle_import(goptions, session, args):
         if need_build:
             # if we're doing this here, we weren't given the matching srpm
-            if not options.create_build: # pragma: no cover
+            if not options.create_build:  # pragma: no cover
                 if binfo:
                     # should have caught this earlier, but just in case...
                     b_state = koji.BUILD_STATES[binfo['state']]
@@ -1362,11 +1362,11 @@ def _import_comps(session, filename, tag, options):
     comps.fromxml_f(filename)
     force = options.force
     ptypes = {
-        libcomps.PACKAGE_TYPE_DEFAULT : 'default',
-        libcomps.PACKAGE_TYPE_OPTIONAL : 'optional',
-        libcomps.PACKAGE_TYPE_CONDITIONAL : 'conditional',
-        libcomps.PACKAGE_TYPE_MANDATORY : 'mandatory',
-        libcomps.PACKAGE_TYPE_UNKNOWN : 'unknown',
+        libcomps.PACKAGE_TYPE_DEFAULT: 'default',
+        libcomps.PACKAGE_TYPE_OPTIONAL: 'optional',
+        libcomps.PACKAGE_TYPE_CONDITIONAL: 'conditional',
+        libcomps.PACKAGE_TYPE_MANDATORY: 'mandatory',
+        libcomps.PACKAGE_TYPE_UNKNOWN: 'unknown',
     }
     for group in comps.groups:
         print("Group: %s (%s)" % (group.id, group.name))
@ -1378,8 +1378,8 @@ def _import_comps(session, filename, tag, options):
langonly=group.lang_only, langonly=group.lang_only,
biarchonly=bool(group.biarchonly)) biarchonly=bool(group.biarchonly))
for pkg in group.packages: for pkg in group.packages:
pkgopts = {'type' : ptypes[pkg.type], pkgopts = {'type': ptypes[pkg.type],
'basearchonly' : bool(pkg.basearchonly), 'basearchonly': bool(pkg.basearchonly),
} }
if pkg.type == libcomps.PACKAGE_TYPE_CONDITIONAL: if pkg.type == libcomps.PACKAGE_TYPE_CONDITIONAL:
pkgopts['requires'] = pkg.requires pkgopts['requires'] = pkg.requires
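The ptypes dict above is a plain dispatch table from libcomps' integer
package-type constants to the strings koji's hub expects. The same shape in
miniature, with hypothetical constant values standing in for libcomps:

    PACKAGE_TYPE_DEFAULT, PACKAGE_TYPE_OPTIONAL = 1, 2   # hypothetical values
    ptypes = {PACKAGE_TYPE_DEFAULT: 'default',
              PACKAGE_TYPE_OPTIONAL: 'optional'}
    pkgopts = {'type': ptypes[PACKAGE_TYPE_OPTIONAL]}    # {'type': 'optional'}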
@ -1393,7 +1393,7 @@ def _import_comps(session, filename, tag, options):
# libcomps does not support metapkgs # libcomps does not support metapkgs
def _import_comps_alt(session, filename, tag, options): # no cover 3.x def _import_comps_alt(session, filename, tag, options): # no cover 3.x
"""Import comps data using yum.comps module""" """Import comps data using yum.comps module"""
print('WARN: yum.comps does not support the biarchonly of group and basearchonly of package') print('WARN: yum.comps does not support the biarchonly of group and basearchonly of package')
comps = yumcomps.Comps() comps = yumcomps.Comps()
@ -1413,7 +1413,7 @@ def _import_comps_alt(session, filename, tag, options): # no cover 3.x
('conditional', group.conditional_packages)]: ('conditional', group.conditional_packages)]:
for pkg in pdata: for pkg in pdata:
# yum.comps does not support basearchonly # yum.comps does not support basearchonly
pkgopts = {'type' : ptype} pkgopts = {'type': ptype}
if ptype == 'conditional': if ptype == 'conditional':
pkgopts['requires'] = pdata[pkg] pkgopts['requires'] = pdata[pkg]
for k in pkgopts.keys(): for k in pkgopts.keys():
@ -1444,7 +1444,7 @@ def handle_import_sig(goptions, session, args):
parser.error(_("No such file: %s") % path) parser.error(_("No such file: %s") % path)
activate_session(session, goptions) activate_session(session, goptions)
for path in args: for path in args:
data = koji.get_header_fields(path, ('name','version','release','arch','siggpg','sigpgp','sourcepackage')) data = koji.get_header_fields(path, ('name', 'version', 'release', 'arch', 'siggpg', 'sigpgp', 'sourcepackage'))
if data['sourcepackage']: if data['sourcepackage']:
data['arch'] = 'src' data['arch'] = 'src'
sigkey = data['siggpg'] sigkey = data['siggpg']
@ -1524,7 +1524,7 @@ def handle_write_signed_rpm(goptions, session, args):
rpms.extend(session.listRPMs(buildID=build['id'])) rpms.extend(session.listRPMs(buildID=build['id']))
for i, rpminfo in enumerate(rpms): for i, rpminfo in enumerate(rpms):
nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rpminfo nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rpminfo
print("[%d/%d] %s" % (i+1, len(rpms), nvra)) print("[%d/%d] %s" % (i + 1, len(rpms), nvra))
session.writeSignedRPM(rpminfo['id'], key) session.writeSignedRPM(rpminfo['id'], key)
@ -1573,7 +1573,7 @@ def handle_prune_signed_copies(options, session, args):
if options.verbose: if options.verbose:
print("Getting builds...") print("Getting builds...")
qopts = { qopts = {
'state' : koji.BUILD_STATES['COMPLETE'], 'state': koji.BUILD_STATES['COMPLETE'],
'queryOpts': { 'queryOpts': {
'limit': 50000, 'limit': 50000,
'offset': 0, 'offset': 0,
@ -1665,7 +1665,7 @@ def handle_prune_signed_copies(options, session, args):
timeline.sort(key=lambda entry: entry[:2]) timeline.sort(key=lambda entry: entry[:2])
# find most recent creation entry for our build and crop there # find most recent creation entry for our build and crop there
latest_ts = None latest_ts = None
for i in range(len(timeline)-1, -1, -1): for i in range(len(timeline) - 1, -1, -1):
# searching in reverse chronological order # searching in reverse chronological order
event_id, is_create, entry = timeline[i] event_id, is_create, entry = timeline[i]
if entry['build_id'] == binfo['id'] and is_create: if entry['build_id'] == binfo['id'] and is_create:
@ -1678,7 +1678,7 @@ def handle_prune_signed_copies(options, session, args):
if options.debug: if options.debug:
print(_histline(event_id, our_entry)) print(_histline(event_id, our_entry))
# now go through the events since most recent creation entry # now go through the events since most recent creation entry
timeline = timeline[i+1:] timeline = timeline[i + 1:]
if not timeline: if not timeline:
is_latest = True is_latest = True
if options.debug: if options.debug:
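The range(len(timeline) - 1, -1, -1) loop above walks the history backwards to
the most recent creation entry, then timeline[i + 1:] keeps only what happened
after it. The same control flow on a toy timeline (simplified to the is_create
test; the real loop also matches build_id):

    timeline = [(10, True, 'a'), (11, False, 'b'), (12, True, 'c'), (13, False, 'd')]
    for i in range(len(timeline) - 1, -1, -1):
        if timeline[i][1]:           # most recent creation entry
            break
    timeline = timeline[i + 1:]      # -> [(13, False, 'd')]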
@ -1831,7 +1831,7 @@ def handle_prune_signed_copies(options, session, args):
print("Error removing %s: %s" % (signedpath, e)) print("Error removing %s: %s" % (signedpath, e))
print("This script needs write access to %s" % koji.BASEDIR) print("This script needs write access to %s" % koji.BASEDIR)
continue continue
mycount +=1 mycount += 1
build_files += 1 build_files += 1
build_space += st.st_size build_space += st.st_size
# XXX - this makes some layout assumptions, but # XXX - this makes some layout assumptions, but
@ -1894,7 +1894,7 @@ def handle_set_build_volume(goptions, session, args):
if not binfo: if not binfo:
print("No such build: %s" % nvr) print("No such build: %s" % nvr)
elif binfo['volume_id'] == volinfo['id']: elif binfo['volume_id'] == volinfo['id']:
print("Build %s already on volume %s" %(nvr, volinfo['name'])) print("Build %s already on volume %s" % (nvr, volinfo['name']))
else: else:
builds.append(binfo) builds.append(binfo)
if not builds: if not builds:
@ -2352,13 +2352,13 @@ def anon_handle_latest_build(goptions, session, args):
if not options.quiet: if not options.quiet:
if options.type == 'maven': if options.type == 'maven':
print("%-40s %-20s %-20s %-20s %s" % ("Build", "Tag", "Group Id", "Artifact Id", "Built by")) print("%-40s %-20s %-20s %-20s %s" % ("Build", "Tag", "Group Id", "Artifact Id", "Built by"))
print("%s %s %s %s %s" % ("-"*40, "-"*20, "-"*20, "-"*20, "-"*16)) print("%s %s %s %s %s" % ("-" * 40, "-" * 20, "-" * 20, "-" * 20, "-" * 16))
else: else:
print("%-40s %-20s %s" % ("Build","Tag","Built by")) print("%-40s %-20s %s" % ("Build", "Tag", "Built by"))
print("%s %s %s" % ("-"*40, "-"*20, "-"*16)) print("%s %s %s" % ("-" * 40, "-" * 20, "-" * 16))
options.quiet = True options.quiet = True
output = [ fmt % x for x in data] output = [fmt % x for x in data]
output.sort() output.sort()
for line in output: for line in output:
print(line) print(line)
@ -2452,7 +2452,7 @@ def anon_handle_list_tagged(goptions, session, args):
rpms, builds = session.listTaggedRPMS(tag, **opts) rpms, builds = session.listTaggedRPMS(tag, **opts)
data = rpms data = rpms
if options.paths: if options.paths:
build_idx = dict([(b['id'],b) for b in builds]) build_idx = dict([(b['id'], b) for b in builds])
for rinfo in data: for rinfo in data:
build = build_idx[rinfo['build_id']] build = build_idx[rinfo['build_id']]
builddir = pathinfo.build(build) builddir = pathinfo.build(build)
@ -2488,12 +2488,12 @@ def anon_handle_list_tagged(goptions, session, args):
if not options.quiet: if not options.quiet:
if options.type == 'maven': if options.type == 'maven':
print("%-40s %-20s %-20s %-20s %s" % ("Build", "Tag", "Group Id", "Artifact Id", "Built by")) print("%-40s %-20s %-20s %-20s %s" % ("Build", "Tag", "Group Id", "Artifact Id", "Built by"))
print("%s %s %s %s %s" % ("-"*40, "-"*20, "-"*20, "-"*20, "-"*16)) print("%s %s %s %s %s" % ("-" * 40, "-" * 20, "-" * 20, "-" * 20, "-" * 16))
else: else:
print("%-40s %-20s %s" % ("Build","Tag","Built by")) print("%-40s %-20s %s" % ("Build", "Tag", "Built by"))
print("%s %s %s" % ("-"*40, "-"*20, "-"*16)) print("%s %s %s" % ("-" * 40, "-" * 20, "-" * 16))
output = [ fmt % x for x in data] output = [fmt % x for x in data]
output.sort() output.sort()
for line in output: for line in output:
print(line) print(line)
@ -2573,7 +2573,7 @@ def anon_handle_list_untagged(goptions, session, args):
if options.show_references: if options.show_references:
fmt = fmt + " %(refs)s" fmt = fmt + " %(refs)s"
output = [ fmt % x for x in data] output = [fmt % x for x in data]
output.sort() output.sort()
for line in output: for line in output:
print(line) print(line)
@ -2904,21 +2904,21 @@ def anon_handle_list_pkgs(goptions, session, args):
if not options.quiet: if not options.quiet:
if allpkgs: if allpkgs:
print("Package") print("Package")
print('-'*23) print('-' * 23)
else: else:
print("%-23s %-23s %-16s %-15s" % ('Package','Tag','Extra Arches','Owner')) print("%-23s %-23s %-16s %-15s" % ('Package', 'Tag', 'Extra Arches', 'Owner'))
print("%s %s %s %s" % ('-'*23,'-'*23,'-'*16,'-'*15)) print("%s %s %s %s" % ('-' * 23, '-' * 23, '-' * 16, '-' * 15))
for pkg in data: for pkg in data:
if allpkgs: if allpkgs:
print(pkg['package_name']) print(pkg['package_name'])
else: else:
if not options.show_blocked and pkg.get('blocked',False): if not options.show_blocked and pkg.get('blocked', False):
continue continue
if 'tag_id' in pkg: if 'tag_id' in pkg:
if pkg['extra_arches'] is None: if pkg['extra_arches'] is None:
pkg['extra_arches'] = "" pkg['extra_arches'] = ""
fmt = "%(package_name)-23s %(tag_name)-23s %(extra_arches)-16s %(owner_name)-15s" fmt = "%(package_name)-23s %(tag_name)-23s %(extra_arches)-16s %(owner_name)-15s"
if pkg.get('blocked',False): if pkg.get('blocked', False):
fmt += " [BLOCKED]" fmt += " [BLOCKED]"
else: else:
fmt = "%(package_name)s" fmt = "%(package_name)s"
@ -3038,7 +3038,7 @@ def anon_handle_list_builds(goptions, session, args):
fmt = "%(nvr)-55s %(owner_name)-16s %(state)s" fmt = "%(nvr)-55s %(owner_name)-16s %(state)s"
if not options.quiet: if not options.quiet:
print("%-55s %-16s %s" % ("Build", "Built by", "State")) print("%-55s %-16s %s" % ("Build", "Built by", "State"))
print("%s %s %s" % ("-"*55, "-"*16, "-"*16)) print("%s %s %s" % ("-" * 55, "-" * 16, "-" * 16))
for build in data: for build in data:
print(fmt % build) print(fmt % build)
@ -3102,11 +3102,11 @@ def anon_handle_rpminfo(goptions, session, args):
if info.get('extra'): if info.get('extra'):
print("Extra: %(extra)r" % info) print("Extra: %(extra)r" % info)
if options.buildroots: if options.buildroots:
br_list = session.listBuildroots(rpmID=info['id'], queryOpts={'order':'buildroot.id'}) br_list = session.listBuildroots(rpmID=info['id'], queryOpts={'order': 'buildroot.id'})
print("Used in %i buildroots:" % len(br_list)) print("Used in %i buildroots:" % len(br_list))
if len(br_list): if len(br_list):
print(" %8s %-28s %-8s %-29s" % ('id','build tag','arch','build host')) print(" %8s %-28s %-8s %-29s" % ('id', 'build tag', 'arch', 'build host'))
print(" %s %s %s %s" % ('-'*8, '-'*28, '-'*8, '-'*29)) print(" %s %s %s %s" % ('-' * 8, '-' * 28, '-' * 8, '-' * 29))
for br_info in br_list: for br_info in br_list:
print(" %(id)8i %(tag_name)-28s %(arch)-8s %(host_name)-29s" % br_info) print(" %(id)8i %(tag_name)-28s %(arch)-8s %(host_name)-29s" % br_info)
@ -3226,14 +3226,14 @@ def anon_handle_hostinfo(goptions, session, args):
description = info['description'].splitlines() description = info['description'].splitlines()
print("Description: %s" % description[0]) print("Description: %s" % description[0])
for line in description[1:]: for line in description[1:]:
print("%s%s" % (" "*13, line)) print("%s%s" % (" " * 13, line))
else: else:
print("Description:") print("Description:")
if info['comment']: if info['comment']:
comment = info['comment'].splitlines() comment = info['comment'].splitlines()
print("Comment: %s" % comment[0]) print("Comment: %s" % comment[0])
for line in comment[1:]: for line in comment[1:]:
print("%s%s" % (" "*9, line)) print("%s%s" % (" " * 9, line))
else: else:
print("Comment:") print("Comment:")
print("Enabled: %s" % (info['enabled'] and 'yes' or 'no')) print("Enabled: %s" % (info['enabled'] and 'yes' or 'no'))
@ -3246,9 +3246,9 @@ def anon_handle_hostinfo(goptions, session, args):
print("Last Update: %s" % update) print("Last Update: %s" % update)
print("Channels: %s" % ' '.join([c['name'] for c in session.listChannels(hostID=info['id'])])) print("Channels: %s" % ' '.join([c['name'] for c in session.listChannels(hostID=info['id'])]))
print("Active Buildroots:") print("Active Buildroots:")
states = {0:"INIT", 1:"WAITING", 2:"BUILDING"} states = {0: "INIT", 1: "WAITING", 2: "BUILDING"}
rows = [('NAME', 'STATE', 'CREATION TIME')] rows = [('NAME', 'STATE', 'CREATION TIME')]
for s in range(0,3): for s in range(0, 3):
for b in session.listBuildroots(hostID=info['id'], state=s): for b in session.listBuildroots(hostID=info['id'], state=s):
rows.append((("%s-%s-%s" % (b['tag_name'], b['id'], b['repo_id'])), states[s], rows.append((("%s-%s-%s" % (b['tag_name'], b['id'], b['repo_id'])), states[s],
b['create_event_time'][:b['create_event_time'].find('.')])) b['create_event_time'][:b['create_event_time'].find('.')]))
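Looping s over range(0, 3) rather than listing all buildroots at once groups the
output rows by state, INIT first and BUILDING last. In miniature, with a
hypothetical state-to-buildroots map in place of the listBuildroots calls:

    states = {0: "INIT", 1: "WAITING", 2: "BUILDING"}
    by_state = {0: ['br-12'], 2: ['br-7', 'br-9']}   # hypothetical buildroots
    rows = [('NAME', 'STATE')]
    for s in range(0, 3):
        for name in by_state.get(s, []):
            rows.append((name, states[s]))
    # rows: header, ('br-12', 'INIT'), ('br-7', 'BUILDING'), ('br-9', 'BUILDING')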
@ -3327,9 +3327,9 @@ def handle_clone_tag(goptions, session, args):
"Please use --force if this is what you really want to do.")) "Please use --force if this is what you really want to do."))
# init debug lists. # init debug lists.
chgpkglist=[] chgpkglist = []
chgbldlist=[] chgbldlist = []
chggrplist=[] chggrplist = []
# case of brand new dst-tag. # case of brand new dst-tag.
if not dsttag: if not dsttag:
if not options.config: if not options.config:
@ -3500,7 +3500,7 @@ def handle_clone_tag(goptions, session, args):
bdellist.extend(dblds) bdellist.extend(dblds)
baddlist.sort(key=lambda x: x['package_name']) baddlist.sort(key=lambda x: x['package_name'])
bdellist.sort(key=lambda x: x['package_name']) bdellist.sort(key=lambda x: x['package_name'])
gaddlist = [] # list containing new groups to be added from src tag gaddlist = [] # list containing new groups to be added from src tag
for (grpname, group) in six.iteritems(srcgroups): for (grpname, group) in six.iteritems(srcgroups):
if grpname not in dstgroups: if grpname not in dstgroups:
gaddlist.append(group) gaddlist.append(group)
@ -3517,8 +3517,8 @@ def handle_clone_tag(goptions, session, args):
grpchanges[grpname]['inherited'] = False grpchanges[grpname]['inherited'] = False
if dstgroup['tag_id'] != dsttag['id']: if dstgroup['tag_id'] != dsttag['id']:
grpchanges[grpname]['inherited'] = True grpchanges[grpname]['inherited'] = True
srcgrppkglist=[] srcgrppkglist = []
dstgrppkglist=[] dstgrppkglist = []
for pkg in group['packagelist']: for pkg in group['packagelist']:
srcgrppkglist.append(pkg['package']) srcgrppkglist.append(pkg['package'])
for pkg in dstgroups[grpname]['packagelist']: for pkg in dstgroups[grpname]['packagelist']:
@ -3717,22 +3717,22 @@ def handle_clone_tag(goptions, session, args):
session.multiCall(batch=options.batch) session.multiCall(batch=options.batch)
# print final list of actions. # print final list of actions.
if options.verbose: if options.verbose:
pfmt=' %-7s %-28s %-10s %-10s %-10s\n' pfmt = ' %-7s %-28s %-10s %-10s %-10s\n'
bfmt=' %-7s %-28s %-40s %-10s %-10s %-10s\n' bfmt = ' %-7s %-28s %-40s %-10s %-10s %-10s\n'
gfmt=' %-7s %-28s %-28s\n' gfmt = ' %-7s %-28s %-28s\n'
sys.stdout.write('\nList of changes:\n\n') sys.stdout.write('\nList of changes:\n\n')
sys.stdout.write(pfmt % ('Action', 'Package', 'Blocked', 'Owner', 'From Tag')) sys.stdout.write(pfmt % ('Action', 'Package', 'Blocked', 'Owner', 'From Tag'))
sys.stdout.write(pfmt % ('-'*7, '-'*28, '-'*10, '-'*10, '-'*10)) sys.stdout.write(pfmt % ('-' * 7, '-' * 28, '-' * 10, '-' * 10, '-' * 10))
for changes in chgpkglist: for changes in chgpkglist:
sys.stdout.write(pfmt % changes) sys.stdout.write(pfmt % changes)
sys.stdout.write('\n') sys.stdout.write('\n')
sys.stdout.write(bfmt % ('Action', 'From/To Package', 'Build(s)', 'State', 'Owner', 'From Tag')) sys.stdout.write(bfmt % ('Action', 'From/To Package', 'Build(s)', 'State', 'Owner', 'From Tag'))
sys.stdout.write(bfmt % ('-'*7, '-'*28, '-'*40, '-'*10, '-'*10, '-'*10)) sys.stdout.write(bfmt % ('-' * 7, '-' * 28, '-' * 40, '-' * 10, '-' * 10, '-' * 10))
for changes in chgbldlist: for changes in chgbldlist:
sys.stdout.write(bfmt % changes) sys.stdout.write(bfmt % changes)
sys.stdout.write('\n') sys.stdout.write('\n')
sys.stdout.write(gfmt % ('Action', 'Package', 'Group')) sys.stdout.write(gfmt % ('Action', 'Package', 'Group'))
sys.stdout.write(gfmt % ('-'*7, '-'*28, '-'*28)) sys.stdout.write(gfmt % ('-' * 7, '-' * 28, '-' * 28))
for changes in chggrplist: for changes in chggrplist:
sys.stdout.write(gfmt % changes) sys.stdout.write(gfmt % changes)
@ -3874,7 +3874,7 @@ def anon_handle_list_targets(goptions, session, args):
fmt = "%(name)-30s %(build_tag_name)-30s %(dest_tag_name)-30s" fmt = "%(name)-30s %(build_tag_name)-30s %(dest_tag_name)-30s"
if not options.quiet: if not options.quiet:
print("%-30s %-30s %-30s" % ('Name','Buildroot','Destination')) print("%-30s %-30s %-30s" % ('Name', 'Buildroot', 'Destination'))
print("-" * 93) print("-" * 93)
tmp_list = [(x['name'], x) for x in session.getBuildTargets(options.name)] tmp_list = [(x['name'], x) for x in session.getBuildTargets(options.name)]
tmp_list.sort() tmp_list.sort()
@ -4005,7 +4005,7 @@ def anon_handle_list_tags(goptions, session, args):
if not buildinfo: if not buildinfo:
parser.error(_("Invalid build %s" % options.build)) parser.error(_("Invalid build %s" % options.build))
tags = session.listTags(buildinfo.get('id',None), pkginfo.get('id',None)) tags = session.listTags(buildinfo.get('id', None), pkginfo.get('id', None))
tags.sort(key=lambda x: x['name']) tags.sort(key=lambda x: x['name'])
# if options.verbose: # if options.verbose:
# fmt = "%(name)s [%(id)i] %(perm)s %(locked)s %(arches)s" # fmt = "%(name)s [%(id)i] %(perm)s %(locked)s %(arches)s"
@ -4107,7 +4107,7 @@ def _print_histline(entry, **kwargs):
del x['.related'] del x['.related']
bad_edit = None bad_edit = None
if len(edit) != 1: if len(edit) != 1:
bad_edit = "%i elements" % (len(edit)+1) bad_edit = "%i elements" % (len(edit) + 1)
other = edit[0] other = edit[0]
# check edit for sanity # check edit for sanity
if create or not other[2]: if create or not other[2]:
@ -4256,7 +4256,7 @@ def _print_histline(entry, **kwargs):
else: else:
fmt = "%s entry revoked" % table fmt = "%s entry revoked" % table
time_str = time.asctime(time.localtime(ts)) time_str = time.asctime(time.localtime(ts))
parts = [time_str, fmt % x] parts = [time_str, fmt % x]
if options.events or options.verbose: if options.events or options.verbose:
parts.insert(1, "(eid %i)" % event_id) parts.insert(1, "(eid %i)" % event_id)
if who: if who:
@ -4312,23 +4312,23 @@ def _print_histline(entry, **kwargs):
print(" %s: %s" % (dkey, x[key])) print(" %s: %s" % (dkey, x[key]))
_table_keys = { _table_keys = {
'user_perms' : ['user_id', 'perm_id'], 'user_perms': ['user_id', 'perm_id'],
'user_groups' : ['user_id', 'group_id'], 'user_groups': ['user_id', 'group_id'],
'cg_users' : ['user_id', 'cg_id'], 'cg_users': ['user_id', 'cg_id'],
'tag_inheritance' : ['tag_id', 'parent_id'], 'tag_inheritance': ['tag_id', 'parent_id'],
'tag_config' : ['tag_id'], 'tag_config': ['tag_id'],
'tag_extra' : ['tag_id', 'key'], 'tag_extra': ['tag_id', 'key'],
'build_target_config' : ['build_target_id'], 'build_target_config': ['build_target_id'],
'external_repo_config' : ['external_repo_id'], 'external_repo_config': ['external_repo_id'],
'host_config': ['host_id'], 'host_config': ['host_id'],
'host_channels': ['host_id', 'channel_id'], 'host_channels': ['host_id', 'channel_id'],
'tag_external_repos' : ['tag_id', 'external_repo_id'], 'tag_external_repos': ['tag_id', 'external_repo_id'],
'tag_listing' : ['build_id', 'tag_id'], 'tag_listing': ['build_id', 'tag_id'],
'tag_packages' : ['package_id', 'tag_id'], 'tag_packages': ['package_id', 'tag_id'],
'tag_package_owners' : ['package_id', 'tag_id'], 'tag_package_owners': ['package_id', 'tag_id'],
'group_config' : ['group_id', 'tag_id'], 'group_config': ['group_id', 'tag_id'],
'group_req_listing' : ['group_id', 'tag_id', 'req_id'], 'group_req_listing': ['group_id', 'tag_id', 'req_id'],
'group_package_listing' : ['group_id', 'tag_id', 'package'], 'group_package_listing': ['group_id', 'tag_id', 'package'],
} }
@ -4640,7 +4640,7 @@ def _printTaskInfo(session, task_id, topdir, level=0, recurse=True, verbose=True
and its children.""" and its children."""
BUILDDIR = '/var/lib/mock' BUILDDIR = '/var/lib/mock'
indent = " "*2*level indent = " " * 2 * level
info = session.getTaskInfo(task_id) info = session.getTaskInfo(task_id)
@ -4762,8 +4762,8 @@ def anon_handle_taginfo(goptions, session, args):
for n, info in enumerate(tags): for n, info in enumerate(tags):
if n > 0: if n > 0:
print('') print('')
print("Tag: %(name)s [%(id)d]" %info) print("Tag: %(name)s [%(id)d]" % info)
print("Arches: %(arches)s" %info) print("Arches: %(arches)s" % info)
group_list = [x['name'] for x in session.getTagGroups(info['id'], **event_opts)] group_list = [x['name'] for x in session.getTagGroups(info['id'], **event_opts)]
group_list.sort() group_list.sort()
print("Groups: " + ', '.join(group_list)) print("Groups: " + ', '.join(group_list))
@ -4851,7 +4851,7 @@ def handle_add_tag(goptions, session, args):
value = arg_filter(value) value = arg_filter(value)
extra[key] = value extra[key] = value
opts['extra'] = extra opts['extra'] = extra
session.createTag(args[0],**opts) session.createTag(args[0], **opts)
def handle_edit_tag(goptions, session, args): def handle_edit_tag(goptions, session, args):
@ -5119,7 +5119,7 @@ def handle_edit_tag_inheritance(goptions, session, args):
return 1 return 1
new_data = data.copy() new_data = data.copy()
if options.priority is not None and options.priority.isdigit(): if options.priority is not None and options.priority.isdigit():
new_data['priority'] = int(options.priority) new_data['priority'] = int(options.priority)
if options.maxdepth is not None: if options.maxdepth is not None:
if options.maxdepth.isdigit(): if options.maxdepth.isdigit():
@ -5226,7 +5226,7 @@ def anon_handle_show_groups(goptions, session, args):
if options.comps: if options.comps:
print(koji.generate_comps(groups, expand_groups=options.expand)) print(koji.generate_comps(groups, expand_groups=options.expand))
elif options.spec: elif options.spec:
print(koji.make_groups_spec(groups,name='buildgroups',buildgroup='build')) print(koji.make_groups_spec(groups, name='buildgroups', buildgroup='build'))
else: else:
pprint.pprint(groups) pprint.pprint(groups)
@ -5235,8 +5235,8 @@ def anon_handle_list_external_repos(goptions, session, args):
"[info] List external repos" "[info] List external repos"
usage = _("usage: %prog list-external-repos [options]") usage = _("usage: %prog list-external-repos [options]")
parser = OptionParser(usage=get_usage_str(usage)) parser = OptionParser(usage=get_usage_str(usage))
parser.add_option("--url", help=_("Select by url")) parser.add_option("--url", help=_("Select by url"))
parser.add_option("--name", help=_("Select by name")) parser.add_option("--name", help=_("Select by name"))
parser.add_option("--id", type="int", help=_("Select by id")) parser.add_option("--id", type="int", help=_("Select by id"))
parser.add_option("--tag", help=_("Select by tag")) parser.add_option("--tag", help=_("Select by tag"))
parser.add_option("--used", action='store_true', help=_("List which tags use the repo(s)")) parser.add_option("--used", action='store_true', help=_("List which tags use the repo(s)"))
@ -5278,7 +5278,7 @@ def anon_handle_list_external_repos(goptions, session, args):
format = "basic" format = "basic"
opts['info'] = options.id or options.name or None opts['info'] = options.id or options.name or None
opts['url'] = options.url or None opts['url'] = options.url or None
data = session.listExternalRepos (**opts) data = session.listExternalRepos(**opts)
# There are three different output formats # There are three different output formats
# 1) Listing just repo data (name, url) # 1) Listing just repo data (name, url)
@ -5287,15 +5287,15 @@ def anon_handle_list_external_repos(goptions, session, args):
if format == "basic": if format == "basic":
format = "%(name)-25s %(url)s" format = "%(name)-25s %(url)s"
header1 = "%-25s %s" % ("External repo name", "URL") header1 = "%-25s %s" % ("External repo name", "URL")
header2 = "%s %s" % ("-"*25, "-"*40) header2 = "%s %s" % ("-" * 25, "-" * 40)
elif format == "tag": elif format == "tag":
format = "%(priority)-3i %(external_repo_name)-25s %(merge_mode)-10s %(url)s" format = "%(priority)-3i %(external_repo_name)-25s %(merge_mode)-10s %(url)s"
header1 = "%-3s %-25s %-10s URL" % ("Pri", "External repo name", "Mode") header1 = "%-3s %-25s %-10s URL" % ("Pri", "External repo name", "Mode")
header2 = "%s %s %s %s" % ("-"*3, "-"*25, "-"*10, "-"*40) header2 = "%s %s %s %s" % ("-" * 3, "-" * 25, "-" * 10, "-" * 40)
elif format == "multitag": elif format == "multitag":
format = "%(tag_name)-20s %(priority)-3i %(merge_mode)-10s %(external_repo_name)s" format = "%(tag_name)-20s %(priority)-3i %(merge_mode)-10s %(external_repo_name)s"
header1 = "%-20s %-3s %-10s %s" % ("Tag", "Pri", "Mode", "External repo name") header1 = "%-20s %-3s %-10s %s" % ("Tag", "Pri", "Mode", "External repo name")
header2 = "%s %s %s %s" % ("-"*20, "-"*3, "-"*10, "-"*25) header2 = "%s %s %s %s" % ("-" * 20, "-" * 3, "-" * 10, "-" * 25)
if not options.quiet: if not options.quiet:
print(header1) print(header1)
print(header2) print(header2)
@ -5379,8 +5379,8 @@ def handle_edit_external_repo(goptions, session, args):
"[admin] Edit data for an external repo" "[admin] Edit data for an external repo"
usage = _("usage: %prog edit-external-repo <name>") usage = _("usage: %prog edit-external-repo <name>")
parser = OptionParser(usage=get_usage_str(usage)) parser = OptionParser(usage=get_usage_str(usage))
parser.add_option("--url", help=_("Change the url")) parser.add_option("--url", help=_("Change the url"))
parser.add_option("--name", help=_("Change the name")) parser.add_option("--name", help=_("Change the name"))
(options, args) = parser.parse_args(args) (options, args) = parser.parse_args(args)
if len(args) != 1: if len(args) != 1:
parser.error(_("Incorrect number of arguments")) parser.error(_("Incorrect number of arguments"))
@ -5654,17 +5654,17 @@ def _build_image_indirection(options, task_opts, session, args):
bool(task_opts.base_image_build)): bool(task_opts.base_image_build)):
raise koji.GenericError(_("You must specify either a base-image task or build ID/NVR")) raise koji.GenericError(_("You must specify either a base-image task or build ID/NVR"))
required_opts = [ 'name', 'version', 'arch', 'target', 'indirection_template', 'results_loc' ] required_opts = ['name', 'version', 'arch', 'target', 'indirection_template', 'results_loc']
optional_opts = [ 'indirection_template_url', 'scratch', 'utility_image_task', 'utility_image_build', optional_opts = ['indirection_template_url', 'scratch', 'utility_image_task', 'utility_image_build',
'base_image_task', 'base_image_build', 'release', 'skip_tag' ] 'base_image_task', 'base_image_build', 'release', 'skip_tag']
missing = [ ] missing = []
for opt in required_opts: for opt in required_opts:
if not getattr(task_opts, opt, None): if not getattr(task_opts, opt, None):
missing.append(opt) missing.append(opt)
if len(missing) > 0: if len(missing) > 0:
print("Missing the following required options: %s" % ' '.join(['--%s' % o.replace('_','-') for o in missing])) print("Missing the following required options: %s" % ' '.join(['--%s' % o.replace('_', '-') for o in missing]))
raise koji.GenericError(_("Missing required options specified above")) raise koji.GenericError(_("Missing required options specified above"))
activate_session(session, options) activate_session(session, options)
@ -5708,7 +5708,7 @@ def _build_image_indirection(options, task_opts, session, args):
os.path.basename(templatefile)) os.path.basename(templatefile))
print('') print('')
hub_opts = { } hub_opts = {}
# Just pass everything in as opts. No positional arguments at all. Why not? # Just pass everything in as opts. No positional arguments at all. Why not?
for opt in required_opts + optional_opts: for opt in required_opts + optional_opts:
val = getattr(task_opts, opt, None) val = getattr(task_opts, opt, None)
@ -5829,10 +5829,10 @@ def handle_image_build(options, session, args):
# as do factory-parameters # as do factory-parameters
section = 'factory-parameters' section = 'factory-parameters'
if config.has_section(section): if config.has_section(section):
task_options.factory_parameter = [ ] task_options.factory_parameter = []
for k, v in config.items(section): for k, v in config.items(section):
# We do this, rather than a dict, to match what argparse spits out # We do this, rather than a dict, to match what argparse spits out
task_options.factory_parameter.append( (k, v) ) task_options.factory_parameter.append((k, v))
else: else:
if len(args) < 5: if len(args) < 5:
@ -6198,7 +6198,7 @@ def handle_set_pkg_arches(goptions, session, args):
tag = args[1] tag = args[1]
with session.multicall(strict=True) as m: with session.multicall(strict=True) as m:
for package in args[2:]: for package in args[2:]:
m.packageListSetArches(tag,package,arches,force=options.force) m.packageListSetArches(tag, package, arches, force=options.force)
def handle_set_pkg_owner(goptions, session, args): def handle_set_pkg_owner(goptions, session, args):
@ -6214,7 +6214,7 @@ def handle_set_pkg_owner(goptions, session, args):
tag = args[1] tag = args[1]
with session.multicall(strict=True) as m: with session.multicall(strict=True) as m:
for package in args[2:]: for package in args[2:]:
m.packageListSetOwner(tag,package,owner,force=options.force) m.packageListSetOwner(tag, package, owner, force=options.force)
def handle_set_pkg_owner_global(goptions, session, args): def handle_set_pkg_owner_global(goptions, session, args):
@ -6237,7 +6237,7 @@ def handle_set_pkg_owner_global(goptions, session, args):
if not user: if not user:
print("No such user: %s" % owner) print("No such user: %s" % owner)
return 1 return 1
opts = {'with_dups' : True} opts = {'with_dups': True}
old_user = None old_user = None
if options.old_user: if options.old_user:
old_user = session.getUser(options.old_user) old_user = session.getUser(options.old_user)
@ -6262,7 +6262,7 @@ def handle_set_pkg_owner_global(goptions, session, args):
if user['id'] == entry['owner_id']: if user['id'] == entry['owner_id']:
if options.verbose: if options.verbose:
print("Preserving owner=%s for package %s in tag %s" \ print("Preserving owner=%s for package %s in tag %s" \
% (user['name'], package, entry['tag_name'])) % (user['name'], package, entry['tag_name']))
else: else:
if options.test: if options.test:
print("Would have changed owner for %s in tag %s: %s -> %s" \ print("Would have changed owner for %s in tag %s: %s -> %s" \
@ -6527,7 +6527,7 @@ def handle_unblock_pkg(goptions, session, args):
tag = args[0] tag = args[0]
with session.multicall(strict=True) as m: with session.multicall(strict=True) as m:
for package in args[1:]: for package in args[1:]:
m.packageListUnblock(tag,package) m.packageListUnblock(tag, package)
def anon_handle_download_build(options, session, args): def anon_handle_download_build(options, session, args):
@ -6729,7 +6729,7 @@ def anon_handle_download_logs(options, session, args):
if task_info is None: if task_info is None:
error(_("No such task id: %i" % task_id)) error(_("No such task id: %i" % task_id))
files = list_task_output_all_volumes(session, task_id) files = list_task_output_all_volumes(session, task_id)
logs = [] # list of tuples (filename, volume) logs = [] # list of tuples (filename, volume)
for filename in files: for filename in files:
if not filename.endswith(".log"): if not filename.endswith(".log"):
continue continue
@ -6741,7 +6741,7 @@ def anon_handle_download_logs(options, session, args):
"%s-%s" % (task_info["arch"], task_id)) "%s-%s" % (task_info["arch"], task_id))
count = 0 count = 0
state = koji.TASK_STATES[task_info['state']] state = koji.TASK_STATES[task_info['state']]
if state == 'FAILED': if state == 'FAILED':
if not match or koji.util.multi_fnmatch(FAIL_LOG, match): if not match or koji.util.multi_fnmatch(FAIL_LOG, match):
write_fail_log(task_log_dir, task_id) write_fail_log(task_log_dir, task_id)
@ -7022,7 +7022,7 @@ def handle_dist_repo(options, session, args):
parser.add_option("--split-debuginfo", action='store_true', default=False, parser.add_option("--split-debuginfo", action='store_true', default=False,
help='Split debuginfo into a separate repo for each arch') help='Split debuginfo into a separate repo for each arch')
parser.add_option('--comps', help='Include a comps file in the repodata') parser.add_option('--comps', help='Include a comps file in the repodata')
parser.add_option('--delta-rpms', metavar='REPO',default=[], parser.add_option('--delta-rpms', metavar='REPO', default=[],
action='append', action='append',
help=_('Create delta rpms. REPO can be the id of another dist repo ' help=_('Create delta rpms. REPO can be the id of another dist repo '
'or the name of a tag that has a dist repo. May be specified ' 'or the name of a tag that has a dist repo. May be specified '
@ -7178,7 +7178,7 @@ def handle_moshimoshi(options, session, args):
u = session.getLoggedInUser() u = session.getLoggedInUser()
if not u: if not u:
print("Not authenticated") print("Not authenticated")
u = {'name' : 'anonymous user'} u = {'name': 'anonymous user'}
print("%s, %s!" % (_printable_unicode(random.choice(greetings)), u["name"])) print("%s, %s!" % (_printable_unicode(random.choice(greetings)), u["name"]))
print("") print("")
print("You are using the hub at %s" % session.baseurl) print("You are using the hub at %s" % session.baseurl)
@ -77,21 +77,21 @@ def arg_filter(arg):
categories = { categories = {
'admin' : 'admin commands', 'admin': 'admin commands',
'build' : 'build commands', 'build': 'build commands',
'search' : 'search commands', 'search': 'search commands',
'download' : 'download commands', 'download': 'download commands',
'monitor' : 'monitor commands', 'monitor': 'monitor commands',
'info' : 'info commands', 'info': 'info commands',
'bind' : 'bind commands', 'bind': 'bind commands',
'misc' : 'miscellaneous commands', 'misc': 'miscellaneous commands',
} }
def get_epilog_str(progname=None): def get_epilog_str(progname=None):
if progname is None: if progname is None:
progname = os.path.basename(sys.argv[0]) or 'koji' progname = os.path.basename(sys.argv[0]) or 'koji'
categories_ordered=', '.join(sorted(['all'] + to_list(categories.keys()))) categories_ordered = ', '.join(sorted(['all'] + to_list(categories.keys())))
epilog_str = ''' epilog_str = '''
Try "%(progname)s --help" for help about global options Try "%(progname)s --help" for help about global options
Try "%(progname)s help" to get all available commands Try "%(progname)s help" to get all available commands
@ -119,29 +119,29 @@ def print_task_headers():
print("ID Pri Owner State Arch Name") print("ID Pri Owner State Arch Name")
def print_task(task,depth=0): def print_task(task, depth=0):
"""Print a task""" """Print a task"""
task = task.copy() task = task.copy()
task['state'] = koji.TASK_STATES.get(task['state'],'BADSTATE') task['state'] = koji.TASK_STATES.get(task['state'], 'BADSTATE')
fmt = "%(id)-8s %(priority)-4s %(owner_name)-20s %(state)-8s %(arch)-10s " fmt = "%(id)-8s %(priority)-4s %(owner_name)-20s %(state)-8s %(arch)-10s "
if depth: if depth:
indent = " "*(depth-1) + " +" indent = " " * (depth - 1) + " +"
else: else:
indent = '' indent = ''
label = koji.taskLabel(task) label = koji.taskLabel(task)
print(''.join([fmt % task, indent, label])) print(''.join([fmt % task, indent, label]))
def print_task_recurse(task,depth=0): def print_task_recurse(task, depth=0):
"""Print a task and its children""" """Print a task and its children"""
print_task(task,depth) print_task(task, depth)
for child in task.get('children',()): for child in task.get('children', ()):
print_task_recurse(child,depth+1) print_task_recurse(child, depth + 1)
class TaskWatcher(object): class TaskWatcher(object):
def __init__(self,task_id,session,level=0,quiet=False): def __init__(self, task_id, session, level=0, quiet=False):
self.id = task_id self.id = task_id
self.session = session self.session = session
self.info = None self.info = None
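print_task_recurse a few lines up is a depth-first walk over the task tree that
_list_tasks assembles via the 'children' lists. A self-contained sketch of the
same traversal on a hypothetical two-level tree:

    task = {'id': 1, 'children': [{'id': 2}, {'id': 3, 'children': [{'id': 4}]}]}
    def walk(task, depth=0):
        print('  ' * depth + str(task['id']))     # indent grows with depth
        for child in task.get('children', ()):
            walk(child, depth + 1)
    walk(task)   # prints 1, then 2 and 3 indented, then 4 indented further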
@ -167,7 +167,7 @@ class TaskWatcher(object):
error = None error = None
try: try:
self.session.getTaskResult(self.id) self.session.getTaskResult(self.id)
except (six.moves.xmlrpc_client.Fault,koji.GenericError) as e: except (six.moves.xmlrpc_client.Fault, koji.GenericError) as e:
error = e error = e
if error is None: if error is None:
# print("%s: complete" % self.str()) # print("%s: complete" % self.str())
@ -206,7 +206,7 @@ class TaskWatcher(object):
if self.info is None: if self.info is None:
return False return False
state = koji.TASK_STATES[self.info['state']] state = koji.TASK_STATES[self.info['state']]
return (state in ['CLOSED','CANCELED','FAILED']) return (state in ['CLOSED', 'CANCELED', 'FAILED'])
def is_success(self): def is_success(self):
if self.info is None: if self.info is None:
@ -339,7 +339,7 @@ def watch_logs(session, tasklist, opts, poll_interval):
print("No such task id: %i" % taskId) print("No such task id: %i" % taskId)
sys.exit(1) sys.exit(1)
state = koji.TASK_STATES[info['state']] state = koji.TASK_STATES[info['state']]
return (state in ['CLOSED','CANCELED','FAILED']) return (state in ['CLOSED', 'CANCELED', 'FAILED'])
offsets = {} offsets = {}
for task_id in tasklist: for task_id in tasklist:
@ -422,7 +422,7 @@ def _format_size(size):
return "%0.2f GiB" % (size / 1073741824.0) return "%0.2f GiB" % (size / 1073741824.0)
if (size / 1048576 >= 1): if (size / 1048576 >= 1):
return "%0.2f MiB" % (size / 1048576.0) return "%0.2f MiB" % (size / 1048576.0)
if (size / 1024 >=1): if (size / 1024 >= 1):
return "%0.2f KiB" % (size / 1024.0) return "%0.2f KiB" % (size / 1024.0)
return "%0.2f B" % (size) return "%0.2f B" % (size)
@ -439,7 +439,7 @@ def _progress_callback(uploaded, total, piece, time, total_time):
if total == 0: if total == 0:
percent_done = 0.0 percent_done = 0.0
else: else:
percent_done = float(uploaded)/float(total) percent_done = float(uploaded) / float(total)
percent_done_str = "%02d%%" % (percent_done * 100) percent_done_str = "%02d%%" % (percent_done * 100)
data_done = _format_size(uploaded) data_done = _format_size(uploaded)
elapsed = _format_secs(total_time) elapsed = _format_secs(total_time)
@ -447,12 +447,12 @@ def _progress_callback(uploaded, total, piece, time, total_time):
speed = "- B/sec" speed = "- B/sec"
if (time): if (time):
if (uploaded != total): if (uploaded != total):
speed = _format_size(float(piece)/float(time)) + "/sec" speed = _format_size(float(piece) / float(time)) + "/sec"
else: else:
speed = _format_size(float(total)/float(total_time)) + "/sec" speed = _format_size(float(total) / float(total_time)) + "/sec"
# write formatted string and flush # write formatted string and flush
sys.stdout.write("[% -36s] % 4s % 8s % 10s % 14s\r" % ('='*(int(percent_done*36)), percent_done_str, elapsed, data_done, speed)) sys.stdout.write("[% -36s] % 4s % 8s % 10s % 14s\r" % ('=' * (int(percent_done * 36)), percent_done_str, elapsed, data_done, speed))
sys.stdout.flush() sys.stdout.flush()
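The bar in _progress_callback is pure arithmetic on the upload fraction: 36
columns, filled proportionally. The calculation in isolation, with made-up
numbers:

    uploaded, total = 262144, 1048576
    percent_done = float(uploaded) / float(total)   # 0.25
    bar = '=' * int(percent_done * 36)              # 9 of 36 columns filled
    print("[%-36s] %02d%%" % (bar, percent_done * 100))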
@ -587,8 +587,8 @@ def _list_tasks(options, session):
"Retrieve a list of tasks" "Retrieve a list of tasks"
callopts = { callopts = {
'state' : [koji.TASK_STATES[s] for s in ('FREE', 'OPEN', 'ASSIGNED')], 'state': [koji.TASK_STATES[s] for s in ('FREE', 'OPEN', 'ASSIGNED')],
'decode' : True, 'decode': True,
} }
if getattr(options, 'mine', False): if getattr(options, 'mine', False):
@ -622,7 +622,7 @@ def _list_tasks(options, session):
sys.exit(1) sys.exit(1)
callopts['host_id'] = host['id'] callopts['host_id'] = host['id']
qopts = {'order' : 'priority,create_time'} qopts = {'order': 'priority,create_time'}
tasklist = session.listTasks(callopts, qopts) tasklist = session.listTasks(callopts, qopts)
tasks = dict([(x['id'], x) for x in tasklist]) tasks = dict([(x['id'], x) for x in tasklist])
@ -631,7 +631,7 @@ def _list_tasks(options, session):
if t['parent'] is not None: if t['parent'] is not None:
parent = tasks.get(t['parent']) parent = tasks.get(t['parent'])
if parent: if parent:
parent.setdefault('children',[]) parent.setdefault('children', [])
parent['children'].append(t) parent['children'].append(t)
t['sub'] = True t['sub'] = True
@ -641,7 +641,7 @@ def _list_tasks(options, session):
def format_inheritance_flags(parent): def format_inheritance_flags(parent):
"""Return a human readable string of inheritance flags""" """Return a human readable string of inheritance flags"""
flags = '' flags = ''
for code,expr in ( for code, expr in (
('M', parent['maxdepth'] is not None), ('M', parent['maxdepth'] is not None),
('F', parent['pkg_filter']), ('F', parent['pkg_filter']),
('I', parent['intransitive']), ('I', parent['intransitive']),
@ -401,7 +401,7 @@ class Task(object):
if strict: if strict:
raise koji.GenericError("Task %d is not top-level (parent=%d)" % (task_id, parent)) raise koji.GenericError("Task %d is not top-level (parent=%d)" % (task_id, parent))
# otherwise, find the top-level task and go from there # otherwise, find the top-level task and go from there
seen = {task_id:1} seen = {task_id: 1}
while parent is not None: while parent is not None:
if parent in seen: if parent in seen:
raise koji.GenericError("Task LOOP at task %i" % task_id) raise koji.GenericError("Task LOOP at task %i" % task_id)
@ -891,7 +891,7 @@ def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdept
continue continue
else: else:
hist[id] = [] hist[id] = []
hist[id].append(link) #record history hist[id].append(link) # record history
order.append(link) order.append(link)
if link['intransitive'] and reverse: if link['intransitive'] and reverse:
# add link, but don't follow it # add link, but don't follow it
@ -910,20 +910,20 @@ def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdept
def _pkglist_remove(tag_id, pkg_id): def _pkglist_remove(tag_id, pkg_id):
clauses = ('package_id=%(pkg_id)i', 'tag_id=%(tag_id)i') clauses = ('package_id=%(pkg_id)i', 'tag_id=%(tag_id)i')
update = UpdateProcessor('tag_packages', values=locals(), clauses=clauses) update = UpdateProcessor('tag_packages', values=locals(), clauses=clauses)
update.make_revoke() #XXX user_id? update.make_revoke() # XXX user_id?
update.execute() update.execute()
def _pkglist_owner_remove(tag_id, pkg_id): def _pkglist_owner_remove(tag_id, pkg_id):
clauses = ('package_id=%(pkg_id)i', 'tag_id=%(tag_id)i') clauses = ('package_id=%(pkg_id)i', 'tag_id=%(tag_id)i')
update = UpdateProcessor('tag_package_owners', values=locals(), clauses=clauses) update = UpdateProcessor('tag_package_owners', values=locals(), clauses=clauses)
update.make_revoke() #XXX user_id? update.make_revoke() # XXX user_id?
update.execute() update.execute()
def _pkglist_owner_add(tag_id, pkg_id, owner): def _pkglist_owner_add(tag_id, pkg_id, owner):
_pkglist_owner_remove(tag_id, pkg_id) _pkglist_owner_remove(tag_id, pkg_id)
data = {'tag_id': tag_id, 'package_id': pkg_id, 'owner': owner} data = {'tag_id': tag_id, 'package_id': pkg_id, 'owner': owner}
insert = InsertProcessor('tag_package_owners', data=data) insert = InsertProcessor('tag_package_owners', data=data)
insert.make_create() #XXX user_id? insert.make_create() # XXX user_id?
insert.execute() insert.execute()
def _pkglist_add(tag_id, pkg_id, owner, block, extra_arches): def _pkglist_add(tag_id, pkg_id, owner, block, extra_arches):
@ -936,7 +936,7 @@ def _pkglist_add(tag_id, pkg_id, owner, block, extra_arches):
'extra_arches': koji.parse_arches(extra_arches, strict=True, allow_none=True) 'extra_arches': koji.parse_arches(extra_arches, strict=True, allow_none=True)
} }
insert = InsertProcessor('tag_packages', data=data) insert = InsertProcessor('tag_packages', data=data)
insert.make_create() #XXX user_id? insert.make_create() # XXX user_id?
insert.execute() insert.execute()
_pkglist_owner_add(tag_id, pkg_id, owner) _pkglist_owner_add(tag_id, pkg_id, owner)
@ -966,7 +966,7 @@ def _direct_pkglist_add(taginfo, pkginfo, owner, block, extra_arches, force,
action = 'block' action = 'block'
if policy: if policy:
context.session.assertLogin() context.session.assertLogin()
policy_data = {'tag' : tag_id, 'action' : action, 'package' : pkginfo, 'force' : force} policy_data = {'tag': tag_id, 'action': action, 'package': pkginfo, 'force': force}
# don't check policy for admins using force # don't check policy for admins using force
if not (force and context.session.hasPerm('admin')): if not (force and context.session.hasPerm('admin')):
assert_policy('package_list', policy_data) assert_policy('package_list', policy_data)
@ -1045,7 +1045,7 @@ def _direct_pkglist_remove(taginfo, pkginfo, force=False, policy=False):
pkg = lookup_package(pkginfo, strict=True) pkg = lookup_package(pkginfo, strict=True)
if policy: if policy:
context.session.assertLogin() context.session.assertLogin()
policy_data = {'tag' : tag['id'], 'action' : 'remove', 'package' : pkg['id'], 'force' : force} policy_data = {'tag': tag['id'], 'action': 'remove', 'package': pkg['id'], 'force': force}
# don't check policy for admins using force # don't check policy for admins using force
if not (force and context.session.hasPerm('admin')): if not (force and context.session.hasPerm('admin')):
assert_policy('package_list', policy_data) assert_policy('package_list', policy_data)
@ -1074,7 +1074,7 @@ def pkglist_unblock(taginfo, pkginfo, force=False):
tag = get_tag(taginfo, strict=True) tag = get_tag(taginfo, strict=True)
pkg = lookup_package(pkginfo, strict=True) pkg = lookup_package(pkginfo, strict=True)
context.session.assertLogin() context.session.assertLogin()
policy_data = {'tag' : tag['id'], 'action' : 'unblock', 'package' : pkg['id'], 'force' : force} policy_data = {'tag': tag['id'], 'action': 'unblock', 'package': pkg['id'], 'force': force}
# don't check policy for admins using force # don't check policy for admins using force
if not (force and context.session.hasPerm('admin')): if not (force and context.session.hasPerm('admin')):
assert_policy('package_list', policy_data) assert_policy('package_list', policy_data)
@ -1411,7 +1411,7 @@ def readTaggedRPMS(tag, package=None, arch=None, event=None, inherit=False, late
tables = ['rpminfo'] tables = ['rpminfo']
joins = ['tag_listing ON rpminfo.build_id = tag_listing.build_id'] joins = ['tag_listing ON rpminfo.build_id = tag_listing.build_id']
clauses = [eventCondition(event, 'tag_listing'), 'tag_id=%(tagid)s'] clauses = [eventCondition(event, 'tag_listing'), 'tag_id=%(tagid)s']
data = {} #tagid added later data = {} # tagid added later
if package: if package:
joins.append('build ON rpminfo.build_id = build.id') joins.append('build ON rpminfo.build_id = build.id')
joins.append('package ON package.id = build.pkg_id') joins.append('package ON package.id = build.pkg_id')
@ -1629,8 +1629,8 @@ def _direct_tag_build(tag, build, user, force=False):
table = 'tag_listing' table = 'tag_listing'
clauses = ('tag_id=%(tag_id)i', 'build_id=%(build_id)i') clauses = ('tag_id=%(tag_id)i', 'build_id=%(build_id)i')
query = QueryProcessor(columns=['build_id'], tables=[table], query = QueryProcessor(columns=['build_id'], tables=[table],
clauses=('active = TRUE',)+clauses, clauses=('active = TRUE',) + clauses,
values=locals(), opts={'rowlock':True}) values=locals(), opts={'rowlock': True})
# note: tag_listing is unique on (build_id, tag_id, active) # note: tag_listing is unique on (build_id, tag_id, active)
if query.executeOne(): if query.executeOne():
# already tagged # already tagged
@ -1814,8 +1814,8 @@ def _grplist_unblock(taginfo, grpinfo):
table = 'group_config' table = 'group_config'
clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s') clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s')
query = QueryProcessor(columns=['blocked'], tables=[table], query = QueryProcessor(columns=['blocked'], tables=[table],
clauses=('active = TRUE',)+clauses, clauses=('active = TRUE',) + clauses,
values=locals(), opts={'rowlock':True}) values=locals(), opts={'rowlock': True})
blocked = query.singleValue(strict=False) blocked = query.singleValue(strict=False)
if not blocked: if not blocked:
raise koji.GenericError("group %s is NOT blocked in tag %s" % (group['name'], tag['name'])) raise koji.GenericError("group %s is NOT blocked in tag %s" % (group['name'], tag['name']))
@ -1936,8 +1936,8 @@ def _grp_pkg_unblock(taginfo, grpinfo, pkg_name):
grp_id = get_group_id(grpinfo, strict=True) grp_id = get_group_id(grpinfo, strict=True)
clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'package = %(pkg_name)s') clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'package = %(pkg_name)s')
query = QueryProcessor(columns=['blocked'], tables=[table], query = QueryProcessor(columns=['blocked'], tables=[table],
clauses=('active = TRUE',)+clauses, clauses=('active = TRUE',) + clauses,
values=locals(), opts={'rowlock':True}) values=locals(), opts={'rowlock': True})
blocked = query.singleValue(strict=False) blocked = query.singleValue(strict=False)
if not blocked: if not blocked:
raise koji.GenericError("package %s is NOT blocked in group %s, tag %s" \ raise koji.GenericError("package %s is NOT blocked in group %s, tag %s" \
@ -2063,8 +2063,8 @@ def _grp_req_unblock(taginfo, grpinfo, reqinfo):
clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'req_id = %(req_id)s') clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'req_id = %(req_id)s')
query = QueryProcessor(columns=['blocked'], tables=[table], query = QueryProcessor(columns=['blocked'], tables=[table],
clauses=('active = TRUE',)+clauses, clauses=('active = TRUE',) + clauses,
values=locals(), opts={'rowlock':True}) values=locals(), opts={'rowlock': True})
blocked = query.singleValue(strict=False) blocked = query.singleValue(strict=False)
if not blocked: if not blocked:
raise koji.GenericError("group req %s is NOT blocked in group %s, tag %s" \ raise koji.GenericError("group req %s is NOT blocked in group %s, tag %s" \
@ -2274,12 +2274,12 @@ def remove_channel(channel_name, force=False):
channel_id = get_channel_id(channel_name, strict=True) channel_id = get_channel_id(channel_name, strict=True)
# check for task references # check for task references
query = QueryProcessor(tables=['task'], clauses=['channel_id=%(channel_id)i'], query = QueryProcessor(tables=['task'], clauses=['channel_id=%(channel_id)i'],
values=locals(), columns=['id'], opts={'limit':1}) values=locals(), columns=['id'], opts={'limit': 1})
# XXX slow query # XXX slow query
if query.execute(): if query.execute():
raise koji.GenericError('channel %s has task references' % channel_name) raise koji.GenericError('channel %s has task references' % channel_name)
query = QueryProcessor(tables=['host_channels'], clauses=['channel_id=%(channel_id)i'], query = QueryProcessor(tables=['host_channels'], clauses=['channel_id=%(channel_id)i'],
values=locals(), columns=['host_id'], opts={'limit':1}) values=locals(), columns=['host_id'], opts={'limit': 1})
if query.execute(): if query.execute():
if not force: if not force:
raise koji.GenericError('channel %s has host references' % channel_name) raise koji.GenericError('channel %s has host references' % channel_name)
@ -2344,7 +2344,7 @@ AND channel_id IN %(channels)s)'''
clauses = [clause] clauses = [clause]
else: else:
clauses = ['state IN (%(FREE)i,%(ASSIGNED)i)'] clauses = ['state IN (%(FREE)i,%(ASSIGNED)i)']
queryOpts = {'limit' : 100, 'order' : 'priority,create_time'} queryOpts = {'limit': 100, 'order': 'priority,create_time'}
query = QueryProcessor(columns=fields, tables=['task'], clauses=clauses, query = QueryProcessor(columns=fields, tables=['task'], clauses=clauses,
values=values, opts=queryOpts) values=values, opts=queryOpts)
return query.execute() return query.execute()
@ -2496,7 +2496,7 @@ def repo_init(tag, with_src=False, with_debuginfo=False, event=None, with_separa
blocks = [pkg for pkg in readPackageList(tag_id, event=event_id, inherit=True).values() \ blocks = [pkg for pkg in readPackageList(tag_id, event=event_id, inherit=True).values() \
if pkg['blocked']] if pkg['blocked']]
repodir = koji.pathinfo.repo(repo_id, tinfo['name']) repodir = koji.pathinfo.repo(repo_id, tinfo['name'])
os.makedirs(repodir) #should not already exist os.makedirs(repodir) # should not already exist
# generate comps and groups.spec # generate comps and groups.spec
groupsdir = "%s/groups" % (repodir) groupsdir = "%s/groups" % (repodir)
@ -2710,7 +2710,7 @@ def repo_info(repo_id, strict=False):
('repo.id', 'id'), ('repo.id', 'id'),
('repo.state', 'state'), ('repo.state', 'state'),
('repo.create_event', 'create_event'), ('repo.create_event', 'create_event'),
('events.time', 'creation_time'), #for compatibility with getRepo ('events.time', 'creation_time'), # for compatibility with getRepo
('EXTRACT(EPOCH FROM events.time)', 'create_ts'), ('EXTRACT(EPOCH FROM events.time)', 'create_ts'),
('repo.tag_id', 'tag_id'), ('repo.tag_id', 'tag_id'),
('tag.name', 'tag_name'), ('tag.name', 'tag_name'),
@ -2753,9 +2753,9 @@ def repo_expire_older(tag_id, event_id, dist=None):
If dist is not None, then only expire repos with the given dist value If dist is not None, then only expire repos with the given dist value
""" """
st_ready = koji.REPO_READY st_ready = koji.REPO_READY
clauses=['tag_id = %(tag_id)s', clauses = ['tag_id = %(tag_id)s',
'create_event < %(event_id)s', 'create_event < %(event_id)s',
'state = %(st_ready)s'] 'state = %(st_ready)s']
if dist is not None: if dist is not None:
dist = bool(dist) dist = bool(dist)
clauses.append('dist = %(dist)s') clauses.append('dist = %(dist)s')
@ -3406,7 +3406,7 @@ def _delete_tag(tagInfo):
def _tagDelete(tableName, value, columnName='tag_id'): def _tagDelete(tableName, value, columnName='tag_id'):
update = UpdateProcessor(tableName, clauses=["%s = %%(value)i" % columnName], update = UpdateProcessor(tableName, clauses=["%s = %%(value)i" % columnName],
values={'value':value}) values={'value': value})
update.make_revoke() update.make_revoke()
update.execute() update.execute()
@ -4147,8 +4147,8 @@ def get_rpm(rpminfo, strict=False, multi=False):
clauses.append("""external_repo_id = %(external_repo_id)i""") clauses.append("""external_repo_id = %(external_repo_id)i""")
elif not multi: elif not multi:
# try to match internal first, otherwise first matching external # try to match internal first, otherwise first matching external
retry = True #if no internal match retry = True # if no internal match
orig_clauses = list(clauses) #copy orig_clauses = list(clauses) # copy
clauses.append("""external_repo_id = 0""") clauses.append("""external_repo_id = 0""")
joins = ['external_repo ON rpminfo.external_repo_id = external_repo.id'] joins = ['external_repo ON rpminfo.external_repo_id = external_repo.id']
@ -4339,7 +4339,7 @@ def get_build_type(buildInfo, strict=False):
joins=['build_types ON btype_id=btype.id'], joins=['build_types ON btype_id=btype.id'],
clauses=['build_id = %(id)i'], clauses=['build_id = %(id)i'],
values=binfo, values=binfo,
opts={'asList':True}, opts={'asList': True},
) )
ret = {} ret = {}
@ -5098,7 +5098,7 @@ def query_buildroots(hostID=None, tagID=None, state=None, rpmID=None, archiveID=
('content_generator.name', 'cg_name'), ('content_generator.name', 'cg_name'),
('buildroot.cg_version', 'cg_version'), ('buildroot.cg_version', 'cg_version'),
('buildroot.container_arch', 'container_arch'), ('buildroot.container_arch', 'container_arch'),
('buildroot.container_arch', 'arch'), #alias for back compat ('buildroot.container_arch', 'arch'), # alias for back compat
('buildroot.container_type', 'container_type'), ('buildroot.container_type', 'container_type'),
('buildroot.host_os', 'host_os'), ('buildroot.host_os', 'host_os'),
('buildroot.host_arch', 'host_arch'), ('buildroot.host_arch', 'host_arch'),
@ -5231,7 +5231,7 @@ def remove_volume(volume):
context.session.assertPerm('admin') context.session.assertPerm('admin')
volinfo = lookup_name('volume', volume, strict=True) volinfo = lookup_name('volume', volume, strict=True)
query = QueryProcessor(tables=['build'], clauses=['volume_id=%(id)i'], query = QueryProcessor(tables=['build'], clauses=['volume_id=%(id)i'],
values=volinfo, columns=['id'], opts={'limit':1}) values=volinfo, columns=['id'], opts={'limit': 1})
if query.execute(): if query.execute():
raise koji.GenericError('volume %(name)s has build references' % volinfo) raise koji.GenericError('volume %(name)s has build references' % volinfo)
delete = """DELETE FROM volume WHERE id=%(id)i""" delete = """DELETE FROM volume WHERE id=%(id)i"""
@ -5510,19 +5510,19 @@ def recycle_build(old, data):
# check for evidence of tag activity # check for evidence of tag activity
query = QueryProcessor(columns=['tag_id'], tables=['tag_listing'], query = QueryProcessor(columns=['tag_id'], tables=['tag_listing'],
clauses = ['build_id = %(id)s'], values=old) clauses=['build_id = %(id)s'], values=old)
if query.execute(): if query.execute():
raise koji.GenericError("Build already exists. Unable to recycle, " raise koji.GenericError("Build already exists. Unable to recycle, "
"has tag history") "has tag history")
# check for rpms or archives # check for rpms or archives
query = QueryProcessor(columns=['id'], tables=['rpminfo'], query = QueryProcessor(columns=['id'], tables=['rpminfo'],
clauses = ['build_id = %(id)s'], values=old) clauses=['build_id = %(id)s'], values=old)
if query.execute(): if query.execute():
raise koji.GenericError("Build already exists. Unable to recycle, " raise koji.GenericError("Build already exists. Unable to recycle, "
"has rpm data") "has rpm data")
query = QueryProcessor(columns=['id'], tables=['archiveinfo'], query = QueryProcessor(columns=['id'], tables=['archiveinfo'],
clauses = ['build_id = %(id)s'], values=old) clauses=['build_id = %(id)s'], values=old)
if query.execute(): if query.execute():
raise koji.GenericError("Build already exists. Unable to recycle, " raise koji.GenericError("Build already exists. Unable to recycle, "
"has archive data") "has archive data")
@ -6760,7 +6760,7 @@ def get_archive_type(filename=None, type_name=None, type_id=None, strict=False):
columns=['id', 'name', 'description', 'extensions'], columns=['id', 'name', 'description', 'extensions'],
clauses=['extensions ~* %(pattern)s'], clauses=['extensions ~* %(pattern)s'],
) )
for start in range(len(parts)-1, -1, -1): for start in range(len(parts) - 1, -1, -1):
ext = '.'.join(parts[start:]) ext = '.'.join(parts[start:])
query.values['pattern'] = r'(\s|^)%s(\s|$)' % ext query.values['pattern'] = r'(\s|^)%s(\s|$)' % ext
results = query.execute() results = query.execute()
@ -6874,7 +6874,7 @@ def new_image_build(build_info):
def new_typed_build(build_info, btype): def new_typed_build(build_info, btype):
"""Mark build as a given btype""" """Mark build as a given btype"""
btype_id=lookup_name('btype', btype, strict=True)['id'] btype_id = lookup_name('btype', btype, strict=True)['id']
query = QueryProcessor(tables=('build_types',), columns=('build_id',), query = QueryProcessor(tables=('build_types',), columns=('build_id',),
clauses=('build_id = %(build_id)i', clauses=('build_id = %(build_id)i',
'btype_id = %(btype_id)i',), 'btype_id = %(btype_id)i',),
@ -7170,7 +7170,7 @@ def _scan_sighdr(sighdr, fn):
inp.close() inp.close()
outp.seek(0, 0) outp.seek(0, 0)
ts = rpm.TransactionSet() ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS) ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)
# (we have no payload, so verifies would fail otherwise) # (we have no payload, so verifies would fail otherwise)
hdr = ts.hdrFromFdno(outp.fileno()) hdr = ts.hdrFromFdno(outp.fileno())
outp.close() outp.close()
@ -7194,7 +7194,7 @@ def check_rpm_sig(an_rpm, sigkey, sighdr):
try: try:
koji.splice_rpm_sighdr(sighdr, rpm_path, temp) koji.splice_rpm_sighdr(sighdr, rpm_path, temp)
ts = rpm.TransactionSet() ts = rpm.TransactionSet()
ts.setVSFlags(0) #full verify ts.setVSFlags(0) # full verify
with open(temp, 'rb') as fo: with open(temp, 'rb') as fo:
hdr = ts.hdrFromFdno(fo.fileno()) hdr = ts.hdrFromFdno(fo.fileno())
except: except:
@ -7393,7 +7393,7 @@ def query_history(tables=None, **kwargs):
elif field == 'build_id': elif field == 'build_id':
# special case # special case
fields.update({ fields.update({
'package.name': 'name', #XXX? 'package.name': 'name', # XXX?
'build.version': 'version', 'build.version': 'version',
'build.release': 'release', 'build.release': 'release',
'build.epoch': 'epoch', 'build.epoch': 'epoch',
@ -8165,12 +8165,12 @@ def add_group_member(group, user, strict=True):
if uinfo['usertype'] == koji.USERTYPES['GROUP']: if uinfo['usertype'] == koji.USERTYPES['GROUP']:
raise koji.GenericError("Groups cannot be members of other groups") raise koji.GenericError("Groups cannot be members of other groups")
# check to see if user is already a member # check to see if user is already a member
data = {'user_id' : uinfo['id'], 'group_id' : ginfo['id']} data = {'user_id': uinfo['id'], 'group_id': ginfo['id']}
table = 'user_groups' table = 'user_groups'
clauses = ('user_id = %(user_id)i', 'group_id = %(group_id)s') clauses = ('user_id = %(user_id)i', 'group_id = %(group_id)s')
query = QueryProcessor(columns=['user_id'], tables=[table], query = QueryProcessor(columns=['user_id'], tables=[table],
clauses=('active = TRUE',)+clauses, clauses=('active = TRUE',) + clauses,
values=data, opts={'rowlock':True}) values=data, opts={'rowlock': True})
row = query.executeOne() row = query.executeOne()
if row: if row:
if not strict: if not strict:
@ -8189,7 +8189,7 @@ def drop_group_member(group, user):
raise koji.GenericError("No such group: %s" % group) raise koji.GenericError("No such group: %s" % group)
if user['id'] not in [u['id'] for u in get_group_members(group)]: if user['id'] not in [u['id'] for u in get_group_members(group)]:
raise koji.GenericError("No such user in group: %s" % group) raise koji.GenericError("No such user in group: %s" % group)
data = {'user_id' : user['id'], 'group_id' : ginfo['id']} data = {'user_id': user['id'], 'group_id': ginfo['id']}
clauses = ["user_id = %(user_id)i", "group_id = %(group_id)i"] clauses = ["user_id = %(user_id)i", "group_id = %(group_id)i"]
update = UpdateProcessor('user_groups', values=data, clauses=clauses) update = UpdateProcessor('user_groups', values=data, clauses=clauses)
update.make_revoke() update.make_revoke()
@ -8305,7 +8305,7 @@ def revoke_cg_access(user, cg):
context.session.assertPerm('admin') context.session.assertPerm('admin')
user = get_user(user, strict=True) user = get_user(user, strict=True)
cg = lookup_name('content_generator', cg, strict=True) cg = lookup_name('content_generator', cg, strict=True)
data = {'user_id': user['id'], 'cg_id' : cg['id']} data = {'user_id': user['id'], 'cg_id': cg['id']}
update = UpdateProcessor('cg_users', values=data, update = UpdateProcessor('cg_users', values=data,
clauses=["user_id = %(user_id)i", "cg_id = %(cg_id)i"]) clauses=["user_id = %(user_id)i", "cg_id = %(cg_id)i"])
update.make_revoke() update.make_revoke()
@ -8320,7 +8320,7 @@ def assert_cg(cg, user=None):
user = context.session.user_id user = context.session.user_id
user = get_user(user, strict=True) user = get_user(user, strict=True)
clauses = ['active = TRUE', 'user_id = %(user_id)s', 'cg_id = %(cg_id)s'] clauses = ['active = TRUE', 'user_id = %(user_id)s', 'cg_id = %(cg_id)s']
data = {'user_id' : user['id'], 'cg_id' : cg['id']} data = {'user_id': user['id'], 'cg_id': cg['id']}
query = QueryProcessor(tables=['cg_users'], columns=['cg_id'], clauses=clauses, values=data) query = QueryProcessor(tables=['cg_users'], columns=['cg_id'], clauses=clauses, values=data)
if not query.execute(): if not query.execute():
raise koji.AuthError("Content generator access required (%s)" % cg['name']) raise koji.AuthError("Content generator access required (%s)" % cg['name'])
@ -8461,7 +8461,7 @@ class BulkInsertProcessor(object):
self._one_insert(self.data) self._one_insert(self.data)
else: else:
for i in range(0, len(self.data), self.batch): for i in range(0, len(self.data), self.batch):
data = self.data[i:i+self.batch] data = self.data[i:i + self.batch]
self._one_insert(data) self._one_insert(data)
def _one_insert(self, data): def _one_insert(self, data):
@ -8592,7 +8592,7 @@ class UpdateProcessor(object):
ret = {} ret = {}
ret.update(self.values) ret.update(self.values)
for key in self.data: for key in self.data:
ret["data."+key] = self.data[key] ret["data." + key] = self.data[key]
return ret return ret
def set(self, **kwargs): def set(self, **kwargs):
@ -8956,13 +8956,13 @@ def policy_get_pkg(data):
# for some operations (e.g. adding a new package), the package # for some operations (e.g. adding a new package), the package
# entry may not exist yet # entry may not exist yet
if isinstance(data['package'], six.string_types): if isinstance(data['package'], six.string_types):
return {'id' : None, 'name' : data['package']} return {'id': None, 'name': data['package']}
else: else:
raise koji.GenericError("Invalid package: %s" % data['package']) raise koji.GenericError("Invalid package: %s" % data['package'])
return pkginfo return pkginfo
if 'build' in data: if 'build' in data:
binfo = get_build(data['build'], strict=True) binfo = get_build(data['build'], strict=True)
return {'id' : binfo['package_id'], 'name' : binfo['name']} return {'id': binfo['package_id'], 'name': binfo['name']}
# else # else
raise koji.GenericError("policy requires package data") raise koji.GenericError("policy requires package data")
@ -9981,7 +9981,7 @@ class RootExports(object):
try: try:
if offset == 0 or (offset == -1 and size == len(contents)): if offset == 0 or (offset == -1 and size == len(contents)):
# truncate file # truncate file
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB) fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
try: try:
os.ftruncate(fd, 0) os.ftruncate(fd, 0)
# log_error("truncating fd %r to 0" %fd) # log_error("truncating fd %r to 0" %fd)
@ -9992,7 +9992,7 @@ class RootExports(object):
else: else:
os.lseek(fd, offset, 0) os.lseek(fd, offset, 0)
# write contents # write contents
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB, len(contents), 0, 2) fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, len(contents), 0, 2)
try: try:
os.write(fd, contents) os.write(fd, contents)
# log_error("wrote contents") # log_error("wrote contents")
@ -10001,7 +10001,7 @@ class RootExports(object):
if offset == -1: if offset == -1:
if size is not None: if size is not None:
# truncate file # truncate file
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB) fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
try: try:
os.ftruncate(fd, size) os.ftruncate(fd, size)
# log_error("truncating fd %r to size %r" % (fd,size)) # log_error("truncating fd %r to size %r" % (fd,size))
@ -10010,7 +10010,7 @@ class RootExports(object):
if verify is not None: if verify is not None:
# check final digest # check final digest
chksum = sum_cls() chksum = sum_cls()
fcntl.lockf(fd, fcntl.LOCK_SH|fcntl.LOCK_NB) fcntl.lockf(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)
try: try:
os.lseek(fd, 0, 0) os.lseek(fd, 0, 0)
while True: while True:
@ -10038,7 +10038,7 @@ class RootExports(object):
raise raise
try: try:
try: try:
fcntl.lockf(fd, fcntl.LOCK_SH|fcntl.LOCK_NB) fcntl.lockf(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)
except IOError as e: except IOError as e:
raise koji.LockError(e) raise koji.LockError(e)
st = os.fstat(fd) st = os.fstat(fd)
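The truncate, write, and verify branches above all take the same non-blocking lock (exclusive for writes, shared for reads) before touching the descriptor. A minimal standalone sketch of that pattern, with a made-up path and payload:

import fcntl
import os

fd = os.open("/tmp/upload.bin", os.O_RDWR | os.O_CREAT, 0o666)
try:
    # LOCK_NB makes lockf raise IOError instead of blocking on contention
    fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as e:
    raise RuntimeError("file is locked elsewhere: %s" % e)
os.write(fd, b"chunk")
os.close(fd)  # closing the descriptor releases the lock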
@ -10188,8 +10188,8 @@ class RootExports(object):
def createEmptyBuild(self, name, version, release, epoch, owner=None): def createEmptyBuild(self, name, version, release, epoch, owner=None):
context.session.assertPerm('admin') context.session.assertPerm('admin')
data = {'name' : name, 'version' : version, 'release' : release, data = {'name': name, 'version': version, 'release': release,
'epoch' : epoch} 'epoch': epoch}
if owner is not None: if owner is not None:
data['owner'] = owner data['owner'] = owner
return new_build(data) return new_build(data)
@ -10243,7 +10243,7 @@ class RootExports(object):
""" """
context.session.assertPerm('admin') context.session.assertPerm('admin')
uploadpath = koji.pathinfo.work() uploadpath = koji.pathinfo.work()
fn = "%s/%s/%s" %(uploadpath, path, basename) fn = "%s/%s/%s" % (uploadpath, path, basename)
if not os.path.exists(fn): if not os.path.exists(fn):
raise koji.GenericError("No such file: %s" % fn) raise koji.GenericError("No such file: %s" % fn)
rpminfo = import_rpm(fn) rpminfo = import_rpm(fn)
@ -10306,7 +10306,7 @@ class RootExports(object):
context.session.assertPerm('tag') context.session.assertPerm('tag')
tag_id = get_tag(tag, strict=True)['id'] tag_id = get_tag(tag, strict=True)['id']
build_id = get_build(build, strict=True)['id'] build_id = get_build(build, strict=True)['id']
policy_data = {'tag' : tag_id, 'build' : build_id, 'fromtag' : None, 'operation' : 'tag'} policy_data = {'tag': tag_id, 'build': build_id, 'fromtag': None, 'operation': 'tag'}
assert_policy('tag', policy_data) assert_policy('tag', policy_data)
_tag_build(tag, build, force=force) _tag_build(tag, build, force=force)
if notify: if notify:
@ -10362,7 +10362,7 @@ class RootExports(object):
else: else:
raise koji.TagError(pkg_error) raise koji.TagError(pkg_error)
# tag policy check # tag policy check
policy_data = {'tag' : tag_id, 'build' : build_id, 'fromtag' : fromtag_id} policy_data = {'tag': tag_id, 'build': build_id, 'fromtag': fromtag_id}
if fromtag is None: if fromtag is None:
policy_data['operation'] = 'tag' policy_data['operation'] = 'tag'
else: else:
@ -10384,7 +10384,7 @@ class RootExports(object):
user_id = context.session.user_id user_id = context.session.user_id
tag_id = get_tag(tag, strict=True)['id'] tag_id = get_tag(tag, strict=True)['id']
build_id = get_build(build, strict=True)['id'] build_id = get_build(build, strict=True)['id']
policy_data = {'tag' : None, 'build' : build_id, 'fromtag' : tag_id} policy_data = {'tag': None, 'build': build_id, 'fromtag': tag_id}
policy_data['operation'] = 'untag' policy_data['operation'] = 'untag'
try: try:
# don't check policy for admins using force # don't check policy for admins using force
@ -10410,7 +10410,7 @@ class RootExports(object):
context.session.assertPerm('tag') context.session.assertPerm('tag')
tag_id = get_tag(tag, strict=True)['id'] tag_id = get_tag(tag, strict=True)['id']
build_id = get_build(build, strict=True)['id'] build_id = get_build(build, strict=True)['id']
policy_data = {'tag' : None, 'build' : build_id, 'fromtag' : tag_id, 'operation' : 'untag'} policy_data = {'tag': None, 'build': build_id, 'fromtag': tag_id, 'operation': 'untag'}
assert_policy('tag', policy_data) assert_policy('tag', policy_data)
_untag_build(tag, build, strict=strict, force=force) _untag_build(tag, build, strict=strict, force=force)
if notify: if notify:
@ -10457,7 +10457,7 @@ class RootExports(object):
build_list.reverse() build_list.reverse()
# policy check # policy check
policy_data = {'tag' : tag2, 'fromtag' : tag1, 'operation' : 'move'} policy_data = {'tag': tag2, 'fromtag': tag1, 'operation': 'move'}
# don't check policy for admins using force # don't check policy for admins using force
if not (force and context.session.hasPerm('admin')): if not (force and context.session.hasPerm('admin')):
for build in build_list: for build in build_list:
@ -10468,7 +10468,7 @@ class RootExports(object):
wait_on = [] wait_on = []
tasklist = [] tasklist = []
for build in build_list: for build in build_list:
task_id = make_task('dependantTask', [wait_on, [['tagBuild', [tag2_id, build['id'], force, tag1_id], {'priority':15}]]]) task_id = make_task('dependantTask', [wait_on, [['tagBuild', [tag2_id, build['id'], force, tag1_id], {'priority': 15}]]])
wait_on = [task_id] wait_on = [task_id]
log_error("\nMade Task: %s\n" % task_id) log_error("\nMade Task: %s\n" % task_id)
tasklist.append(task_id) tasklist.append(task_id)
@ -12161,10 +12161,10 @@ class RootExports(object):
raise GenericError, else return None. raise GenericError, else return None.
""" """
query = QueryProcessor(tables=['build_notifications'], query = QueryProcessor(tables=['build_notifications'],
columns = ('id', 'user_id', 'package_id', 'tag_id', columns=('id', 'user_id', 'package_id', 'tag_id',
'success_only', 'email'), 'success_only', 'email'),
clauses = ['id = %(id)i'], clauses=['id = %(id)i'],
values = locals()) values=locals())
result = query.executeOne() result = query.executeOne()
if strict and not result: if strict and not result:
raise koji.GenericError("No notification with ID %i found" % id) raise koji.GenericError("No notification with ID %i found" % id)
@ -12184,9 +12184,9 @@ class RootExports(object):
raise GenericError, else return None. raise GenericError, else return None.
""" """
query = QueryProcessor(tables=['build_notifications_block'], query = QueryProcessor(tables=['build_notifications_block'],
columns = ('id', 'user_id', 'package_id', 'tag_id'), columns=('id', 'user_id', 'package_id', 'tag_id'),
clauses = ['id = %(id)i'], clauses=['id = %(id)i'],
values = locals()) values=locals())
result = query.executeOne() result = query.executeOne()
if strict and not result: if strict and not result:
raise koji.GenericError("No notification block with ID %i found" % id) raise koji.GenericError("No notification block with ID %i found" % id)
@ -12554,7 +12554,7 @@ class BuildRoot(object):
raise koji.GenericError("Cannot change buildroot state to INIT") raise koji.GenericError("Cannot change buildroot state to INIT")
query = QueryProcessor(columns=['state', 'retire_event'], values=self.data, query = QueryProcessor(columns=['state', 'retire_event'], values=self.data,
tables=['standard_buildroot'], clauses=['buildroot_id=%(id)s'], tables=['standard_buildroot'], clauses=['buildroot_id=%(id)s'],
opts={'rowlock':True}) opts={'rowlock': True})
row = query.executeOne() row = query.executeOne()
if not row: if not row:
raise koji.GenericError("Unable to get state for buildroot %s" % self.id) raise koji.GenericError("Unable to get state for buildroot %s" % self.id)
@ -12805,7 +12805,7 @@ class Host(object):
if tasks is None: if tasks is None:
# Query all finished subtasks # Query all finished subtasks
states = tuple([koji.TASK_STATES[s] states = tuple([koji.TASK_STATES[s]
for s in ['CLOSED', 'FAILED','CANCELED']]) for s in ['CLOSED', 'FAILED', 'CANCELED']])
query = QueryProcessor(tables=['task'], columns=['id'], query = QueryProcessor(tables=['task'], columns=['id'],
clauses=['parent=%(parent)s', 'state in %(states)s'], clauses=['parent=%(parent)s', 'state in %(states)s'],
values=locals(), opts={'asList': True}) values=locals(), opts={'asList': True})
@ -12838,7 +12838,7 @@ class Host(object):
q = """ q = """
SELECT %s FROM task SELECT %s FROM task
WHERE host_id = %%(host_id)s AND state = %%(st_open)s WHERE host_id = %%(host_id)s AND state = %%(st_open)s
""" % (",".join(fields)) """ % (",".join(fields))
c.execute(q, locals()) c.execute(q, locals())
tasks = [dict(zip(fields, x)) for x in c.fetchall()] tasks = [dict(zip(fields, x)) for x in c.fetchall()]
for task in tasks: for task in tasks:
@ -12899,7 +12899,7 @@ class Host(object):
WHERE (state = %%(st_free)s) WHERE (state = %%(st_free)s)
OR (state = %%(st_assigned)s AND host_id = %%(id)s) OR (state = %%(st_assigned)s AND host_id = %%(id)s)
ORDER BY priority,create_time ORDER BY priority,create_time
""" % (",".join(fields)) """ % (",".join(fields))
c.execute(q, locals()) c.execute(q, locals())
for data in c.fetchall(): for data in c.fetchall():
data = dict(zip(fields, data)) data = dict(zip(fields, data))
@ -13568,7 +13568,7 @@ class HostExports(object):
pkg_id = build['package_id'] pkg_id = build['package_id']
tag_id = get_tag(tag, strict=True)['id'] tag_id = get_tag(tag, strict=True)['id']
user_id = task.getOwner() user_id = task.getOwner()
policy_data = {'tag' : tag, 'build' : build, 'fromtag' : fromtag} policy_data = {'tag': tag, 'build': build, 'fromtag': fromtag}
policy_data['user_id'] = user_id policy_data['user_id'] = user_id
if fromtag is None: if fromtag is None:
policy_data['operation'] = 'tag' policy_data['operation'] = 'tag'
@ -14090,7 +14090,7 @@ def handle_upload(environ):
fd = os.open(fn, os.O_RDWR | os.O_CREAT, 0o666) fd = os.open(fn, os.O_RDWR | os.O_CREAT, 0o666)
try: try:
try: try:
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB) fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as e: except IOError as e:
raise koji.LockError(e) raise koji.LockError(e)
if offset == -1: if offset == -1:
@ -14120,5 +14120,5 @@ def handle_upload(environ):
logger.debug("Upload result: %r", ret) logger.debug("Upload result: %r", ret)
logger.info("Completed upload for session %s (#%s): %f seconds, %i bytes, %s", logger.info("Completed upload for session %s (#%s): %f seconds, %i bytes, %s",
context.session.id, context.session.callnum, context.session.id, context.session.callnum,
time.time()-start, size, fn) time.time() - start, size, fn)
return ret return ret
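The hunks above and below repeat a small set of mechanical E2 fixes. A before/after sketch of the checks involved (illustrative code, not taken from the diff):

import os

start, end = 3, 10

# before: E225 ('='), E231 (',' and ':'), E226/E227 ('-' and '|'), E262 ('#...')
opts={'a':1,'b':2}
span = end-start            #length
flags = os.O_RDWR|os.O_CREAT

# after: the same statements with E2-clean spacing
opts = {'a': 1, 'b': 2}
span = end - start  # length
flags = os.O_RDWR | os.O_CREAT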
@ -93,7 +93,7 @@ class HandlerRegistry(object):
if not callable(function): if not callable(function):
continue continue
if prefix is not None: if prefix is not None:
name = "%s.%s" %(prefix, name) name = "%s.%s" % (prefix, name)
self.register_function(function, name=name) self.register_function(function, name=name)
def register_instance(self, instance): def register_instance(self, instance):
@ -128,7 +128,7 @@ class HandlerRegistry(object):
# bound method, remove first arg # bound method, remove first arg
args, varargs, varkw, defaults = ret args, varargs, varkw, defaults = ret
if args: if args:
aname = args[0] #generally "self" aname = args[0] # generally "self"
del args[0] del args[0]
if defaults and aname in defaults: if defaults and aname in defaults:
# shouldn't happen, but... # shouldn't happen, but...
@ -202,7 +202,7 @@ class ModXMLRPCRequestHandler(object):
def __init__(self, handlers): def __init__(self, handlers):
self.traceback = False self.traceback = False
self.handlers = handlers #expecting HandlerRegistry instance self.handlers = handlers # expecting HandlerRegistry instance
self.logger = logging.getLogger('koji.xmlrpc') self.logger = logging.getLogger('koji.xmlrpc')
def _get_handler(self, name): def _get_handler(self, name):
@ -319,7 +319,7 @@ class ModXMLRPCRequestHandler(object):
rusage = resource.getrusage(resource.RUSAGE_SELF) rusage = resource.getrusage(resource.RUSAGE_SELF)
self.logger.info("Completed method %s for session %s (#%s): %f seconds, rss %s, stime %f", self.logger.info("Completed method %s for session %s (#%s): %f seconds, rss %s, stime %f",
method, context.session.id, context.session.callnum, method, context.session.id, context.session.callnum,
time.time()-start, time.time() - start,
rusage.ru_maxrss, rusage.ru_stime) rusage.ru_maxrss, rusage.ru_stime)
return ret return ret
@ -771,7 +771,7 @@ def application(environ, start_response):
except Exception: except Exception:
pass pass
context._threadclear() context._threadclear()
return [response] #XXX return [response] # XXX
def get_registry(opts, plugins): def get_registry(opts, plugins):
@ -108,13 +108,13 @@ RPM_FILEDIGESTALGO_IDS = {
# Taken from RFC 4880 # Taken from RFC 4880
# A missing algo ID means md5 # A missing algo ID means md5
None: 'MD5', None: 'MD5',
1: 'MD5', 1: 'MD5',
2: 'SHA1', 2: 'SHA1',
3: 'RIPEMD160', 3: 'RIPEMD160',
8: 'SHA256', 8: 'SHA256',
9: 'SHA384', 9: 'SHA384',
10: 'SHA512', 10: 'SHA512',
11: 'SHA224' 11: 'SHA224'
} }
# rpm 4.12 introduces optional deps, but they can also be backported in some # rpm 4.12 introduces optional deps, but they can also be backported in some
@ -288,7 +288,7 @@ DEFAULT_AUTH_TIMEOUT = 60
# BEGIN kojikamid dup # # BEGIN kojikamid dup #
# Exceptions # Exceptions
PythonImportError = ImportError # will be masked by koji's one PythonImportError = ImportError # will be masked by koji's one
class GenericError(Exception): class GenericError(Exception):
"""Base class for our custom exceptions""" """Base class for our custom exceptions"""
@ -640,8 +640,8 @@ class RawHeader(object):
for i in range(il): for i in range(il):
entry = [] entry = []
for j in range(4): for j in range(4):
ofs = 16 + i*16 + j*4 ofs = 16 + i * 16 + j * 4
data = [_ord(x) for x in self.header[ofs:ofs+4]] data = [_ord(x) for x in self.header[ofs:ofs + 4]]
entry.append(multibyte(data)) entry.append(multibyte(data))
# print("Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry)) # print("Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry))
@ -693,7 +693,7 @@ class RawHeader(object):
# integer # integer
n = 1 << (dtype - 2) n = 1 << (dtype - 2)
for i in range(count): for i in range(count):
data = [_ord(x) for x in self.header[pos:pos+n]] data = [_ord(x) for x in self.header[pos:pos + n]]
print("%r" % data) print("%r" % data)
num = multibyte(data) num = multibyte(data)
print("Int(%d): %d" % (n, num)) print("Int(%d): %d" % (n, num))
@ -702,23 +702,23 @@ class RawHeader(object):
elif dtype == 6: elif dtype == 6:
# string (null terminated) # string (null terminated)
end = self.header.find(six.b('\0'), pos) end = self.header.find(six.b('\0'), pos)
print("String(%d): %r" % (end-pos, self.header[pos:end])) print("String(%d): %r" % (end - pos, self.header[pos:end]))
next = end + 1 next = end + 1
elif dtype == 7: elif dtype == 7:
print("Data: %s" % hex_string(self.header[pos:pos+count])) print("Data: %s" % hex_string(self.header[pos:pos + count]))
next = pos+count next = pos + count
elif dtype == 8: elif dtype == 8:
# string array # string array
for i in range(count): for i in range(count):
end = self.header.find(six.b('\0'), pos) end = self.header.find(six.b('\0'), pos)
print("String(%d): %r" % (end-pos, self.header[pos:end])) print("String(%d): %r" % (end - pos, self.header[pos:end]))
pos = end + 1 pos = end + 1
next = pos next = pos
elif dtype == 9: elif dtype == 9:
# unicode string array # unicode string array
for i in range(count): for i in range(count):
end = self.header.find(six.b('\0'), pos) end = self.header.find(six.b('\0'), pos)
print("i18n(%d): %r" % (end-pos, self.header[pos:end])) print("i18n(%d): %r" % (end - pos, self.header[pos:end]))
pos = end + 1 pos = end + 1
next = pos next = pos
else: else:
@ -746,7 +746,7 @@ class RawHeader(object):
if dtype >= 2 and dtype <= 5: if dtype >= 2 and dtype <= 5:
n = 1 << (dtype - 2) n = 1 << (dtype - 2)
# n-byte integer # n-byte integer
data = [_ord(x) for x in self.header[pos:pos+n]] data = [_ord(x) for x in self.header[pos:pos + n]]
return multibyte(data) return multibyte(data)
elif dtype == 6: elif dtype == 6:
# string (null terminated) # string (null terminated)
@ -754,7 +754,7 @@ class RawHeader(object):
return self.header[pos:end] return self.header[pos:end]
elif dtype == 7: elif dtype == 7:
# raw data # raw data
return self.header[pos:pos+count] return self.header[pos:pos + count]
else: else:
# XXX - not all valid data types are handled # XXX - not all valid data types are handled
raise GenericError("Unable to read header data type: %x" % dtype) raise GenericError("Unable to read header data type: %x" % dtype)
@ -806,7 +806,7 @@ def __parse_packet_header(pgp_packet):
offset = 1 offset = 1
length = len(pgp_packet) - offset length = len(pgp_packet) - offset
else: else:
(fmt, offset) = {0:('>B', 2), 1:('>H', 3), 2:('>I', 5)}[len_type] (fmt, offset) = {0: ('>B', 2), 1: ('>H', 3), 2: ('>I', 5)}[len_type]
length = struct.unpack(fmt, pgp_packet[1:offset])[0] length = struct.unpack(fmt, pgp_packet[1:offset])[0]
else: else:
tag = byte0 & 0x3F tag = byte0 & 0x3F
@ -843,8 +843,8 @@ def __subpacket_key_ids(subs):
length = struct.unpack('>I', subs[1:5])[0] length = struct.unpack('>I', subs[1:5])[0]
off = 5 off = 5
if _ord(subs[off]) == 16: if _ord(subs[off]) == 16:
res.append(subs[off+1 : off+length]) res.append(subs[off + 1: off + length])
subs = subs[off+length:] subs = subs[off + length:]
return res return res
def get_sigpacket_key_id(sigpacket): def get_sigpacket_key_id(sigpacket):
@ -858,9 +858,9 @@ def get_sigpacket_key_id(sigpacket):
sub_len = struct.unpack('>H', sigpacket[4:6])[0] sub_len = struct.unpack('>H', sigpacket[4:6])[0]
off = 6 + sub_len off = 6 + sub_len
key_ids = __subpacket_key_ids(sigpacket[6:off]) key_ids = __subpacket_key_ids(sigpacket[6:off])
sub_len = struct.unpack('>H', sigpacket[off : off+2])[0] sub_len = struct.unpack('>H', sigpacket[off: off + 2])[0]
off += 2 off += 2
key_ids += __subpacket_key_ids(sigpacket[off : off+sub_len]) key_ids += __subpacket_key_ids(sigpacket[off: off + sub_len])
if len(key_ids) != 1: if len(key_ids) != 1:
raise NotImplementedError( raise NotImplementedError(
'Unexpected number of key IDs: %s' % len(key_ids)) 'Unexpected number of key IDs: %s' % len(key_ids))
@ -907,7 +907,7 @@ def get_rpm_header(f, ts=None):
raise GenericError("rpm's python bindings are not installed") raise GenericError("rpm's python bindings are not installed")
if ts is None: if ts is None:
ts = rpm.TransactionSet() ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS) ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)
if isinstance(f, six.string_types): if isinstance(f, six.string_types):
fo = open(f, "rb") fo = open(f, "rb")
else: else:
@ -1007,8 +1007,8 @@ def parse_NVR(nvr):
p1 = nvr.rfind("-", 0, p2) p1 = nvr.rfind("-", 0, p2)
if p1 == -1 or p1 == p2 - 1: if p1 == -1 or p1 == p2 - 1:
raise GenericError("invalid format: %s" % nvr) raise GenericError("invalid format: %s" % nvr)
ret['release'] = nvr[p2+1:] ret['release'] = nvr[p2 + 1:]
ret['version'] = nvr[p1+1:p2] ret['version'] = nvr[p1 + 1:p2]
ret['name'] = nvr[:p1] ret['name'] = nvr[:p1]
epochIndex = ret['name'].find(':') epochIndex = ret['name'].find(':')
if epochIndex == -1: if epochIndex == -1:
@ -1031,7 +1031,7 @@ def parse_NVRA(nvra):
p3 = nvra.rfind(".") p3 = nvra.rfind(".")
if p3 == -1 or p3 == len(nvra) - 1: if p3 == -1 or p3 == len(nvra) - 1:
raise GenericError("invalid format: %s" % nvra) raise GenericError("invalid format: %s" % nvra)
arch = nvra[p3+1:] arch = nvra[p3 + 1:]
ret = parse_NVR(nvra[:p3]) ret = parse_NVR(nvra[:p3])
ret['arch'] = arch ret['arch'] = arch
if arch == 'src': if arch == 'src':
@ -1427,7 +1427,7 @@ def generate_comps(groups, expand_groups=False):
if expand_groups and g['grouplist']: if expand_groups and g['grouplist']:
# add a requires entry for all packages in groups required by buildgroup # add a requires entry for all packages in groups required by buildgroup
need = [req['name'] for req in g['grouplist']] need = [req['name'] for req in g['grouplist']]
seen_grp = {g['name'] : 1} seen_grp = {g['name']: 1}
seen_pkg = {} seen_pkg = {}
for p in g['packagelist']: for p in g['packagelist']:
seen_pkg[p['package']] = 1 seen_pkg[p['package']] = 1
@ -1503,9 +1503,9 @@ def genMockConfig(name, arch, managed=False, repoid=None, tag_name=None, **opts)
# rely on the mock defaults being correct # rely on the mock defaults being correct
# and only includes changes from the defaults here # and only includes changes from the defaults here
config_opts = { config_opts = {
'root' : name, 'root': name,
'basedir' : mockdir, 'basedir': mockdir,
'target_arch' : opts.get('target_arch', arch), 'target_arch': opts.get('target_arch', arch),
'chroothome': '/builddir', 'chroothome': '/builddir',
# Use the group data rather than a generated rpm # Use the group data rather than a generated rpm
'chroot_setup_cmd': 'groupinstall %s' % opts.get('install_group', 'build'), 'chroot_setup_cmd': 'groupinstall %s' % opts.get('install_group', 'build'),
@ -1578,9 +1578,9 @@ name=build
} }
macros = { macros = {
'%_rpmfilename' : '%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm', '%_rpmfilename': '%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm',
'%vendor' : opts.get('vendor', 'Koji'), '%vendor': opts.get('vendor', 'Koji'),
'%packager' : opts.get('packager', 'Koji'), '%packager': opts.get('packager', 'Koji'),
'%distribution': opts.get('distribution', 'Unknown') '%distribution': opts.get('distribution', 'Unknown')
} }
@ -1747,18 +1747,18 @@ def config_directory_contents(dir_name, strict=False):
def read_config(profile_name, user_config=None): def read_config(profile_name, user_config=None):
config_defaults = { config_defaults = {
'server' : 'http://localhost/kojihub', 'server': 'http://localhost/kojihub',
'weburl' : 'http://localhost/koji', 'weburl': 'http://localhost/koji',
'topurl' : None, 'topurl': None,
'pkgurl' : None, 'pkgurl': None,
'topdir' : '/mnt/koji', 'topdir': '/mnt/koji',
'max_retries' : None, 'max_retries': None,
'retry_interval': None, 'retry_interval': None,
'anon_retry' : None, 'anon_retry': None,
'offline_retry' : None, 'offline_retry': None,
'offline_retry_interval' : None, 'offline_retry_interval': None,
'timeout' : DEFAULT_REQUEST_TIMEOUT, 'timeout': DEFAULT_REQUEST_TIMEOUT,
'auth_timeout' : DEFAULT_AUTH_TIMEOUT, 'auth_timeout': DEFAULT_AUTH_TIMEOUT,
'use_fast_upload': False, 'use_fast_upload': False,
'upload_blocksize': 1048576, 'upload_blocksize': 1048576,
'poll_interval': 6, 'poll_interval': 6,
@ -2109,7 +2109,7 @@ def is_requests_cert_error(e):
def is_cert_error(e): def is_cert_error(e):
"""Determine if an OpenSSL error is due to a bad cert""" """Determine if an OpenSSL error is due to a bad cert"""
if SSL_Error is None: #pragma: no cover if SSL_Error is None: # pragma: no cover
# import failed, so we can't determine # import failed, so we can't determine
raise Exception("OpenSSL library did not load") raise Exception("OpenSSL library did not load")
if not isinstance(e, SSL_Error): if not isinstance(e, SSL_Error):
@ -2980,14 +2980,14 @@ class ClientSession(object):
tries = 0 tries = 0
while True: while True:
if debug: if debug:
self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" %(path, name, sz, digest, offset)) self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" % (path, name, sz, digest, offset))
if self.callMethod('uploadFile', path, name, sz, digest, offset, data, **volopts): if self.callMethod('uploadFile', path, name, sz, digest, offset, data, **volopts):
break break
if tries <= retries: if tries <= retries:
tries += 1 tries += 1
continue continue
else: else:
raise GenericError("Error uploading file %s, offset %d" %(path, offset)) raise GenericError("Error uploading file %s, offset %d" % (path, offset))
if size == 0: if size == 0:
break break
ofs += size ofs += size
@ -3127,7 +3127,7 @@ class MultiCallSession(object):
self._session.logger.debug( self._session.logger.debug(
"MultiCall with batch size %i, calls/groups(%i/%i)", "MultiCall with batch size %i, calls/groups(%i/%i)",
batch, len(calls), round(len(calls) // batch)) batch, len(calls), round(len(calls) // batch))
batches = [calls[i:i+batch] for i in range(0, len(calls), batch)] batches = [calls[i:i + batch] for i in range(0, len(calls), batch)]
else: else:
batches = [calls] batches = [calls]
results = [] results = []
@ -3502,7 +3502,7 @@ def add_file_logger(logger, fn):
return return
if not os.access(fn, os.W_OK): if not os.access(fn, os.W_OK):
return return
handler = logging.handlers.RotatingFileHandler(fn, maxBytes=1024*1024*10, backupCount=5) handler = logging.handlers.RotatingFileHandler(fn, maxBytes=1024 * 1024 * 10, backupCount=5)
handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s')) handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))
logging.getLogger(logger).addHandler(handler) logging.getLogger(logger).addHandler(handler)
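The parse_NVR hunks above only adjust slice spacing; the split itself works like this stripped-down sketch (simplified: no epoch handling and no error checking, unlike the real function):

def split_nvr(nvr):
    # locate the last two dashes: release after p2, version between p1 and p2
    p2 = nvr.rfind("-")
    p1 = nvr.rfind("-", 0, p2)
    return {'name': nvr[:p1],
            'version': nvr[p1 + 1:p2],
            'release': nvr[p2 + 1:]}

print(split_nvr("bash-5.0.17-1.fc32"))
# -> {'name': 'bash', 'version': '5.0.17', 'release': '1.fc32'}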
@ -10,14 +10,14 @@ import rpm
_ppc64_native_is_best = True _ppc64_native_is_best = True
# dict mapping arch -> ( multicompat, best personality, biarch personality ) # dict mapping arch -> ( multicompat, best personality, biarch personality )
multilibArches = { "x86_64": ( "athlon", "x86_64", "athlon" ), multilibArches = {"x86_64": ("athlon", "x86_64", "athlon"),
"sparc64v": ( "sparcv9v", "sparcv9v", "sparc64v" ), "sparc64v": ("sparcv9v", "sparcv9v", "sparc64v"),
"sparc64": ( "sparcv9", "sparcv9", "sparc64" ), "sparc64": ("sparcv9", "sparcv9", "sparc64"),
"ppc64": ( "ppc", "ppc", "ppc64" ), "ppc64": ("ppc", "ppc", "ppc64"),
"s390x": ( "s390", "s390x", "s390" ), "s390x": ("s390", "s390x", "s390"),
} }
if _ppc64_native_is_best: if _ppc64_native_is_best:
multilibArches["ppc64"] = ( "ppc", "ppc64", "ppc64" ) multilibArches["ppc64"] = ("ppc", "ppc64", "ppc64")
arches = { arches = {
# ia32 # ia32
@ -34,7 +34,7 @@ arches = {
"ia32e": "x86_64", "ia32e": "x86_64",
# ppc64le # ppc64le
"ppc64le": "noarch", "ppc64le": "noarch",
# ppc # ppc
"ppc64p7": "ppc64", "ppc64p7": "ppc64",
@ -56,16 +56,16 @@ arches = {
"sparc": "noarch", "sparc": "noarch",
# alpha # alpha
"alphaev7": "alphaev68", "alphaev7": "alphaev68",
"alphaev68": "alphaev67", "alphaev68": "alphaev67",
"alphaev67": "alphaev6", "alphaev67": "alphaev6",
"alphaev6": "alphapca56", "alphaev6": "alphapca56",
"alphapca56": "alphaev56", "alphapca56": "alphaev56",
"alphaev56": "alphaev5", "alphaev56": "alphaev5",
"alphaev5": "alphaev45", "alphaev5": "alphaev45",
"alphaev45": "alphaev4", "alphaev45": "alphaev4",
"alphaev4": "alpha", "alphaev4": "alpha",
"alpha": "noarch", "alpha": "noarch",
# arm # arm
"armv7l": "armv6l", "armv7l": "armv6l",
@ -124,7 +124,7 @@ def canCoinstall(arch1, arch2):
# if both are a multilibarch then we can't coinstall (x86_64, ia32e) # if both are a multilibarch then we can't coinstall (x86_64, ia32e)
# if both are not multilibarches then we can't coinstall (i386, i686) # if both are not multilibarches then we can't coinstall (i386, i686)
if 'noarch' in [arch1, arch2]: # noarch can never coinstall if 'noarch' in [arch1, arch2]: # noarch can never coinstall
return False return False
if isMultiLibArch(arch=arch1) == isMultiLibArch(arch=arch2): if isMultiLibArch(arch=arch1) == isMultiLibArch(arch=arch2):
@ -153,7 +153,7 @@ def isMultiLibArch(arch=None):
if arch is None: if arch is None:
arch = canonArch arch = canonArch
if arch not in arches: # or we could check if it is noarch if arch not in arches: # or we could check if it is noarch
return 0 return 0
if arch in multilibArches: if arch in multilibArches:
@ -219,7 +219,7 @@ def getArchList(thisarch=None):
# hack hack hack # hack hack hack
# sparc64v is also sparc64 compat # sparc64v is also sparc64 compat
if archlist[0] == "sparc64v": if archlist[0] == "sparc64v":
archlist.insert(1,"sparc64") archlist.insert(1, "sparc64")
# if we're a weirdo arch - add noarch on there. # if we're a weirdo arch - add noarch on there.
if len(archlist) == 1 and archlist[0] == thisarch: if len(archlist) == 1 and archlist[0] == thisarch:
@ -366,7 +366,7 @@ def getCanonX86_64Arch(arch):
return "ia32e" return "ia32e"
return arch return arch
def getCanonArch(skipRpmPlatform = 0): def getCanonArch(skipRpmPlatform=0):
if not skipRpmPlatform and os.access("/etc/rpm/platform", os.R_OK): if not skipRpmPlatform and os.access("/etc/rpm/platform", os.R_OK):
try: try:
f = open("/etc/rpm/platform", "r") f = open("/etc/rpm/platform", "r")
@ -398,11 +398,11 @@ def getCanonArch(skipRpmPlatform = 0):
canonArch = getCanonArch() canonArch = getCanonArch()
# this gets you the "compat" arch of a biarch pair # this gets you the "compat" arch of a biarch pair
def getMultiArchInfo(arch = canonArch): def getMultiArchInfo(arch=canonArch):
if arch in multilibArches: if arch in multilibArches:
return multilibArches[arch] return multilibArches[arch]
if arch in arches and arches[arch] != "noarch": if arch in arches and arches[arch] != "noarch":
return getMultiArchInfo(arch = arches[arch]) return getMultiArchInfo(arch=arches[arch])
return None return None
# get the best usual userspace arch for the arch we're on. this is # get the best usual userspace arch for the arch we're on. this is
@ -430,7 +430,7 @@ def getBaseArch(myarch=None):
if not myarch: if not myarch:
myarch = canonArch myarch = canonArch
if myarch not in arches: # this is dumb, but <shrug> if myarch not in arches: # this is dumb, but <shrug>
return myarch return myarch
if myarch.startswith("sparc64"): if myarch.startswith("sparc64"):
@ -485,7 +485,7 @@ class ArchStorage(object):
self.basearch = getBaseArch(myarch=self.canonarch) self.basearch = getBaseArch(myarch=self.canonarch)
self.archlist = getArchList(thisarch=self.canonarch) self.archlist = getArchList(thisarch=self.canonarch)
if not archlist_includes_compat_arch: # - do we bother including i686 and below on x86_64 if not archlist_includes_compat_arch: # - do we bother including i686 and below on x86_64
limit_archlist = [] limit_archlist = []
for a in self.archlist: for a in self.archlist:
if isMultiLibArch(a) or a == 'noarch': if isMultiLibArch(a) or a == 'noarch':
@ -495,7 +495,7 @@ class ArchStorage(object):
self.bestarch = getBestArch(myarch=self.canonarch) self.bestarch = getBestArch(myarch=self.canonarch)
self.compatarches = getMultiArchInfo(arch=self.canonarch) self.compatarches = getMultiArchInfo(arch=self.canonarch)
self.multilib = isMultiLibArch(arch=self.canonarch) self.multilib = isMultiLibArch(arch=self.canonarch)
self.legit_multi_arches = legitMultiArchesInSameLib(arch = self.canonarch) self.legit_multi_arches = legitMultiArchesInSameLib(arch=self.canonarch)
def get_best_arch_from_list(self, archlist, fromarch=None): def get_best_arch_from_list(self, archlist, fromarch=None):
if not fromarch: if not fromarch:
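The arches table above is a parent map: each entry points at the next-most-generic arch, bottoming out at noarch. A self-contained walk of that chain (compat_chain is a hypothetical helper over a local excerpt of the map; the real traversal is spread across getArchList and getBaseArch):

# small excerpt of the parent map, copied locally so this runs standalone
arches = {"x86_64": "athlon", "athlon": "i686", "i686": "i586",
          "i586": "i486", "i486": "i386", "i386": "noarch"}

def compat_chain(arch):
    chain = [arch]
    while arches.get(arch, "noarch") != "noarch":
        arch = arches[arch]
        chain.append(arch)
    return chain

print(compat_chain("x86_64"))
# -> ['x86_64', 'athlon', 'i686', 'i586', 'i486', 'i386']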
@ -321,7 +321,7 @@ class Session(object):
srvkt = krbV.Keytab(name=context.opts.get('AuthKeytab'), context=ctx) srvkt = krbV.Keytab(name=context.opts.get('AuthKeytab'), context=ctx)
ac = krbV.AuthContext(context=ctx) ac = krbV.AuthContext(context=ctx)
ac.flags = krbV.KRB5_AUTH_CONTEXT_DO_SEQUENCE|krbV.KRB5_AUTH_CONTEXT_DO_TIME ac.flags = krbV.KRB5_AUTH_CONTEXT_DO_SEQUENCE | krbV.KRB5_AUTH_CONTEXT_DO_TIME
conninfo = self.getConnInfo() conninfo = self.getConnInfo()
ac.addrs = conninfo ac.addrs = conninfo
@ -537,8 +537,8 @@ class Session(object):
# generate a random key # generate a random key
alnum = string.ascii_letters + string.digits alnum = string.ascii_letters + string.digits
key = "%s-%s" %(user_id, key = "%s-%s" % (user_id,
''.join([random.choice(alnum) for x in range(1, 20)])) ''.join([random.choice(alnum) for x in range(1, 20)]))
# use sha? sha.new(phrase).hexdigest() # use sha? sha.new(phrase).hexdigest()
# get a session id # get a session id
@ -556,7 +556,7 @@ class Session(object):
context.cnx.commit() context.cnx.commit()
# return session info # return session info
return {'session-id' : session_id, 'session-key' : key} return {'session-id': session_id, 'session-key': key}
def subsession(self): def subsession(self):
"Create a subsession" "Create a subsession"
@ -607,7 +607,7 @@ class Session(object):
return None return None
c = context.cnx.cursor() c = context.cnx.cursor()
q = """SELECT id FROM host WHERE user_id = %(uid)d""" q = """SELECT id FROM host WHERE user_id = %(uid)d"""
c.execute(q, {'uid' : self.user_id}) c.execute(q, {'uid': self.user_id})
r = c.fetchone() r = c.fetchone()
c.close() c.close()
if r: if r:
@ -67,8 +67,8 @@ class ThreadLocal(object):
def __str__(self): def __str__(self):
id = six.moves._thread.get_ident() id = six.moves._thread.get_ident()
tdict = object.__getattribute__(self, '_tdict') tdict = object.__getattribute__(self, '_tdict')
return "(current thread: %s) {" % id + \ return "(current thread: %s) {" % id + \
", ".join(["%s : %s" %(k, v.__dict__) for (k, v) in six.iteritems(tdict)]) + \ ", ".join(["%s : %s" % (k, v.__dict__) for (k, v) in six.iteritems(tdict)]) + \
"}" "}"
def _threadclear(self): def _threadclear(self):
@ -307,7 +307,7 @@ class SCM(object):
# check for validity: params should be empty, query may be empty, everything else should be populated # check for validity: params should be empty, query may be empty, everything else should be populated
if params: if params:
raise koji.GenericError('Unable to parse SCM URL: %s . Params element %s should be empty.' % (self.url, params)) raise koji.GenericError('Unable to parse SCM URL: %s . Params element %s should be empty.' % (self.url, params))
if not scheme: #pragma: no cover if not scheme: # pragma: no cover
# should not happen because of is_scm_url check earlier # should not happen because of is_scm_url check earlier
raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the scheme element.' % self.url) raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the scheme element.' % self.url)
if not fragment: if not fragment:
@ -699,7 +699,7 @@ class TaskManager(object):
# can lead to a world of hurt. # can lead to a world of hurt.
# We remove the rootdir contents but leave the rootdir unless it # We remove the rootdir contents but leave the rootdir unless it
# is really old # is really old
if age > 3600*24: if age > 3600 * 24:
# dir untouched for a day # dir untouched for a day
self.logger.info("Removing buildroot: %s" % desc) self.logger.info("Removing buildroot: %s" % desc)
if topdir and safe_rmtree(topdir, unmount=True, strict=False) != 0: if topdir and safe_rmtree(topdir, unmount=True, strict=False) != 0:
@ -850,8 +850,8 @@ class TaskManager(object):
self.logger.debug(" hosts: %r" % hosts) self.logger.debug(" hosts: %r" % hosts)
self.logger.debug(" tasks: %r" % tasks) self.logger.debug(" tasks: %r" % tasks)
# now we organize this data into channel-arch bins # now we organize this data into channel-arch bins
bin_hosts = {} #hosts indexed by bin bin_hosts = {} # hosts indexed by bin
bins = {} #bins for this host bins = {} # bins for this host
our_avail = None our_avail = None
for host in hosts: for host in hosts:
host['bins'] = [] host['bins'] = []
@ -34,28 +34,28 @@ from koji.util import encode_datetime_recurse
# the available callback hooks and a list # the available callback hooks and a list
# of functions to be called for each event # of functions to be called for each event
callbacks = { callbacks = {
'prePackageListChange': [], 'prePackageListChange': [],
'postPackageListChange': [], 'postPackageListChange': [],
'preTaskStateChange': [], 'preTaskStateChange': [],
'postTaskStateChange': [], 'postTaskStateChange': [],
'preBuildStateChange': [], 'preBuildStateChange': [],
'postBuildStateChange': [], 'postBuildStateChange': [],
'preImport': [], 'preImport': [],
'postImport': [], 'postImport': [],
'preRPMSign': [], 'preRPMSign': [],
'postRPMSign': [], 'postRPMSign': [],
'preTag': [], 'preTag': [],
'postTag': [], 'postTag': [],
'preUntag': [], 'preUntag': [],
'postUntag': [], 'postUntag': [],
'preRepoInit': [], 'preRepoInit': [],
'postRepoInit': [], 'postRepoInit': [],
'preRepoDone': [], 'preRepoDone': [],
'postRepoDone': [], 'postRepoDone': [],
'preCommit': [], 'preCommit': [],
'postCommit': [], 'postCommit': [],
'preSCMCheckout': [], 'preSCMCheckout': [],
'postSCMCheckout': [], 'postSCMCheckout': [],
} }
class PluginTracker(object): class PluginTracker(object):
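Each key in the callbacks table above maps a hook name to a list of plugin functions. A minimal dispatch sketch assuming the callbacks dict as defined above (run_hooks and announce are hypothetical names; the real registration and dispatch helpers live alongside this table):

def run_hooks(cbtype, *args, **kwargs):
    # unknown hook names raise KeyError, keeping the table the source of truth
    for func in callbacks[cbtype]:
        func(cbtype, *args, **kwargs)

def announce(cbtype, *args, **kwargs):
    print("fired %s with %r %r" % (cbtype, args, kwargs))

callbacks['postTag'].append(announce)  # plugins normally go through a registration helper
run_hooks('postTag', tag='f32-build', build='bash-5.0.17-1.fc32')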
@ -288,7 +288,7 @@ class SimpleRuleSet(object):
raise Exception("bad policy line: %s" % line) raise Exception("bad policy line: %s" % line)
negate = True negate = True
tests = line[:pos] tests = line[:pos]
action = line[pos+2:] action = line[pos + 2:]
tests = [self.get_test_handler(x) for x in tests.split('&&')] tests = [self.get_test_handler(x) for x in tests.split('&&')]
action = action.strip() action = action.strip()
# just return action = { for nested rules # just return action = { for nested rules
@ -40,53 +40,53 @@ class Rpmdiff:
# constants # constants
TAGS = ( rpm.RPMTAG_NAME, rpm.RPMTAG_SUMMARY, TAGS = (rpm.RPMTAG_NAME, rpm.RPMTAG_SUMMARY,
rpm.RPMTAG_DESCRIPTION, rpm.RPMTAG_GROUP, rpm.RPMTAG_DESCRIPTION, rpm.RPMTAG_GROUP,
rpm.RPMTAG_LICENSE, rpm.RPMTAG_URL, rpm.RPMTAG_LICENSE, rpm.RPMTAG_URL,
rpm.RPMTAG_PREIN, rpm.RPMTAG_POSTIN, rpm.RPMTAG_PREIN, rpm.RPMTAG_POSTIN,
rpm.RPMTAG_PREUN, rpm.RPMTAG_POSTUN) rpm.RPMTAG_PREUN, rpm.RPMTAG_POSTUN)
PRCO = ( 'REQUIRES', 'PROVIDES', 'CONFLICTS', 'OBSOLETES') PRCO = ('REQUIRES', 'PROVIDES', 'CONFLICTS', 'OBSOLETES')
# {fname : (size, mode, mtime, flags, dev, inode, # {fname : (size, mode, mtime, flags, dev, inode,
# nlink, state, vflags, user, group, digest)} # nlink, state, vflags, user, group, digest)}
__FILEIDX = [ ['S', 0], __FILEIDX = [['S', 0],
['M', 1], ['M', 1],
['5', 11], ['5', 11],
['D', 4], ['D', 4],
['N', 6], ['N', 6],
['L', 7], ['L', 7],
['V', 8], ['V', 8],
['U', 9], ['U', 9],
['G', 10], ['G', 10],
['F', 3], ['F', 3],
['T', 2] ] ['T', 2]]
try: try:
if rpm.RPMSENSE_SCRIPT_PRE: if rpm.RPMSENSE_SCRIPT_PRE:
PREREQ_FLAG=rpm.RPMSENSE_PREREQ|rpm.RPMSENSE_SCRIPT_PRE|\ PREREQ_FLAG = rpm.RPMSENSE_PREREQ | rpm.RPMSENSE_SCRIPT_PRE |\
rpm.RPMSENSE_SCRIPT_POST|rpm.RPMSENSE_SCRIPT_PREUN|\ rpm.RPMSENSE_SCRIPT_POST | rpm.RPMSENSE_SCRIPT_PREUN |\
rpm.RPMSENSE_SCRIPT_POSTUN rpm.RPMSENSE_SCRIPT_POSTUN
except AttributeError: except AttributeError:
try: try:
PREREQ_FLAG=rpm.RPMSENSE_PREREQ PREREQ_FLAG = rpm.RPMSENSE_PREREQ
except: except:
# (proyvind): This seems ugly, but then again so does # (proyvind): This seems ugly, but then again so does
# this whole check as well. # this whole check as well.
PREREQ_FLAG=False PREREQ_FLAG = False
DEPFORMAT = '%-12s%s %s %s %s' DEPFORMAT = '%-12s%s %s %s %s'
FORMAT = '%-12s%s' FORMAT = '%-12s%s'
ADDED = 'added' ADDED = 'added'
REMOVED = 'removed' REMOVED = 'removed'
# code starts here # code starts here
def __init__(self, old, new, ignore=None): def __init__(self, old, new, ignore=None):
self.result = [] self.result = []
self.old_data = { 'tags': {}, 'ignore': ignore } self.old_data = {'tags': {}, 'ignore': ignore}
self.new_data = { 'tags': {}, 'ignore': ignore } self.new_data = {'tags': {}, 'ignore': ignore}
if ignore is None: if ignore is None:
ignore = set() ignore = set()
else: else:
@ -111,7 +111,7 @@ class Rpmdiff:
self.__add(self.FORMAT, ('S.5........', tagname)) self.__add(self.FORMAT, ('S.5........', tagname))
# compare Provides, Requires, ... # compare Provides, Requires, ...
for tag in self.PRCO: for tag in self.PRCO:
self.__comparePRCOs(old, new, tag) self.__comparePRCOs(old, new, tag)
# compare the files # compare the files
@ -183,16 +183,16 @@ class Rpmdiff:
# compare Provides, Requires, Conflicts, Obsoletes # compare Provides, Requires, Conflicts, Obsoletes
def __comparePRCOs(self, old, new, name): def __comparePRCOs(self, old, new, name):
oldflags = old[name[:-1]+'FLAGS'] oldflags = old[name[:-1] + 'FLAGS']
newflags = new[name[:-1]+'FLAGS'] newflags = new[name[:-1] + 'FLAGS']
# fix buggy rpm binding not returning list for single entries # fix buggy rpm binding not returning list for single entries
if not isinstance(oldflags, list): oldflags = [ oldflags ] if not isinstance(oldflags, list): oldflags = [oldflags]
if not isinstance(newflags, list): newflags = [ newflags ] if not isinstance(newflags, list): newflags = [newflags]
o = list(zip(old[name], oldflags, old[name[:-1]+'VERSION'])) o = list(zip(old[name], oldflags, old[name[:-1] + 'VERSION']))
n = list(zip(new[name], newflags, new[name[:-1]+'VERSION'])) n = list(zip(new[name], newflags, new[name[:-1] + 'VERSION']))
if name == 'PROVIDES': # filter our self provide if name == 'PROVIDES': # filter our self provide
oldNV = (old['name'], rpm.RPMSENSE_EQUAL, oldNV = (old['name'], rpm.RPMSENSE_EQUAL,
"%s-%s" % (old['version'], old['release'])) "%s-%s" % (old['version'], old['release']))
newNV = (new['name'], rpm.RPMSENSE_EQUAL, newNV = (new['name'], rpm.RPMSENSE_EQUAL,
@ -144,121 +144,121 @@ def parse_task_params(method, params):
LEGACY_SIGNATURES = { LEGACY_SIGNATURES = {
# key is method name, value is list of possible signatures # key is method name, value is list of possible signatures
# signatures are like getargspec -- args, varargs, keywords, defaults # signatures are like getargspec -- args, varargs, keywords, defaults
'chainbuild' : [ 'chainbuild': [
[['srcs', 'target', 'opts'], None, None, (None,)], [['srcs', 'target', 'opts'], None, None, (None,)],
], ],
'waitrepo' : [ 'waitrepo': [
[['tag', 'newer_than', 'nvrs'], None, None, (None, None)], [['tag', 'newer_than', 'nvrs'], None, None, (None, None)],
], ],
'createLiveMedia' : [ 'createLiveMedia': [
[['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)], [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],
], ],
'createAppliance' : [ 'createAppliance': [
[['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)], [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],
], ],
'livecd' : [ 'livecd': [
[['name', 'version', 'arch', 'target', 'ksfile', 'opts'], None, None, (None,)], [['name', 'version', 'arch', 'target', 'ksfile', 'opts'], None, None, (None,)],
], ],
'buildNotification' : [ 'buildNotification': [
[['recipients', 'build', 'target', 'weburl'], None, None, None], [['recipients', 'build', 'target', 'weburl'], None, None, None],
], ],
'buildMaven' : [ 'buildMaven': [
[['url', 'build_tag', 'opts'], None, None, (None,)], [['url', 'build_tag', 'opts'], None, None, (None,)],
], ],
'build' : [ 'build': [
[['src', 'target', 'opts'], None, None, (None,)], [['src', 'target', 'opts'], None, None, (None,)],
], ],
'buildSRPMFromSCM' : [ 'buildSRPMFromSCM': [
[['url', 'build_tag', 'opts'], None, None, (None,)], [['url', 'build_tag', 'opts'], None, None, (None,)],
], ],
'rebuildSRPM' : [ 'rebuildSRPM': [
[['srpm', 'build_tag', 'opts'], None, None, (None,)], [['srpm', 'build_tag', 'opts'], None, None, (None,)],
], ],
'createrepo' : [ 'createrepo': [
[['repo_id', 'arch', 'oldrepo'], None, None, None], [['repo_id', 'arch', 'oldrepo'], None, None, None],
], ],
'livemedia' : [ 'livemedia': [
[['name', 'version', 'arches', 'target', 'ksfile', 'opts'], None, None, (None,)], [['name', 'version', 'arches', 'target', 'ksfile', 'opts'], None, None, (None,)],
], ],
'indirectionimage' : [ 'indirectionimage': [
[['opts'], None, None, None], [['opts'], None, None, None],
], ],
'wrapperRPM' : [ 'wrapperRPM': [
[['spec_url', 'build_target', 'build', 'task', 'opts'], None, None, (None,)], [['spec_url', 'build_target', 'build', 'task', 'opts'], None, None, (None,)],
], ],
'createLiveCD' : [ 'createLiveCD': [
[['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)], [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],
], ],
'appliance' : [ 'appliance': [
[['name', 'version', 'arch', 'target', 'ksfile', 'opts'], None, None, (None,)], [['name', 'version', 'arch', 'target', 'ksfile', 'opts'], None, None, (None,)],
], ],
'image' : [ 'image': [
[['name', 'version', 'arches', 'target', 'inst_tree', 'opts'], None, None, (None,)], [['name', 'version', 'arches', 'target', 'inst_tree', 'opts'], None, None, (None,)],
], ],
'tagBuild' : [ 'tagBuild': [
[['tag_id', 'build_id', 'force', 'fromtag', 'ignore_success'], None, None, (False, None, False)], [['tag_id', 'build_id', 'force', 'fromtag', 'ignore_success'], None, None, (False, None, False)],
], ],
'chainmaven' : [ 'chainmaven': [
[['builds', 'target', 'opts'], None, None, (None,)], [['builds', 'target', 'opts'], None, None, (None,)],
], ],
'newRepo' : [ 'newRepo': [
[['tag', 'event', 'src', 'debuginfo', 'separate_src'], None, None, (None, False, False, False)], [['tag', 'event', 'src', 'debuginfo', 'separate_src'], None, None, (None, False, False, False)],
], ],
'createImage' : [ 'createImage': [
[['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'inst_tree', 'opts'], None, None, (None,)], [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'inst_tree', 'opts'], None, None, (None,)],
], ],
'tagNotification' : [ 'tagNotification': [
[['recipients', 'is_successful', 'tag_info', 'from_info', 'build_info', 'user_info', 'ignore_success', 'failure_msg'], None, None, (None, '')], [['recipients', 'is_successful', 'tag_info', 'from_info', 'build_info', 'user_info', 'ignore_success', 'failure_msg'], None, None, (None, '')],
], ],
'buildArch' : [ 'buildArch': [
[['pkg', 'root', 'arch', 'keep_srpm', 'opts'], None, None, (None,)], [['pkg', 'root', 'arch', 'keep_srpm', 'opts'], None, None, (None,)],
], ],
'maven' : [ 'maven': [
[['url', 'target', 'opts'], None, None, (None,)], [['url', 'target', 'opts'], None, None, (None,)],
], ],
'waittest' : [ 'waittest': [
[['count', 'seconds'], None, None, (10,)], [['count', 'seconds'], None, None, (10,)],
], ],
'default' : [ 'default': [
[[], 'args', 'opts', None], [[], 'args', 'opts', None],
], ],
'shutdown' : [ 'shutdown': [
[[], None, None, None], [[], None, None, None],
], ],
'restartVerify' : [ 'restartVerify': [
[['task_id', 'host'], None, None, None], [['task_id', 'host'], None, None, None],
], ],
'someMethod' : [ 'someMethod': [
[[], 'args', None, None], [[], 'args', None, None],
], ],
'restart' : [ 'restart': [
[['host'], None, None, None], [['host'], None, None, None],
], ],
'fork' : [ 'fork': [
[['n', 'm'], None, None, (5, 37)], [['n', 'm'], None, None, (5, 37)],
], ],
'sleep' : [ 'sleep': [
[['n'], None, None, None], [['n'], None, None, None],
], ],
'dependantTask' : [ 'dependantTask': [
[['wait_list', 'task_list'], None, None, None], [['wait_list', 'task_list'], None, None, None],
], ],
'subtask' : [ 'subtask': [
[['n'], None, None, (4,)], [['n'], None, None, (4,)],
], ],
'restartHosts' : [ 'restartHosts': [
[['options'], None, None, (None,)], [['options'], None, None, (None,)],
], ],
'runroot' : [ 'runroot': [
[['root', 'arch', 'command', 'keep', 'packages', 'mounts', 'repo_id', 'skip_setarch', 'weight', 'upload_logs', 'new_chroot'], None, None, (False, [], [], None, False, None, None, False)], [['root', 'arch', 'command', 'keep', 'packages', 'mounts', 'repo_id', 'skip_setarch', 'weight', 'upload_logs', 'new_chroot'], None, None, (False, [], [], None, False, None, None, False)],
], ],
'distRepo' : [ 'distRepo': [
[['tag', 'repo_id', 'keys', 'task_opts'], None, None, None], [['tag', 'repo_id', 'keys', 'task_opts'], None, None, None],
], ],
'createdistrepo' : [ 'createdistrepo': [
[['tag', 'repo_id', 'arch', 'keys', 'opts'], None, None, None], [['tag', 'repo_id', 'arch', 'keys', 'opts'], None, None, None],
], ],
'saveFailedTree' : [ 'saveFailedTree': [
[['buildrootID', 'full'], None, None, (False,)], [['buildrootID', 'full'], None, None, (False,)],
], ],
} }
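Each entry in the signature table above is a list of [named parameters, *args name, **kwargs name, defaults tuple], which lets the task machinery turn a task's positional arglist back into named parameters. A minimal sketch of how one entry decodes; the decode helper and the tag name are hypothetical, and koji's real parser (in koji.tasks) also handles the **kwargs slot:

def decode(signature, arglist):
    # hypothetical decoder illustrating the signature format
    argnames, star_args, _star_kwargs, defaults = signature
    params = {}
    if defaults:
        # the defaults tuple pads the tail of the named-parameter list
        for name, value in zip(argnames[-len(defaults):], defaults):
            params[name] = value
    for name, value in zip(argnames, arglist):
        params[name] = value
    if star_args is not None:
        # extra positional args are collected under the *args name
        params[star_args] = list(arglist[len(argnames):])
    return params

sig = [['tag', 'event', 'src', 'debuginfo', 'separate_src'],
       None, None, (None, False, False, False)]
print(decode(sig, ['f30-build']))
# {'event': None, 'src': False, 'debuginfo': False,
#  'separate_src': False, 'tag': 'f30-build'}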
@ -278,7 +278,7 @@ class BaseTaskHandler(object):
Foreground = False Foreground = False
def __init__(self, id, method, params, session, options, workdir=None): def __init__(self, id, method, params, session, options, workdir=None):
self.id = id #task id self.id = id # task id
if method not in self.Methods: if method not in self.Methods:
raise koji.GenericError('method "%s" is not supported' % method) raise koji.GenericError('method "%s" is not supported' % method)
self.method = method self.method = method
@ -641,7 +641,7 @@ class SubtaskTask(BaseTaskHandler):
def handler(self, n=4): def handler(self, n=4):
if n > 0: if n > 0:
task_id = self.session.host.subtask(method='subtask', task_id = self.session.host.subtask(method='subtask',
arglist=[n-1], arglist=[n - 1],
label='foo', label='foo',
parent=self.id) parent=self.id)
self.wait(task_id) self.wait(task_id)
@ -715,14 +715,14 @@ class RestartHostsTask(BaseTaskHandler):
hostquery = {'enabled': True} hostquery = {'enabled': True}
if 'channel' in options: if 'channel' in options:
chan = self.session.getChannel(options['channel'], strict=True) chan = self.session.getChannel(options['channel'], strict=True)
hostquery['channelID']= chan['id'] hostquery['channelID'] = chan['id']
if 'arches' in options: if 'arches' in options:
hostquery['arches'] = options['arches'] hostquery['arches'] = options['arches']
hosts = self.session.listHosts(**hostquery) hosts = self.session.listHosts(**hostquery)
if not hosts: if not hosts:
raise koji.GenericError("No matching hosts") raise koji.GenericError("No matching hosts")
timeout = options.get('timeout', 3600*24) timeout = options.get('timeout', 3600 * 24)
# fire off the subtasks # fire off the subtasks
this_host = self.session.host.getID() this_host = self.session.host.getID()


@ -532,8 +532,8 @@ def eventFromOpts(session, opts):
if repo: if repo:
rinfo = session.repoInfo(repo) rinfo = session.repoInfo(repo)
if rinfo: if rinfo:
return {'id' : rinfo['create_event'], return {'id': rinfo['create_event'],
'ts' : rinfo['create_ts']} 'ts': rinfo['create_ts']}
return None return None
@ -664,7 +664,7 @@ class adler32_constructor(object):
return dup return dup
digest_size = 4 digest_size = 4
block_size = 1 #I think block_size = 1 # I think
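The digest_size and block_size attributes above are what let adler32_constructor quack like a hashlib object. A standalone sketch of the same idea using only zlib from the stdlib; this is an illustration, not koji's implementation:

import zlib

class Adler32(object):
    # hashlib-style attributes, mirroring the values above
    digest_size = 4
    block_size = 1

    def __init__(self, data=b''):
        self._sum = zlib.adler32(data)

    def update(self, data):
        # zlib.adler32 takes the running checksum as its second argument
        self._sum = zlib.adler32(data, self._sum)

    def hexdigest(self):
        return '%08x' % (self._sum & 0xffffffff)

h = Adler32(b'hello ')
h.update(b'world')
print(h.hexdigest())   # same value as zlib.adler32(b'hello world')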
def tsort(parts): def tsort(parts):


@ -196,7 +196,7 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
mock_cmd = ['chroot'] mock_cmd = ['chroot']
if new_chroot: if new_chroot:
mock_cmd.append('--new-chroot') mock_cmd.append('--new-chroot')
elif new_chroot is False: # None -> no option added elif new_chroot is False: # None -> no option added
mock_cmd.append('--old-chroot') mock_cmd.append('--old-chroot')
if skip_setarch: if skip_setarch:
            # we can't really skip it, but we can set it to the current one instead of the chroot one # we can't really skip it, but we can set it to the current one instead of the chroot one
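The hunk above distinguishes three values of new_chroot: True appends --new-chroot, False appends --old-chroot, and None adds nothing so mock's configured default wins. The tri-state pattern in isolation:

def chroot_args(new_chroot):
    # a sketch of the flag handling above
    if new_chroot:
        return ['--new-chroot']
    elif new_chroot is False:
        return ['--old-chroot']
    return []          # None: defer to mock's configured default

print(chroot_args(True), chroot_args(False), chroot_args(None))
# ['--new-chroot'] ['--old-chroot'] []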


@ -58,12 +58,12 @@ def handle_runroot(options, session, args):
else: else:
command = args[2:] command = args[2:]
try: try:
kwargs = { 'channel': opts.channel_override, kwargs = {'channel': opts.channel_override,
'packages': opts.package, 'packages': opts.package,
'mounts': opts.mount, 'mounts': opts.mount,
'repo_id': opts.repo_id, 'repo_id': opts.repo_id,
'skip_setarch': opts.skip_setarch, 'skip_setarch': opts.skip_setarch,
'weight': opts.weight } 'weight': opts.weight}
# Only pass this kwarg if it is true - this prevents confusing older # Only pass this kwarg if it is true - this prevents confusing older
# builders with a different function signature # builders with a different function signature
if opts.new_chroot is not None: if opts.new_chroot is not None:


@ -40,7 +40,7 @@ def handle_add_sidetag(options, session, args):
parser.error(_("Policy violation")) parser.error(_("Policy violation"))
if not opts.quiet: if not opts.quiet:
print (tag["name"]) print(tag["name"])
if opts.wait: if opts.wait:
args = ["--target", tag["name"]] args = ["--target", tag["name"]]
@ -88,4 +88,4 @@ def handle_list_sidetags(options, session, args):
user = opts.user user = opts.user
for tag in session.listSideTags(basetag=opts.basetag, user=user): for tag in session.listSideTags(basetag=opts.basetag, user=user):
print (tag["name"]) print(tag["name"])


@ -41,8 +41,8 @@ setup(
" interface."), " interface."),
license="LGPLv2 and GPLv2+", license="LGPLv2 and GPLv2+",
url="http://pagure.io/koji/", url="http://pagure.io/koji/",
author = 'Koji developers', author='Koji developers',
author_email = 'koji-devel@lists.fedorahosted.org', author_email='koji-devel@lists.fedorahosted.org',
classifiers=[ classifiers=[
"Development Status :: 5 - Production/Stable", "Development Status :: 5 - Production/Stable",
"Environment :: Console", "Environment :: Console",


@ -81,7 +81,7 @@ def get_options():
parser.add_option("--smtp-user", dest="smtp_user", metavar="USER", parser.add_option("--smtp-user", dest="smtp_user", metavar="USER",
help=_("specify smtp username for notifications")) help=_("specify smtp username for notifications"))
parser.add_option("--smtp-pass", dest="smtp_pass", metavar="PASSWORD", parser.add_option("--smtp-pass", dest="smtp_pass", metavar="PASSWORD",
help=optparse.SUPPRESS_HELP) # do not allow passwords on a command line help=optparse.SUPPRESS_HELP) # do not allow passwords on a command line
parser.add_option("--no-mail", action='store_false', default=True, dest="mail", parser.add_option("--no-mail", action='store_false', default=True, dest="mail",
help=_("don't send notifications")) help=_("don't send notifications"))
parser.add_option("--send-mail", action='store_true', dest="mail", parser.add_option("--send-mail", action='store_true', dest="mail",
@ -93,7 +93,7 @@ def get_options():
parser.add_option("--email-template", default="/etc/koji-gc/email.tpl", parser.add_option("--email-template", default="/etc/koji-gc/email.tpl",
help=_("notification template")) help=_("notification template"))
parser.add_option("--action", help=_("action(s) to take")) parser.add_option("--action", help=_("action(s) to take"))
parser.add_option("--delay", metavar="INTERVAL", default = '5 days', parser.add_option("--delay", metavar="INTERVAL", default='5 days',
help="time before eligible builds are placed in trashcan") help="time before eligible builds are placed in trashcan")
parser.add_option("--grace-period", default='4 weeks', metavar="INTERVAL", parser.add_option("--grace-period", default='4 weeks', metavar="INTERVAL",
help="time that builds are held in trashcan") help="time that builds are held in trashcan")
@ -184,7 +184,7 @@ def get_options():
# figure out actions # figure out actions
actions = ('prune', 'trash', 'delete', 'salvage') actions = ('prune', 'trash', 'delete', 'salvage')
if options.action: if options.action:
options.action = options.action.lower().replace(',',' ').split() options.action = options.action.lower().replace(',', ' ').split()
for x in options.action: for x in options.action:
if x not in actions: if x not in actions:
parser.error(_("Invalid action: %s") % x) parser.error(_("Invalid action: %s") % x)
@ -193,7 +193,7 @@ def get_options():
# split patterns for unprotected keys # split patterns for unprotected keys
if options.unprotected_keys: if options.unprotected_keys:
options.unprotected_key_patterns = options.unprotected_keys.replace(',',' ').split() options.unprotected_key_patterns = options.unprotected_keys.replace(',', ' ').split()
else: else:
options.unprotected_key_patterns = [] options.unprotected_key_patterns = []
@ -208,7 +208,7 @@ def get_options():
options.key_aliases[parts[0].upper()] = parts[1] options.key_aliases[parts[0].upper()] = parts[1]
except ValueError as e: except ValueError as e:
print(e) print(e)
parser.error(_("Invalid key alias data in config: %s") % config.get('main','key_aliases')) parser.error(_("Invalid key alias data in config: %s") % config.get('main', 'key_aliases'))
# parse time intervals # parse time intervals
for key in ('delay', 'grace_period'): for key in ('delay', 'grace_period'):
@ -274,11 +274,11 @@ def check_package(name):
return True return True
time_units = { time_units = {
'second' : 1, 'second': 1,
'minute' : 60, 'minute': 60,
'hour' : 3600, 'hour': 3600,
'day' : 86400, 'day': 86400,
'week' : 604800, 'week': 604800,
} }
time_unit_aliases = [ time_unit_aliases = [
# [unit, alias, alias, ...] # [unit, alias, alias, ...]
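The time_units table, together with time_unit_aliases (whose rows are cut off by the hunk boundary above), backs parsing of interval options such as the '5 days' and '4 weeks' defaults into seconds. A rough sketch of such a parser, assuming each alias row starts with the canonical unit; the alias rows and the parse_interval name are illustrative, not koji-gc's exact code:

time_units = {'second': 1, 'minute': 60, 'hour': 3600,
              'day': 86400, 'week': 604800}
# illustrative alias rows; the real table may differ
time_unit_aliases = [['week', 'weeks'], ['day', 'days'],
                     ['hour', 'hours'], ['minute', 'minutes'],
                     ['second', 'seconds']]

def parse_interval(text):
    number, unit = text.split()
    unit = unit.lower()
    for row in time_unit_aliases:
        if unit in row:
            unit = row[0]      # fold an alias back to the canonical unit
            break
    return int(number) * time_units[unit]

print(parse_interval('5 days'), parse_interval('4 weeks'))
# 432000 2419200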
@ -405,14 +405,14 @@ def send_warning_notice(owner_name, builds):
with open(options.email_template, 'r') as f: with open(options.email_template, 'r') as f:
tpl = Template(f.read()) tpl = Template(f.read())
fmt="""\ fmt = """\
Build: %%(name)s-%%(version)s-%%(release)s Build: %%(name)s-%%(version)s-%%(release)s
%s/buildinfo?buildID=%%(id)i""" % options.weburl %s/buildinfo?buildID=%%(id)i""" % options.weburl
middle = '\n\n'.join([fmt % b for b in builds]) middle = '\n\n'.join([fmt % b for b in builds])
msg = MIMEText.MIMEText(tpl.safe_substitute( msg = MIMEText.MIMEText(tpl.safe_substitute(
owner = owner_name, owner=owner_name,
builds = middle, builds=middle,
)) ))
if len(builds) == 1: if len(builds) == 1:
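The notification body above comes from a string.Template read from the --email-template file; safe_substitute fills $owner and $builds and leaves any unknown placeholder untouched instead of raising KeyError. A minimal stdlib-only sketch with made-up template text and values:

from string import Template

tpl = Template('Hello $owner,\n\nThese builds are marked for deletion:\n\n$builds\n')
print(tpl.safe_substitute(owner='alice',
                          builds='Build: foo-1.0-1\n<weburl>/buildinfo?buildID=42'))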
@ -420,7 +420,7 @@ Build: %%(name)s-%%(version)s-%%(release)s
else: else:
msg['Subject'] = "%i builds marked for deletion" % len(builds) msg['Subject'] = "%i builds marked for deletion" % len(builds)
msg['From'] = options.from_addr msg['From'] = options.from_addr
msg['To'] = "%s@%s" % (owner_name, options.email_domain) #XXX! msg['To'] = "%s@%s" % (owner_name, options.email_domain) # XXX!
msg['X-Koji-Builder'] = owner_name msg['X-Koji-Builder'] = owner_name
if options.test: if options.test:
if options.debug: if options.debug:
@ -528,7 +528,7 @@ def handle_trash():
else: else:
age = time.time() - ts age = time.time() - ts
else: else:
history = [(h['revoke_event'],h) for h in history] history = [(h['revoke_event'], h) for h in history]
last = max(history)[1] last = max(history)[1]
if not last['revoke_event']: if not last['revoke_event']:
# this might happen if the build was tagged just now # this might happen if the build was tagged just now
@ -690,7 +690,7 @@ def handle_delete(just_salvage=False):
pprint.pprint(binfo) pprint.pprint(binfo)
pprint.pprint(history) pprint.pprint(history)
continue continue
assert len(current) == 1 #see db constraint assert len(current) == 1 # see db constraint
current = current[0] current = current[0]
age = time.time() - current['create_ts'] age = time.time() - current['create_ts']
if age < grace_period: if age < grace_period:
@ -705,8 +705,8 @@ def handle_delete(just_salvage=False):
if options.test: if options.test:
print("Would have deleted build from trashcan: %s" % binfo['nvr']) print("Would have deleted build from trashcan: %s" % binfo['nvr'])
else: else:
print("Deleting build: %s" % binfo['nvr']) print("Deleting build: %s" % binfo['nvr'])
mcall.untagBuildBypass(trashcan_tag, binfo['id']) mcall.untagBuildBypass(trashcan_tag, binfo['id'])
mcall.deleteBuild(binfo['id']) mcall.deleteBuild(binfo['id'])
for binfo, result in six.moves.zip(continuing, mcall.call_all()): for binfo, result in six.moves.zip(continuing, mcall.call_all()):
@ -898,14 +898,14 @@ def handle_prune():
# get sig data # get sig data
nvr = "%(name)s-%(version)s-%(release)s" % entry nvr = "%(name)s-%(version)s-%(release)s" % entry
data = { data = {
'tagname' : tagname, 'tagname': tagname,
'pkgname' : pkg, 'pkgname': pkg,
'order': order - skipped, 'order': order - skipped,
'ts' : entry['create_ts'], 'ts': entry['create_ts'],
'nvr' : nvr, 'nvr': nvr,
} }
data = LazyDict(data) data = LazyDict(data)
data['keys'] = LazyValue(get_build_sigs, (entry['build_id'],), {'cache':True}) data['keys'] = LazyValue(get_build_sigs, (entry['build_id'],), {'cache': True})
data['volname'] = LazyValue(lambda x: session.getBuild(x).get('volume_name'), data['volname'] = LazyValue(lambda x: session.getBuild(x).get('volume_name'),
(entry['build_id'],), cache=True) (entry['build_id'],), cache=True)
build_ids[nvr] = entry['build_id'] build_ids[nvr] = entry['build_id']
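The policy data above is built as a LazyDict so that expensive hub calls (signature data, volume name) only run if a policy rule actually reads those keys. A simplified sketch of the idea; koji's real LazyValue lives in koji.util and also supports caching the computed result:

class LazyValue(object):
    """Defer a computation until the value is actually read (a sketch)."""
    def __init__(self, func, args=(), kwargs=None):
        self.func, self.args, self.kwargs = func, args, kwargs or {}

    def get(self):
        return self.func(*self.args, **self.kwargs)

class LazyDict(dict):
    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        return value.get() if isinstance(value, LazyValue) else value

data = LazyDict({'nvr': 'foo-1.0-1'})
data['keys'] = LazyValue(lambda build_id: ['deadbeef'], (42,))
print(data['nvr'], data['keys'])   # the lambda runs only on this read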
@ -923,7 +923,7 @@ def handle_prune():
print("Would have untagged %s from %s" % (nvr, tagname)) print("Would have untagged %s from %s" % (nvr, tagname))
untagged.setdefault(nvr, {})[tagname] = 1 untagged.setdefault(nvr, {})[tagname] = 1
else: else:
print("Untagging build %s from %s" % (nvr, tagname)) print("Untagging build %s from %s" % (nvr, tagname))
try: try:
session.untagBuildBypass(taginfo['id'], entry['build_id'], force=bypass) session.untagBuildBypass(taginfo['id'], entry['build_id'], force=bypass)
untagged.setdefault(nvr, {})[tagname] = 1 untagged.setdefault(nvr, {})[tagname] = 1
@ -935,7 +935,7 @@ def handle_prune():
print("Attempting to purge %i builds" % len(untagged)) print("Attempting to purge %i builds" % len(untagged))
for nvr in untagged: for nvr in untagged:
build_id = build_ids[nvr] build_id = build_ids[nvr]
tags = [t['name'] for t in session.listTags(build_id, perms=False)] tags = [t['name'] for t in session.listTags(build_id, perms=False)]
if options.test: if options.test:
                # filtered out the tags we would have dropped above # filtered out the tags we would have dropped above
tags = [t for t in tags if t not in untagged[nvr]] tags = [t for t in tags if t not in untagged[nvr]]
@ -958,7 +958,7 @@ def handle_prune():
if options.test: if options.test:
print("Would have deleted build: %s" % nvr) print("Would have deleted build: %s" % nvr)
else: else:
print("Deleting untagged build: %s" % nvr) print("Deleting untagged build: %s" % nvr)
try: try:
session.deleteBuild(build_id, strict=False) session.deleteBuild(build_id, strict=False)
except (six.moves.xmlrpc_client.Fault, koji.GenericError) as e: except (six.moves.xmlrpc_client.Fault, koji.GenericError) as e:

View file

@ -51,7 +51,7 @@ except ImportError: # pragma: no cover
# koji.fp.o keeps stalling, probably network errors... # koji.fp.o keeps stalling, probably network errors...
# better to time out than to stall # better to time out than to stall
socket.setdefaulttimeout(180) #XXX - too short? socket.setdefaulttimeout(180) # XXX - too short?
logfile = None logfile = None
@ -190,11 +190,11 @@ def get_options():
return options, args return options, args
time_units = { time_units = {
'second' : 1, 'second': 1,
'minute' : 60, 'minute': 60,
'hour' : 3600, 'hour': 3600,
'day' : 86400, 'day': 86400,
'week' : 604800, 'week': 604800,
} }
time_unit_aliases = [ time_unit_aliases = [
# [unit, alias, alias, ...] # [unit, alias, alias, ...]
@ -282,8 +282,8 @@ def activate_session(session):
pass pass
elif options.auth_cert and options.serverca: elif options.auth_cert and options.serverca:
# convert to absolute paths # convert to absolute paths
options.auth_cert = os.path.expanduser(options.auth_cert) options.auth_cert = os.path.expanduser(options.auth_cert)
options.serverca = os.path.expanduser(options.serverca) options.serverca = os.path.expanduser(options.serverca)
if os.path.isfile(options.auth_cert): if os.path.isfile(options.auth_cert):
# authenticate using SSL client cert # authenticate using SSL client cert
@ -358,7 +358,7 @@ class TrackedBuild(object):
if rinfo['arch'] == 'src': if rinfo['arch'] == 'src':
self.srpm = rinfo self.srpm = rinfo
self.getExtraArches() self.getExtraArches()
self.getDeps() #sets deps, br_tag, base, order, (maybe state) self.getDeps() # sets deps, br_tag, base, order, (maybe state)
def updateState(self): def updateState(self):
"""Update state from local hub """Update state from local hub
@ -475,10 +475,10 @@ class TrackedBuild(object):
return return
buildroots.sort() buildroots.sort()
self.order = buildroots[-1] self.order = buildroots[-1]
seen = {} #used to avoid scanning the same buildroot twice seen = {} # used to avoid scanning the same buildroot twice
builds = {} #track which builds we need for a rebuild builds = {} # track which builds we need for a rebuild
bases = {} #track base install for buildroots bases = {} # track base install for buildroots
tags = {} #track buildroot tag(s) tags = {} # track buildroot tag(s)
remote.multicall = True remote.multicall = True
unpack = [] unpack = []
for br_id in buildroots: for br_id in buildroots:
@ -534,7 +534,7 @@ class TrackedBuild(object):
if len(builds) == 0: if len(builds) == 0:
self.setState("noroot") self.setState("noroot")
self.deps = builds self.deps = builds
self.revised_deps = None #BuildTracker will set this later self.revised_deps = None # BuildTracker will set this later
self.br_tag = tag self.br_tag = tag
self.base = base self.base = base
@ -714,20 +714,20 @@ class BuildTracker(object):
head = " " * depth head = " " * depth
for ignored in self.ignorelist: for ignored in self.ignorelist:
if (build.name == ignored) or fnmatch.fnmatch(build.name, ignored): if (build.name == ignored) or fnmatch.fnmatch(build.name, ignored):
log ("%sIgnored Build: %s%s" % (head, build.nvr, tail)) log("%sIgnored Build: %s%s" % (head, build.nvr, tail))
build.setState('ignore') build.setState('ignore')
return build return build
check = self.checkFilter(build, grey=None) check = self.checkFilter(build, grey=None)
if check is None: if check is None:
# greylisted builds are ok as deps, but not primary builds # greylisted builds are ok as deps, but not primary builds
if depth == 0: if depth == 0:
log ("%sGreylisted build %s%s" % (head, build.nvr, tail)) log("%sGreylisted build %s%s" % (head, build.nvr, tail))
build.setState('grey') build.setState('grey')
return build return build
# get rid of 'grey' state (filter will not be checked again) # get rid of 'grey' state (filter will not be checked again)
build.updateState() build.updateState()
elif not check: elif not check:
log ("%sBlocked build %s%s" % (head, build.nvr, tail)) log("%sBlocked build %s%s" % (head, build.nvr, tail))
build.setState('blocked') build.setState('blocked')
return build return build
        # make sure we don't have the build name protected # make sure we don't have the build name protected
@ -737,29 +737,29 @@ class BuildTracker(object):
if replace: if replace:
build.substitute = replace build.substitute = replace
if depth > 0: if depth > 0:
log ("%sDep replaced: %s->%s" % (head, build.nvr, replace)) log("%sDep replaced: %s->%s" % (head, build.nvr, replace))
return build return build
if options.prefer_new and (depth > 0) and (tag is not None) and not (build.state == "common"): if options.prefer_new and (depth > 0) and (tag is not None) and not (build.state == "common"):
latestBuild = self.newerBuild(build, tag) latestBuild = self.newerBuild(build, tag)
if latestBuild != None: if latestBuild != None:
build.substitute = latestBuild.nvr build.substitute = latestBuild.nvr
log ("%sNewer build replaced: %s->%s" % (head, build.nvr, latestBuild.nvr)) log("%sNewer build replaced: %s->%s" % (head, build.nvr, latestBuild.nvr))
return build return build
else: else:
log ("%sProtected Build: %s" % (head, build.nvr)) log("%sProtected Build: %s" % (head, build.nvr))
if build.state == "common": if build.state == "common":
# we're good # we're good
if build.rebuilt: if build.rebuilt:
log ("%sCommon build (rebuilt) %s%s" % (head, build.nvr, tail)) log("%sCommon build (rebuilt) %s%s" % (head, build.nvr, tail))
else: else:
log ("%sCommon build %s%s" % (head, build.nvr, tail)) log("%sCommon build %s%s" % (head, build.nvr, tail))
elif build.state == 'pending': elif build.state == 'pending':
log ("%sRebuild in progress: %s%s" % (head, build.nvr, tail)) log("%sRebuild in progress: %s%s" % (head, build.nvr, tail))
elif build.state == "broken": elif build.state == "broken":
# The build already exists locally, but is somehow invalid. # The build already exists locally, but is somehow invalid.
# We should not replace it automatically. An admin can reset it # We should not replace it automatically. An admin can reset it
# if that is the correct thing. A substitution might also be in order # if that is the correct thing. A substitution might also be in order
log ("%sWarning: build exists, but is invalid: %s%s" % (head, build.nvr, tail)) log("%sWarning: build exists, but is invalid: %s%s" % (head, build.nvr, tail))
# #
# !! Cases where importing a noarch is /not/ ok must occur # !! Cases where importing a noarch is /not/ ok must occur
# before this point # before this point
@ -767,30 +767,30 @@ class BuildTracker(object):
elif (options.import_noarch or options.import_noarch_only) and build.isNoarch(): elif (options.import_noarch or options.import_noarch_only) and build.isNoarch():
self.importBuild(build, tag) self.importBuild(build, tag)
elif options.import_noarch_only and not build.isNoarch(): elif options.import_noarch_only and not build.isNoarch():
log ("%sSkipping archful build: %s" % (head, build.nvr)) log("%sSkipping archful build: %s" % (head, build.nvr))
elif build.state == "noroot": elif build.state == "noroot":
# Can't rebuild it, this is what substitutions are for # Can't rebuild it, this is what substitutions are for
log ("%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail)) log("%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail))
elif build.state == 'brokendeps': elif build.state == 'brokendeps':
# should not be possible at this point # should not be possible at this point
log ("Error: build reports brokendeps state before dep scan") log("Error: build reports brokendeps state before dep scan")
elif build.state == "missing": elif build.state == "missing":
# scan its deps # scan its deps
log ("%sMissing build %s%s. Scanning deps..." % (head, build.nvr, tail)) log("%sMissing build %s%s. Scanning deps..." % (head, build.nvr, tail))
newdeps = [] newdeps = []
# include extra local builds as deps. # include extra local builds as deps.
if self.includelist: if self.includelist:
for dep in self.includelist: for dep in self.includelist:
info = session.getBuild(dep) info = session.getBuild(dep)
if info: if info:
log ("%s Adding local Dep %s%s" % (head, dep, tail)) log("%s Adding local Dep %s%s" % (head, dep, tail))
extradep = LocalBuild(info) extradep = LocalBuild(info)
newdeps.append(extradep) newdeps.append(extradep)
else: else:
log ("%s Warning: could not find build for %s" % (head, dep)) log("%s Warning: could not find build for %s" % (head, dep))
# don't actually set build.revised_deps until we finish the dep scan # don't actually set build.revised_deps until we finish the dep scan
for dep_id in build.deps: for dep_id in build.deps:
dep = self.scanBuild(dep_id, from_build=build, depth=depth+1, tag=tag) dep = self.scanBuild(dep_id, from_build=build, depth=depth + 1, tag=tag)
if dep.name in self.ignorelist: if dep.name in self.ignorelist:
                    # we are not done dep solving yet, but we don't want this dep in our buildroot # we are not done dep solving yet, but we don't want this dep in our buildroot
continue continue
@ -798,10 +798,10 @@ class BuildTracker(object):
if dep.substitute: if dep.substitute:
dep2 = self.getSubstitute(dep.substitute) dep2 = self.getSubstitute(dep.substitute)
if isinstance(dep2, TrackedBuild): if isinstance(dep2, TrackedBuild):
self.scanBuild(dep2.id, from_build=build, depth=depth+1, tag=tag) self.scanBuild(dep2.id, from_build=build, depth=depth + 1, tag=tag)
elif dep2 is None: elif dep2 is None:
# dep is missing on both local and remote # dep is missing on both local and remote
log ("%sSubstitute dep unavailable: %s" % (head, dep2.nvr)) log("%sSubstitute dep unavailable: %s" % (head, dep2.nvr))
# no point in continuing # no point in continuing
break break
# otherwise dep2 should be LocalBuild instance # otherwise dep2 should be LocalBuild instance
@ -809,7 +809,7 @@ class BuildTracker(object):
elif dep.state in ('broken', 'brokendeps', 'noroot', 'blocked'): elif dep.state in ('broken', 'brokendeps', 'noroot', 'blocked'):
# no point in continuing # no point in continuing
build.setState('brokendeps') build.setState('brokendeps')
log ("%sCan't rebuild %s, %s is %s" % (head, build.nvr, dep.nvr, dep.state)) log("%sCan't rebuild %s, %s is %s" % (head, build.nvr, dep.nvr, dep.state))
newdeps = None newdeps = None
break break
else: else:
@ -837,11 +837,11 @@ class BuildTracker(object):
if options.first_one: if options.first_one:
return return
except (socket.timeout, socket.error): except (socket.timeout, socket.error):
log ("retry") log("retry")
continue continue
break break
else: else:
log ("Error: unable to scan %(name)s-%(version)s-%(release)s" % build) log("Error: unable to scan %(name)s-%(version)s-%(release)s" % build)
continue continue
def _importURL(self, url, fn): def _importURL(self, url, fn):
@ -853,8 +853,8 @@ class BuildTracker(object):
old_umask = os.umask(0o02) old_umask = os.umask(0o02)
try: try:
koji.ensuredir(os.path.dirname(dst)) koji.ensuredir(os.path.dirname(dst))
os.chown(os.path.dirname(dst), 48, 48) #XXX - hack os.chown(os.path.dirname(dst), 48, 48) # XXX - hack
log ("Downloading %s to %s" % (url, dst)) log("Downloading %s to %s" % (url, dst))
fsrc = urllib2.urlopen(url) fsrc = urllib2.urlopen(url)
fdst = open(fn, 'w') fdst = open(fn, 'w')
shutil.copyfileobj(fsrc, fdst) shutil.copyfileobj(fsrc, fdst)
@ -867,24 +867,24 @@ class BuildTracker(object):
# for now, though, just use uploadWrapper # for now, though, just use uploadWrapper
koji.ensuredir(options.workpath) koji.ensuredir(options.workpath)
dst = "%s/%s" % (options.workpath, fn) dst = "%s/%s" % (options.workpath, fn)
log ("Downloading %s to %s..." % (url, dst)) log("Downloading %s to %s..." % (url, dst))
fsrc = urllib2.urlopen(url) fsrc = urllib2.urlopen(url)
fdst = open(dst, 'w') fdst = open(dst, 'w')
shutil.copyfileobj(fsrc, fdst) shutil.copyfileobj(fsrc, fdst)
fsrc.close() fsrc.close()
fdst.close() fdst.close()
log ("Uploading %s..." % dst) log("Uploading %s..." % dst)
session.uploadWrapper(dst, serverdir, blocksize=65536) session.uploadWrapper(dst, serverdir, blocksize=65536)
session.importRPM(serverdir, fn) session.importRPM(serverdir, fn)
def importBuild(self, build, tag=None): def importBuild(self, build, tag=None):
'''import a build from remote hub''' '''import a build from remote hub'''
if not build.srpm: if not build.srpm:
log ("No srpm for build %s, skipping import" % build.nvr) log("No srpm for build %s, skipping import" % build.nvr)
# TODO - support no-src imports here # TODO - support no-src imports here
return False return False
if not options.remote_topurl: if not options.remote_topurl:
log ("Skipping import of %s, remote_topurl not specified" % build.nvr) log("Skipping import of %s, remote_topurl not specified" % build.nvr)
return False return False
pathinfo = koji.PathInfo(options.remote_topurl) pathinfo = koji.PathInfo(options.remote_topurl)
build_url = pathinfo.build(build.info) build_url = pathinfo.build(build.info)
@ -954,7 +954,7 @@ class BuildTracker(object):
else: else:
parents = session.getInheritanceData(taginfo['id']) parents = session.getInheritanceData(taginfo['id'])
if parents: if parents:
log ("Warning: shadow build tag has inheritance") log("Warning: shadow build tag has inheritance")
# check package list # check package list
pkgs = {} pkgs = {}
for pkg in session.listPackages(tagID=taginfo['id']): for pkg in session.listPackages(tagID=taginfo['id']):
@ -1010,7 +1010,7 @@ class BuildTracker(object):
build_group = group build_group = group
else: else:
# we should have no other groups but build # we should have no other groups but build
log ("Warning: found stray group: %s" % group) log("Warning: found stray group: %s" % group)
drop_groups.append(group['name']) drop_groups.append(group['name'])
if build_group: if build_group:
# fix build group package list based on base of build to shadow # fix build group package list based on base of build to shadow
@ -1021,7 +1021,7 @@ class BuildTracker(object):
# no group deps needed/allowed # no group deps needed/allowed
drop_deps = [(g['name'], 1) for g in build_group['grouplist']] drop_deps = [(g['name'], 1) for g in build_group['grouplist']]
if drop_deps: if drop_deps:
log ("Warning: build group had deps: %r" % build_group) log("Warning: build group had deps: %r" % build_group)
else: else:
add_pkgs = build.base add_pkgs = build.base
drop_pkgs = [] drop_pkgs = []
@ -1061,17 +1061,17 @@ class BuildTracker(object):
# [?] use remote SCM url (if avail)? # [?] use remote SCM url (if avail)?
src = build.getSource() src = build.getSource()
if not src: if not src:
log ("Couldn't get source for %s" % build.nvr) log("Couldn't get source for %s" % build.nvr)
return None return None
# wait for repo task # wait for repo task
log ("Waiting on newRepo task %i" % task_id) log("Waiting on newRepo task %i" % task_id)
while True: while True:
tinfo = session.getTaskInfo(task_id) tinfo = session.getTaskInfo(task_id)
tstate = koji.TASK_STATES[tinfo['state']] tstate = koji.TASK_STATES[tinfo['state']]
if tstate == 'CLOSED': if tstate == 'CLOSED':
break break
elif tstate in ('CANCELED', 'FAILED'): elif tstate in ('CANCELED', 'FAILED'):
log ("Error: failed to generate repo") log("Error: failed to generate repo")
return None return None
# add a timeout? # add a timeout?
# TODO ...and verify repo # TODO ...and verify repo
@ -1127,12 +1127,12 @@ class BuildTracker(object):
states = sorted(self.state_idx.keys()) states = sorted(self.state_idx.keys())
parts = ["%s: %i" % (s, len(self.state_idx[s])) for s in states] parts = ["%s: %i" % (s, len(self.state_idx[s])) for s in states]
parts.append("total: %i" % N) parts.append("total: %i" % N)
log (' '.join(parts)) log(' '.join(parts))
def _print_builds(self, mylist): def _print_builds(self, mylist):
"""small helper function for output""" """small helper function for output"""
for build in mylist: for build in mylist:
log (" %s (%s)" % (build.nvr, build.state)) log(" %s (%s)" % (build.nvr, build.state))
def checkJobs(self, tag=None): def checkJobs(self, tag=None):
"""Check outstanding jobs. Return true if anything changes""" """Check outstanding jobs. Return true if anything changes"""
@ -1140,31 +1140,31 @@ class BuildTracker(object):
for build_id, build in self.state_idx['pending'].items(): for build_id, build in self.state_idx['pending'].items():
# check pending builds # check pending builds
if not build.task_id: if not build.task_id:
log ("No task id recorded for %s" % build.nvr) log("No task id recorded for %s" % build.nvr)
build.updateState() build.updateState()
ret = True ret = True
info = session.getTaskInfo(build.task_id) info = session.getTaskInfo(build.task_id)
if not info: if not info:
log ("No such task: %i (build %s)" % (build.task_id, build.nvr)) log("No such task: %i (build %s)" % (build.task_id, build.nvr))
build.updateState() build.updateState()
ret = True ret = True
continue continue
state = koji.TASK_STATES[info['state']] state = koji.TASK_STATES[info['state']]
if state in ('CANCELED', 'FAILED'): if state in ('CANCELED', 'FAILED'):
log ("Task %i is %s (build %s)" % (build.task_id, state, build.nvr)) log("Task %i is %s (build %s)" % (build.task_id, state, build.nvr))
# we have to set the state to broken manually (updateState will mark # we have to set the state to broken manually (updateState will mark
# a failed build as missing) # a failed build as missing)
build.setState('broken') build.setState('broken')
ret = True ret = True
elif state == 'CLOSED': elif state == 'CLOSED':
log ("Task %i complete (build %s)" % (build.task_id, build.nvr)) log("Task %i complete (build %s)" % (build.task_id, build.nvr))
if options.tag_build and not tag == None: if options.tag_build and not tag == None:
self.tagSuccessful(build.nvr, tag) self.tagSuccessful(build.nvr, tag)
build.updateState() build.updateState()
ret = True ret = True
if build.state != 'common': if build.state != 'common':
log ("Task %i finished, but %s still missing" \ log("Task %i finished, but %s still missing" \
% (build.task_id, build.nvr)) % (build.task_id, build.nvr))
return ret return ret
def checkBuildDeps(self, build): def checkBuildDeps(self, build):
@ -1175,7 +1175,7 @@ class BuildTracker(object):
problem = [x for x in build.revised_deps problem = [x for x in build.revised_deps
if x.state in ('broken', 'brokendeps', 'noroot', 'blocked')] if x.state in ('broken', 'brokendeps', 'noroot', 'blocked')]
if problem: if problem:
log ("Can't rebuild %s, missing %i deps" % (build.nvr, len(problem))) log("Can't rebuild %s, missing %i deps" % (build.nvr, len(problem)))
build.setState('brokendeps') build.setState('brokendeps')
self._print_builds(problem) self._print_builds(problem)
return False return False
@ -1201,7 +1201,7 @@ class BuildTracker(object):
if not self.checkBuildDeps(build): if not self.checkBuildDeps(build):
continue continue
# otherwise, we should be good to rebuild # otherwise, we should be good to rebuild
log ("rebuild: %s" % build.nvr) log("rebuild: %s" % build.nvr)
task_id = self.rebuild(build) task_id = self.rebuild(build)
ret = True ret = True
if options.test: if options.test:
@ -1209,7 +1209,7 @@ class BuildTracker(object):
build.setState('common') build.setState('common')
elif not task_id: elif not task_id:
# something went wrong setting up the rebuild # something went wrong setting up the rebuild
log ("Did not get a task for %s" % build.nvr) log("Did not get a task for %s" % build.nvr)
build.setState('broken') build.setState('broken')
else: else:
# build might not show up as 'BUILDING' immediately, so we # build might not show up as 'BUILDING' immediately, so we
@ -1218,13 +1218,13 @@ class BuildTracker(object):
build.setState('pending') build.setState('pending')
if options.max_jobs and len(self.state_idx['pending']) >= options.max_jobs: if options.max_jobs and len(self.state_idx['pending']) >= options.max_jobs:
if options.debug: if options.debug:
log ("Maximum number of jobs reached.") log("Maximum number of jobs reached.")
break break
return ret return ret
def runRebuilds(self, tag=None): def runRebuilds(self, tag=None):
"""Rebuild missing builds""" """Rebuild missing builds"""
log ("Determining rebuild order") log("Determining rebuild order")
# using self.state_idx to track build states # using self.state_idx to track build states
# make sure state_idx has at least these states # make sure state_idx has at least these states
initial_avail = len(self.state_idx['common']) initial_avail = len(self.state_idx['common'])
@ -1240,16 +1240,16 @@ class BuildTracker(object):
time.sleep(30) time.sleep(30)
continue continue
self.report_brief() self.report_brief()
log ("Rebuilt %i builds" % (len(self.state_idx['common']) - initial_avail)) log("Rebuilt %i builds" % (len(self.state_idx['common']) - initial_avail))
def tagSuccessful(self, nvr, tag): def tagSuccessful(self, nvr, tag):
"""tag completed builds into final tags""" """tag completed builds into final tags"""
# TODO: check if there are other reasons why tagging may fail and handle them # TODO: check if there are other reasons why tagging may fail and handle them
try: try:
session.tagBuildBypass(tag, nvr) session.tagBuildBypass(tag, nvr)
log ("tagged %s to %s" % (nvr, tag)) log("tagged %s to %s" % (nvr, tag))
except koji.TagError: except koji.TagError:
log ("NOTICE: %s already tagged in %s" % (nvr, tag)) log("NOTICE: %s already tagged in %s" % (nvr, tag))
def main(args): def main(args):
@ -1263,11 +1263,11 @@ def main(args):
if options.logfile: if options.logfile:
filename = options.logfile filename = options.logfile
try: try:
logfile = os.open(filename,os.O_CREAT|os.O_RDWR|os.O_APPEND, 0o777) logfile = os.open(filename, os.O_CREAT | os.O_RDWR | os.O_APPEND, 0o777)
except: except:
logfile = None logfile = None
if logfile is not None: if logfile is not None:
log ("logging to %s" % filename) log("logging to %s" % filename)
os.write(logfile, "\n\n========================================================================\n") os.write(logfile, "\n\n========================================================================\n")
if options.build: if options.build:
@ -1275,10 +1275,10 @@ def main(args):
tracker.scanBuild(binfo['id'], tag=tag) tracker.scanBuild(binfo['id'], tag=tag)
else: else:
if tag is None: if tag is None:
log ("Tag is required") log("Tag is required")
return return
else: else:
log ("Working on tag %s" % (tag)) log("Working on tag %s" % (tag))
tracker.scanTag(tag) tracker.scanTag(tag)
tracker.report() tracker.report()
tracker.runRebuilds(tag) tracker.runRebuilds(tag)


@ -48,7 +48,7 @@ def getTag(session, tag, event=None):
cache = tag_cache cache = tag_cache
now = time.time() now = time.time()
if (tag, event) in cache: if (tag, event) in cache:
ts, info = cache[(tag,event)] ts, info = cache[(tag, event)]
if now - ts < 600: if now - ts < 600:
# use the cache # use the cache
return info return info
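getTag above caches hub lookups keyed by the (tag, event) pair and trusts an entry for 600 seconds. The same pattern in isolation; fetch here is a stand-in for the session call:

import time

_cache = {}

def cached(key, fetch, ttl=600):
    now = time.time()
    if key in _cache:
        ts, info = _cache[key]
        if now - ts < ttl:
            return info            # fresh enough, skip the hub round trip
    info = fetch(key)
    _cache[key] = (now, info)
    return info

print(cached(('f30-build', None), lambda key: {'name': key[0]}))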
@ -84,7 +84,7 @@ class ManagedRepo(object):
if self.current: if self.current:
order = self.session.getFullInheritance(self.tag_id, event=self.event_id) order = self.session.getFullInheritance(self.tag_id, event=self.event_id)
# order may contain same tag more than once # order may contain same tag more than once
tags = {self.tag_id : 1} tags = {self.tag_id: 1}
for x in order: for x in order:
tags[x['parent_id']] = 1 tags[x['parent_id']] = 1
self.taglist = to_list(tags.keys()) self.taglist = to_list(tags.keys())
@ -348,11 +348,11 @@ class RepoManager(object):
# we're already tracking it # we're already tracking it
if repo.state != data['state']: if repo.state != data['state']:
self.logger.info('State changed for repo %s: %s -> %s' self.logger.info('State changed for repo %s: %s -> %s'
%(repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']])) % (repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']]))
repo.state = data['state'] repo.state = data['state']
else: else:
self.logger.info('Found repo %s, state=%s' self.logger.info('Found repo %s, state=%s'
%(repo_id, koji.REPO_STATES[data['state']])) % (repo_id, koji.REPO_STATES[data['state']]))
repo = ManagedRepo(self, data) repo = ManagedRepo(self, data)
self.repos[repo_id] = repo self.repos[repo_id] = repo
if not getTag(self.session, repo.tag_id) and not repo.expired(): if not getTag(self.session, repo.tag_id) and not repo.expired():
@ -516,7 +516,7 @@ class RepoManager(object):
# use the cache # use the cache
return stats return stats
data = self.session.listBuildroots(tagID=tag_id, data = self.session.listBuildroots(tagID=tag_id,
queryOpts={'order': '-create_event_id', 'limit' : 100}) queryOpts={'order': '-create_event_id', 'limit': 100})
# XXX magic number (limit) # XXX magic number (limit)
if data: if data:
tag_name = data[0]['tag_name'] tag_name = data[0]['tag_name']
@ -525,7 +525,7 @@ class RepoManager(object):
stats = {'data': data, 'ts': now, 'tag_name': tag_name} stats = {'data': data, 'ts': now, 'tag_name': tag_name}
recent = [x for x in data if now - x['create_ts'] < 3600 * 24] recent = [x for x in data if now - x['create_ts'] < 3600 * 24]
# XXX magic number # XXX magic number
stats ['n_recent'] = len(recent) stats['n_recent'] = len(recent)
self.tag_use_stats[tag_id] = stats self.tag_use_stats[tag_id] = stats
self.logger.debug("tag %s recent use count: %i" % (tag_name, len(recent))) self.logger.debug("tag %s recent use count: %i" % (tag_name, len(recent)))
return stats return stats
@ -633,8 +633,8 @@ class RepoManager(object):
# TODO: implement a timeout # TODO: implement a timeout
# also check other newRepo tasks # also check other newRepo tasks
repo_tasks = self.session.listTasks(opts={'method':'newRepo', repo_tasks = self.session.listTasks(opts={'method': 'newRepo',
'state':([koji.TASK_STATES[s] for s in ('FREE', 'OPEN')])}) 'state': ([koji.TASK_STATES[s] for s in ('FREE', 'OPEN')])})
others = [t for t in repo_tasks if t['id'] not in self.tasks] others = [t for t in repo_tasks if t['id'] not in self.tasks]
for tinfo in others: for tinfo in others:
if tinfo['id'] not in self.other_tasks: if tinfo['id'] not in self.other_tasks:
@ -701,7 +701,7 @@ class RepoManager(object):
entry = { entry = {
'taginfo': taginfo, 'taginfo': taginfo,
'expire_ts': ts, 'expire_ts': ts,
'needed_since' : time.time(), 'needed_since': time.time(),
} }
self.setTagScore(entry) self.setTagScore(entry)
self.needed_tags[tag_id] = entry self.needed_tags[tag_id] = entry
@ -824,7 +824,7 @@ def main(options, session):
repomgr.readCurrentRepos() repomgr.readCurrentRepos()
def shutdown(*args): def shutdown(*args):
raise SystemExit raise SystemExit
signal.signal(signal.SIGTERM,shutdown) signal.signal(signal.SIGTERM, shutdown)
curr_chk_thread = start_currency_checker(session, repomgr) curr_chk_thread = start_currency_checker(session, repomgr)
regen_thread = start_regen_loop(session, repomgr) regen_thread = start_regen_loop(session, repomgr)
# TODO also move rmtree jobs to threads # TODO also move rmtree jobs to threads
@ -926,15 +926,15 @@ def get_options():
'offline_retry_interval': 120, 'offline_retry_interval': 120,
'no_ssl_verify': False, 'no_ssl_verify': False,
'max_delete_processes': 4, 'max_delete_processes': 4,
'max_repo_tasks' : 4, 'max_repo_tasks': 4,
'max_repo_tasks_maven' : 2, 'max_repo_tasks_maven': 2,
'repo_tasks_limit' : 10, 'repo_tasks_limit': 10,
'delete_batch_size' : 3, 'delete_batch_size': 3,
'deleted_repo_lifetime': 7*24*3600, 'deleted_repo_lifetime': 7 * 24 * 3600,
# XXX should really be called expired_repo_lifetime # XXX should really be called expired_repo_lifetime
'dist_repo_lifetime': 7*24*3600, 'dist_repo_lifetime': 7 * 24 * 3600,
'recent_tasks_lifetime': 600, 'recent_tasks_lifetime': 600,
'sleeptime' : 15, 'sleeptime': 15,
'cert': None, 'cert': None,
'ca': '', # FIXME: unused, remove in next major release 'ca': '', # FIXME: unused, remove in next major release
'serverca': None, 'serverca': None,
@ -948,7 +948,7 @@ def get_options():
str_opts = ('topdir', 'server', 'user', 'password', 'logfile', 'principal', 'keytab', 'krbservice', str_opts = ('topdir', 'server', 'user', 'password', 'logfile', 'principal', 'keytab', 'krbservice',
'cert', 'ca', 'serverca', 'debuginfo_tags', 'cert', 'ca', 'serverca', 'debuginfo_tags',
'source_tags', 'separate_source_tags', 'ignore_tags') # FIXME: remove ca here 'source_tags', 'separate_source_tags', 'ignore_tags') # FIXME: remove ca here
bool_opts = ('verbose','debug','ignore_stray_repos', 'offline_retry', bool_opts = ('verbose', 'debug', 'ignore_stray_repos', 'offline_retry',
'krb_rdns', 'krb_canon_host', 'no_ssl_verify') 'krb_rdns', 'krb_canon_host', 'no_ssl_verify')
for name in config.options(section): for name in config.options(section):
if name in int_opts: if name in int_opts:
@ -962,7 +962,7 @@ def get_options():
for name, value in defaults.items(): for name, value in defaults.items():
if getattr(options, name, None) is None: if getattr(options, name, None) is None:
setattr(options, name, value) setattr(options, name, value)
if options.logfile in ('','None','none'): if options.logfile in ('', 'None', 'none'):
options.logfile = None options.logfile = None
# special handling for cert defaults # special handling for cert defaults
cert_defaults = { cert_defaults = {
@ -983,10 +983,10 @@ def quit(msg=None, code=1):
sys.stderr.flush() sys.stderr.flush()
sys.exit(code) sys.exit(code)
if __name__ == "__main__": if __name__ == "__main__":
options = get_options() options = get_options()
topdir = getattr(options,'topdir',None) topdir = getattr(options, 'topdir', None)
pathinfo = koji.PathInfo(topdir) pathinfo = koji.PathInfo(topdir)
if options.show_config: if options.show_config:
pprint.pprint(options.__dict__) pprint.pprint(options.__dict__)
@ -999,7 +999,7 @@ if __name__ == "__main__":
except: except:
sys.stderr.write("Cannot create logfile: %s\n" % options.logfile) sys.stderr.write("Cannot create logfile: %s\n" % options.logfile)
sys.exit(1) sys.exit(1)
if not os.access(options.logfile,os.W_OK): if not os.access(options.logfile, os.W_OK):
sys.stderr.write("Cannot write to logfile: %s\n" % options.logfile) sys.stderr.write("Cannot write to logfile: %s\n" % options.logfile)
sys.exit(1) sys.exit(1)
koji.add_file_logger("koji", options.logfile) koji.add_file_logger("koji", options.logfile)
@ -1015,7 +1015,7 @@ if __name__ == "__main__":
logger.setLevel(logging.WARNING) logger.setLevel(logging.WARNING)
session_opts = koji.grab_session_options(options) session_opts = koji.grab_session_options(options)
session = koji.ClientSession(options.server,session_opts) session = koji.ClientSession(options.server, session_opts)
if options.cert is not None and os.path.isfile(options.cert): if options.cert is not None and os.path.isfile(options.cert):
# authenticate using SSL client certificates # authenticate using SSL client certificates
session.ssl_login(options.cert, None, options.serverca) session.ssl_login(options.cert, None, options.serverca)


@ -214,8 +214,8 @@ def main(options, session):
def restart(*args): def restart(*args):
logger.warn("Initiating graceful restart") logger.warn("Initiating graceful restart")
tm.restart_pending = True tm.restart_pending = True
signal.signal(signal.SIGTERM,shutdown) signal.signal(signal.SIGTERM, shutdown)
signal.signal(signal.SIGUSR1,restart) signal.signal(signal.SIGUSR1, restart)
taken = False taken = False
tm.cleanupAllVMs() tm.cleanupAllVMs()
while True: while True:
@ -224,7 +224,7 @@ def main(options, session):
tm.updateTasks() tm.updateTasks()
taken = tm.getNextTask() taken = tm.getNextTask()
tm.cleanupExpiredVMs() tm.cleanupExpiredVMs()
except (SystemExit,ServerExit,KeyboardInterrupt): except (SystemExit, ServerExit, KeyboardInterrupt):
logger.warn("Exiting") logger.warn("Exiting")
break break
except ServerRestart: except ServerRestart:
@ -245,7 +245,7 @@ def main(options, session):
# The load-balancing code in getNextTask() will prevent a single builder # The load-balancing code in getNextTask() will prevent a single builder
# from getting overloaded. # from getting overloaded.
time.sleep(options.sleeptime) time.sleep(options.sleeptime)
except (SystemExit,KeyboardInterrupt): except (SystemExit, KeyboardInterrupt):
logger.warn("Exiting") logger.warn("Exiting")
break break
logger.warn("Shutting down, please wait...") logger.warn("Shutting down, please wait...")
@ -293,7 +293,7 @@ class DaemonXMLRPCServer(six.moves.xmlrpc_server.SimpleXMLRPCServer):
if sys.version_info[:2] <= (2, 4): if sys.version_info[:2] <= (2, 4):
# Copy and paste from SimpleXMLRPCServer, with the addition of passing # Copy and paste from SimpleXMLRPCServer, with the addition of passing
# allow_none=True to xmlrpclib.dumps() # allow_none=True to xmlrpclib.dumps()
def _marshaled_dispatch(self, data, dispatch_method = None): def _marshaled_dispatch(self, data, dispatch_method=None):
params, method = six.moves.xmlrpc_client.loads(data) params, method = six.moves.xmlrpc_client.loads(data)
try: try:
if dispatch_method is not None: if dispatch_method is not None:
@ -344,11 +344,11 @@ class WinBuildTask(MultiPlatformTask):
if not repo_info: if not repo_info:
raise koji.BuildError('invalid repo ID: %s' % repo_id) raise koji.BuildError('invalid repo ID: %s' % repo_id)
policy_data = { policy_data = {
'user_id' : task_info['owner'], 'user_id': task_info['owner'],
'source' : source_url, 'source': source_url,
'task_id' : self.id, 'task_id': self.id,
'build_tag' : build_tag['id'], 'build_tag': build_tag['id'],
'skip_tag' : bool(opts.get('skip_tag')), 'skip_tag': bool(opts.get('skip_tag')),
'target': target_info['id'] 'target': target_info['id']
} }
if not opts.get('skip_tag'): if not opts.get('skip_tag'):
@ -840,7 +840,7 @@ class VMExecTask(BaseTaskHandler):
conn = libvirt.open(None) conn = libvirt.open(None)
clone_name = self.clone(conn, name, opts) clone_name = self.clone(conn, name, opts)
self.logger.debug('Cloned VM %s to %s',name, clone_name) self.logger.debug('Cloned VM %s to %s', name, clone_name)
try: try:
vm = conn.lookupByName(clone_name) vm = conn.lookupByName(clone_name)
macaddr = self.macAddr(vm) macaddr = self.macAddr(vm)


@ -62,7 +62,7 @@ def _setUserCookie(environ, user):
value = "%s:%s" % (shasum.hexdigest(), value) value = "%s:%s" % (shasum.hexdigest(), value)
cookies = six.moves.http_cookies.SimpleCookie() cookies = six.moves.http_cookies.SimpleCookie()
cookies['user'] = value cookies['user'] = value
c = cookies['user'] #morsel instance c = cookies['user'] # morsel instance
c['secure'] = True c['secure'] = True
c['path'] = os.path.dirname(environ['SCRIPT_NAME']) c['path'] = os.path.dirname(environ['SCRIPT_NAME'])
# the Cookie module treats integer expire times as relative seconds # the Cookie module treats integer expire times as relative seconds
@ -75,7 +75,7 @@ def _setUserCookie(environ, user):
def _clearUserCookie(environ): def _clearUserCookie(environ):
cookies = six.moves.http_cookies.SimpleCookie() cookies = six.moves.http_cookies.SimpleCookie()
cookies['user'] = '' cookies['user'] = ''
c = cookies['user'] #morsel instance c = cookies['user'] # morsel instance
c['path'] = os.path.dirname(environ['SCRIPT_NAME']) c['path'] = os.path.dirname(environ['SCRIPT_NAME'])
c['expires'] = 0 c['expires'] = 0
out = c.OutputString() out = c.OutputString()
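Both helpers above work on a Morsel obtained from SimpleCookie; clearing a cookie just re-sends it with expires set to 0, which the Cookie module treats as relative seconds. A stdlib sketch of the pattern (the source imports the module via six.moves.http_cookies for Python 2 compatibility; the path value is made up):

from http.cookies import SimpleCookie

cookies = SimpleCookie()
cookies['user'] = ''
c = cookies['user']          # Morsel instance
c['path'] = '/koji'
c['expires'] = 0             # integer expires are relative seconds
print(c.OutputString())      # user=""; expires=<now>; Path=/koji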
@ -697,7 +697,7 @@ def taskinfo(environ, taskID):
pathinfo = koji.PathInfo(topdir=topurl) pathinfo = koji.PathInfo(topdir=topurl)
values['pathinfo'] = pathinfo values['pathinfo'] = pathinfo
paths = [] # (volume, relpath) tuples paths = [] # (volume, relpath) tuples
for relname, volumes in six.iteritems(server.listTaskOutput(task['id'], all_volumes=True)): for relname, volumes in six.iteritems(server.listTaskOutput(task['id'], all_volumes=True)):
paths += [(volume, relname) for volume in volumes] paths += [(volume, relname) for volume in volumes]
values['output'] = sorted(paths, key=_sortByExtAndName) values['output'] = sorted(paths, key=_sortByExtAndName)
@ -1071,7 +1071,7 @@ def tagparent(environ, tagID, parentID, action):
if datum['priority'] > maxPriority: if datum['priority'] > maxPriority:
maxPriority = datum['priority'] maxPriority = datum['priority']
values['maxPriority'] = maxPriority values['maxPriority'] = maxPriority
inheritanceData = [datum for datum in inheritanceData \ inheritanceData = [datum for datum in inheritanceData \
if datum['parent_id'] == parent['id']] if datum['parent_id'] == parent['id']]
if len(inheritanceData) == 0: if len(inheritanceData) == 0:
values['inheritanceData'] = None values['inheritanceData'] = None
@ -1446,8 +1446,8 @@ def archiveinfo(environ, archiveID, fileOrder='name', fileStart=None, buildrootO
values['wininfo'] = wininfo values['wininfo'] = wininfo
values['builtInRoot'] = builtInRoot values['builtInRoot'] = builtInRoot
values['buildroots'] = buildroots values['buildroots'] = buildroots
values['show_rpm_components'] = server.listRPMs(imageID=archive['id'], queryOpts={'limit':1}) values['show_rpm_components'] = server.listRPMs(imageID=archive['id'], queryOpts={'limit': 1})
values['show_archive_components'] = server.listArchives(imageID=archive['id'], queryOpts={'limit':1}) values['show_archive_components'] = server.listArchives(imageID=archive['id'], queryOpts={'limit': 1})
return _genHTML(environ, 'archiveinfo.chtml') return _genHTML(environ, 'archiveinfo.chtml')


@ -448,7 +448,7 @@ def formatDep(name, version, flags):
if flags & koji.RPMSENSE_EQUAL: if flags & koji.RPMSENSE_EQUAL:
s = s + "=" s = s + "="
if version: if version:
s = "%s %s" %(s, version) s = "%s %s" % (s, version)
return s return s
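formatDep above renders an RPM dependency by testing the version-comparison bits of flags; the hunk only shows the EQUAL branch, with the < and > branches just above it. A self-contained sketch of the full decoding, where the constant values mirror rpm's rpmds.h (LESS=2, GREATER=4, EQUAL=8):

RPMSENSE_LESS, RPMSENSE_GREATER, RPMSENSE_EQUAL = 2, 4, 8   # rpmds.h values

def format_dep(name, version, flags):
    s = name
    if flags & (RPMSENSE_LESS | RPMSENSE_GREATER | RPMSENSE_EQUAL):
        s += ' '
        if flags & RPMSENSE_LESS:
            s += '<'
        if flags & RPMSENSE_GREATER:
            s += '>'
        if flags & RPMSENSE_EQUAL:
            s += '='
    if version:
        s = '%s %s' % (s, version)
    return s

print(format_dep('glibc', '2.28', RPMSENSE_GREATER | RPMSENSE_EQUAL))
# glibc >= 2.28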
def formatMode(mode): def formatMode(mode):
@ -706,7 +706,7 @@ class TaskResultLine(object):
return composer(self, length, postscript) return composer(self, length, postscript)
self.composer = composer_wrapper self.composer = composer_wrapper
self.size=self._size() self.size = self._size()
def default_composer(self, length=None, postscript=None): def default_composer(self, length=None, postscript=None):
line_text = '' line_text = ''