flake8: follow E265 rule

Author: Yuming Zhu
Date: 2020-02-25 22:50:14 +08:00
parent 642508ccf6
commit 97cfaa4fcf
27 changed files with 794 additions and 793 deletions
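
For context: E265 is the pycodestyle rule, reported by flake8, that block comments must start with '# ' (a hash followed by a space). That is why nearly every hunk below changes a comment only by inserting a space after the '#'. A minimal sketch of what the checker flags, assuming flake8 is installed (the file name example.py is hypothetical):

    #no space after the hash -- flake8 reports "example.py:1:1: E265 block comment should start with '# '"
    x = 1

    # space after the hash -- satisfies E265
    y = 2

A cleanup like this can be verified with flake8's standard --select option, e.g. "flake8 --select=E265 .", which limits the run to just that one check.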


@@ -131,7 +131,7 @@ def main(options, session):
tm.findHandlers(globals())
tm.findHandlers(vars(koji.tasks))
if options.plugin:
#load plugins
# load plugins
pt = koji.plugin.PluginTracker(path=options.pluginpath.split(':'))
for name in options.plugin:
logger.info('Loading plugin: %s' % name)
@@ -192,9 +192,9 @@ class BuildRoot(object):
self._new(*args,**kwargs)
def _load(self, data):
#manage an existing buildroot
# manage an existing buildroot
if isinstance(data, dict):
#assume data already pulled from db
# assume data already pulled from db
self.id = data['id']
else:
self.id = data
@@ -291,7 +291,7 @@ class BuildRoot(object):
opts['tag_macros'][macro] = self.config['extra'][key]
output = koji.genMockConfig(self.name, self.br_arch, managed=True, **opts)
#write config
# write config
with open(configfile,'w') as fo:
fo.write(output)
@@ -398,7 +398,7 @@ class BuildRoot(object):
"""Run mock"""
mockpath = getattr(self.options,"mockpath","/usr/bin/mock")
cmd = [mockpath, "-r", self.mockcfg]
#if self.options.debug_mock:
# if self.options.debug_mock:
# cmd.append('--debug')
# TODO: should we pass something like --verbose --trace instead?
if 'mock.new_chroot' in self.config['extra']:
@@ -495,7 +495,7 @@ class BuildRoot(object):
ts_offsets[fname] = position
incremental_upload(self.session, fname, fd, uploadpath, logger=self.logger)
#clean up and return exit status of command
# clean up and return exit status of command
for (fname, (fd, inode, size, fpath)) in logs.items():
if not fd:
continue
@@ -507,7 +507,7 @@ class BuildRoot(object):
return status[1]
else:
#in no case should exceptions propagate past here
# in no case should exceptions propagate past here
try:
self.session._forget()
if workdir:
@@ -524,7 +524,7 @@ class BuildRoot(object):
os.setreuid(uid,uid)
os.execvp(cmd[0],cmd)
except:
#diediedie
# diediedie
print("Failed to exec mock")
print(''.join(traceback.format_exception(*sys.exc_info())))
os._exit(1)
@@ -656,9 +656,9 @@ class BuildRoot(object):
ts = rpm.TransactionSet()
for h in ts.dbMatch():
pkg = koji.get_header_fields(h, fields)
#skip our fake packages
# skip our fake packages
if pkg['name'] in ['buildsys-build', 'gpg-pubkey']:
#XXX config
# XXX config
continue
pkg['payloadhash'] = koji.hex_string(pkg['sigmd5'])
del pkg['sigmd5']
@@ -744,9 +744,9 @@ class BuildRoot(object):
external_repos = self.session.getExternalRepoList(self.repo_info['tag_id'],
event=self.repo_info['create_event'])
if not external_repos:
#nothing to do
# nothing to do
return
#index external repos by expanded url
# index external repos by expanded url
erepo_idx = {}
for erepo in external_repos:
# substitute $arch in the url with the arch of the repo we're generating
@@ -781,7 +781,7 @@ class BuildRoot(object):
pkgorigins = r.getinfo(librepo.LRR_YUM_REPOMD)['origin']['location_href']
koji.util.rmtree(tmpdir)
elif yum_available:
#XXX - cheap hack to get relative paths
# XXX - cheap hack to get relative paths
repomdpath = os.path.join(repodir, self.br_arch, 'repodata', 'repomd.xml')
with koji.openRemoteFile(repomdpath, **opts) as fo:
try:
@@ -796,8 +796,8 @@ class BuildRoot(object):
relpath = os.path.join(repodir, self.br_arch, pkgorigins)
with koji.openRemoteFile(relpath, **opts) as fo:
#at this point we know there were external repos at the create event,
#so there should be an origins file.
# at this point we know there were external repos at the create event,
# so there should be an origins file.
origin_idx = {}
# don't use 'with GzipFile' as it is not supported on py2.6
fo2 = GzipFile(fileobj=fo, mode='r')
@@ -807,7 +807,7 @@ class BuildRoot(object):
parts=line.split(None, 2)
if len(parts) < 2:
continue
#first field is formated by yum as [e:]n-v-r.a
# first field is formated by yum as [e:]n-v-r.a
nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % koji.parse_NVRA(parts[0])
origin_idx[nvra] = parts[1]
fo2.close()
@@ -874,7 +874,7 @@ class BuildRoot(object):
class ChainBuildTask(BaseTaskHandler):
Methods = ['chainbuild']
#mostly just waiting on other tasks
# mostly just waiting on other tasks
_taskWeight = 0.1
def handler(self, srcs, target, opts=None):
@@ -896,7 +896,7 @@ class ChainBuildTask(BaseTaskHandler):
raise koji.GenericError('unknown build target: %s' % target)
nvrs = []
for n_level, build_level in enumerate(srcs):
#if there are any nvrs to wait on, do so
# if there are any nvrs to wait on, do so
if nvrs:
task_id = self.session.host.subtask(method='waitrepo',
arglist=[target_info['build_tag_name'], None, nvrs],
@@ -904,7 +904,7 @@
parent=self.id)
self.wait(task_id, all=True, failany=True)
nvrs = []
#kick off the builds for this level
# kick off the builds for this level
build_tasks = []
for n_src, src in enumerate(build_level):
if SCM.is_scm_url(src):
@@ -915,11 +915,11 @@ class ChainBuildTask(BaseTaskHandler):
build_tasks.append(task_id)
else:
nvrs.append(src)
#next pass will wait for these
# next pass will wait for these
if build_tasks:
#the level could have been all nvrs
# the level could have been all nvrs
self.wait(build_tasks, all=True, failany=True)
#see what builds we created in this batch so the next pass can wait for them also
# see what builds we created in this batch so the next pass can wait for them also
for build_task in build_tasks:
builds = self.session.listBuilds(taskID=build_task)
if builds:
@@ -929,7 +929,7 @@
class BuildTask(BaseTaskHandler):
Methods = ['build']
#we mostly just wait on other tasks
# we mostly just wait on other tasks
_taskWeight = 0.2
def handler(self, src, target, opts=None):
@@ -949,7 +949,7 @@ class BuildTask(BaseTaskHandler):
self.event_id = repo_info['create_event']
else:
repo_info = None
#we'll wait for a repo later (self.getRepo)
# we'll wait for a repo later (self.getRepo)
self.event_id = None
task_info = self.session.getTaskInfo(self.id)
target_info = None
@@ -959,7 +959,7 @@ class BuildTask(BaseTaskHandler):
dest_tag = target_info['dest_tag']
build_tag = target_info['build_tag']
if repo_info is not None:
#make sure specified repo matches target
# make sure specified repo matches target
if repo_info['tag_id'] != target_info['build_tag']:
raise koji.BuildError('Repo/Target mismatch: %s/%s' \
% (repo_info['tag_name'], target_info['build_tag_name']))
@@ -970,7 +970,7 @@ class BuildTask(BaseTaskHandler):
raise koji.GenericError('unknown build target: %s' % target)
build_tag = repo_info['tag_id']
if target is None:
#ok, call it skip-tag for the buildroot tag
# ok, call it skip-tag for the buildroot tag
self.opts['skip_tag'] = True
dest_tag = build_tag
else:
@@ -978,7 +978,7 @@ class BuildTask(BaseTaskHandler):
if not taginfo:
raise koji.GenericError('neither tag nor target: %s' % target)
dest_tag = taginfo['id']
#policy checks...
# policy checks...
policy_data = {
'user_id' : task_info['owner'],
'source' : src,
@@ -991,7 +991,7 @@ class BuildTask(BaseTaskHandler):
if not self.opts.get('skip_tag'):
policy_data['tag'] = dest_tag #id
if not SCM.is_scm_url(src) and not opts.get('scratch'):
#let hub policy decide
# let hub policy decide
self.session.host.assertPolicy('build_from_srpm', policy_data)
if opts.get('repo_id') is not None:
# use of this option is governed by policy
@@ -1024,11 +1024,11 @@ class BuildTask(BaseTaskHandler):
% (data['name'], target_info['dest_tag_name']))
# TODO - more pre tests
archlist = self.getArchList(build_tag, h, extra=extra_arches)
#let the system know about the build we're attempting
# let the system know about the build we're attempting
if not self.opts.get('scratch'):
#scratch builds do not get imported
# scratch builds do not get imported
build_id = self.session.host.initBuild(data)
#(initBuild raises an exception if there is a conflict)
# (initBuild raises an exception if there is a conflict)
failany = (self.opts.get('fail_fast', False)
or not getattr(self.options, 'build_arch_can_fail', False))
try:
@@ -1037,16 +1037,16 @@ class BuildTask(BaseTaskHandler):
repo_info['id'], failany=failany)
if opts.get('scratch'):
#scratch builds do not get imported
# scratch builds do not get imported
self.session.host.moveBuildToScratch(self.id,srpm,rpms,logs=logs)
else:
self.session.host.completeBuild(self.id,build_id,srpm,rpms,brmap,logs=logs)
except (SystemExit,ServerExit,KeyboardInterrupt):
#we do not trap these
# we do not trap these
raise
except:
if not self.opts.get('scratch'):
#scratch builds do not get imported
# scratch builds do not get imported
self.session.host.failBuild(self.id, build_id)
# reraise the exception
raise
@@ -1067,7 +1067,7 @@ class BuildTask(BaseTaskHandler):
return src
else:
raise koji.BuildError('Invalid source specification: %s' % src)
#XXX - other methods?
# XXX - other methods?
def getSRPMFromSRPM(self, src, build_tag, repo_id):
# rebuild srpm in mock, so it gets correct disttag, rpm version, etc.
@@ -1085,7 +1085,7 @@ class BuildTask(BaseTaskHandler):
return srpm
def getSRPMFromSCM(self, url, build_tag, repo_id):
#TODO - allow different ways to get the srpm
# TODO - allow different ways to get the srpm
task_id = self.session.host.subtask(method='buildSRPMFromSCM',
arglist=[url, build_tag, {'repo_id': repo_id, 'scratch': self.opts.get('scratch')}],
label='srpm',
@@ -1100,7 +1100,7 @@ class BuildTask(BaseTaskHandler):
return srpm
def readSRPMHeader(self, srpm):
#srpm arg should be a path relative to <BASEDIR>/work
# srpm arg should be a path relative to <BASEDIR>/work
self.logger.debug("Reading SRPM")
relpath = "work/%s" % srpm
opts = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])
@@ -1117,7 +1117,7 @@ class BuildTask(BaseTaskHandler):
buildconfig = self.session.getBuildConfig(build_tag, event=self.event_id)
arches = buildconfig['arches']
if not arches:
#XXX - need to handle this better
# XXX - need to handle this better
raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig)
tag_archlist = [koji.canonArch(a) for a in arches.split()]
self.logger.debug('arches: %s' % arches)
@@ -1139,13 +1139,13 @@ class BuildTask(BaseTaskHandler):
if excludearch:
archlist = [ a for a in archlist if a not in excludearch ]
self.logger.debug('archlist after excludearch: %r' % archlist)
#noarch is funny
# noarch is funny
if 'noarch' not in excludearch and \
( 'noarch' in buildarchs or 'noarch' in exclusivearch ):
archlist.append('noarch')
override = self.opts.get('arch_override')
if self.opts.get('scratch') and override:
#only honor override for scratch builds
# only honor override for scratch builds
self.logger.debug('arch override: %s' % override)
archlist = override.split()
archdict = {}
@@ -1248,9 +1248,9 @@ class BuildTask(BaseTaskHandler):
return srpm,rpms,brmap,logs
def tagBuild(self,build_id,dest_tag):
#XXX - need options to skip tagging and to force tagging
#create the tagBuild subtask
#this will handle the "post tests"
# XXX - need options to skip tagging and to force tagging
# create the tagBuild subtask
# this will handle the "post tests"
task_id = self.session.host.subtask(method='tagBuild',
arglist=[dest_tag,build_id,False,None,True],
label='tag',
@@ -1279,7 +1279,7 @@ class BaseBuildTask(BaseTaskHandler):
(self.id, self.method,
', '.join(tag_arches), ', '.join(host_arches)))
return False
#otherwise...
# otherwise...
# This is in principle an error condition, but this is not a good place
# to fail. Instead we proceed and let the task fail normally.
return True
@@ -1448,7 +1448,7 @@ class BuildArchTask(BaseBuildTask):
ret['brootid'] = broot.id
broot.expire()
#Let TaskManager clean up
# Let TaskManager clean up
return ret
@@ -1525,7 +1525,7 @@ class MavenTask(MultiPlatformTask):
raise
except:
if not self.opts.get('scratch'):
#scratch builds do not get imported
# scratch builds do not get imported
self.session.host.failBuild(self.id, self.build_id)
# reraise the exception
raise
@@ -1988,7 +1988,7 @@ class WrapperRPMTask(BaseBuildTask):
gid = grp.getgrnam('mock')[2]
self.chownTree(specdir, uid, gid)
#build srpm
# build srpm
self.logger.debug("Running srpm build")
buildroot.build_srpm(specfile, specdir, None)
@@ -2327,7 +2327,7 @@ class ChainMavenTask(MultiPlatformTask):
class TagBuildTask(BaseTaskHandler):
Methods = ['tagBuild']
#XXX - set weight?
# XXX - set weight?
def handler(self, tag_id, build_id, force=False, fromtag=None, ignore_success=False):
task = self.session.getTaskInfo(self.id)
@@ -2336,11 +2336,11 @@ class TagBuildTask(BaseTaskHandler):
self.session.getBuild(build_id, strict=True)
self.session.getTag(tag_id, strict=True)
#several basic sanity checks have already been run (and will be run
#again when we make the final call). Our job is to perform the more
#computationally expensive 'post' tests.
# several basic sanity checks have already been run (and will be run
# again when we make the final call). Our job is to perform the more
# computationally expensive 'post' tests.
#XXX - add more post tests
# XXX - add more post tests
self.session.host.tagBuild(self.id,tag_id,build_id,force=force,fromtag=fromtag)
self.session.host.tagNotification(True, tag_id, fromtag, build_id, user_id, ignore_success)
except Exception as e:
@@ -2376,7 +2376,7 @@ class BuildBaseImageTask(BuildImageTask):
target_info = self.session.getBuildTarget(target, strict=True)
build_tag = target_info['build_tag']
repo_info = self.getRepo(build_tag)
#check requested arches against build tag
# check requested arches against build tag
buildconfig = self.session.getBuildConfig(build_tag)
if not buildconfig['arches']:
raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig)
@@ -2475,11 +2475,11 @@ class BuildBaseImageTask(BuildImageTask):
results)
except (SystemExit,ServerExit,KeyboardInterrupt):
#we do not trap these
# we do not trap these
raise
except:
if not opts.get('scratch'):
#scratch builds do not get imported
# scratch builds do not get imported
if bld_info:
self.session.host.failBuild(self.id, bld_info['id'])
# reraise the exception
@@ -2512,7 +2512,7 @@ class BuildApplianceTask(BuildImageTask):
target_info = self.session.getBuildTarget(target, strict=True)
build_tag = target_info['build_tag']
repo_info = self.getRepo(build_tag)
#check requested arch against build tag
# check requested arch against build tag
buildconfig = self.session.getBuildConfig(build_tag)
if not buildconfig['arches']:
raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig)
@@ -2561,11 +2561,11 @@ class BuildApplianceTask(BuildImageTask):
self.session.host.moveImageBuildToScratch(self.id, results)
except (SystemExit,ServerExit,KeyboardInterrupt):
#we do not trap these
# we do not trap these
raise
except:
if not opts.get('scratch'):
#scratch builds do not get imported
# scratch builds do not get imported
if bld_info:
self.session.host.failBuild(self.id, bld_info['id'])
# reraise the exception
@@ -2597,7 +2597,7 @@ class BuildLiveCDTask(BuildImageTask):
target_info = self.session.getBuildTarget(target, strict=True)
build_tag = target_info['build_tag']
repo_info = self.getRepo(build_tag)
#check requested arch against build tag
# check requested arch against build tag
buildconfig = self.session.getBuildConfig(build_tag)
if not buildconfig['arches']:
raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig)
@@ -2645,11 +2645,11 @@ class BuildLiveCDTask(BuildImageTask):
self.session.host.moveImageBuildToScratch(self.id, results)
except (SystemExit,ServerExit,KeyboardInterrupt):
#we do not trap these
# we do not trap these
raise
except:
if not opts.get('scratch'):
#scratch builds do not get imported
# scratch builds do not get imported
if bld_info:
self.session.host.failBuild(self.id, bld_info['id'])
# reraise the exception
@@ -2683,7 +2683,7 @@ class BuildLiveMediaTask(BuildImageTask):
target_info = self.session.getBuildTarget(target, strict=True)
build_tag = target_info['build_tag']
repo_info = self.getRepo(build_tag)
#check requested arch against build tag
# check requested arch against build tag
buildconfig = self.session.getBuildConfig(build_tag)
if not buildconfig['arches']:
raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig)
@@ -2783,11 +2783,11 @@ class BuildLiveMediaTask(BuildImageTask):
self.session.host.moveImageBuildToScratch(self.id, results)
except (SystemExit, ServerExit, KeyboardInterrupt):
#we do not trap these
# we do not trap these
raise
except:
if not opts.get('scratch'):
#scratch builds do not get imported
# scratch builds do not get imported
if bld_info:
self.session.host.failBuild(self.id, bld_info['id'])
# reraise the exception
@@ -2953,7 +2953,7 @@ class ImageTask(BaseTaskHandler):
baseurl = '%s/%s' % (repopath, arch)
self.logger.debug('BASEURL: %s' % baseurl)
self.ks.handler.repo.repoList.append(repo_class(baseurl=baseurl, name='koji-%s-%i' % (target_info['build_tag_name'], repo_info['id'])))
#inject url if provided
# inject url if provided
if opts.get('install_tree_url'):
self.ks.handler.url(url=opts['install_tree_url'])
@@ -3285,7 +3285,7 @@ class LiveCDTask(ImageTask):
## livemedia-creator
# livemedia-creator
class LiveMediaTask(ImageTask):
Methods = ['createLiveMedia']
@@ -3410,7 +3410,7 @@ class LiveMediaTask(ImageTask):
'--no-virt',
'--resultdir', resultdir,
'--project', name,
#'--tmp', '/tmp'
# '--tmp', '/tmp'
]
@@ -3508,10 +3508,10 @@ class LiveMediaTask(ImageTask):
if not opts.get('scratch'):
# TODO - generate list of rpms in image
# (getImagePackages doesn't work here)
#hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),
# hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),
# cachedir[1:]))
imgdata ['rpmlist'] = []
#broot.markExternalRPMs(hdrlist)
# broot.markExternalRPMs(hdrlist)
broot.expire()
return imgdata
@@ -3666,10 +3666,10 @@ class OzImageTask(BaseTaskHandler):
the way we want
"""
return {
#Oz specific
# Oz specific
'oz_data_dir': os.path.join(self.workdir, 'oz_data'),
'oz_screenshot_dir': os.path.join(self.workdir, 'oz_screenshots'),
#IF specific
# IF specific
'imgdir': os.path.join(self.workdir, 'scratch_images'),
'tmpdir': os.path.join(self.workdir, 'oz-tmp'),
'verbose': True,
@@ -4251,7 +4251,7 @@ class BaseImageTask(OzImageTask):
}
# record the RPMs that were installed
if not opts.get('scratch'):
#fields = ('name', 'version', 'release', 'arch', 'epoch', 'size',
# fields = ('name', 'version', 'release', 'arch', 'epoch', 'size',
# 'payloadhash', 'buildtime')
icicle = xml.dom.minidom.parseString(images['raw']['icicle'])
self.logger.debug('ICICLE: %s' % images['raw']['icicle'])
@@ -4540,7 +4540,7 @@ class BuildIndirectionImageTask(OzImageTask):
bld_info, target_info, bd)
except:
if not opts.get('scratch'):
#scratch builds do not get imported
# scratch builds do not get imported
if bld_info:
self.session.host.failBuild(self.id, bld_info['id'])
# reraise the exception
@@ -4770,7 +4770,7 @@ class BuildSRPMFromSCMTask(BaseBuildTask):
'repo_id': repo_id}
if self.options.scm_credentials_dir is not None and os.path.isdir(self.options.scm_credentials_dir):
rootopts['bind_opts'] = {'dirs' : {self.options.scm_credentials_dir : '/credentials',}}
## Force internal_dev_setup back to true because bind_opts is used to turn it off
# Force internal_dev_setup back to true because bind_opts is used to turn it off
rootopts['internal_dev_setup'] = True
br_arch = self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id))
broot = BuildRoot(self.session, self.options, build_tag['id'], br_arch, self.id, **rootopts)
@@ -4820,7 +4820,7 @@ class BuildSRPMFromSCMTask(BaseBuildTask):
# Run spec file sanity checks. Any failures will throw a BuildError
self.spec_sanity_checks(spec_file)
#build srpm
# build srpm
self.logger.debug("Running srpm build")
broot.build_srpm(spec_file, sourcedir, scm.source_cmd)
@@ -4841,7 +4841,7 @@ class BuildSRPMFromSCMTask(BaseBuildTask):
if srpm_name != os.path.basename(srpm):
raise koji.BuildError('srpm name mismatch: %s != %s' % (srpm_name, os.path.basename(srpm)))
#upload srpm and return
# upload srpm and return
self.uploadFile(srpm)
brootid = broot.id
@@ -4941,7 +4941,7 @@ Status: %(status)s\r
server = smtplib.SMTP(self.options.smtphost)
if self.options.smtp_user is not None and self.options.smtp_pass is not None:
server.login(self.options.smtp_user, self.options.smtp_pass)
#server.set_debuglevel(True)
# server.set_debuglevel(True)
server.sendmail(from_addr, recipients, message)
server.quit()
@@ -5192,9 +5192,9 @@ class NewRepoTask(BaseTaskHandler):
for fn in os.listdir(path):
if fn != 'groups' and os.path.isfile("%s/%s/pkglist" % (path, fn)):
arches.append(fn)
#see if we can find a previous repo to update from
#only shadowbuild tags should start with SHADOWBUILD, their repos are auto
#expired. so lets get the most recent expired tag for newRepo shadowbuild tasks.
# see if we can find a previous repo to update from
# only shadowbuild tags should start with SHADOWBUILD, their repos are auto
# expired. so lets get the most recent expired tag for newRepo shadowbuild tasks.
if tinfo['name'].startswith('SHADOWBUILD'):
oldrepo_state = koji.REPO_EXPIRED
else:
@@ -5242,7 +5242,7 @@ class CreaterepoTask(BaseTaskHandler):
_taskWeight = 1.5
def handler(self, repo_id, arch, oldrepo):
#arch is the arch of the repo, not the task
# arch is the arch of the repo, not the task
rinfo = self.session.repoInfo(repo_id, strict=True)
if rinfo['state'] != koji.REPO_INIT:
raise koji.GenericError("Repo %(id)s not in INIT state (got %(state)s)" % rinfo)
@@ -5253,7 +5253,7 @@ class CreaterepoTask(BaseTaskHandler):
if not os.path.isdir(self.repodir):
raise koji.GenericError("Repo directory missing: %s" % self.repodir)
groupdata = os.path.join(toprepodir, 'groups', 'comps.xml')
#set up our output dir
# set up our output dir
self.outdir = '%s/repo' % self.workdir
self.datadir = '%s/repodata' % self.outdir
pkglist = os.path.join(self.repodir, 'pkglist')
@@ -5286,7 +5286,7 @@ class CreaterepoTask(BaseTaskHandler):
cmd.extend(['-i', pkglist])
if os.path.isfile(groupdata):
cmd.extend(['-g', groupdata])
#attempt to recycle repodata from last repo
# attempt to recycle repodata from last repo
if pkglist and oldrepo and self.options.createrepo_update:
# old repo could be from inherited tag, so path needs to be
# composed from that tag, not rinfo['tag_name']
@@ -5459,7 +5459,7 @@ class createDistRepoTask(BaseTaskHandler):
"sparc64", "s390x": "s390", "ppc64": "ppc"}
def handler(self, tag, repo_id, arch, keys, opts):
#arch is the arch of the repo, not the task
# arch is the arch of the repo, not the task
self.rinfo = self.session.repoInfo(repo_id, strict=True)
if self.rinfo['state'] != koji.REPO_INIT:
raise koji.GenericError("Repo %(id)s not in INIT state (got %(state)s)" % self.rinfo)
@@ -6047,7 +6047,7 @@ enabled=1
class WaitrepoTask(BaseTaskHandler):
Methods = ['waitrepo']
#mostly just waiting
# mostly just waiting
_taskWeight = 0.2
PAUSE = 60
@@ -6101,7 +6101,7 @@ class WaitrepoTask(BaseTaskHandler):
(koji.util.duration(start), taginfo['name']))
return repo
else:
#no check requested -- return first ready repo
# no check requested -- return first ready repo
return repo
if (time.time() - start) > (self.TIMEOUT * 60.0):
@@ -6140,7 +6140,7 @@ def get_options():
parser.add_option("--debug-xmlrpc", action="store_true", default=False,
help="show xmlrpc debug output")
parser.add_option("--debug-mock", action="store_true", default=False,
#obsolete option
# obsolete option
help=SUPPRESS_HELP)
parser.add_option("--skip-main", action="store_true", default=False,
help="don't actually run main")
@@ -6163,7 +6163,7 @@ def get_options():
if args:
parser.error("incorrect number of arguments")
#not reached
# not reached
assert False # pragma: no cover
# load local config
@@ -6256,12 +6256,12 @@ def get_options():
if getattr(options, name, None) is None:
setattr(options, name, value)
#honor topdir
# honor topdir
if options.topdir:
koji.BASEDIR = options.topdir
koji.pathinfo.topdir = options.topdir
#make sure workdir exists
# make sure workdir exists
if not os.path.exists(options.workdir):
koji.ensuredir(options.workdir)
@@ -6308,7 +6308,7 @@ def quit(msg=None, code=1):
if __name__ == "__main__":
koji.add_file_logger("koji", "/var/log/kojid.log")
#note we're setting logging params for all of koji*
# note we're setting logging params for all of koji*
options = get_options()
if options.log_level:
lvl = getattr(logging, options.log_level, None)
@@ -6326,7 +6326,7 @@ if __name__ == "__main__":
if options.admin_emails:
koji.add_mail_logger("koji", options.admin_emails)
#start a session and login
# start a session and login
session_opts = koji.grab_session_options(options)
session = koji.ClientSession(options.server, session_opts)
if options.cert and os.path.isfile(options.cert):
@@ -6360,14 +6360,14 @@ if __name__ == "__main__":
quit("Could not connect to Kerberos authentication service: '%s'" % e.args[1])
else:
quit("No username/password supplied and Kerberos missing or not configured")
#make session exclusive
# make session exclusive
try:
session.exclusiveSession(force=options.force_lock)
except koji.AuthLockError:
quit("Error: Unable to get lock. Trying using --force-lock")
if not session.logged_in:
quit("Error: Unknown login error")
#make sure it works
# make sure it works
try:
ret = session.echo("OK")
except requests.exceptions.ConnectionError:
@@ -6377,7 +6377,7 @@ if __name__ == "__main__":
# run main
if options.daemon:
#detach
# detach
koji.daemonize()
main(options, session)
# not reached


@@ -164,7 +164,7 @@ class RepoMerge(object):
n = self.yumbase.add_enable_repo(rid, baseurls=[r])
n._merge_rank = count
#setup our sacks
# setup our sacks
self.yumbase._getSacks(archlist=self.archlist)
self.sort_and_filter()
@@ -205,8 +205,8 @@ class RepoMerge(object):
if reponum == 0 and not pkg.basepath:
# this is the first repo (i.e. the koji repo) and appears
# to be using relative urls
#XXX - kind of a hack, but yum leaves us little choice
#force the pkg object to report a relative location
# XXX - kind of a hack, but yum leaves us little choice
# force the pkg object to report a relative location
loc = """<location href="%s"/>\n""" % yum.misc.to_xml(pkg.remote_path, attrib=True)
pkg._return_remote_location = make_const_func(loc)
if pkg.sourcerpm in seen_srpms:
@@ -296,8 +296,8 @@ class RepoMerge(object):
if reponum == 0 and not pkg.basepath:
# this is the first repo (i.e. the koji repo) and appears
# to be using relative urls
#XXX - kind of a hack, but yum leaves us little choice
#force the pkg object to report a relative location
# XXX - kind of a hack, but yum leaves us little choice
# force the pkg object to report a relative location
loc = """<location href="%s"/>\n""" % yum.misc.to_xml(pkg.remote_path, attrib=True)
pkg._return_remote_location = make_const_func(loc)


@@ -50,7 +50,7 @@ def register_plugin(plugin):
"""
for v in six.itervalues(vars(plugin)):
if isinstance(v, six.class_types):
#skip classes
# skip classes
continue
if callable(v):
if getattr(v, 'exported_cli', False):
@@ -166,12 +166,12 @@ def get_options():
value = os.path.expanduser(getattr(options, name))
setattr(options, name, value)
#honor topdir
# honor topdir
if options.topdir:
koji.BASEDIR = options.topdir
koji.pathinfo.topdir = options.topdir
#pkgurl is obsolete
# pkgurl is obsolete
if options.pkgurl:
if options.topurl:
warn("Warning: the pkgurl option is obsolete")


@@ -484,11 +484,11 @@ def handle_build(options, session, args):
opts[key] = val
priority = None
if build_opts.background:
#relative to koji.PRIO_DEFAULT
# relative to koji.PRIO_DEFAULT
priority = 5
# try to check that source is an SRPM
if '://' not in source:
#treat source as an srpm and upload it
# treat source as an srpm and upload it
if not build_opts.quiet:
print("Uploading srpm: %s" % source)
serverdir = unique_path('cli-build')
@@ -546,7 +546,7 @@ def handle_chain_build(options, session, args):
src_list = []
build_level = []
#src_lists is a list of lists of sources to build.
# src_lists is a list of lists of sources to build.
# each list is block of builds ("build level") which must all be completed
# before the next block begins. Blocks are separated on the command line with ':'
for src in sources:
@@ -571,7 +571,7 @@ def handle_chain_build(options, session, args):
priority = None
if build_opts.background:
#relative to koji.PRIO_DEFAULT
# relative to koji.PRIO_DEFAULT
priority = 5
task_id = session.chainBuild(src_list, target, priority=priority)
@@ -671,7 +671,7 @@ def handle_maven_build(options, session, args):
opts['skip_tag'] = True
priority = None
if build_opts.background:
#relative to koji.PRIO_DEFAULT
# relative to koji.PRIO_DEFAULT
priority = 5
task_id = session.mavenBuild(source, target, opts, priority=priority)
if not build_opts.quiet:
@@ -894,7 +894,7 @@ def anon_handle_mock_config(goptions, session, args):
(options, args) = parser.parse_args(args)
activate_session(session, goptions)
if args:
#for historical reasons, we also accept buildroot name as first arg
# for historical reasons, we also accept buildroot name as first arg
if not options.name:
options.name = args[0]
else:
@@ -1155,7 +1155,7 @@ def handle_import(goptions, session, args):
if data['sourcepackage']:
break
else:
#no srpm included, check for build
# no srpm included, check for build
binfo = session.getBuild(nvr)
if not binfo:
print(_("Missing build or srpm: %s") % nvr)
@@ -1164,7 +1164,7 @@ def handle_import(goptions, session, args):
print(_("Aborting import"))
return
#local function to help us out below
# local function to help us out below
def do_import(path, data):
rinfo = dict([(k,data[k]) for k in ('name','version','release','arch')])
prev = session.getRPM(rinfo)
@@ -1391,13 +1391,13 @@ def _import_comps_alt(session, filename, tag, options): # no cover 3.x
uservisible=bool(group.user_visible),
description=group.description,
langonly=group.langonly)
#yum.comps does not support the biarchonly field
# yum.comps does not support the biarchonly field
for ptype, pdata in [('mandatory', group.mandatory_packages),
('default', group.default_packages),
('optional', group.optional_packages),
('conditional', group.conditional_packages)]:
for pkg in pdata:
#yum.comps does not support basearchonly
# yum.comps does not support basearchonly
pkgopts = {'type' : ptype}
if ptype == 'conditional':
pkgopts['requires'] = pdata[pkg]
@@ -1407,8 +1407,8 @@ def _import_comps_alt(session, filename, tag, options): # no cover 3.x
s_opts = ', '.join(["'%s': %r" % (k, pkgopts[k]) for k in sorted(pkgopts.keys())])
print(" Package: %s: {%s}" % (pkg, s_opts))
session.groupPackageListAdd(tag, group.groupid, pkg, force=force, **pkgopts)
#yum.comps does not support group dependencies
#yum.comps does not support metapkgs
# yum.comps does not support group dependencies
# yum.comps does not support metapkgs
def handle_import_sig(goptions, session, args):
@@ -1540,9 +1540,9 @@ def handle_prune_signed_copies(options, session, args):
# 4) for a specified tag, remove all signed copies (no inheritance)
# (but skip builds that are multiply tagged)
#for now, we're just implementing mode #1
#(with the modification that we check to see if the build was latest within
#the last N days)
# for now, we're just implementing mode #1
# (with the modification that we check to see if the build was latest within
# the last N days)
if options.ignore_tag_file:
with open(options.ignore_tag_file) as fo:
options.ignore_tag.extend([line.strip() for line in fo.readlines()])
@@ -1579,7 +1579,7 @@ def handle_prune_signed_copies(options, session, args):
print("...got %i builds" % len(builds))
builds.sort()
else:
#single build
# single build
binfo = session.getBuild(options.build)
if not binfo:
parser.error('No such build: %s' % options.build)
@@ -1601,21 +1601,21 @@ def handle_prune_signed_copies(options, session, args):
time_str = time.asctime(time.localtime(ts))
return "%s: %s" % (time_str, fmt % x)
for nvr, binfo in builds:
#listBuilds returns slightly different data than normal
# listBuilds returns slightly different data than normal
if 'id' not in binfo:
binfo['id'] = binfo['build_id']
if 'name' not in binfo:
binfo['name'] = binfo['package_name']
if options.debug:
print("DEBUG: %s" % nvr)
#see how recently this build was latest for a tag
# see how recently this build was latest for a tag
is_latest = False
is_protected = False
last_latest = None
tags = {}
for entry in session.queryHistory(build=binfo['id'])['tag_listing']:
#we used queryHistory rather than listTags so we can consider tags
#that the build was recently untagged from
# we used queryHistory rather than listTags so we can consider tags
# that the build was recently untagged from
tags.setdefault(entry['tag.name'], 1)
if options.debug:
print("Tags: %s" % to_list(tags.keys()))
@@ -1633,43 +1633,43 @@ def handle_prune_signed_copies(options, session, args):
break
if ignore_tag:
continue
#in order to determine how recently this build was latest, we have
#to look at the tagging history.
# in order to determine how recently this build was latest, we have
# to look at the tagging history.
hist = session.queryHistory(tag=tag_name, package=binfo['name'])['tag_listing']
if not hist:
#really shouldn't happen
# really shouldn't happen
raise koji.GenericError("No history found for %s in %s" % (nvr, tag_name))
timeline = []
for x in hist:
#note that for revoked entries, we're effectively splitting them into
#two parts: creation and revocation.
# note that for revoked entries, we're effectively splitting them into
# two parts: creation and revocation.
timeline.append((x['create_event'], 1, x))
#at the same event, revokes happen first
# at the same event, revokes happen first
if x['revoke_event'] is not None:
timeline.append((x['revoke_event'], 0, x))
timeline.sort(key=lambda entry: entry[:2])
#find most recent creation entry for our build and crop there
# find most recent creation entry for our build and crop there
latest_ts = None
for i in range(len(timeline)-1, -1, -1):
#searching in reverse cronological order
# searching in reverse cronological order
event_id, is_create, entry = timeline[i]
if entry['build_id'] == binfo['id'] and is_create:
latest_ts = event_id
break
if not latest_ts:
#really shouldn't happen
# really shouldn't happen
raise koji.GenericError("No creation event found for %s in %s" % (nvr, tag_name))
our_entry = entry
if options.debug:
print(_histline(event_id, our_entry))
#now go through the events since most recent creation entry
# now go through the events since most recent creation entry
timeline = timeline[i+1:]
if not timeline:
is_latest = True
if options.debug:
print("%s is latest in tag %s" % (nvr, tag_name))
break
#before we go any further, is this a protected tag?
# before we go any further, is this a protected tag?
protect_tag = False
for pattern in options.protect_tag:
if fnmatch.fnmatch(tag_name, pattern):
@@ -1680,13 +1680,13 @@ def handle_prune_signed_copies(options, session, args):
# if this build was in this tag within that limit, then we will
# not prune its signed copies
if our_entry['revoke_event'] is None:
#we're still tagged with a protected tag
# we're still tagged with a protected tag
if options.debug:
print("Build %s has protected tag %s" % (nvr, tag_name))
is_protected = True
break
elif our_entry['revoke_ts'] > cutoff_ts:
#we were still tagged here sometime before the cutoff
# we were still tagged here sometime before the cutoff
if options.debug:
print("Build %s had protected tag %s until %s" \
% (nvr, tag_name, time.asctime(time.localtime(our_entry['revoke_ts']))))
@@ -1696,40 +1696,40 @@ def handle_prune_signed_copies(options, session, args):
revoke_ts = None
others = {}
for event_id, is_create, entry in timeline:
#So two things can knock this build from the title of latest:
# So two things can knock this build from the title of latest:
# - it could be untagged (entry revoked)
# - another build could become latest (replaced)
#Note however that if the superceding entry is itself revoked, then
#our build could become latest again
# Note however that if the superceding entry is itself revoked, then
# our build could become latest again
if options.debug:
print(_histline(event_id, entry))
if entry['build_id'] == binfo['id']:
if is_create:
#shouldn't happen
# shouldn't happen
raise koji.GenericError("Duplicate creation event found for %s in %s" \
% (nvr, tag_name))
else:
#we've been revoked
# we've been revoked
revoke_ts = entry['revoke_ts']
break
else:
if is_create:
#this build has become latest
# this build has become latest
replaced_ts = entry['create_ts']
if entry['active']:
#this entry not revoked yet, so we're done for this tag
# this entry not revoked yet, so we're done for this tag
break
#since this entry is revoked later, our build might eventually be
#uncovered, so we have to keep looking
# since this entry is revoked later, our build might eventually be
# uncovered, so we have to keep looking
others[entry['build_id']] = 1
else:
#other build revoked
#see if our build has resurfaced
# other build revoked
# see if our build has resurfaced
if entry['build_id'] in others:
del others[entry['build_id']]
if replaced_ts is not None and not others:
#we've become latest again
#(note: we're not revoked yet because that triggers a break above)
# we've become latest again
# (note: we're not revoked yet because that triggers a break above)
replaced_ts = None
latest_ts = entry['revoke_ts']
if last_latest is None:
@@ -1738,25 +1738,25 @@ def handle_prune_signed_copies(options, session, args):
timestamps = [last_latest]
if revoke_ts is None:
if replaced_ts is None:
#turns out we are still latest
# turns out we are still latest
is_latest = True
if options.debug:
print("%s is latest (again) in tag %s" % (nvr, tag_name))
break
else:
#replaced (but not revoked)
# replaced (but not revoked)
timestamps.append(replaced_ts)
if options.debug:
print("tag %s: %s not latest (replaced %s)" \
% (tag_name, nvr, time.asctime(time.localtime(replaced_ts))))
elif replaced_ts is None:
#revoked but not replaced
# revoked but not replaced
timestamps.append(revoke_ts)
if options.debug:
print("tag %s: %s not latest (revoked %s)" \
% (tag_name, nvr, time.asctime(time.localtime(revoke_ts))))
else:
#revoked AND replaced
# revoked AND replaced
timestamps.append(min(revoke_ts, replaced_ts))
if options.debug:
print("tag %s: %s not latest (revoked %s, replaced %s)" \
@@ -1772,13 +1772,13 @@ def handle_prune_signed_copies(options, session, args):
continue
if is_protected:
continue
#not latest anywhere since cutoff, so we can remove all signed copies
# not latest anywhere since cutoff, so we can remove all signed copies
rpms = session.listRPMs(buildID=binfo['id'])
session.multicall = True
for rpminfo in rpms:
session.queryRPMSigs(rpm_id=rpminfo['id'])
by_sig = {}
#index by sig
# index by sig
for rpminfo, [sigs] in zip(rpms, session.multiCall()):
for sig in sigs:
sigkey = sig['sigkey']
@@ -1799,7 +1799,7 @@ def handle_prune_signed_copies(options, session, args):
except OSError:
continue
if not stat.S_ISREG(st.st_mode):
#warn about this
# warn about this
print("Skipping %s. Not a regular file" % signedpath)
continue
if st.st_mtime > cutoff_ts:
@@ -1819,7 +1819,7 @@ def handle_prune_signed_copies(options, session, args):
mycount +=1
build_files += 1
build_space += st.st_size
#XXX - this makes some layout assumptions, but
# XXX - this makes some layout assumptions, but
# pathinfo doesn't report what we need
mydir = os.path.dirname(signedpath)
archdirs[mydir] = 1
@@ -2078,7 +2078,7 @@ def handle_list_signed(goptions, session, args):
for rinfo in rpms:
rpm_idx.setdefault(rinfo['id'], rinfo)
tagged[rinfo['id']] = 1
#Now figure out which sig entries actually have live copies
# Now figure out which sig entries actually have live copies
for sig in sigs:
rpm_id = sig['rpm_id']
sigkey = sig['sigkey']
@@ -2862,7 +2862,7 @@ def anon_handle_list_pkgs(goptions, session, args):
# no limiting clauses were specified
allpkgs = True
opts['inherited'] = not options.noinherit
#hiding dups only makes sense if we're querying a tag
# hiding dups only makes sense if we're querying a tag
if options.tag:
opts['with_dups'] = options.show_dups
else:
@@ -3736,7 +3736,7 @@ def handle_add_target(goptions, session, args):
if len(args) > 2:
dest_tag = args[2]
else:
#most targets have the same name as their destination
# most targets have the same name as their destination
dest_tag = name
activate_session(session, goptions)
if not (session.hasPerm('admin') or session.hasPerm('target')):
@@ -3866,7 +3866,7 @@ def anon_handle_list_targets(goptions, session, args):
targets = [x[1] for x in tmp_list]
for target in targets:
print(fmt % target)
#pprint.pprint(session.getBuildTargets())
# pprint.pprint(session.getBuildTargets())
def _printInheritance(tags, sibdepths=None, reverse=False):
@@ -3992,7 +3992,7 @@ def anon_handle_list_tags(goptions, session, args):
tags = session.listTags(buildinfo.get('id',None), pkginfo.get('id',None))
tags.sort(key=lambda x: x['name'])
#if options.verbose:
# if options.verbose:
# fmt = "%(name)s [%(id)i] %(perm)s %(locked)s %(arches)s"
if options.show_id:
fmt = "%(name)s [%(id)i]"
@@ -4094,14 +4094,14 @@ def _print_histline(entry, **kwargs):
if len(edit) != 1:
bad_edit = "%i elements" % (len(edit)+1)
other = edit[0]
#check edit for sanity
# check edit for sanity
if create or not other[2]:
bad_edit = "out of order"
if event_id != other[0]:
bad_edit = "non-matching"
if bad_edit:
print("Warning: unusual edit at event %i in table %s (%s)" % (event_id, table, bad_edit))
#we'll simply treat them as separate events
# we'll simply treat them as separate events
pprint.pprint(entry)
pprint.pprint(edit)
_print_histline(entry, **kwargs)
@@ -4415,11 +4415,11 @@ def anon_handle_list_history(goptions, session, args):
if x['revoke_event'] is not None:
if distinguish_match(x, 'revoked'):
timeline.append((x['revoke_event'], table, 0, x.copy()))
#pprint.pprint(timeline[-1])
# pprint.pprint(timeline[-1])
if distinguish_match(x, 'created'):
timeline.append((x['create_event'], table, 1, x))
timeline.sort(key=lambda entry: entry[:3])
#group edits together
# group edits together
new_timeline = []
last_event = None
edit_index = {}
@@ -4892,7 +4892,7 @@ def handle_edit_tag(goptions, session, args):
opts['extra'] = extra
if options.remove_extra:
opts['remove_extra'] = options.remove_extra
#XXX change callname
# XXX change callname
session.editTag2(tag, **opts)
@@ -4927,7 +4927,7 @@ def handle_lock_tag(goptions, session, args):
selected = [session.getTag(name, strict=True) for name in args]
for tag in selected:
if options.master:
#set the master lock
# set the master lock
if tag['locked']:
print(_("Tag %s: master lock already set") % tag['name'])
continue
@@ -5293,12 +5293,12 @@ def anon_handle_list_external_repos(goptions, session, args):
def _pick_external_repo_priority(session, tag):
"""pick priority after current ones, leaving space for later insertions"""
repolist = session.getTagExternalRepos(tag_info=tag)
#ordered by priority
# ordered by priority
if not repolist:
priority = 5
else:
priority = (repolist[-1]['priority'] + 7) // 5 * 5
#at least 3 higher than current max and a multiple of 5
# at least 3 higher than current max and a multiple of 5
return priority
@@ -5404,7 +5404,7 @@ def handle_remove_external_repo(goptions, session, args):
return 0
tags = current_tags
if delete:
#removing entirely
# removing entirely
if current_tags and not options.force:
print(_("Error: external repo %s used by tag(s): %s") % (repo, ', '.join(current_tags)))
print(_("Use --force to remove anyway"))
@@ -5708,10 +5708,10 @@ def _build_image_indirection(options, task_opts, session, args):
if not options.quiet:
print("Created task: %d" % task_id)
print("Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id))
#if task_opts.wait or (task_opts.wait is None and not _running_in_bg()):
# if task_opts.wait or (task_opts.wait is None and not _running_in_bg()):
# session.logout()
# return watch_tasks(session, [task_id], quiet=options.quiet)
#else:
# else:
# return
@@ -6045,7 +6045,7 @@ def handle_win_build(options, session, args):
opts[key] = val
priority = None
if build_opts.background:
#relative to koji.PRIO_DEFAULT
# relative to koji.PRIO_DEFAULT
priority = 5
task_id = session.winBuild(vm_name, scmurl, target, opts, priority=priority)
if not build_opts.quiet:
@@ -6376,7 +6376,7 @@ def handle_tag_build(opts, session, args):
tasks = []
for pkg in args[1:]:
task_id = session.tagBuild(args[0], pkg, force=options.force)
#XXX - wait on task
# XXX - wait on task
tasks.append(task_id)
print("Created task %d" % task_id)
if _running_in_bg() or options.nowait:
@@ -6468,7 +6468,7 @@ def handle_untag_build(goptions, session, args):
builds = []
for binfo in tagged:
if binfo['name'] not in seen_pkg:
#latest for this package
# latest for this package
if options.verbose:
print(_("Leaving latest build for package %(name)s: %(nvr)s") % binfo)
else:


@@ -72,7 +72,7 @@ def arg_filter(arg):
pass
if arg in ARGMAP:
return ARGMAP[arg]
#handle lists/dicts?
# handle lists/dicts?
return arg
@@ -148,7 +148,7 @@ class TaskWatcher(object):
self.level = level
self.quiet = quiet
#XXX - a bunch of this stuff needs to adapt to different tasks
# XXX - a bunch of this stuff needs to adapt to different tasks
def str(self):
if self.info:
@@ -189,7 +189,7 @@ class TaskWatcher(object):
sys.exit(1)
state = self.info['state']
if last:
#compare and note status changes
# compare and note status changes
laststate = last['state']
if laststate != state:
if not self.quiet:
@@ -555,7 +555,7 @@ def activate_session(session, options):
noauth = options.authtype == "noauth" or getattr(options, 'noauth', False)
runas = getattr(options, 'runas', None)
if noauth:
#skip authentication
# skip authentication
pass
elif options.authtype == "ssl" or os.path.isfile(options.cert) and options.authtype is None:
# authenticate using SSL client cert
@@ -626,7 +626,7 @@ def _list_tasks(options, session):
tasklist = session.listTasks(callopts, qopts)
tasks = dict([(x['id'], x) for x in tasklist])
#thread the tasks
# thread the tasks
for t in tasklist:
if t['parent'] is not None:
parent = tasks.get(t['parent'])

File diff suppressed because it is too large


@@ -64,7 +64,7 @@ class HandlerRegistry(object):
def __init__(self):
self.funcs = {}
#introspection functions
# introspection functions
self.register_function(self.list_api, name="_listapi")
self.register_function(self.system_listMethods, name="system.listMethods")
self.register_function(self.system_methodSignature, name="system.methodSignature")
@@ -106,7 +106,7 @@ class HandlerRegistry(object):
"""
for v in six.itervalues(vars(plugin)):
if isinstance(v, type):
#skip classes
# skip classes
continue
if callable(v):
if getattr(v, 'exported', False):
@@ -138,8 +138,8 @@ class HandlerRegistry(object):
def list_api(self):
funcs = []
for name, func in self.funcs.items():
#the keys in self.funcs determine the name of the method as seen over xmlrpc
#func.__name__ might differ (e.g. for dotted method names)
# the keys in self.funcs determine the name of the method as seen over xmlrpc
# func.__name__ might differ (e.g. for dotted method names)
args = self._getFuncArgs(func)
argspec = self.getargspec(func)
funcs.append({'name': name,
@@ -164,7 +164,7 @@ class HandlerRegistry(object):
return koji.util.to_list(self.funcs.keys())
def system_methodSignature(self, method):
#it is not possible to autogenerate this data
# it is not possible to autogenerate this data
return 'signatures not supported'
def system_methodHelp(self, method):
@@ -268,7 +268,7 @@ class ModXMLRPCRequestHandler(object):
return response
def handle_upload(self, environ):
#uploads can't be in a multicall
# uploads can't be in a multicall
context.method = None
self.check_session()
self.enforce_lockout()
@@ -280,13 +280,13 @@ class ModXMLRPCRequestHandler(object):
def check_session(self):
if not hasattr(context, "session"):
#we may be called again by one of our meta-calls (like multiCall)
#so we should only create a session if one does not already exist
# we may be called again by one of our meta-calls (like multiCall)
# so we should only create a session if one does not already exist
context.session = koji.auth.Session()
try:
context.session.validate()
except koji.AuthLockError:
#might be ok, depending on method
# might be ok, depending on method
if context.method not in ('exclusiveSession', 'login', 'krbLogin', 'logout'):
raise
@@ -359,7 +359,7 @@ class ModXMLRPCRequestHandler(object):
"""Handle a single XML-RPC request"""
pass
#XXX no longer used
# XXX no longer used
def offline_reply(start_response, msg=None):
@@ -395,13 +395,13 @@ def load_config(environ):
- all PythonOptions (except ConfigFile) are now deprecated and support for them
will disappear in a future version of Koji
"""
#get our config file(s)
# get our config file(s)
cf = environ.get('koji.hub.ConfigFile', '/etc/koji-hub/hub.conf')
cfdir = environ.get('koji.hub.ConfigDir', '/etc/koji-hub/hub.conf.d')
config = koji.read_config_files([cfdir, (cf, True)], raw=True)
cfgmap = [
#option, type, default
# option, type, default
['DBName', 'string', None],
['DBUser', 'string', None],
['DBHost', 'string', None],
@@ -479,7 +479,7 @@ def load_config(environ):
# load policies
# (only from config file)
if config and config.has_section('policy'):
#for the moment, we simply transfer the policy conf to opts
# for the moment, we simply transfer the policy conf to opts
opts['policy'] = dict(config.items('policy'))
else:
opts['policy'] = {}
@@ -504,7 +504,7 @@ def load_plugins(opts):
tracker.load(name)
except Exception:
logger.error(''.join(traceback.format_exception(*sys.exc_info())))
#make this non-fatal, but set ServerOffline
# make this non-fatal, but set ServerOffline
opts['ServerOffline'] = True
opts['OfflineMessage'] = 'configuration error'
return tracker
@@ -542,7 +542,7 @@ _default_policies = {
def get_policy(opts, plugins):
if not opts.get('policy'):
return
#first find available policy tests
# first find available policy tests
alltests = [koji.policy.findSimpleTests([vars(kojihub), vars(koji.policy)])]
# we delay merging these to allow a test to be overridden for a specific policy
for plugin_name in opts.get('Plugins', '').split():
@@ -552,7 +552,7 @@ def get_policy(opts, plugins):
alltests.append(koji.policy.findSimpleTests(vars(plugin)))
policy = {}
for pname, text in six.iteritems(opts['policy']):
#filter/merge tests
# filter/merge tests
merged = {}
for tests in alltests:
# tests can be limited to certain policies by setting a class variable
@@ -598,7 +598,7 @@ def setup_logging1():
global log_handler
logger = logging.getLogger("koji")
logger.setLevel(logging.WARNING)
#stderr logging (stderr goes to httpd logs)
# stderr logging (stderr goes to httpd logs)
log_handler = logging.StreamHandler()
log_format = '%(asctime)s [%(levelname)s] SETUP p=%(process)s %(name)s: %(message)s'
log_handler.setFormatter(HubFormatter(log_format))
@@ -608,7 +608,7 @@ def setup_logging1():
def setup_logging2(opts):
global log_handler
"""Adjust logging based on configuration options"""
#determine log level
# determine log level
level = opts['LogLevel']
valid_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
# the config value can be a single level name or a series of
@@ -624,7 +624,7 @@ def setup_logging2(opts):
default = level
if level not in valid_levels:
raise koji.GenericError("Invalid log level: %s" % level)
#all our loggers start with koji
# all our loggers start with koji
if name == '':
name = 'koji'
default = level
@@ -639,9 +639,9 @@ def setup_logging2(opts):
if opts.get('KojiDebug'):
logger.setLevel(logging.DEBUG)
elif default is None:
#LogLevel did not configure a default level
# LogLevel did not configure a default level
logger.setLevel(logging.WARNING)
#log_handler defined in setup_logging1
# log_handler defined in setup_logging1
log_handler.setFormatter(HubFormatter(opts['LogFormat']))
@@ -746,7 +746,7 @@ def application(environ, start_response):
]
start_response('200 OK', headers)
if h.traceback:
#rollback
# rollback
context.cnx.rollback()
elif context.commit_pending:
# Currently there is not much data we can provide to the
@@ -764,7 +764,7 @@ def application(environ, start_response):
h.logger.debug("Returning %d bytes after %f seconds", len(response),
time.time() - start)
finally:
#make sure context gets cleaned up
# make sure context gets cleaned up
if hasattr(context, 'cnx'):
try:
context.cnx.close()


@@ -128,7 +128,7 @@ for h in (
'RECOMMENDNAME', 'RECOMMENDVERSION', 'RECOMMENDFLAGS'):
SUPPORTED_OPT_DEP_HDRS[h] = hasattr(rpm, 'RPMTAG_%s' % h)
## BEGIN kojikamid dup
# BEGIN kojikamid dup #
class Enum(dict):
"""A simple class to track our enumerated constants
@@ -167,7 +167,7 @@ class Enum(dict):
# deprecated
getvalue = _notImplemented
#read-only
# read-only
__setitem__ = _notImplemented
__delitem__ = _notImplemented
clear = _notImplemented
@@ -176,7 +176,7 @@ class Enum(dict):
update = _notImplemented
setdefault = _notImplemented
## END kojikamid dup
# END kojikamid dup #
API_VERSION = 1
@@ -215,7 +215,7 @@ AUTHTYPE_KERB = 1
AUTHTYPE_SSL = 2
AUTHTYPE_GSSAPI = 3
#dependency types
# dependency types
DEP_REQUIRE = 0
DEP_PROVIDE = 1
DEP_OBSOLETE = 2
@@ -225,7 +225,7 @@ DEP_ENHANCE = 5
DEP_SUPPLEMENT = 6
DEP_RECOMMEND = 7
#dependency flags
# dependency flags
RPMSENSE_LESS = 2
RPMSENSE_GREATER = 4
RPMSENSE_EQUAL = 8
@@ -266,7 +266,7 @@ TAG_UPDATE_TYPES = Enum((
'MANUAL',
))
## BEGIN kojikamid dup
# BEGIN kojikamid dup #
CHECKSUM_TYPES = Enum((
'md5',
@@ -274,9 +274,9 @@ CHECKSUM_TYPES = Enum((
'sha256',
))
## END kojikamid dup
# END kojikamid dup #
#PARAMETERS
# PARAMETERS
BASEDIR = '/mnt/koji'
# default task priority
PRIO_DEFAULT = 20
@@ -285,9 +285,9 @@ PRIO_DEFAULT = 20
DEFAULT_REQUEST_TIMEOUT = 60 * 60 * 12
DEFAULT_AUTH_TIMEOUT = 60
## BEGIN kojikamid dup
# BEGIN kojikamid dup #
#Exceptions
# Exceptions
PythonImportError = ImportError # will be masked by koji's one
class GenericError(Exception):
@@ -302,7 +302,7 @@ class GenericError(Exception):
return str(self.args[0])
except:
return str(self.__dict__)
## END kojikamid dup
# END kojikamid dup #
class LockError(GenericError):
"""Raised when there is a lock conflict"""
@@ -320,12 +320,12 @@ class ActionNotAllowed(GenericError):
"""Raised when the session does not have permission to take some action"""
faultCode = 1004
## BEGIN kojikamid dup
# BEGIN kojikamid dup #
class BuildError(GenericError):
"""Raised when a build fails"""
faultCode = 1005
## END kojikamid dup
# END kojikamid dup #
class AuthLockError(AuthError):
"""Raised when a lock prevents authentication"""
@@ -403,7 +403,7 @@ class MultiCallInProgress(object):
pass
#A function to get create an exception from a fault
# A function to get create an exception from a fault
def convertFault(fault):
"""Convert a fault to the corresponding Exception type, if possible"""
code = getattr(fault, 'faultCode', None)
@@ -415,7 +415,7 @@ def convertFault(fault):
ret = v(fault.faultString)
ret.fromFault = True
return ret
#otherwise...
# otherwise...
return fault
def listFaults():
@@ -440,7 +440,7 @@ def listFaults():
ret.sort(key=lambda x: x['faultCode'])
return ret
#functions for encoding/decoding optional arguments
# functions for encoding/decoding optional arguments
def encode_args(*args, **opts):
"""The function encodes optional arguments as regular arguments.
@@ -481,10 +481,10 @@ def decode_int(n):
"""If n is not an integer, attempt to convert it"""
if isinstance(n, six.integer_types):
return n
#else
# else
return int(n)
#commonly used functions
# commonly used functions
def safe_xmlrpc_loads(s):
"""Load xmlrpc data from a string, but catch faults"""
@@ -493,7 +493,7 @@ def safe_xmlrpc_loads(s):
except Fault as f:
return f
## BEGIN kojikamid dup
# BEGIN kojikamid dup #
def ensuredir(directory):
@@ -528,7 +528,7 @@ def ensuredir(directory):
raise
return directory
## END kojikamid dup
# END kojikamid dup #
def daemonize():
"""Detach and run in background"""
@@ -537,12 +537,12 @@ def daemonize():
os._exit(0)
os.setsid()
signal.signal(signal.SIGHUP, signal.SIG_IGN)
#fork again
# fork again
pid = os.fork()
if pid:
os._exit(0)
os.chdir("/")
#redirect stdin/stdout/sterr
# redirect stdin/stdout/sterr
fd0 = os.open('/dev/null', os.O_RDONLY)
fd1 = os.open('/dev/null', os.O_RDWR)
fd2 = os.open('/dev/null', os.O_RDWR)
@@ -597,7 +597,7 @@ def rpm_hdr_size(f, ofs=None):
il = multibyte(data[0:4])
dl = multibyte(data[4:8])
#this is what the section data says the size should be
# this is what the section data says the size should be
hdrsize = 8 + 16 * il + dl
# hdrsize rounded up to nearest 8 bytes
@@ -624,7 +624,7 @@ class RawHeader(object):
self._index()
def version(self):
#fourth byte is the version
# fourth byte is the version
return _ord(self.header[3])
def _index(self):
@@ -635,7 +635,7 @@ class RawHeader(object):
il = multibyte(data[:4])
dl = multibyte(data[4:8])
#read the index (starts at offset 16)
# read the index (starts at offset 16)
index = {}
for i in range(il):
entry = []
@@ -643,30 +643,31 @@ class RawHeader(object):
ofs = 16 + i*16 + j*4
data = [_ord(x) for x in self.header[ofs:ofs+4]]
entry.append(multibyte(data))
#print("Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry))
# print("Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry))
index[entry[0]] = entry
self.datalen = dl
self.index = index
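Each entry is four big-endian 32-bit words: tag, dtype, offset, count. A sketch decoding one made-up entry (1000 is RPMTAG_NAME; dtype 6 is the string type):

def be32(data):                      # same role as multibyte() above
    num = 0
    for byte in data:
        num = num * 256 + byte
    return num

raw = bytes([0, 0, 3, 232,  0, 0, 0, 6,  0, 0, 0, 16,  0, 0, 0, 1])
tag, dtype, offset, count = [be32(raw[i:i + 4]) for i in range(0, 16, 4)]
# -> (1000, 6, 16, 1): RPMTAG_NAME, string, 16 bytes into the store, 1 value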
def dump(self):
print("HEADER DUMP:")
#calculate start of store
# calculate start of store
il = len(self.index)
store = 16 + il * 16
#print("start is: %d" % start)
#print("index length: %d" % il)
# print("start is: %d" % start)
# print("index length: %d" % il)
print("Store at offset %d (%0x)" % (store, store))
#sort entries by offset, dtype
#also rearrange: tag, dtype, offset, count -> offset, dtype, tag, count
# sort entries by offset, dtype
# also rearrange: tag, dtype, offset, count -> offset, dtype, tag, count
order = sorted([(x[2], x[1], x[0], x[3]) for x in six.itervalues(self.index)])
next = store
#map some rpmtag codes
# map some rpmtag codes
tags = {}
for name, code in six.iteritems(rpm.__dict__):
if name.startswith('RPMTAG_') and isinstance(code, int):
tags[code] = name[7:].lower()
for entry in order:
#tag, dtype, offset, count = entry
# tag, dtype, offset, count = entry
offset, dtype, tag, count = entry
pos = store + offset
if next is not None:
@@ -679,17 +680,17 @@ class RawHeader(object):
print("Tag: %d [%s], Type: %d, Offset: %x, Count: %d" \
% (tag, tags.get(tag, '?'), dtype, offset, count))
if dtype == 0:
#null
# null
print("[NULL entry]")
next = pos
elif dtype == 1:
#char
# char
for i in range(count):
print("Char: %r" % self.header[pos])
pos += 1
next = pos
elif dtype >= 2 and dtype <= 5:
#integer
# integer
n = 1 << (dtype - 2)
for i in range(count):
data = [_ord(x) for x in self.header[pos:pos+n]]
@@ -738,7 +739,7 @@ class RawHeader(object):
return self._getitem(dtype, offset, count)
def _getitem(self, dtype, offset, count):
#calculate start of store
# calculate start of store
il = len(self.index)
store = 16 + il * 16
pos = store + offset
@@ -752,10 +753,10 @@ class RawHeader(object):
end = self.header.find('\0', pos)
return self.header[pos:end]
elif dtype == 7:
#raw data
# raw data
return self.header[pos:pos+count]
else:
#XXX - not all valid data types are handled
# XXX - not all valid data types are handled
raise GenericError("Unable to read header data type: %x" % dtype)
def get(self, key, default=None):
@@ -1108,7 +1109,7 @@ def is_debuginfo(name):
def canonArch(arch):
"""Given an arch, return the "canonical" arch"""
#XXX - this could stand to be smarter, and we should probably
# XXX - this could stand to be smarter, and we should probably
# have some other related arch-mangling functions.
if fnmatch(arch, 'i?86') or arch == 'athlon':
return 'i386'
@@ -1295,12 +1296,12 @@ BuildArch: noarch
#package requirements
"""]
#add a requires entry for all the packages in buildgroup, and in
#groups required by buildgroup
# add a requires entry for all the packages in buildgroup, and in
# groups required by buildgroup
need = [buildgroup]
seen_grp = {}
seen_pkg = {}
#index groups
# index groups
groups = dict([(g['name'], g) for g in grplist])
for group_name in need:
if group_name in seen_grp:
@@ -1375,7 +1376,7 @@ def generate_comps(groups, expand_groups=False):
""" <biarchonly>%s</biarchonly>
""" % boolean_text(True))
#print grouplist, if any
# print grouplist, if any
if g['grouplist'] and not expand_groups:
data.append(
""" <grouplist>
@@ -1383,7 +1384,7 @@ def generate_comps(groups, expand_groups=False):
grouplist = list(g['grouplist'])
grouplist.sort(key=lambda x: x['name'])
for x in grouplist:
#['req_id','type','is_metapkg','name']
# ['req_id','type','is_metapkg','name']
name = x['name']
thetype = x['type']
tag = "groupreq"
@@ -1401,9 +1402,9 @@ def generate_comps(groups, expand_groups=False):
""" </grouplist>
""")
#print packagelist, if any
# print packagelist, if any
def package_entry(pkg):
#p['package_id','type','basearchonly','requires','name']
# p['package_id','type','basearchonly','requires','name']
name = pkg['package']
opts = 'type="%s"' % pkg['type']
if pkg['basearchonly']:
@@ -1424,7 +1425,7 @@ def generate_comps(groups, expand_groups=False):
""" % package_entry(p))
# also include expanded list, if needed
if expand_groups and g['grouplist']:
#add a requires entry for all packages in groups required by buildgroup
# add a requires entry for all packages in groups required by buildgroup
need = [req['name'] for req in g['grouplist']]
seen_grp = {g['name'] : 1}
seen_pkg = {}
@@ -1484,12 +1485,12 @@ def genMockConfig(name, arch, managed=False, repoid=None, tag_name=None, **opts)
raise GenericError("please provide a repo and tag")
topurls = opts.get('topurls')
if not topurls:
#cli command still passes plain topurl
# cli command still passes plain topurl
topurl = opts.get('topurl')
if topurl:
topurls = [topurl]
if topurls:
#XXX - PathInfo isn't quite right for this, but it will do for now
# XXX - PathInfo isn't quite right for this, but it will do for now
pathinfos = [PathInfo(topdir=_u) for _u in topurls]
urls = ["%s/%s" % (_p.repo(repoid, tag_name), arch) for _p in pathinfos]
else:
@@ -1539,7 +1540,7 @@ def genMockConfig(name, arch, managed=False, repoid=None, tag_name=None, **opts)
if mavenrc:
files['etc/mavenrc'] = mavenrc
#generate yum.conf
# generate yum.conf
yc_parts = ["[main]\n"]
# HTTP proxy for yum
if opts.get('yum_proxy'):
@@ -1780,7 +1781,7 @@ def read_config(profile_name, user_config=None):
result = config_defaults.copy()
#note: later config files override earlier ones
# note: later config files override earlier ones
# /etc/koji.conf.d
configs = ['/etc/koji.conf.d']
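A minimal sketch of that precedence rule (load_one is a hypothetical helper standing in for the ConfigParser reads; the path list is assumed):

import os

result = config_defaults.copy()          # lowest priority
for path in ['/etc/koji.conf.d', '/etc/koji.conf',
             os.path.expanduser('~/.koji/config')]:
    result.update(load_one(path))        # later files win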
@@ -1807,9 +1808,9 @@ def read_config(profile_name, user_config=None):
got_conf = True
result['profile'] = profile_name
for name, value in config.items(profile_name):
#note the config_defaults dictionary also serves to indicate which
#options *can* be set via the config file. Such options should
#not have a default value set in the option parser.
# note the config_defaults dictionary also serves to indicate which
# options *can* be set via the config file. Such options should
# not have a default value set in the option parser.
if name in result:
if name in ('anon_retry', 'offline_retry',
'use_fast_upload', 'krb_rdns', 'debug',
@@ -1984,7 +1985,7 @@ class PathInfo(object):
def volumedir(self, volume):
if volume == 'DEFAULT' or volume is None:
return self.topdir
#else
# else
return self.topdir + ("/vol/%s" % volume)
def build(self, build):
@@ -2141,7 +2142,7 @@ def is_cert_error(e):
'certificate expired' in ssl_reason):
return True
#otherwise
# otherwise
return False
@@ -2553,7 +2554,7 @@ class ClientSession(object):
handler, headers, request = self._prepCall('logout', ())
self._sendCall(handler, headers, request)
except AuthExpired:
#this can happen when an exclusive session is forced
# this can happen when an exclusive session is forced
pass
self.setSession(None)
@@ -2578,10 +2579,10 @@ class ClientSession(object):
return
self.setSession(None)
#we've had some trouble with this method causing strange problems
#(like infinite recursion). Possibly triggered by initialization failure,
#and possibly due to some interaction with __getattr__.
#Re-enabling with a small improvement
# we've had some trouble with this method causing strange problems
# (like infinite recursion). Possibly triggered by initialization failure,
# and possibly due to some interaction with __getattr__.
# Re-enabling with a small improvement
def __del__(self):
if self.__dict__:
try:
@@ -2594,7 +2595,7 @@ class ClientSession(object):
return self._callMethod(name, args, opts)
def _prepCall(self, name, args, kwargs=None):
#pass named opts in a way the server can understand
# pass named opts in a way the server can understand
if kwargs is None:
kwargs = {}
if name == 'rawUpload':
@@ -2713,27 +2714,27 @@ class ClientSession(object):
self.retries += 1
try:
return self._sendCall(handler, headers, request)
#basically, we want to retry on most errors, with a few exceptions
# basically, we want to retry on most errors, with a few exceptions
# - faults (this means the call completed and failed)
# - SystemExit, KeyboardInterrupt
# note that, for logged-in sessions the server should tell us (via a RetryError fault)
# if the call cannot be retried. For non-logged-in sessions, all calls should be read-only
# and hence retryable.
except Fault as fault:
#try to convert the fault to a known exception
# try to convert the fault to a known exception
err = convertFault(fault)
if isinstance(err, ServerOffline):
if self.opts.get('offline_retry', False):
secs = self.opts.get('offline_retry_interval', interval)
self.logger.debug("Server offline. Retrying in %i seconds", secs)
time.sleep(secs)
#reset try count - this isn't a typical error, this is a running server
#correctly reporting an outage
# reset try count - this isn't a typical error, this is a running server
# correctly reporting an outage
tries = 0
continue
raise err
except (SystemExit, KeyboardInterrupt):
#(depending on the python version, these may or may not be subclasses of Exception)
# (depending on the python version, these may or may not be subclasses of Exception)
raise
except Exception as e:
tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
@@ -2744,8 +2745,8 @@ class ClientSession(object):
raise
if not self.logged_in:
#in the past, non-logged-in sessions did not retry. For compatibility purposes
#this behavior is governed by the anon_retry opt.
# in the past, non-logged-in sessions did not retry. For compatibility purposes
# this behavior is governed by the anon_retry opt.
if not self.opts.get('anon_retry', False):
raise
@@ -2754,14 +2755,14 @@ class ClientSession(object):
if tries > max_retries:
raise
#otherwise keep retrying
# otherwise keep retrying
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(tb_str)
self.logger.info("Try #%s for call %s (%s) failed: %s", tries, self.callnum, name, e)
if tries > 1:
# first retry is immediate, after that we honor retry_interval
time.sleep(interval)
#not reached
# not reached
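The retry policy spelled out in the comments above, reduced to a skeleton (simplified: the ServerOffline branch and logging are omitted, and the option defaults are assumed):

tries = 0
while True:
    tries += 1
    try:
        return self._sendCall(handler, headers, request)
    except Fault as fault:
        raise convertFault(fault)          # call completed; never retry
    except (SystemExit, KeyboardInterrupt):
        raise                              # never swallow shutdown
    except Exception:
        if not self.logged_in and not self.opts.get('anon_retry', False):
            raise
        if tries > self.opts.get('max_retries', 30):
            raise
        if tries > 1:                      # first retry is immediate
            time.sleep(self.opts.get('retry_interval', 20))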
def multiCall(self, strict=False, batch=None):
"""Execute a prepared multicall
@@ -2816,7 +2817,7 @@ class ClientSession(object):
else:
ret = self._callMethod('multiCall', (calls,), {})
if strict:
#check for faults and raise first one
# check for faults and raise first one
for entry in ret:
if isinstance(entry, dict):
fault = Fault(entry['faultCode'], entry['faultString'])
@@ -2825,7 +2826,7 @@ class ClientSession(object):
return ret
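Typical client-side use of this machinery (tag names made up): queue calls by setting multicall, then flush them in one round trip.

session.multicall = True
for tag in ('f31-build', 'f32-build'):
    session.getTag(tag)                    # queued, returns None
results = session.multiCall(strict=True)   # first fault raises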
def __getattr__(self, name):
#if name[:1] == '_':
# if name[:1] == '_':
# raise AttributeError("no attribute %r" % name)
if name == '_apidoc':
return self.__dict__['_apidoc']
@@ -2953,7 +2954,7 @@ class ClientSession(object):
start = time.time()
# XXX - stick in a config or something
retries = 3
fo = open(localfile, "rb") #specify bufsize?
fo = open(localfile, "rb") # specify bufsize?
totalsize = os.path.getsize(localfile)
ofs = 0
md5sum = hashlib.md5()
@@ -3207,15 +3208,15 @@ class DBHandler(logging.Handler):
columns.append(key)
values.append("%%(%s)s" % key)
data[key] = value % record.__dict__
#values.append(_quote(value % record.__dict__))
# values.append(_quote(value % record.__dict__))
columns = ",".join(columns)
values = ",".join(values)
command = "INSERT INTO %s (%s) VALUES (%s)" % (self.table, columns, values)
#note we're letting cursor.execute do the escaping
# note we're letting cursor.execute do the escaping
cursor.execute(command, data)
cursor.close()
#self.cnx.commit()
#XXX - committing here is most likely wrong, but we need to set commit_pending or something
# self.cnx.commit()
# XXX - committing here is most likely wrong, but we need to set commit_pending or something
# ...and this is really the wrong place for that
except:
self.handleError(record)
@@ -3328,7 +3329,7 @@ def _taskLabel(taskInfo):
extra = build_target['name']
elif method == 'winbuild':
if 'request' in taskInfo:
#vm = taskInfo['request'][0]
# vm = taskInfo['request'][0]
url = taskInfo['request'][1]
target = taskInfo['request'][2]
module_info = _module_info(url)

View file

@@ -33,7 +33,7 @@ arches = {
"amd64": "x86_64",
"ia32e": "x86_64",
#ppc64le
# ppc64le
"ppc64le": "noarch",
# ppc
@@ -73,7 +73,7 @@ arches = {
"armv5tejl": "armv5tel",
"armv5tel": "noarch",
#arm hardware floating point
# arm hardware floating point
"armv7hnl": "armv7hl",
"armv7hl": "armv6hl",
"armv6hl": "noarch",
@@ -86,7 +86,7 @@ arches = {
"sh4": "noarch",
"sh3": "noarch",
#itanium
# itanium
"ia64": "noarch",
}

View file

@@ -79,7 +79,7 @@ class Session(object):
self._perms = None
self._groups = None
self._host_id = ''
#get session data from request
# get session data from request
if args is None:
environ = getattr(context, 'environ', {})
args = environ.get('QUERY_STRING', '')
@@ -97,7 +97,7 @@ class Session(object):
callnum = args['callnum'][0]
except:
callnum = None
#lookup the session
# lookup the session
c = context.cnx.cursor()
fields = {
'authtype': 'authtype',
@@ -125,10 +125,10 @@ class Session(object):
if not row:
raise koji.AuthError('Invalid session or bad credentials')
session_data = dict(zip(aliases, row))
#check for expiration
# check for expiration
if session_data['expired']:
raise koji.AuthExpired('session "%i" has expired' % id)
#check for callnum sanity
# check for callnum sanity
if callnum is not None:
try:
callnum = int(callnum)
@@ -140,14 +140,14 @@ class Session(object):
raise koji.SequenceError("%d > %d (session %d)" \
% (lastcall, callnum, id))
elif lastcall == callnum:
#Some explanation:
#This function is one of the few that performs its own commit.
#However, our storage of the current callnum is /after/ that
#commit. This means the current callnum only gets committed if
#a commit happens afterward.
#We only schedule a commit for dml operations, so if we find the
#callnum in the db then a previous attempt succeeded but failed to
#return. Data was changed, so we cannot simply try the call again.
# Some explanation:
# This function is one of the few that performs its own commit.
# However, our storage of the current callnum is /after/ that
# commit. This means the current callnum only gets committed if
# a commit happens afterward.
# We only schedule a commit for dml operations, so if we find the
# callnum in the db then a previous attempt succeeded but failed to
# return. Data was changed, so we cannot simply try the call again.
method = getattr(context, 'method', 'UNKNOWN')
if method not in RetryWhitelist:
raise koji.RetryError(
@@ -155,7 +155,7 @@ class Session(object):
% (callnum, method, id))
# read user data
#historical note:
# historical note:
# we used to get a row lock here as an attempt to maintain sanity of exclusive
# sessions, but it was an imperfect approach and the lock could cause some
# performance issues.
@@ -166,25 +166,25 @@ class Session(object):
if user_data['status'] != koji.USER_STATUS['NORMAL']:
raise koji.AuthError('logins by %s are not allowed' % user_data['name'])
#check for exclusive sessions
# check for exclusive sessions
if session_data['exclusive']:
#we are the exclusive session for this user
# we are the exclusive session for this user
self.exclusive = True
else:
#see if an exclusive session exists
# see if an exclusive session exists
q = """SELECT id FROM sessions WHERE user_id=%(user_id)s
AND "exclusive" = TRUE AND expired = FALSE"""
#should not return multiple rows (unique constraint)
# should not return multiple rows (unique constraint)
c.execute(q, session_data)
row = c.fetchone()
if row:
(excl_id,) = row
if excl_id == session_data['master']:
#(note excl_id cannot be None)
#our master session has the lock
# (note excl_id cannot be None)
# our master session has the lock
self.exclusive = True
else:
#a session unrelated to us has the lock
# a session unrelated to us has the lock
self.lockerror = "User locked by another session"
# we don't enforce here, but rely on the dispatcher to enforce
# if appropriate (otherwise it would be impossible to steal
@@ -193,11 +193,11 @@ class Session(object):
# update timestamp
q = """UPDATE sessions SET update_time=NOW() WHERE id = %(id)i"""
c.execute(q, locals())
#save update time
# save update time
context.cnx.commit()
#update callnum (this is deliberately after the commit)
#see earlier note near RetryError
# update callnum (this is deliberately after the commit)
# see earlier note near RetryError
if callnum is not None:
q = """UPDATE sessions SET callnum=%(callnum)i WHERE id = %(id)i"""
c.execute(q, locals())
@@ -218,7 +218,7 @@ class Session(object):
# grab perm and groups data on the fly
if name == 'perms':
if self._perms is None:
#in a dict for quicker lookup
# in a dict for quicker lookup
self._perms = dict([[name, 1] for name in get_user_perms(self.user_id)])
return self._perms
elif name == 'groups':
@@ -254,7 +254,7 @@ class Session(object):
return override
else:
hostip = context.environ['REMOTE_ADDR']
#XXX - REMOTE_ADDR not promised by wsgi spec
# XXX - REMOTE_ADDR not promised by wsgi spec
if hostip == '127.0.0.1':
hostip = socket.gethostbyname(socket.gethostname())
return hostip
@@ -294,7 +294,7 @@ class Session(object):
self.checkLoginAllowed(user_id)
#create session and return
# create session and return
sinfo = self.createSession(user_id, hostip, koji.AUTHTYPE_NORMAL)
session_id = sinfo['session-id']
context.cnx.commit()
@@ -386,7 +386,7 @@ class Session(object):
# so get the local ip via a different method
local_ip = socket.gethostbyname(context.environ['SERVER_NAME'])
remote_ip = context.environ['REMOTE_ADDR']
#XXX - REMOTE_ADDR not promised by wsgi spec
# XXX - REMOTE_ADDR not promised by wsgi spec
# it appears that calling setports() with *any* value results in authentication
# failing with "Incorrect net address", so return 0 (which prevents
@@ -466,11 +466,11 @@ class Session(object):
if self.master is not None:
raise koji.GenericError("subsessions cannot become exclusive")
if self.exclusive:
#shouldn't happen
# shouldn't happen
raise koji.GenericError("session is already exclusive")
user_id = self.user_id
session_id = self.id
#acquire a row lock on the user entry
# acquire a row lock on the user entry
q = """SELECT id FROM users WHERE id=%(user_id)s FOR UPDATE"""
c.execute(q, locals())
# check that no other sessions for this user are exclusive
@@ -481,13 +481,13 @@ class Session(object):
row = c.fetchone()
if row:
if force:
#expire the previous exclusive session and try again
# expire the previous exclusive session and try again
(excl_id,) = row
q = """UPDATE sessions SET expired=TRUE,"exclusive"=NULL WHERE id=%(excl_id)s"""
c.execute(q, locals())
else:
raise koji.AuthLockError("Cannot get exclusive session")
#mark this session exclusive
# mark this session exclusive
q = """UPDATE sessions SET "exclusive"=TRUE WHERE id=%(session_id)s"""
c.execute(q, locals())
context.cnx.commit()
@@ -503,12 +503,12 @@ class Session(object):
def logout(self):
"""expire a login session"""
if not self.logged_in:
#XXX raise an error?
# XXX raise an error?
raise koji.AuthError("Not logged in")
update = """UPDATE sessions
SET expired=TRUE,exclusive=NULL
WHERE id = %(id)i OR master = %(id)i"""
#note we expire subsessions as well
# note we expire subsessions as well
c = context.cnx.cursor()
c.execute(update, {'id': self.id})
context.cnx.commit()
@@ -517,7 +517,7 @@ class Session(object):
def logoutChild(self, session_id):
"""expire a subsession"""
if not self.logged_in:
#XXX raise an error?
# XXX raise an error?
raise koji.AuthError("Not logged in")
update = """UPDATE sessions
SET expired=TRUE,exclusive=NULL
@@ -547,7 +547,7 @@ class Session(object):
(session_id,) = c.fetchone()
#add session id to database
# add session id to database
q = """
INSERT INTO sessions (id, user_id, key, hostip, authtype, master)
VALUES (%(session_id)i, %(user_id)i, %(key)s, %(hostip)s, %(authtype)i, %(master)s)
@@ -555,7 +555,7 @@ class Session(object):
c.execute(q, locals())
context.cnx.commit()
#return session info
# return session info
return {'session-id' : session_id, 'session-key' : key}
def subsession(self):
@@ -589,7 +589,7 @@ class Session(object):
def hasGroup(self, group_id):
if not self.logged_in:
return False
#groups indexed by id
# groups indexed by id
return group_id in self.groups
def isUser(self, user_id):
@@ -616,7 +616,7 @@ class Session(object):
return None
def getHostId(self):
#for compatibility
# for compatibility
return self.host_id
def getUserId(self, username):
@@ -805,7 +805,7 @@ def get_user_perms(user_id):
FROM user_perms JOIN permissions ON perm_id = permissions.id
WHERE active = TRUE AND user_id=%(user_id)s"""
c.execute(q, locals())
#return a list of permissions by name
# return a list of permissions by name
return [row[0] for row in c.fetchall()]
def get_user_data(user_id):

View file

@@ -171,7 +171,7 @@ def log_output(session, path, args, outfile, uploadpath, cwd=None, logerror=0, a
return status[1]
## BEGIN kojikamid dup
# BEGIN kojikamid dup #
class SCM(object):
"SCM abstraction class"
@@ -397,7 +397,7 @@ class SCM(object):
env = None
def _run(cmd, chdir=None, fatal=False, log=True, _count=[0]):
if globals().get('KOJIKAMID'):
#we've been inserted into kojikamid, use its run()
# we've been inserted into kojikamid, use its run()
return run(cmd, chdir=chdir, fatal=fatal, log=log) # noqa: F821
else:
append = (_count[0] > 0)
@@ -546,7 +546,7 @@ class SCM(object):
# just use the same url
r['source'] = self.url
return r
## END kojikamid dup
# END kojikamid dup #
class TaskManager(object):
@@ -613,7 +613,7 @@ class TaskManager(object):
If nolocal is True, do not try to scan local buildroots.
"""
#query buildroots in db that are not expired
# query buildroots in db that are not expired
states = [koji.BR_STATES[x] for x in ('INIT', 'WAITING', 'BUILDING')]
db_br = self.session.listBuildroots(hostID=self.host_id, state=tuple(states))
# index by id
@@ -627,8 +627,8 @@ class TaskManager(object):
self.logger.warn("Expiring taskless buildroot: %(id)i/%(tag_name)s/%(arch)s" % br)
self.session.host.setBuildRootState(id, st_expired)
elif task_id not in self.tasks:
#task not running - expire the buildroot
#TODO - consider recycling hooks here (with strong sanity checks)
# task not running - expire the buildroot
# TODO - consider recycling hooks here (with strong sanity checks)
self.logger.info("Expiring buildroot: %(id)i/%(tag_name)s/%(arch)s" % br)
self.logger.debug("Buildroot task: %r, Current tasks: %r" % (task_id, to_list(self.tasks.keys())))
self.session.host.setBuildRootState(id, st_expired)
@@ -640,13 +640,13 @@ class TaskManager(object):
local_only = [id for id in local_br if id not in db_br]
if local_only:
missed_br = self.session.listBuildroots(buildrootID=tuple(local_only))
#get all the task info in one call
# get all the task info in one call
tasks = []
for br in missed_br:
task_id = br['task_id']
if task_id:
tasks.append(task_id)
#index
# index
missed_br = dict([(row['id'], row) for row in missed_br])
tasks = dict([(row['id'], row) for row in self.session.getTaskInfo(tasks)])
for id in local_only:
@@ -671,7 +671,7 @@ class TaskManager(object):
self.logger.warn("%s: invalid task %s" % (desc, br['task_id']))
continue
if (task['state'] == koji.TASK_STATES['FAILED'] and age < self.options.failed_buildroot_lifetime):
#XXX - this could be smarter
# XXX - this could be smarter
# keep buildroots for failed tasks around for a little while
self.logger.debug("Keeping failed buildroot: %s" % desc)
continue
@@ -689,17 +689,17 @@ class TaskManager(object):
continue
else:
age = min(age, time.time() - st.st_mtime)
#note: https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=192153
#If rpmlib is installing in this chroot, removing it entirely
#can lead to a world of hurt.
#We remove the rootdir contents but leave the rootdir unless it
#is really old
# note: https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=192153
# If rpmlib is installing in this chroot, removing it entirely
# can lead to a world of hurt.
# We remove the rootdir contents but leave the rootdir unless it
# is really old
if age > 3600*24:
#dir untouched for a day
# dir untouched for a day
self.logger.info("Removing buildroot: %s" % desc)
if topdir and safe_rmtree(topdir, unmount=True, strict=False) != 0:
continue
#also remove the config
# also remove the config
try:
os.unlink(data['cfg'])
except OSError as e:
@@ -726,7 +726,7 @@ class TaskManager(object):
self.logger.debug("Expired/stray buildroots: %d" % len(local_only))
def _scanLocalBuildroots(self):
#XXX
# XXX
configdir = '/etc/mock/koji'
buildroots = {}
for f in os.listdir(configdir):
@@ -785,13 +785,13 @@ class TaskManager(object):
# by this host.
id = task['id']
if id not in self.pids:
#We don't have a process for this
#Expected to happen after a restart, otherwise this is an error
# We don't have a process for this
# Expected to happen after a restart, otherwise this is an error
stale.append(id)
continue
tasks[id] = task
if task.get('alert', False):
#wake up the process
# wake up the process
self.logger.info("Waking up task: %r" % task)
os.kill(self.pids[id], signal.SIGUSR2)
if not task['waiting']:
@@ -801,8 +801,8 @@ class TaskManager(object):
self.tasks = tasks
self.logger.debug("Current tasks: %r" % self.tasks)
if len(stale) > 0:
#A stale task is one which is opened to us, but we know nothing
#about. This will happen after a daemon restart, for example.
# A stale task is one which is opened to us, but we know nothing
# about. This will happen after a daemon restart, for example.
self.logger.info("freeing stale tasks: %r" % stale)
self.session.host.freeTasks(stale)
for id, pid in list(self.pids.items()):
@@ -844,15 +844,15 @@ class TaskManager(object):
self.logger.debug("Load Data:")
self.logger.debug(" hosts: %r" % hosts)
self.logger.debug(" tasks: %r" % tasks)
#now we organize this data into channel-arch bins
# now we organize this data into channel-arch bins
bin_hosts = {} #hosts indexed by bin
bins = {} #bins for this host
our_avail = None
for host in hosts:
host['bins'] = []
if host['id'] == self.host_id:
#note: task_load reported by server might differ from what we
#sent due to precision variation
# note: task_load reported by server might differ from what we
# sent due to precision variation
our_avail = host['capacity'] - host['task_load']
for chan in host['channels']:
for arch in host['arches'].split() + ['noarch']:
@@ -867,7 +867,7 @@ class TaskManager(object):
elif not bins:
self.logger.info("No bins for this host. Missing channel/arch config?")
# Note: we may still take an assigned task below
#sort available capacities for each of our bins
# sort available capacities for each of our bins
avail = {}
for bin in bins:
avail[bin] = [host['capacity'] - host['task_load'] for host in bin_hosts[bin]]
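A sketch of the bin bookkeeping with made-up numbers (the 'channel:arch' key format is assumed):

hosts = [{'id': 1, 'capacity': 2.0, 'task_load': 0.5,
          'channels': ['default'], 'arches': 'x86_64'},
         {'id': 2, 'capacity': 4.0, 'task_load': 3.0,
          'channels': ['default'], 'arches': 'x86_64'}]
bin_hosts = {}
for host in hosts:
    for chan in host['channels']:
        for arch in host['arches'].split() + ['noarch']:
            bin_hosts.setdefault('%s:%s' % (chan, arch), []).append(host)
avail = dict((b, sorted(h['capacity'] - h['task_load'] for h in hs))
             for b, hs in bin_hosts.items())
# avail == {'default:x86_64': [1.0, 1.5], 'default:noarch': [1.0, 1.5]}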
@@ -889,7 +889,7 @@ class TaskManager(object):
if task['state'] == koji.TASK_STATES['ASSIGNED']:
self.logger.debug("task is assigned")
if self.host_id == task['host_id']:
#assigned to us, we can take it regardless
# assigned to us, we can take it regardless
if self.takeTask(task):
return True
elif task['state'] == koji.TASK_STATES['FREE']:
@@ -897,18 +897,18 @@ class TaskManager(object):
self.logger.debug("task is free, bin=%r" % bin)
if bin not in bins:
continue
#see where our available capacity is compared to other hosts for this bin
#(note: the hosts in this bin are exactly those that could
#accept this task)
# see where our available capacity is compared to other hosts for this bin
# (note: the hosts in this bin are exactly those that could
# accept this task)
bin_avail = avail.get(bin, [0])
if self.checkAvailDelay(task, bin_avail, our_avail):
# decline for now and give the upper half a chance
continue
#otherwise, we attempt to open the task
# otherwise, we attempt to open the task
if self.takeTask(task):
return True
else:
#should not happen
# should not happen
raise Exception("Invalid task state reported by server")
return False
@@ -968,11 +968,11 @@ class TaskManager(object):
try:
(childpid, status) = os.waitpid(pid, os.WNOHANG)
except OSError as e:
#check errno
# check errno
if e.errno != errno.ECHILD:
#should not happen
# should not happen
raise
#otherwise assume the process is gone
# otherwise assume the process is gone
self.logger.info("%s: %s" % (prefix, e))
return True
if childpid != 0:
@@ -1118,7 +1118,7 @@ class TaskManager(object):
if children:
self._killChildren(task_id, children, sig=signal.SIGKILL, timeout=3.0)
#expire the task's subsession
# expire the task's subsession
session_id = self.subsessions.get(task_id)
if session_id:
self.logger.info("Expiring subsession %i (task %i)" % (session_id, task_id))
@@ -1126,7 +1126,7 @@
self.session.logoutChild(session_id)
del self.subsessions[task_id]
except:
#not much we can do about it
# not much we can do about it
pass
if wait:
return self._waitTask(task_id, pid)
@@ -1200,7 +1200,7 @@ class TaskManager(object):
self.status = "Load average %.2f > %.2f" % (loadavgs[0], maxload)
self.logger.info(self.status)
return False
#XXX - add more checks
# XXX - add more checks
return True
def takeTask(self, task):
@@ -1250,7 +1250,7 @@ class TaskManager(object):
if state != 'OPEN':
self.logger.warn("Task %i changed is %s", task_id, state)
return False
#otherwise...
# otherwise...
raise
if handler.Foreground:
self.logger.info("running task in foreground")
@@ -1263,27 +1263,27 @@ class TaskManager(object):
return True
def forkTask(self, handler):
#get the subsession before we fork
# get the subsession before we fork
newhub = self.session.subsession()
session_id = newhub.sinfo['session-id']
pid = os.fork()
if pid:
newhub._forget()
return pid, session_id
#in no circumstance should we return after the fork
#nor should any exceptions propagate past here
# in no circumstance should we return after the fork
# nor should any exceptions propagate past here
try:
self.session._forget()
#set process group
# set process group
os.setpgrp()
#use the subsession
# use the subsession
self.session = newhub
handler.session = self.session
#set a do-nothing handler for sigusr2
# set a do-nothing handler for sigusr2
signal.signal(signal.SIGUSR2, lambda *args: None)
self.runTask(handler)
finally:
#diediedie
# diediedie
try:
self.session.logout()
finally:
@@ -1302,10 +1302,10 @@ class TaskManager(object):
tb = ''.join(traceback.format_exception(*sys.exc_info())).replace(r"\n", "\n")
self.logger.warn("FAULT:\n%s" % tb)
except (SystemExit, koji.tasks.ServerExit, KeyboardInterrupt):
#we do not trap these
# we do not trap these
raise
except koji.tasks.ServerRestart:
#freeing this task will allow the pending restart to take effect
# freeing this task will allow the pending restart to take effect
self.session.host.freeTasks([handler.id])
return
except:
@@ -1315,7 +1315,7 @@ class TaskManager(object):
e_class, e = sys.exc_info()[:2]
faultCode = getattr(e_class, 'faultCode', 1)
if issubclass(e_class, koji.GenericError):
#just pass it through
# just pass it through
tb = str(e)
response = koji.xmlrpcplus.dumps(koji.xmlrpcplus.Fault(faultCode, tb))

View file

@@ -75,8 +75,8 @@ class DBWrapper:
if not self.cnx:
raise Exception('connection is closed')
self.cnx.cursor().execute('ROLLBACK')
#We do this rather than cnx.rollback to avoid opening a new transaction
#If our connection gets recycled, cnx.rollback will be called then.
# We do this rather than cnx.rollback to avoid opening a new transaction
# If our connection gets recycled, cnx.rollback will be called then.
self.cnx = None
@@ -177,7 +177,7 @@ def connect():
return DBWrapper(conn)
except psycopg2.Error:
del _DBconn.conn
#create a fresh connection
# create a fresh connection
opts = _DBopts
if opts is None:
opts = {}

View file

@@ -62,7 +62,7 @@ class PluginTracker(object):
def __init__(self, path=None, prefix='_koji_plugin__'):
self.searchpath = path
#prefix should not have a '.' in it, this can cause problems.
# prefix should not have a '.' in it, this can cause problems.
self.prefix = prefix
self.plugins = {}
@@ -71,9 +71,9 @@ class PluginTracker(object):
return self.plugins[name]
mod_name = name
if self.prefix:
#mod_name determines how the module is named in sys.modules
#Using a prefix helps prevent overlap with other modules
#(no '.' -- it causes problems)
# mod_name determines how the module is named in sys.modules
# Using a prefix helps prevent overlap with other modules
# (no '.' -- it causes problems)
mod_name = self.prefix + name
if mod_name in sys.modules and not reload:
raise koji.PluginError('module name conflict: %s' % mod_name)

View file

@@ -31,7 +31,7 @@ from koji.util import to_list
class BaseSimpleTest(object):
"""Abstract base class for simple tests"""
#Provide the name of the test
# Provide the name of the test
name = None
def __init__(self, str):
@@ -62,12 +62,12 @@ class FalseTest(BaseSimpleTest):
class AllTest(TrueTest):
name = 'all'
#alias for true
# alias for true
class NoneTest(FalseTest):
name = 'none'
#alias for false
# alias for false
class HasTest(BaseSimpleTest):
@@ -233,11 +233,11 @@ class SimpleRuleSet(object):
for line in lines:
rule = self.parse_line(line)
if rule is None:
#blank/etc
# blank/etc
continue
tests, negate, action = rule
if action == '{':
#nested rules
# nested rules
child = []
cursor.append([tests, negate, child])
stack.append(cursor)
@@ -275,11 +275,11 @@ class SimpleRuleSet(object):
"""
line = line.split('#', 1)[0].strip()
if not line:
#blank or all comment
# blank or all comment
return None
if line == '}':
return None, False, '}'
#?? allow }} ??
# ?? allow }} ??
negate = False
pos = line.rfind('::')
if pos == -1:
@@ -328,7 +328,7 @@ class SimpleRuleSet(object):
if not check:
break
else:
#all tests in current rule passed
# all tests in current rule passed
value = True
if negate:
value = not value
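For context, a hypothetical rule file in the syntax parsed here: tests (optionally negated) go before '::', an action or '{' comes after it, and '}' closes a nested block. The test names are taken from the classes above; the data key is made up:

has external_repo_id :: {
    all :: deny
}
all :: allow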
@@ -393,11 +393,11 @@ def findSimpleTests(namespace):
if isinstance(value, type(BaseSimpleTest)) and issubclass(value, BaseSimpleTest):
name = getattr(value, 'name', None)
if not name:
#use the class name
# use the class name
name = key
#but trim 'Test' from the end
# but trim 'Test' from the end
if name.endswith('Test') and len(name) > 4:
name = name[:-4]
ret.setdefault(name, value)
#...so first test wins in case of name overlap
# ...so first test wins in case of name overlap
return ret

View file

@@ -48,7 +48,7 @@ class Rpmdiff:
PRCO = ( 'REQUIRES', 'PROVIDES', 'CONFLICTS', 'OBSOLETES')
#{fname : (size, mode, mtime, flags, dev, inode,
# {fname : (size, mode, mtime, flags, dev, inode,
# nlink, state, vflags, user, group, digest)}
__FILEIDX = [ ['S', 0],
['M', 1],
@@ -71,7 +71,7 @@ class Rpmdiff:
try:
PREREQ_FLAG=rpm.RPMSENSE_PREREQ
except:
#(proyvind): This seems ugly, but then again so does
# (proyvind): This seems ugly, but then again so does
# this whole check as well.
PREREQ_FLAG=False

View file

@@ -51,7 +51,7 @@ def scan_mounts(topdir):
logger.warning('Found deleted mountpoint: %s' % path)
mplist.append(path)
fo.close()
#reverse sort so deeper dirs come first
# reverse sort so deeper dirs come first
mplist.sort(reverse=True)
return mplist
@@ -64,7 +64,7 @@ def umount_all(topdir):
rv = os.spawnvp(os.P_WAIT, cmd[0], cmd)
if rv != 0:
raise koji.GenericError('umount failed (exit code %r) for %s' % (rv, path))
#check mounts again
# check mounts again
remain = scan_mounts(topdir)
if remain:
raise koji.GenericError("Unmounting incomplete: %r" % remain)
@@ -340,7 +340,7 @@ class BaseTaskHandler(object):
if self.workdir is None:
return
safe_rmtree(self.workdir, unmount=False, strict=True)
#os.spawnvp(os.P_WAIT, 'rm', ['rm', '-rf', self.workdir])
# os.spawnvp(os.P_WAIT, 'rm', ['rm', '-rf', self.workdir])
def wait(self, subtasks=None, all=False, failany=False, canfail=None,
timeout=None):
@@ -385,7 +385,7 @@ class BaseTaskHandler(object):
while True:
finished, unfinished = self.session.host.taskWait(self.id)
if len(unfinished) == 0:
#all done
# all done
break
elif len(finished) > 0:
if all:
@@ -561,7 +561,7 @@ class BaseTaskHandler(object):
repo_info = self.session.getRepo(tag)
taginfo = self.session.getTag(tag, strict=True)
if not repo_info:
#make sure there is a target
# make sure there is a target
targets = self.session.getBuildTargets(buildTagID=taginfo['id'])
if not targets:
raise koji.BuildError('no repo (and no target) for tag %s' % taginfo['name'])
@@ -666,7 +666,7 @@ class ShutdownTask(BaseTaskHandler):
_taskWeight = 0.0
Foreground = True
def handler(self):
#note: this is a foreground task
# note: this is a foreground task
raise ServerExit
@@ -677,7 +677,7 @@ class RestartTask(BaseTaskHandler):
_taskWeight = 0.1
Foreground = True
def handler(self, host):
#note: this is a foreground task
# note: this is a foreground task
if host['id'] != self.session.host.getID():
raise koji.GenericError("Host mismatch")
self.manager.restart_pending = True
@@ -691,7 +691,7 @@ class RestartVerifyTask(BaseTaskHandler):
_taskWeight = 0.1
Foreground = True
def handler(self, task_id, host):
#note: this is a foreground task
# note: this is a foreground task
tinfo = self.session.getTaskInfo(task_id)
state = koji.TASK_STATES[tinfo['state']]
if state != 'CLOSED':
@@ -754,7 +754,7 @@ class RestartHostsTask(BaseTaskHandler):
class DependantTask(BaseTaskHandler):
Methods = ['dependantTask']
#mostly just waiting on other tasks
# mostly just waiting on other tasks
_taskWeight = 0.2
def handler(self, wait_list, task_list):

View file

@@ -189,7 +189,7 @@ def dslice(dict_, keys, strict=True):
ret = {}
for key in keys:
if strict or key in dict_:
#for strict we skip the has_key check and let the dict generate the KeyError
# for strict we skip the has_key check and let the dict generate the KeyError
ret[key] = dict_[key]
return ret
@@ -639,13 +639,13 @@ def setup_rlimits(opts, logger=None):
class adler32_constructor(object):
#mimicking the hashlib constructors
# mimicking the hashlib constructors
def __init__(self, arg=''):
if six.PY3 and isinstance(arg, str):
arg = bytes(arg, 'utf-8')
self._value = adler32(arg) & 0xffffffff
#the bitwise and works around a bug in some versions of python
#see: https://bugs.python.org/issue1202
# the bitwise and works around a bug in some versions of python
# see: https://bugs.python.org/issue1202
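What the mask buys, in one line: on the affected interpreter versions zlib.adler32 could return a signed negative value, and the mask normalizes it to unsigned.

from zlib import adler32
value = adler32(b'koji') & 0xffffffff   # unsigned on every Python version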
def update(self, arg):
if six.PY3 and isinstance(arg, str):

View file

@@ -118,9 +118,10 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
if weight is not None:
weight = max(weight, 0.5)
self.session.host.setTaskWeight(self.id, weight)
#noarch is funny
# noarch is funny
if arch == "noarch":
#we need a buildroot arch. Pick one that:
# we need a buildroot arch. Pick one that:
# a) this host can handle
# b) the build tag can support
# c) is canonical
@@ -130,16 +131,16 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
tag_arches = self.session.getBuildConfig(root)['arches']
if not tag_arches:
raise koji.BuildError("No arch list for tag: %s" % root)
#index canonical host arches
# index canonical host arches
host_arches = set([koji.canonArch(a) for a in host_arches.split()])
#pick the first suitable match from tag's archlist
# pick the first suitable match from tag's archlist
for br_arch in tag_arches.split():
br_arch = koji.canonArch(br_arch)
if br_arch in host_arches:
#we're done
# we're done
break
else:
#no overlap
# no overlap
raise koji.BuildError("host does not match tag arches: %s (%s)" % (root, tag_arches))
else:
br_arch = arch
@@ -152,7 +153,7 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
else:
repo_info = self.session.getRepo(root)
if not repo_info:
#wait for it
# wait for it
task_id = self.session.host.subtask(method='waitrepo',
arglist=[root, None, None],
parent=self.id)
@@ -163,13 +164,13 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
broot.workdir = self.workdir
broot.init()
rootdir = broot.rootdir()
#workaround for rpm oddness
# workaround for rpm oddness
os.system('rm -f "%s"/var/lib/rpm/__db.*' % rootdir)
#update buildroot state (so that updateBuildRootList() will work)
# update buildroot state (so that updateBuildRootList() will work)
self.session.host.setBuildRootState(broot.id, 'BUILDING')
try:
if packages:
#pkglog = '%s/%s' % (broot.resultdir(), 'packages.log')
# pkglog = '%s/%s' % (broot.resultdir(), 'packages.log')
pkgcmd = ['--install'] + packages
status = broot.mock(pkgcmd)
self.session.host.updateBuildRootList(broot.id, broot.getPackageList())
@@ -179,9 +180,9 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
if isinstance(command, str):
cmdstr = command
else:
#we were passed an arglist
#we still have to run this through the shell (for redirection)
#but we can preserve the list structure precisely with careful escaping
# we were passed an arglist
# we still have to run this through the shell (for redirection)
# but we can preserve the list structure precisely with careful escaping
cmdstr = ' '.join(["'%s'" % arg.replace("'", r"'\''") for arg in command])
# A nasty hack to put command output into its own file until mock can be
# patched to do something more reasonable than stuff everything into build.log
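A worked example of that quoting rule: each argument is wrapped in single quotes, with embedded single quotes respelled as '\''.

command = ['echo', "it's done"]
cmdstr = ' '.join(["'%s'" % arg.replace("'", r"'\''") for arg in command])
# cmdstr -> 'echo' 'it'\''s done'   (the list structure survives the shell)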
@@ -198,7 +199,7 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
elif new_chroot is False: # None -> no option added
mock_cmd.append('--old-chroot')
if skip_setarch:
#we can't really skip it, but we can set it to the current one instead of the chroot one
# we can't really skip it, but we can set it to the current one instead of the chroot one
myarch = platform.uname()[5]
mock_cmd.extend(['--arch', myarch])
mock_cmd.append('--')
@@ -235,9 +236,9 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
if mount.startswith(safe_root):
break
else:
#no match
# no match
raise koji.GenericError("read-write mount point is not safe: %s" % mount)
#normpath should have removed any .. dirs, but just in case...
# normpath should have removed any .. dirs, but just in case...
if mount.find('/../') != -1:
raise koji.GenericError("read-write mount point is not safe: %s" % mount)
@@ -266,7 +267,7 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
else:
opts = opts.split(',')
if 'bind' in opts:
#make sure dir exists
# make sure dir exists
if not os.path.isdir(dev):
error = koji.GenericError("No such directory or mount: %s" % dev)
break
@@ -297,7 +298,7 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
with open(fn, 'r') as fslog:
for line in fslog.readlines():
mounts.add(line.strip())
#also, check /proc/mounts just in case
# also, check /proc/mounts just in case
mounts |= set(scan_mounts(rootdir))
mounts = sorted(mounts)
# deeper directories first

View file

@@ -1,4 +1,4 @@
#koji hub plugin
# koji hub plugin
# There is a kojid plugin that goes with this hub plugin. The kojid builder
# plugin has a config file. This hub plugin has no config file.
@@ -15,7 +15,6 @@ import kojihub
from koji.context import context
from koji.plugin import export
__all__ = ('runroot',)
@@ -41,11 +40,11 @@ def runroot(tagInfo, arch, command, channel=None, **opts):
tag = kojihub.get_tag(tagInfo, strict=True)
if arch == 'noarch':
#not all arches can generate a proper buildroot for all tags
# not all arches can generate a proper buildroot for all tags
if not tag['arches']:
raise koji.GenericError('no arches defined for tag %s' % tag['name'])
#get all known arches for the system
# get all known arches for the system
fullarches = kojihub.get_all_arches()
tagarches = tag['arches'].split()

View file

@@ -16,9 +16,9 @@ def get_install_requires():
'requests',
'requests-kerberos',
'six',
#'libcomps',
#'rpm-py-installer', # it is optional feature
#'rpm',
# 'libcomps',
# 'rpm-py-installer', # it is optional feature
# 'rpm',
]
if sys.version_info[0] < 3:
# optional auth library for older hubs
@@ -62,9 +62,9 @@ setup(
'koji_cli_plugins': 'plugins/cli',
},
# doesn't make sense, as we have only example config
#data_files=[
# ('/etc', ['cli/koji.conf']),
#],
# data_files=[
# ('/etc', ['cli/koji.conf']),
# ],
scripts=[
'cli/koji',
'util/koji-gc',

View file

@@ -50,7 +50,7 @@ def getTag(session, tag, event=None):
if (tag, event) in cache:
ts, info = cache[(tag,event)]
if now - ts < 600:
#use the cache
# use the cache
return info
info = session.getTag(tag, event=event)
if info:
@@ -83,7 +83,7 @@ class ManagedRepo(object):
self.first_seen = time.time()
if self.current:
order = self.session.getFullInheritance(self.tag_id, event=self.event_id)
#order may contain same tag more than once
# order may contain same tag more than once
tags = {self.tag_id : 1}
for x in order:
tags[x['parent_id']] = 1
@@ -156,13 +156,13 @@ class ManagedRepo(object):
- timestamp really, really old
"""
timeout = 36000
#XXX - config
# XXX - config
if self.state != koji.REPO_INIT:
return False
age = time.time() - max(self.event_ts, self.first_seen)
#the first_seen timestamp is also factored in because a repo can be
#created from an older event and should not be expired based solely on
#that event's timestamp.
# the first_seen timestamp is also factored in because a repo can be
# created from an older event and should not be expired based solely on
# that event's timestamp.
return age > timeout
def tryDelete(self):
@@ -177,8 +177,8 @@ class ManagedRepo(object):
lifetime = self.options.deleted_repo_lifetime
# (should really be called expired_repo_lifetime)
try:
#also check dir age. We do this because a repo can be created from an older event
#and should not be removed based solely on that event's timestamp.
# also check dir age. We do this because a repo can be created from an older event
# and should not be removed based solely on that event's timestamp.
mtime = os.stat(path).st_mtime
except OSError as e:
if e.errno == 2:
@@ -200,7 +200,7 @@ class ManagedRepo(object):
if self.state != koji.REPO_EXPIRED:
raise koji.GenericError("Repo not expired")
if self.session.repoDelete(self.repo_id) > 0:
#cannot delete, we are referenced by a buildroot
# cannot delete, we are referenced by a buildroot
self.logger.debug("Cannot delete repo %s, still referenced" % self.repo_id)
return False
self.logger.info("Deleted repo %s" % self.repo_id)
@@ -299,9 +299,9 @@ class RepoManager(object):
(childpid, status) = os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.errno != errno.ECHILD:
#should not happen
# should not happen
raise
#otherwise assume the process is gone
# otherwise assume the process is gone
self.logger.info("%s: %s" % (prefix, e))
return True
if childpid != 0:
@@ -345,7 +345,7 @@ class RepoManager(object):
repo_id = data['id']
repo = self.repos.get(repo_id)
if repo:
#we're already tracking it
# we're already tracking it
if repo.state != data['state']:
self.logger.info('State changed for repo %s: %s -> %s'
%(repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']]))
@@ -383,7 +383,7 @@ class RepoManager(object):
repo.current = False
if repo.expire_ts is None:
repo.expire_ts = time.time()
#also no point in further checking
# also no point in further checking
continue
to_check.append(repo)
if self.logger.isEnabledFor(logging.DEBUG):
@@ -441,7 +441,7 @@ class RepoManager(object):
Also, warn about any oddities"""
if self.delete_pids:
#skip
# skip
return
if not os.path.exists(topdir):
self.logger.debug("%s doesn't exist, skipping", topdir)
@@ -466,14 +466,14 @@ class RepoManager(object):
self.logger.debug("%s/%s not an int, skipping", tagdir, repo_id)
continue
if repo_id in self.repos:
#we're already managing it, no need to deal with it here
# we're already managing it, no need to deal with it here
continue
repodir = "%s/%s" % (tagdir, repo_id)
try:
# lstat because it could be link to another volume
dirstat = os.lstat(repodir)
except OSError:
#just in case something deletes the repo out from under us
# just in case something deletes the repo out from under us
self.logger.debug("%s deleted already?!", repodir)
continue
symlink = False
@@ -513,18 +513,18 @@ class RepoManager(object):
stats = self.tag_use_stats.get(tag_id)
now = time.time()
if stats and now - stats['ts'] < 3600:
#use the cache
# use the cache
return stats
data = self.session.listBuildroots(tagID=tag_id,
queryOpts={'order': '-create_event_id', 'limit' : 100})
#XXX magic number (limit)
# XXX magic number (limit)
if data:
tag_name = data[0]['tag_name']
else:
tag_name = "#%i" % tag_id
stats = {'data': data, 'ts': now, 'tag_name': tag_name}
recent = [x for x in data if now - x['create_ts'] < 3600 * 24]
#XXX magic number
# XXX magic number
stats ['n_recent'] = len(recent)
self.tag_use_stats[tag_id] = stats
self.logger.debug("tag %s recent use count: %i" % (tag_name, len(recent)))
@@ -593,7 +593,7 @@ class RepoManager(object):
if n_deletes >= self.options.delete_batch_size:
break
if repo.expired():
#try to delete
# try to delete
if repo.tryDelete():
n_deletes += 1
del self.repos[repo.repo_id]
@@ -652,7 +652,7 @@ class RepoManager(object):
t['build_tag'] for t in self.session.getBuildTargets()
if not koji.util.multi_fnmatch(t['build_tag_name'], ignore)
])
#index repos by tag
# index repos by tag
tag_repos = {}
for repo in to_list(self.repos.values()):
tag_repos.setdefault(repo.tag_id, []).append(repo)
@@ -931,7 +931,7 @@ def get_options():
'repo_tasks_limit' : 10,
'delete_batch_size' : 3,
'deleted_repo_lifetime': 7*24*3600,
#XXX should really be called expired_repo_lifetime
# XXX should really be called expired_repo_lifetime
'dist_repo_lifetime': 7*24*3600,
'recent_tasks_lifetime': 600,
'sleeptime' : 15,
@@ -1003,7 +1003,7 @@ if __name__ == "__main__":
sys.stderr.write("Cannot write to logfile: %s\n" % options.logfile)
sys.exit(1)
koji.add_file_logger("koji", options.logfile)
#note we're setting logging for koji.*
# note we're setting logging for koji.*
logger = logging.getLogger("koji")
if options.debug:
logger.setLevel(logging.DEBUG)
@@ -1024,7 +1024,7 @@ if __name__ == "__main__":
session.login()
elif koji.krbV and options.principal and options.keytab:
session.krb_login(options.principal, options.keytab, options.ccache)
#get an exclusive session
# get an exclusive session
try:
session.exclusiveSession(force=options.force_lock)
except koji.AuthLockError:

View file

@@ -1,10 +1,10 @@
#!/bin/bash
awk '/^## INSERT kojikamid dup/ {exit} {print $0}' kojikamid.py
awk '/^# INSERT kojikamid dup #/ {exit} {print $0}' kojikamid.py
for fn in ../koji/__init__.py ../koji/daemon.py
do
awk '/^## END kojikamid dup/ {p=0} p {print $0} /^## BEGIN kojikamid dup/ {p=1}' $fn
awk '/^# END kojikamid dup #/ {p=0} p {print $0} /^# BEGIN kojikamid dup #/ {p=1}' $fn
done
awk 'p {print $0} /^## INSERT kojikamid dup/ {p=1}' kojikamid.py
awk 'p {print $0} /^# INSERT kojikamid dup #/ {p=1}' kojikamid.py

View file

@@ -54,12 +54,12 @@ MANAGER_PORT = 7000
KOJIKAMID = True
## INSERT kojikamid dup
# INSERT kojikamid dup #
class fakemodule(object):
pass
#make parts of the above insert accessible as koji.X
# make parts of the above insert accessible as koji.X
koji = fakemodule()
koji.GenericError = GenericError # noqa: F821
koji.BuildError = BuildError # noqa: F821
@@ -68,7 +68,7 @@ def encode_int(n):
"""If n is too large for a 32bit signed, convert it to a string"""
if n <= 2147483647:
return n
#else
# else
return str(n)
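By example: XML-RPC's i4 type tops out at 2**31 - 1, so anything larger travels as a string.

encode_int(2147483647)   # -> 2147483647 (fits a 32-bit signed int)
encode_int(2147483648)   # -> '2147483648' (one past the limit)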
class WindowsBuild(object):

View file

@@ -101,7 +101,7 @@ def get_options():
if args:
parser.error("incorrect number of arguments")
#not reached
# not reached
assert False # pragma: no cover
# load local config
@@ -176,7 +176,7 @@ def get_options():
if os.path.exists(fn):
setattr(options, name, fn)
#make sure workdir exists
# make sure workdir exists
if not os.path.exists(options.workdir):
koji.ensuredir(options.workdir)
@@ -198,7 +198,7 @@ def main(options, session):
tm = VMTaskManager(options, session)
tm.findHandlers(globals())
if options.plugin:
#load plugins
# load plugins
pt = koji.plugin.PluginTracker(path=options.pluginpath.split(':'))
for name in options.plugin:
logger.info('Loading plugin: %s', name)
@@ -1084,7 +1084,7 @@ class VMTaskManager(TaskManager):
if __name__ == "__main__":
koji.add_file_logger("koji", "/var/log/kojivmd.log")
#note we're setting logging params for all of koji*
# note we're setting logging params for all of koji*
options = get_options()
if options.debug:
logging.getLogger("koji").setLevel(logging.DEBUG)
@@ -1097,7 +1097,7 @@ if __name__ == "__main__":
if options.admin_emails:
koji.add_mail_logger("koji", options.admin_emails)
#start a session and login
# start a session and login
session_opts = koji.grab_session_options(options)
session = koji.ClientSession(options.server, session_opts)
if options.cert and os.path.isfile(options.cert):
@@ -1131,14 +1131,14 @@ if __name__ == "__main__":
quit("Could not connect to Kerberos authentication service: '%s'" % e.args[1])
else:
quit("No username/password supplied and Kerberos missing or not configured")
#make session exclusive
# make session exclusive
try:
session.exclusiveSession(force=options.force_lock)
except koji.AuthLockError:
quit("Error: Unable to get lock. Trying using --force-lock")
if not session.logged_in:
quit("Error: Unknown login error")
#make sure it works
# make sure it works
try:
ret = session.echo("OK")
except requests.exceptions.ConnectionError:
@@ -1148,7 +1148,7 @@ if __name__ == "__main__":
# run main
if options.daemon:
#detach
# detach
koji.daemonize()
main(options, session)
elif not options.skip_main:

View file

@@ -45,7 +45,7 @@ from kojiweb.util import _genHTML, _getValidTokens, _initValues
# Convenience definition of a commonly-used sort function
_sortbyname = lambda x: x['name']
#loggers
# loggers
authlogger = logging.getLogger('koji.auth')
def _setUserCookie(environ, user):
@@ -790,7 +790,7 @@ def getfile(environ, taskID, name, volume='DEFAULT', offset=None, size=None):
if size > (file_size - offset):
size = file_size - offset
#environ['koji.headers'].append(['Content-Length', str(size)])
# environ['koji.headers'].append(['Content-Length', str(size)])
return _chunk_file(server, environ, taskID, name, offset, size, volume)

View file

@@ -44,7 +44,7 @@ class URLNotFound(ServerError):
class Dispatcher(object):
def __init__(self):
#we can't do much setup until we get a request
# we can't do much setup until we get a request
self.firstcall = True
self.options = {}
self.startup_error = None
@@ -66,7 +66,7 @@ class Dispatcher(object):
self.logger = logging.getLogger("koji.web")
cfgmap = [
#option, type, default
# option, type, default
['SiteName', 'string', None],
['KojiHubURL', 'string', 'http://localhost/kojihub'],
['KojiFilesURL', 'string', 'http://localhost/kojifiles'],
@@ -156,7 +156,7 @@ class Dispatcher(object):
def setup_logging2(self, environ):
"""Adjust logging based on configuration options"""
opts = self.options
#determine log level
# determine log level
level = opts['LogLevel']
valid_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
# the config value can be a single level name or a series of
@@ -172,7 +172,7 @@ class Dispatcher(object):
default = level
if level not in valid_levels:
raise koji.GenericError("Invalid log level: %s" % level)
#all our loggers start with koji
# all our loggers start with koji
if name == '':
name = 'koji'
default = level
@@ -187,7 +187,7 @@ class Dispatcher(object):
if opts.get('KojiDebug'):
logger.setLevel(logging.DEBUG)
elif default is None:
#LogLevel did not configure a default level
# LogLevel did not configure a default level
logger.setLevel(logging.WARNING)
self.formatter = HubFormatter(opts['LogFormat'])
self.formatter.environ = environ
@@ -213,7 +213,7 @@ class Dispatcher(object):
def prep_handler(self, environ):
path_info = environ['PATH_INFO']
if not path_info:
#empty path info (no trailing slash) breaks our relative urls
# empty path info (no trailing slash) breaks our relative urls
environ['koji.redirect'] = environ['REQUEST_URI'] + '/'
raise ServerRedirect
elif path_info == '/':
@@ -225,7 +225,7 @@ class Dispatcher(object):
func = self.handler_index.get(method)
if not func:
raise URLNotFound
#parse form args
# parse form args
data = {}
fs = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ.copy(), keep_blank_values=True)
for field in fs.list:
@@ -245,7 +245,7 @@ class Dispatcher(object):
if not varkw:
# remove any unexpected args
data = dslice(data, args, strict=False)
#TODO (warning in header or something?)
# TODO (warning in header or something?)
return func, data
@@ -318,7 +318,7 @@ class Dispatcher(object):
except (NameError, AttributeError):
tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
self.logger.error(tb_str)
#fallback to simple error page
# fallback to simple error page
return self.simple_error_page(message, err=tb_short)
values = _initValues(environ, *desc)
values['etype'] = etype

View file

@@ -26,7 +26,7 @@ import hashlib
import os
import ssl
import stat
#a bunch of exception classes that explainError needs
# a bunch of exception classes that explainError needs
from socket import error as socket_error
from xml.parsers.expat import ExpatError