diff --git a/builder/kojid b/builder/kojid index 01a2411a..cf4d060d 100755 --- a/builder/kojid +++ b/builder/kojid @@ -131,7 +131,7 @@ def main(options, session): tm.findHandlers(globals()) tm.findHandlers(vars(koji.tasks)) if options.plugin: - #load plugins + # load plugins pt = koji.plugin.PluginTracker(path=options.pluginpath.split(':')) for name in options.plugin: logger.info('Loading plugin: %s' % name) @@ -192,9 +192,9 @@ class BuildRoot(object): self._new(*args,**kwargs) def _load(self, data): - #manage an existing buildroot + # manage an existing buildroot if isinstance(data, dict): - #assume data already pulled from db + # assume data already pulled from db self.id = data['id'] else: self.id = data @@ -291,7 +291,7 @@ class BuildRoot(object): opts['tag_macros'][macro] = self.config['extra'][key] output = koji.genMockConfig(self.name, self.br_arch, managed=True, **opts) - #write config + # write config with open(configfile,'w') as fo: fo.write(output) @@ -398,7 +398,7 @@ class BuildRoot(object): """Run mock""" mockpath = getattr(self.options,"mockpath","/usr/bin/mock") cmd = [mockpath, "-r", self.mockcfg] - #if self.options.debug_mock: + # if self.options.debug_mock: # cmd.append('--debug') # TODO: should we pass something like --verbose --trace instead? if 'mock.new_chroot' in self.config['extra']: @@ -495,7 +495,7 @@ class BuildRoot(object): ts_offsets[fname] = position incremental_upload(self.session, fname, fd, uploadpath, logger=self.logger) - #clean up and return exit status of command + # clean up and return exit status of command for (fname, (fd, inode, size, fpath)) in logs.items(): if not fd: continue @@ -507,7 +507,7 @@ class BuildRoot(object): return status[1] else: - #in no case should exceptions propagate past here + # in no case should exceptions propagate past here try: self.session._forget() if workdir: @@ -524,7 +524,7 @@ class BuildRoot(object): os.setreuid(uid,uid) os.execvp(cmd[0],cmd) except: - #diediedie + # diediedie print("Failed to exec mock") print(''.join(traceback.format_exception(*sys.exc_info()))) os._exit(1) @@ -656,9 +656,9 @@ class BuildRoot(object): ts = rpm.TransactionSet() for h in ts.dbMatch(): pkg = koji.get_header_fields(h, fields) - #skip our fake packages + # skip our fake packages if pkg['name'] in ['buildsys-build', 'gpg-pubkey']: - #XXX config + # XXX config continue pkg['payloadhash'] = koji.hex_string(pkg['sigmd5']) del pkg['sigmd5'] @@ -744,9 +744,9 @@ class BuildRoot(object): external_repos = self.session.getExternalRepoList(self.repo_info['tag_id'], event=self.repo_info['create_event']) if not external_repos: - #nothing to do + # nothing to do return - #index external repos by expanded url + # index external repos by expanded url erepo_idx = {} for erepo in external_repos: # substitute $arch in the url with the arch of the repo we're generating @@ -781,7 +781,7 @@ class BuildRoot(object): pkgorigins = r.getinfo(librepo.LRR_YUM_REPOMD)['origin']['location_href'] koji.util.rmtree(tmpdir) elif yum_available: - #XXX - cheap hack to get relative paths + # XXX - cheap hack to get relative paths repomdpath = os.path.join(repodir, self.br_arch, 'repodata', 'repomd.xml') with koji.openRemoteFile(repomdpath, **opts) as fo: try: @@ -796,8 +796,8 @@ class BuildRoot(object): relpath = os.path.join(repodir, self.br_arch, pkgorigins) with koji.openRemoteFile(relpath, **opts) as fo: - #at this point we know there were external repos at the create event, - #so there should be an origins file. 
+ # at this point we know there were external repos at the create event, + # so there should be an origins file. origin_idx = {} # don't use 'with GzipFile' as it is not supported on py2.6 fo2 = GzipFile(fileobj=fo, mode='r') @@ -807,7 +807,7 @@ class BuildRoot(object): parts=line.split(None, 2) if len(parts) < 2: continue - #first field is formated by yum as [e:]n-v-r.a + # first field is formatted by yum as [e:]n-v-r.a nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % koji.parse_NVRA(parts[0]) origin_idx[nvra] = parts[1] fo2.close() @@ -874,7 +874,7 @@ class BuildRoot(object): class ChainBuildTask(BaseTaskHandler): Methods = ['chainbuild'] - #mostly just waiting on other tasks + # mostly just waiting on other tasks _taskWeight = 0.1 def handler(self, srcs, target, opts=None): @@ -896,7 +896,7 @@ class ChainBuildTask(BaseTaskHandler): raise koji.GenericError('unknown build target: %s' % target) nvrs = [] for n_level, build_level in enumerate(srcs): - #if there are any nvrs to wait on, do so + # if there are any nvrs to wait on, do so if nvrs: task_id = self.session.host.subtask(method='waitrepo', arglist=[target_info['build_tag_name'], None, nvrs], @@ -904,7 +904,7 @@ class ChainBuildTask(BaseTaskHandler): parent=self.id) self.wait(task_id, all=True, failany=True) nvrs = [] - #kick off the builds for this level + # kick off the builds for this level build_tasks = [] for n_src, src in enumerate(build_level): if SCM.is_scm_url(src): @@ -915,11 +915,11 @@ class ChainBuildTask(BaseTaskHandler): build_tasks.append(task_id) else: nvrs.append(src) - #next pass will wait for these + # next pass will wait for these if build_tasks: - #the level could have been all nvrs + # the level could have been all nvrs self.wait(build_tasks, all=True, failany=True) - #see what builds we created in this batch so the next pass can wait for them also + # see what builds we created in this batch so the next pass can wait for them also for build_task in build_tasks: builds = self.session.listBuilds(taskID=build_task) if builds: @@ -929,7 +929,7 @@ class BuildTask(BaseTaskHandler): class BuildTask(BaseTaskHandler): Methods = ['build'] - #we mostly just wait on other tasks + # we mostly just wait on other tasks _taskWeight = 0.2 def handler(self, src, target, opts=None): @@ -949,7 +949,7 @@ class BuildTask(BaseTaskHandler): self.event_id = repo_info['create_event'] else: repo_info = None - #we'll wait for a repo later (self.getRepo) + # we'll wait for a repo later (self.getRepo) self.event_id = None task_info = self.session.getTaskInfo(self.id) target_info = None @@ -959,7 +959,7 @@ class BuildTask(BaseTaskHandler): dest_tag = target_info['dest_tag'] build_tag = target_info['build_tag'] if repo_info is not None: - #make sure specified repo matches target + # make sure specified repo matches target if repo_info['tag_id'] != target_info['build_tag']: raise koji.BuildError('Repo/Target mismatch: %s/%s' \ % (repo_info['tag_name'], target_info['build_tag_name'])) @@ -970,7 +970,7 @@ class BuildTask(BaseTaskHandler): raise koji.GenericError('unknown build target: %s' % target) build_tag = repo_info['tag_id'] if target is None: - #ok, call it skip-tag for the buildroot tag + # ok, call it skip-tag for the buildroot tag self.opts['skip_tag'] = True dest_tag = build_tag else: @@ -978,7 +978,7 @@ class BuildTask(BaseTaskHandler): if not taginfo: raise koji.GenericError('neither tag nor target: %s' % target) dest_tag = taginfo['id'] - #policy checks... + # policy checks...
policy_data = { 'user_id' : task_info['owner'], 'source' : src, @@ -991,7 +991,7 @@ class BuildTask(BaseTaskHandler): if not self.opts.get('skip_tag'): policy_data['tag'] = dest_tag #id if not SCM.is_scm_url(src) and not opts.get('scratch'): - #let hub policy decide + # let hub policy decide self.session.host.assertPolicy('build_from_srpm', policy_data) if opts.get('repo_id') is not None: # use of this option is governed by policy @@ -1024,11 +1024,11 @@ class BuildTask(BaseTaskHandler): % (data['name'], target_info['dest_tag_name'])) # TODO - more pre tests archlist = self.getArchList(build_tag, h, extra=extra_arches) - #let the system know about the build we're attempting + # let the system know about the build we're attempting if not self.opts.get('scratch'): - #scratch builds do not get imported + # scratch builds do not get imported build_id = self.session.host.initBuild(data) - #(initBuild raises an exception if there is a conflict) + # (initBuild raises an exception if there is a conflict) failany = (self.opts.get('fail_fast', False) or not getattr(self.options, 'build_arch_can_fail', False)) try: @@ -1037,16 +1037,16 @@ class BuildTask(BaseTaskHandler): repo_info['id'], failany=failany) if opts.get('scratch'): - #scratch builds do not get imported + # scratch builds do not get imported self.session.host.moveBuildToScratch(self.id,srpm,rpms,logs=logs) else: self.session.host.completeBuild(self.id,build_id,srpm,rpms,brmap,logs=logs) except (SystemExit,ServerExit,KeyboardInterrupt): - #we do not trap these + # we do not trap these raise except: if not self.opts.get('scratch'): - #scratch builds do not get imported + # scratch builds do not get imported self.session.host.failBuild(self.id, build_id) # reraise the exception raise @@ -1067,7 +1067,7 @@ class BuildTask(BaseTaskHandler): return src else: raise koji.BuildError('Invalid source specification: %s' % src) - #XXX - other methods? + # XXX - other methods? def getSRPMFromSRPM(self, src, build_tag, repo_id): # rebuild srpm in mock, so it gets correct disttag, rpm version, etc. 
@@ -1085,7 +1085,7 @@ class BuildTask(BaseTaskHandler): return srpm def getSRPMFromSCM(self, url, build_tag, repo_id): - #TODO - allow different ways to get the srpm + # TODO - allow different ways to get the srpm task_id = self.session.host.subtask(method='buildSRPMFromSCM', arglist=[url, build_tag, {'repo_id': repo_id, 'scratch': self.opts.get('scratch')}], label='srpm', @@ -1100,7 +1100,7 @@ class BuildTask(BaseTaskHandler): return srpm def readSRPMHeader(self, srpm): - #srpm arg should be a path relative to /work + # srpm arg should be a path relative to /work self.logger.debug("Reading SRPM") relpath = "work/%s" % srpm opts = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')]) @@ -1117,7 +1117,7 @@ class BuildTask(BaseTaskHandler): buildconfig = self.session.getBuildConfig(build_tag, event=self.event_id) arches = buildconfig['arches'] if not arches: - #XXX - need to handle this better + # XXX - need to handle this better raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig) tag_archlist = [koji.canonArch(a) for a in arches.split()] self.logger.debug('arches: %s' % arches) @@ -1139,13 +1139,13 @@ class BuildTask(BaseTaskHandler): if excludearch: archlist = [ a for a in archlist if a not in excludearch ] self.logger.debug('archlist after excludearch: %r' % archlist) - #noarch is funny + # noarch is funny if 'noarch' not in excludearch and \ ( 'noarch' in buildarchs or 'noarch' in exclusivearch ): archlist.append('noarch') override = self.opts.get('arch_override') if self.opts.get('scratch') and override: - #only honor override for scratch builds + # only honor override for scratch builds self.logger.debug('arch override: %s' % override) archlist = override.split() archdict = {} @@ -1248,9 +1248,9 @@ class BuildTask(BaseTaskHandler): return srpm,rpms,brmap,logs def tagBuild(self,build_id,dest_tag): - #XXX - need options to skip tagging and to force tagging - #create the tagBuild subtask - #this will handle the "post tests" + # XXX - need options to skip tagging and to force tagging + # create the tagBuild subtask + # this will handle the "post tests" task_id = self.session.host.subtask(method='tagBuild', arglist=[dest_tag,build_id,False,None,True], label='tag', @@ -1279,7 +1279,7 @@ class BaseBuildTask(BaseTaskHandler): (self.id, self.method, ', '.join(tag_arches), ', '.join(host_arches))) return False - #otherwise... + # otherwise... # This is in principle an error condition, but this is not a good place # to fail. Instead we proceed and let the task fail normally. return True @@ -1448,7 +1448,7 @@ class BuildArchTask(BaseBuildTask): ret['brootid'] = broot.id broot.expire() - #Let TaskManager clean up + # Let TaskManager clean up return ret @@ -1525,7 +1525,7 @@ class MavenTask(MultiPlatformTask): raise except: if not self.opts.get('scratch'): - #scratch builds do not get imported + # scratch builds do not get imported self.session.host.failBuild(self.id, self.build_id) # reraise the exception raise @@ -1988,7 +1988,7 @@ class WrapperRPMTask(BaseBuildTask): gid = grp.getgrnam('mock')[2] self.chownTree(specdir, uid, gid) - #build srpm + # build srpm self.logger.debug("Running srpm build") buildroot.build_srpm(specfile, specdir, None) @@ -2327,7 +2327,7 @@ class ChainMavenTask(MultiPlatformTask): class TagBuildTask(BaseTaskHandler): Methods = ['tagBuild'] - #XXX - set weight? + # XXX - set weight? 
def handler(self, tag_id, build_id, force=False, fromtag=None, ignore_success=False): task = self.session.getTaskInfo(self.id) @@ -2336,11 +2336,11 @@ class TagBuildTask(BaseTaskHandler): self.session.getBuild(build_id, strict=True) self.session.getTag(tag_id, strict=True) - #several basic sanity checks have already been run (and will be run - #again when we make the final call). Our job is to perform the more - #computationally expensive 'post' tests. + # several basic sanity checks have already been run (and will be run + # again when we make the final call). Our job is to perform the more + # computationally expensive 'post' tests. - #XXX - add more post tests + # XXX - add more post tests self.session.host.tagBuild(self.id,tag_id,build_id,force=force,fromtag=fromtag) self.session.host.tagNotification(True, tag_id, fromtag, build_id, user_id, ignore_success) except Exception as e: @@ -2376,7 +2376,7 @@ class BuildBaseImageTask(BuildImageTask): target_info = self.session.getBuildTarget(target, strict=True) build_tag = target_info['build_tag'] repo_info = self.getRepo(build_tag) - #check requested arches against build tag + # check requested arches against build tag buildconfig = self.session.getBuildConfig(build_tag) if not buildconfig['arches']: raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig) @@ -2475,11 +2475,11 @@ class BuildBaseImageTask(BuildImageTask): results) except (SystemExit,ServerExit,KeyboardInterrupt): - #we do not trap these + # we do not trap these raise except: if not opts.get('scratch'): - #scratch builds do not get imported + # scratch builds do not get imported if bld_info: self.session.host.failBuild(self.id, bld_info['id']) # reraise the exception @@ -2512,7 +2512,7 @@ class BuildApplianceTask(BuildImageTask): target_info = self.session.getBuildTarget(target, strict=True) build_tag = target_info['build_tag'] repo_info = self.getRepo(build_tag) - #check requested arch against build tag + # check requested arch against build tag buildconfig = self.session.getBuildConfig(build_tag) if not buildconfig['arches']: raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig) @@ -2561,11 +2561,11 @@ class BuildApplianceTask(BuildImageTask): self.session.host.moveImageBuildToScratch(self.id, results) except (SystemExit,ServerExit,KeyboardInterrupt): - #we do not trap these + # we do not trap these raise except: if not opts.get('scratch'): - #scratch builds do not get imported + # scratch builds do not get imported if bld_info: self.session.host.failBuild(self.id, bld_info['id']) # reraise the exception @@ -2597,7 +2597,7 @@ class BuildLiveCDTask(BuildImageTask): target_info = self.session.getBuildTarget(target, strict=True) build_tag = target_info['build_tag'] repo_info = self.getRepo(build_tag) - #check requested arch against build tag + # check requested arch against build tag buildconfig = self.session.getBuildConfig(build_tag) if not buildconfig['arches']: raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig) @@ -2645,11 +2645,11 @@ class BuildLiveCDTask(BuildImageTask): self.session.host.moveImageBuildToScratch(self.id, results) except (SystemExit,ServerExit,KeyboardInterrupt): - #we do not trap these + # we do not trap these raise except: if not opts.get('scratch'): - #scratch builds do not get imported + # scratch builds do not get imported if bld_info: self.session.host.failBuild(self.id, bld_info['id']) # reraise the exception @@ -2683,7 +2683,7 @@ class BuildLiveMediaTask(BuildImageTask): target_info = 
self.session.getBuildTarget(target, strict=True) build_tag = target_info['build_tag'] repo_info = self.getRepo(build_tag) - #check requested arch against build tag + # check requested arch against build tag buildconfig = self.session.getBuildConfig(build_tag) if not buildconfig['arches']: raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig) @@ -2783,11 +2783,11 @@ class BuildLiveMediaTask(BuildImageTask): self.session.host.moveImageBuildToScratch(self.id, results) except (SystemExit, ServerExit, KeyboardInterrupt): - #we do not trap these + # we do not trap these raise except: if not opts.get('scratch'): - #scratch builds do not get imported + # scratch builds do not get imported if bld_info: self.session.host.failBuild(self.id, bld_info['id']) # reraise the exception @@ -2953,7 +2953,7 @@ class ImageTask(BaseTaskHandler): baseurl = '%s/%s' % (repopath, arch) self.logger.debug('BASEURL: %s' % baseurl) self.ks.handler.repo.repoList.append(repo_class(baseurl=baseurl, name='koji-%s-%i' % (target_info['build_tag_name'], repo_info['id']))) - #inject url if provided + # inject url if provided if opts.get('install_tree_url'): self.ks.handler.url(url=opts['install_tree_url']) @@ -3285,7 +3285,7 @@ class LiveCDTask(ImageTask): -## livemedia-creator +# livemedia-creator class LiveMediaTask(ImageTask): Methods = ['createLiveMedia'] @@ -3410,7 +3410,7 @@ class LiveMediaTask(ImageTask): '--no-virt', '--resultdir', resultdir, '--project', name, - #'--tmp', '/tmp' + # '--tmp', '/tmp' ] @@ -3508,10 +3508,10 @@ class LiveMediaTask(ImageTask): if not opts.get('scratch'): # TODO - generate list of rpms in image # (getImagePackages doesn't work here) - #hdrlist = self.getImagePackages(os.path.join(broot.rootdir(), + # hdrlist = self.getImagePackages(os.path.join(broot.rootdir(), # cachedir[1:])) imgdata ['rpmlist'] = [] - #broot.markExternalRPMs(hdrlist) + # broot.markExternalRPMs(hdrlist) broot.expire() return imgdata @@ -3666,10 +3666,10 @@ class OzImageTask(BaseTaskHandler): the way we want """ return { - #Oz specific + # Oz specific 'oz_data_dir': os.path.join(self.workdir, 'oz_data'), 'oz_screenshot_dir': os.path.join(self.workdir, 'oz_screenshots'), - #IF specific + # IF specific 'imgdir': os.path.join(self.workdir, 'scratch_images'), 'tmpdir': os.path.join(self.workdir, 'oz-tmp'), 'verbose': True, @@ -4251,7 +4251,7 @@ class BaseImageTask(OzImageTask): } # record the RPMs that were installed if not opts.get('scratch'): - #fields = ('name', 'version', 'release', 'arch', 'epoch', 'size', + # fields = ('name', 'version', 'release', 'arch', 'epoch', 'size', # 'payloadhash', 'buildtime') icicle = xml.dom.minidom.parseString(images['raw']['icicle']) self.logger.debug('ICICLE: %s' % images['raw']['icicle']) @@ -4540,7 +4540,7 @@ class BuildIndirectionImageTask(OzImageTask): bld_info, target_info, bd) except: if not opts.get('scratch'): - #scratch builds do not get imported + # scratch builds do not get imported if bld_info: self.session.host.failBuild(self.id, bld_info['id']) # reraise the exception @@ -4770,7 +4770,7 @@ class BuildSRPMFromSCMTask(BaseBuildTask): 'repo_id': repo_id} if self.options.scm_credentials_dir is not None and os.path.isdir(self.options.scm_credentials_dir): rootopts['bind_opts'] = {'dirs' : {self.options.scm_credentials_dir : '/credentials',}} - ## Force internal_dev_setup back to true because bind_opts is used to turn it off + # Force internal_dev_setup back to true because bind_opts is used to turn it off rootopts['internal_dev_setup'] = True br_arch = 
self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id)) broot = BuildRoot(self.session, self.options, build_tag['id'], br_arch, self.id, **rootopts) @@ -4820,7 +4820,7 @@ class BuildSRPMFromSCMTask(BaseBuildTask): # Run spec file sanity checks. Any failures will throw a BuildError self.spec_sanity_checks(spec_file) - #build srpm + # build srpm self.logger.debug("Running srpm build") broot.build_srpm(spec_file, sourcedir, scm.source_cmd) @@ -4841,7 +4841,7 @@ class BuildSRPMFromSCMTask(BaseBuildTask): if srpm_name != os.path.basename(srpm): raise koji.BuildError('srpm name mismatch: %s != %s' % (srpm_name, os.path.basename(srpm))) - #upload srpm and return + # upload srpm and return self.uploadFile(srpm) brootid = broot.id @@ -4941,7 +4941,7 @@ Status: %(status)s\r server = smtplib.SMTP(self.options.smtphost) if self.options.smtp_user is not None and self.options.smtp_pass is not None: server.login(self.options.smtp_user, self.options.smtp_pass) - #server.set_debuglevel(True) + # server.set_debuglevel(True) server.sendmail(from_addr, recipients, message) server.quit() @@ -5192,9 +5192,9 @@ class NewRepoTask(BaseTaskHandler): for fn in os.listdir(path): if fn != 'groups' and os.path.isfile("%s/%s/pkglist" % (path, fn)): arches.append(fn) - #see if we can find a previous repo to update from - #only shadowbuild tags should start with SHADOWBUILD, their repos are auto - #expired. so lets get the most recent expired tag for newRepo shadowbuild tasks. + # see if we can find a previous repo to update from + # only shadowbuild tags should start with SHADOWBUILD, their repos are auto + # expired. so let's get the most recent expired tag for newRepo shadowbuild tasks. if tinfo['name'].startswith('SHADOWBUILD'): oldrepo_state = koji.REPO_EXPIRED else: @@ -5242,7 +5242,7 @@ class CreaterepoTask(BaseTaskHandler): _taskWeight = 1.5 def handler(self, repo_id, arch, oldrepo): - #arch is the arch of the repo, not the task + # arch is the arch of the repo, not the task rinfo = self.session.repoInfo(repo_id, strict=True) if rinfo['state'] != koji.REPO_INIT: raise koji.GenericError("Repo %(id)s not in INIT state (got %(state)s)" % rinfo) @@ -5253,7 +5253,7 @@ class CreaterepoTask(BaseTaskHandler): if not os.path.isdir(self.repodir): raise koji.GenericError("Repo directory missing: %s" % self.repodir) groupdata = os.path.join(toprepodir, 'groups', 'comps.xml') - #set up our output dir + # set up our output dir self.outdir = '%s/repo' % self.workdir self.datadir = '%s/repodata' % self.outdir pkglist = os.path.join(self.repodir, 'pkglist') @@ -5286,7 +5286,7 @@ class CreaterepoTask(BaseTaskHandler): cmd.extend(['-i', pkglist]) if os.path.isfile(groupdata): cmd.extend(['-g', groupdata]) - #attempt to recycle repodata from last repo + # attempt to recycle repodata from last repo if pkglist and oldrepo and self.options.createrepo_update: # old repo could be from inherited tag, so path needs to be # composed from that tag, not rinfo['tag_name'] @@ -5459,7 +5459,7 @@ class createDistRepoTask(BaseTaskHandler): "sparc64", "s390x": "s390", "ppc64": "ppc"} def handler(self, tag, repo_id, arch, keys, opts): - #arch is the arch of the repo, not the task + # arch is the arch of the repo, not the task self.rinfo = self.session.repoInfo(repo_id, strict=True) if self.rinfo['state'] != koji.REPO_INIT: raise koji.GenericError("Repo %(id)s not in INIT state (got %(state)s)" % self.rinfo) @@ -6047,7 +6047,7 @@ enabled=1 class WaitrepoTask(BaseTaskHandler): Methods =
['waitrepo'] - #mostly just waiting + # mostly just waiting _taskWeight = 0.2 PAUSE = 60 @@ -6101,7 +6101,7 @@ class WaitrepoTask(BaseTaskHandler): (koji.util.duration(start), taginfo['name'])) return repo else: - #no check requested -- return first ready repo + # no check requested -- return first ready repo return repo if (time.time() - start) > (self.TIMEOUT * 60.0): @@ -6140,7 +6140,7 @@ def get_options(): parser.add_option("--debug-xmlrpc", action="store_true", default=False, help="show xmlrpc debug output") parser.add_option("--debug-mock", action="store_true", default=False, - #obsolete option + # obsolete option help=SUPPRESS_HELP) parser.add_option("--skip-main", action="store_true", default=False, help="don't actually run main") @@ -6163,7 +6163,7 @@ def get_options(): if args: parser.error("incorrect number of arguments") - #not reached + # not reached assert False # pragma: no cover # load local config @@ -6256,12 +6256,12 @@ def get_options(): if getattr(options, name, None) is None: setattr(options, name, value) - #honor topdir + # honor topdir if options.topdir: koji.BASEDIR = options.topdir koji.pathinfo.topdir = options.topdir - #make sure workdir exists + # make sure workdir exists if not os.path.exists(options.workdir): koji.ensuredir(options.workdir) @@ -6308,7 +6308,7 @@ def quit(msg=None, code=1): if __name__ == "__main__": koji.add_file_logger("koji", "/var/log/kojid.log") - #note we're setting logging params for all of koji* + # note we're setting logging params for all of koji* options = get_options() if options.log_level: lvl = getattr(logging, options.log_level, None) @@ -6326,7 +6326,7 @@ if __name__ == "__main__": if options.admin_emails: koji.add_mail_logger("koji", options.admin_emails) - #start a session and login + # start a session and login session_opts = koji.grab_session_options(options) session = koji.ClientSession(options.server, session_opts) if options.cert and os.path.isfile(options.cert): @@ -6360,14 +6360,14 @@ if __name__ == "__main__": quit("Could not connect to Kerberos authentication service: '%s'" % e.args[1]) else: quit("No username/password supplied and Kerberos missing or not configured") - #make session exclusive + # make session exclusive try: session.exclusiveSession(force=options.force_lock) except koji.AuthLockError: quit("Error: Unable to get lock. Trying using --force-lock") if not session.logged_in: quit("Error: Unknown login error") - #make sure it works + # make sure it works try: ret = session.echo("OK") except requests.exceptions.ConnectionError: @@ -6377,7 +6377,7 @@ if __name__ == "__main__": # run main if options.daemon: - #detach + # detach koji.daemonize() main(options, session) # not reached diff --git a/builder/mergerepos b/builder/mergerepos index 7628a78d..008dbf49 100755 --- a/builder/mergerepos +++ b/builder/mergerepos @@ -164,7 +164,7 @@ class RepoMerge(object): n = self.yumbase.add_enable_repo(rid, baseurls=[r]) n._merge_rank = count - #setup our sacks + # set up our sacks self.yumbase._getSacks(archlist=self.archlist) self.sort_and_filter() @@ -205,8 +205,8 @@ class RepoMerge(object): if reponum == 0 and not pkg.basepath: # this is the first repo (i.e.
the koji repo) and appears # to be using relative urls - #XXX - kind of a hack, but yum leaves us little choice - #force the pkg object to report a relative location + # XXX - kind of a hack, but yum leaves us little choice + # force the pkg object to report a relative location loc = """\n""" % yum.misc.to_xml(pkg.remote_path, attrib=True) pkg._return_remote_location = make_const_func(loc) if pkg.sourcerpm in seen_srpms: @@ -296,8 +296,8 @@ class RepoMerge(object): if reponum == 0 and not pkg.basepath: # this is the first repo (i.e. the koji repo) and appears # to be using relative urls - #XXX - kind of a hack, but yum leaves us little choice - #force the pkg object to report a relative location + # XXX - kind of a hack, but yum leaves us little choice + # force the pkg object to report a relative location loc = """\n""" % yum.misc.to_xml(pkg.remote_path, attrib=True) pkg._return_remote_location = make_const_func(loc) diff --git a/cli/koji b/cli/koji index fb9e1051..f97e4de5 100755 --- a/cli/koji +++ b/cli/koji @@ -50,7 +50,7 @@ def register_plugin(plugin): """ for v in six.itervalues(vars(plugin)): if isinstance(v, six.class_types): - #skip classes + # skip classes continue if callable(v): if getattr(v, 'exported_cli', False): @@ -166,12 +166,12 @@ def get_options(): value = os.path.expanduser(getattr(options, name)) setattr(options, name, value) - #honor topdir + # honor topdir if options.topdir: koji.BASEDIR = options.topdir koji.pathinfo.topdir = options.topdir - #pkgurl is obsolete + # pkgurl is obsolete if options.pkgurl: if options.topurl: warn("Warning: the pkgurl option is obsolete") diff --git a/cli/koji_cli/commands.py b/cli/koji_cli/commands.py index bc7d76c1..f23c8a18 100644 --- a/cli/koji_cli/commands.py +++ b/cli/koji_cli/commands.py @@ -484,11 +484,11 @@ def handle_build(options, session, args): opts[key] = val priority = None if build_opts.background: - #relative to koji.PRIO_DEFAULT + # relative to koji.PRIO_DEFAULT priority = 5 # try to check that source is an SRPM if '://' not in source: - #treat source as an srpm and upload it + # treat source as an srpm and upload it if not build_opts.quiet: print("Uploading srpm: %s" % source) serverdir = unique_path('cli-build') @@ -546,7 +546,7 @@ def handle_chain_build(options, session, args): src_list = [] build_level = [] - #src_lists is a list of lists of sources to build. + # src_lists is a list of lists of sources to build. # each list is block of builds ("build level") which must all be completed # before the next block begins. 
Blocks are separated on the command line with ':' for src in sources: @@ -571,7 +571,7 @@ def handle_chain_build(options, session, args): priority = None if build_opts.background: - #relative to koji.PRIO_DEFAULT + # relative to koji.PRIO_DEFAULT priority = 5 task_id = session.chainBuild(src_list, target, priority=priority) @@ -671,7 +671,7 @@ def handle_maven_build(options, session, args): opts['skip_tag'] = True priority = None if build_opts.background: - #relative to koji.PRIO_DEFAULT + # relative to koji.PRIO_DEFAULT priority = 5 task_id = session.mavenBuild(source, target, opts, priority=priority) if not build_opts.quiet: @@ -894,7 +894,7 @@ def anon_handle_mock_config(goptions, session, args): (options, args) = parser.parse_args(args) activate_session(session, goptions) if args: - #for historical reasons, we also accept buildroot name as first arg + # for historical reasons, we also accept buildroot name as first arg if not options.name: options.name = args[0] else: @@ -1155,7 +1155,7 @@ def handle_import(goptions, session, args): if data['sourcepackage']: break else: - #no srpm included, check for build + # no srpm included, check for build binfo = session.getBuild(nvr) if not binfo: print(_("Missing build or srpm: %s") % nvr) @@ -1164,7 +1164,7 @@ def handle_import(goptions, session, args): print(_("Aborting import")) return - #local function to help us out below + # local function to help us out below def do_import(path, data): rinfo = dict([(k,data[k]) for k in ('name','version','release','arch')]) prev = session.getRPM(rinfo) @@ -1391,13 +1391,13 @@ def _import_comps_alt(session, filename, tag, options): # no cover 3.x uservisible=bool(group.user_visible), description=group.description, langonly=group.langonly) - #yum.comps does not support the biarchonly field + # yum.comps does not support the biarchonly field for ptype, pdata in [('mandatory', group.mandatory_packages), ('default', group.default_packages), ('optional', group.optional_packages), ('conditional', group.conditional_packages)]: for pkg in pdata: - #yum.comps does not support basearchonly + # yum.comps does not support basearchonly pkgopts = {'type' : ptype} if ptype == 'conditional': pkgopts['requires'] = pdata[pkg] @@ -1407,8 +1407,8 @@ def _import_comps_alt(session, filename, tag, options): # no cover 3.x s_opts = ', '.join(["'%s': %r" % (k, pkgopts[k]) for k in sorted(pkgopts.keys())]) print(" Package: %s: {%s}" % (pkg, s_opts)) session.groupPackageListAdd(tag, group.groupid, pkg, force=force, **pkgopts) - #yum.comps does not support group dependencies - #yum.comps does not support metapkgs + # yum.comps does not support group dependencies + # yum.comps does not support metapkgs def handle_import_sig(goptions, session, args): @@ -1540,9 +1540,9 @@ def handle_prune_signed_copies(options, session, args): # 4) for a specified tag, remove all signed copies (no inheritance) # (but skip builds that are multiply tagged) - #for now, we're just implementing mode #1 - #(with the modification that we check to see if the build was latest within - #the last N days) + # for now, we're just implementing mode #1 + # (with the modification that we check to see if the build was latest within + # the last N days) if options.ignore_tag_file: with open(options.ignore_tag_file) as fo: options.ignore_tag.extend([line.strip() for line in fo.readlines()]) @@ -1579,7 +1579,7 @@ def handle_prune_signed_copies(options, session, args): print("...got %i builds" % len(builds)) builds.sort() else: - #single build + # single build binfo = 
session.getBuild(options.build) if not binfo: parser.error('No such build: %s' % options.build) @@ -1601,21 +1601,21 @@ def handle_prune_signed_copies(options, session, args): time_str = time.asctime(time.localtime(ts)) return "%s: %s" % (time_str, fmt % x) for nvr, binfo in builds: - #listBuilds returns slightly different data than normal + # listBuilds returns slightly different data than normal if 'id' not in binfo: binfo['id'] = binfo['build_id'] if 'name' not in binfo: binfo['name'] = binfo['package_name'] if options.debug: print("DEBUG: %s" % nvr) - #see how recently this build was latest for a tag + # see how recently this build was latest for a tag is_latest = False is_protected = False last_latest = None tags = {} for entry in session.queryHistory(build=binfo['id'])['tag_listing']: - #we used queryHistory rather than listTags so we can consider tags - #that the build was recently untagged from + # we used queryHistory rather than listTags so we can consider tags + # that the build was recently untagged from tags.setdefault(entry['tag.name'], 1) if options.debug: print("Tags: %s" % to_list(tags.keys())) @@ -1633,43 +1633,43 @@ def handle_prune_signed_copies(options, session, args): break if ignore_tag: continue - #in order to determine how recently this build was latest, we have - #to look at the tagging history. + # in order to determine how recently this build was latest, we have + # to look at the tagging history. hist = session.queryHistory(tag=tag_name, package=binfo['name'])['tag_listing'] if not hist: - #really shouldn't happen + # really shouldn't happen raise koji.GenericError("No history found for %s in %s" % (nvr, tag_name)) timeline = [] for x in hist: - #note that for revoked entries, we're effectively splitting them into - #two parts: creation and revocation. + # note that for revoked entries, we're effectively splitting them into + # two parts: creation and revocation. timeline.append((x['create_event'], 1, x)) - #at the same event, revokes happen first + # at the same event, revokes happen first if x['revoke_event'] is not None: timeline.append((x['revoke_event'], 0, x)) timeline.sort(key=lambda entry: entry[:2]) - #find most recent creation entry for our build and crop there + # find most recent creation entry for our build and crop there latest_ts = None for i in range(len(timeline)-1, -1, -1): - #searching in reverse cronological order + # searching in reverse chronological order event_id, is_create, entry = timeline[i] if entry['build_id'] == binfo['id'] and is_create: latest_ts = event_id break if not latest_ts: - #really shouldn't happen + # really shouldn't happen raise koji.GenericError("No creation event found for %s in %s" % (nvr, tag_name)) our_entry = entry if options.debug: print(_histline(event_id, our_entry)) - #now go through the events since most recent creation entry + # now go through the events since most recent creation entry timeline = timeline[i+1:] if not timeline: is_latest = True if options.debug: print("%s is latest in tag %s" % (nvr, tag_name)) break - #before we go any further, is this a protected tag? + # before we go any further, is this a protected tag?
protect_tag = False for pattern in options.protect_tag: if fnmatch.fnmatch(tag_name, pattern): @@ -1680,13 +1680,13 @@ def handle_prune_signed_copies(options, session, args): # if this build was in this tag within that limit, then we will # not prune its signed copies if our_entry['revoke_event'] is None: - #we're still tagged with a protected tag + # we're still tagged with a protected tag if options.debug: print("Build %s has protected tag %s" % (nvr, tag_name)) is_protected = True break elif our_entry['revoke_ts'] > cutoff_ts: - #we were still tagged here sometime before the cutoff + # we were still tagged here sometime before the cutoff if options.debug: print("Build %s had protected tag %s until %s" \ % (nvr, tag_name, time.asctime(time.localtime(our_entry['revoke_ts'])))) @@ -1696,40 +1696,40 @@ def handle_prune_signed_copies(options, session, args): revoke_ts = None others = {} for event_id, is_create, entry in timeline: - #So two things can knock this build from the title of latest: + # So two things can knock this build from the title of latest: # - it could be untagged (entry revoked) # - another build could become latest (replaced) - #Note however that if the superceding entry is itself revoked, then - #our build could become latest again + # Note however that if the superseding entry is itself revoked, then + # our build could become latest again if options.debug: print(_histline(event_id, entry)) if entry['build_id'] == binfo['id']: if is_create: - #shouldn't happen + # shouldn't happen raise koji.GenericError("Duplicate creation event found for %s in %s" \ % (nvr, tag_name)) else: - #we've been revoked + # we've been revoked revoke_ts = entry['revoke_ts'] break else: if is_create: - #this build has become latest + # this build has become latest replaced_ts = entry['create_ts'] if entry['active']: - #this entry not revoked yet, so we're done for this tag + # this entry not revoked yet, so we're done for this tag break - #since this entry is revoked later, our build might eventually be - #uncovered, so we have to keep looking + # since this entry is revoked later, our build might eventually be + # uncovered, so we have to keep looking others[entry['build_id']] = 1 else: - #other build revoked - #see if our build has resurfaced + # other build revoked + # see if our build has resurfaced if entry['build_id'] in others: del others[entry['build_id']] if replaced_ts is not None and not others: - #we've become latest again - #(note: we're not revoked yet because that triggers a break above) + # we've become latest again + # (note: we're not revoked yet because that triggers a break above) replaced_ts = None latest_ts = entry['revoke_ts'] if last_latest is None: @@ -1738,25 +1738,25 @@ def handle_prune_signed_copies(options, session, args): timestamps = [last_latest] if revoke_ts is None: if replaced_ts is None: - #turns out we are still latest + # turns out we are still latest is_latest = True if options.debug: print("%s is latest (again) in tag %s" % (nvr, tag_name)) break else: - #replaced (but not revoked) + # replaced (but not revoked) timestamps.append(replaced_ts) if options.debug: print("tag %s: %s not latest (replaced %s)" \ % (tag_name, nvr, time.asctime(time.localtime(replaced_ts)))) elif replaced_ts is None: - #revoked but not replaced + # revoked but not replaced timestamps.append(revoke_ts) if options.debug: print("tag %s: %s not latest (revoked %s)" \ % (tag_name, nvr, time.asctime(time.localtime(revoke_ts)))) else: - #revoked AND replaced + # revoked AND replaced
timestamps.append(min(revoke_ts, replaced_ts)) if options.debug: print("tag %s: %s not latest (revoked %s, replaced %s)" \ @@ -1772,13 +1772,13 @@ def handle_prune_signed_copies(options, session, args): continue if is_protected: continue - #not latest anywhere since cutoff, so we can remove all signed copies + # not latest anywhere since cutoff, so we can remove all signed copies rpms = session.listRPMs(buildID=binfo['id']) session.multicall = True for rpminfo in rpms: session.queryRPMSigs(rpm_id=rpminfo['id']) by_sig = {} - #index by sig + # index by sig for rpminfo, [sigs] in zip(rpms, session.multiCall()): for sig in sigs: sigkey = sig['sigkey'] @@ -1799,7 +1799,7 @@ def handle_prune_signed_copies(options, session, args): except OSError: continue if not stat.S_ISREG(st.st_mode): - #warn about this + # warn about this print("Skipping %s. Not a regular file" % signedpath) continue if st.st_mtime > cutoff_ts: @@ -1819,7 +1819,7 @@ def handle_prune_signed_copies(options, session, args): mycount +=1 build_files += 1 build_space += st.st_size - #XXX - this makes some layout assumptions, but + # XXX - this makes some layout assumptions, but # pathinfo doesn't report what we need mydir = os.path.dirname(signedpath) archdirs[mydir] = 1 @@ -2078,7 +2078,7 @@ def handle_list_signed(goptions, session, args): for rinfo in rpms: rpm_idx.setdefault(rinfo['id'], rinfo) tagged[rinfo['id']] = 1 - #Now figure out which sig entries actually have live copies + # Now figure out which sig entries actually have live copies for sig in sigs: rpm_id = sig['rpm_id'] sigkey = sig['sigkey'] @@ -2862,7 +2862,7 @@ def anon_handle_list_pkgs(goptions, session, args): # no limiting clauses were specified allpkgs = True opts['inherited'] = not options.noinherit - #hiding dups only makes sense if we're querying a tag + # hiding dups only makes sense if we're querying a tag if options.tag: opts['with_dups'] = options.show_dups else: @@ -3736,7 +3736,7 @@ def handle_add_target(goptions, session, args): if len(args) > 2: dest_tag = args[2] else: - #most targets have the same name as their destination + # most targets have the same name as their destination dest_tag = name activate_session(session, goptions) if not (session.hasPerm('admin') or session.hasPerm('target')): @@ -3866,7 +3866,7 @@ def anon_handle_list_targets(goptions, session, args): targets = [x[1] for x in tmp_list] for target in targets: print(fmt % target) - #pprint.pprint(session.getBuildTargets()) + # pprint.pprint(session.getBuildTargets()) def _printInheritance(tags, sibdepths=None, reverse=False): @@ -3992,7 +3992,7 @@ def anon_handle_list_tags(goptions, session, args): tags = session.listTags(buildinfo.get('id',None), pkginfo.get('id',None)) tags.sort(key=lambda x: x['name']) - #if options.verbose: + # if options.verbose: # fmt = "%(name)s [%(id)i] %(perm)s %(locked)s %(arches)s" if options.show_id: fmt = "%(name)s [%(id)i]" @@ -4094,14 +4094,14 @@ def _print_histline(entry, **kwargs): if len(edit) != 1: bad_edit = "%i elements" % (len(edit)+1) other = edit[0] - #check edit for sanity + # check edit for sanity if create or not other[2]: bad_edit = "out of order" if event_id != other[0]: bad_edit = "non-matching" if bad_edit: print("Warning: unusual edit at event %i in table %s (%s)" % (event_id, table, bad_edit)) - #we'll simply treat them as separate events + # we'll simply treat them as separate events pprint.pprint(entry) pprint.pprint(edit) _print_histline(entry, **kwargs) @@ -4415,11 +4415,11 @@ def anon_handle_list_history(goptions, session, args): 
if x['revoke_event'] is not None: if distinguish_match(x, 'revoked'): timeline.append((x['revoke_event'], table, 0, x.copy())) - #pprint.pprint(timeline[-1]) + # pprint.pprint(timeline[-1]) if distinguish_match(x, 'created'): timeline.append((x['create_event'], table, 1, x)) timeline.sort(key=lambda entry: entry[:3]) - #group edits together + # group edits together new_timeline = [] last_event = None edit_index = {} @@ -4892,7 +4892,7 @@ def handle_edit_tag(goptions, session, args): opts['extra'] = extra if options.remove_extra: opts['remove_extra'] = options.remove_extra - #XXX change callname + # XXX change callname session.editTag2(tag, **opts) @@ -4927,7 +4927,7 @@ def handle_lock_tag(goptions, session, args): selected = [session.getTag(name, strict=True) for name in args] for tag in selected: if options.master: - #set the master lock + # set the master lock if tag['locked']: print(_("Tag %s: master lock already set") % tag['name']) continue @@ -5293,12 +5293,12 @@ def anon_handle_list_external_repos(goptions, session, args): def _pick_external_repo_priority(session, tag): """pick priority after current ones, leaving space for later insertions""" repolist = session.getTagExternalRepos(tag_info=tag) - #ordered by priority + # ordered by priority if not repolist: priority = 5 else: priority = (repolist[-1]['priority'] + 7) // 5 * 5 - #at least 3 higher than current max and a multiple of 5 + # at least 3 higher than current max and a multiple of 5 return priority @@ -5404,7 +5404,7 @@ def handle_remove_external_repo(goptions, session, args): return 0 tags = current_tags if delete: - #removing entirely + # removing entirely if current_tags and not options.force: print(_("Error: external repo %s used by tag(s): %s") % (repo, ', '.join(current_tags))) print(_("Use --force to remove anyway")) @@ -5708,10 +5708,10 @@ def _build_image_indirection(options, task_opts, session, args): if not options.quiet: print("Created task: %d" % task_id) print("Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)) - #if task_opts.wait or (task_opts.wait is None and not _running_in_bg()): + # if task_opts.wait or (task_opts.wait is None and not _running_in_bg()): # session.logout() # return watch_tasks(session, [task_id], quiet=options.quiet) - #else: + # else: # return @@ -6045,7 +6045,7 @@ def handle_win_build(options, session, args): opts[key] = val priority = None if build_opts.background: - #relative to koji.PRIO_DEFAULT + # relative to koji.PRIO_DEFAULT priority = 5 task_id = session.winBuild(vm_name, scmurl, target, opts, priority=priority) if not build_opts.quiet: @@ -6376,7 +6376,7 @@ def handle_tag_build(opts, session, args): tasks = [] for pkg in args[1:]: task_id = session.tagBuild(args[0], pkg, force=options.force) - #XXX - wait on task + # XXX - wait on task tasks.append(task_id) print("Created task %d" % task_id) if _running_in_bg() or options.nowait: @@ -6468,7 +6468,7 @@ def handle_untag_build(goptions, session, args): builds = [] for binfo in tagged: if binfo['name'] not in seen_pkg: - #latest for this package + # latest for this package if options.verbose: print(_("Leaving latest build for package %(name)s: %(nvr)s") % binfo) else: diff --git a/cli/koji_cli/lib.py b/cli/koji_cli/lib.py index f6fceba8..4d3213ca 100644 --- a/cli/koji_cli/lib.py +++ b/cli/koji_cli/lib.py @@ -72,7 +72,7 @@ def arg_filter(arg): pass if arg in ARGMAP: return ARGMAP[arg] - #handle lists/dicts? + # handle lists/dicts? 
return arg @@ -148,7 +148,7 @@ class TaskWatcher(object): self.level = level self.quiet = quiet - #XXX - a bunch of this stuff needs to adapt to different tasks + # XXX - a bunch of this stuff needs to adapt to different tasks def str(self): if self.info: @@ -189,7 +189,7 @@ class TaskWatcher(object): sys.exit(1) state = self.info['state'] if last: - #compare and note status changes + # compare and note status changes laststate = last['state'] if laststate != state: if not self.quiet: @@ -555,7 +555,7 @@ def activate_session(session, options): noauth = options.authtype == "noauth" or getattr(options, 'noauth', False) runas = getattr(options, 'runas', None) if noauth: - #skip authentication + # skip authentication pass elif options.authtype == "ssl" or os.path.isfile(options.cert) and options.authtype is None: # authenticate using SSL client cert @@ -626,7 +626,7 @@ def _list_tasks(options, session): tasklist = session.listTasks(callopts, qopts) tasks = dict([(x['id'], x) for x in tasklist]) - #thread the tasks + # thread the tasks for t in tasklist: if t['parent'] is not None: parent = tasks.get(t['parent']) diff --git a/hub/kojihub.py b/hub/kojihub.py index 55184a81..b9cba9ff 100644 --- a/hub/kojihub.py +++ b/hub/kojihub.py @@ -128,8 +128,8 @@ class Task(object): if host_id is None: return False task_id = self.id - #getting a row lock on this task to ensure task assignment sanity - #no other concurrent transaction should be altering this row + # getting a row lock on this task to ensure task assignment sanity + # no other concurrent transaction should be altering this row q = """SELECT state,host_id FROM task WHERE id=%(task_id)s FOR UPDATE""" r = _fetchSingle(q, locals()) if not r: @@ -153,7 +153,7 @@ class Task(object): if user_id is None: return False task_id = self.id - #getting a row lock on this task to ensure task state sanity + # getting a row lock on this task to ensure task state sanity q = """SELECT owner FROM task WHERE id=%(task_id)s FOR UPDATE""" r = _fetchSingle(q, locals()) if not r: @@ -172,8 +172,8 @@ class Task(object): info = self.getInfo(request=True) self.runCallbacks('preTaskStateChange', info, 'state', koji.TASK_STATES[newstate]) self.runCallbacks('preTaskStateChange', info, 'host_id', host_id) - #we use row-level locks to keep things sane - #note the SELECT...FOR UPDATE + # we use row-level locks to keep things sane + # note the SELECT...FOR UPDATE task_id = self.id if not force: q = """SELECT state,host_id FROM task WHERE id=%(task_id)i FOR UPDATE""" @@ -192,15 +192,15 @@ class Task(object): % (task_id)) return False elif otherhost != host_id: - #task is assigned to someone else + # task is assigned to someone else return False - #otherwise the task is assigned to host_id, so keep going + # otherwise the task is assigned to host_id, so keep going else: if otherhost is None: log_error("Error: task %i is non-free but unlocked (state %i)" % (task_id, state)) return False - #if we reach here, task is either + # if we reach here, task is either # - free and unlocked # - assigned to host_id # - force option is enabled @@ -360,8 +360,8 @@ class Task(object): _dml(update, locals()) self.runCallbacks('postTaskStateChange', info, 'state', koji.TASK_STATES['CANCELED']) self.runCallbacks('postTaskStateChange', info, 'completion_ts', now) - #cancel associated builds (only if state is 'BUILDING') - #since we check build state, we avoid loops with cancel_build on our end + # cancel associated builds (only if state is 'BUILDING') + # since we check build state, we avoid loops 
with cancel_build on our end b_building = koji.BUILD_STATES['BUILDING'] q = """SELECT id FROM build WHERE task_id = %(task_id)i AND state = %(b_building)i @@ -369,7 +369,7 @@ class Task(object): for (build_id,) in _fetchMulti(q, locals()): cancel_build(build_id, cancel_task=False) if recurse: - #also cancel child tasks + # also cancel child tasks self.cancelChildren() return True @@ -392,7 +392,7 @@ class Task(object): if parent is not None: if strict: raise koji.GenericError("Task %d is not top-level (parent=%d)" % (task_id, parent)) - #otherwise, find the top-level task and go from there + # otherwise, find the top-level task and go from there seen = {task_id:1} while parent is not None: if parent in seen: @@ -401,15 +401,15 @@ class Task(object): seen[task_id] = 1 parent = _singleValue(q, locals()) return Task(task_id).cancelFull(strict=True) - #We handle the recursion ourselves, since self.cancel will stop at - #canceled or closed tasks. + # We handle the recursion ourselves, since self.cancel will stop at + # canceled or closed tasks. tasklist = [task_id] seen = {} - #query for use in loop + # query for use in loop q_children = """SELECT id FROM task WHERE parent = %(task_id)i""" for task_id in tasklist: if task_id in seen: - #shouldn't happen + # shouldn't happen raise koji.GenericError("Task LOOP at task %i" % task_id) seen[task_id] = 1 Task(task_id).cancel(recurse=False) @@ -527,14 +527,14 @@ def make_task(method, arglist, **opts): pdata = dict(zip(fields, r)) if pdata['state'] != koji.TASK_STATES['OPEN']: raise koji.GenericError("Parent task (id %(parent)s) is not open" % opts) - #default to a higher priority than parent + # default to a higher priority than parent opts.setdefault('priority', pdata['priority'] - 1) for f in ('owner', 'arch'): opts.setdefault(f, pdata[f]) opts.setdefault('label', None) else: opts.setdefault('priority', koji.PRIO_DEFAULT) - #calling function should enforce priority limitations, if applicable + # calling function should enforce priority limitations, if applicable opts.setdefault('arch', 'noarch') if not context.session.logged_in: raise koji.GenericError('task must have an owner') @@ -542,7 +542,7 @@ def make_task(method, arglist, **opts): opts['owner'] = context.session.user_id opts['label'] = None opts['parent'] = None - #determine channel from policy + # determine channel from policy policy_data = {} policy_data['method'] = method for key in 'arch', 'parent', 'label', 'owner': @@ -662,7 +662,7 @@ def readGlobalInheritance(event=None): ORDER BY priority """ % (",".join(fields), eventCondition(event)) c.execute(q, locals()) - #convert list of lists into a list of dictionaries + # convert list of lists into a list of dictionaries return [dict(zip(fields, x)) for x in c.fetchall()] def readInheritanceData(tag_id, event=None): @@ -673,7 +673,7 @@ def readInheritanceData(tag_id, event=None): ORDER BY priority """ % (",".join(fields), eventCondition(event)) c.execute(q, locals()) - #convert list of lists into a list of dictionaries + # convert list of lists into a list of dictionaries data = [dict(zip(fields, x)) for x in c.fetchall()] # include the current tag_id as child_id, so we can retrace the inheritance chain later for datum in data: @@ -688,7 +688,7 @@ def readDescendantsData(tag_id, event=None): ORDER BY priority """ % (",".join(fields), eventCondition(event)) c.execute(q, locals()) - #convert list of lists into a list of dictionaries + # convert list of lists into a list of dictionaries data = [dict(zip(fields, x)) for x in c.fetchall()] return 
data @@ -733,7 +733,7 @@ def _writeInheritanceData(tag_id, changes, clear=False): elif not orig or clear: data[parent_id] = link else: - #not a delete request and we have a previous link to parent + # not a delete request and we have a previous link to parent for f in fields: if orig[f] != link[f]: data[parent_id] = link @@ -752,7 +752,7 @@ def _writeInheritanceData(tag_id, changes, clear=False): # nothing to do log_error("No inheritance changes") return - #check for duplicate priorities + # check for duplicate priorities pri_index = {} for link in six.itervalues(data): if link.get('delete link'): @@ -761,7 +761,7 @@ def _writeInheritanceData(tag_id, changes, clear=False): for pri, dups in six.iteritems(pri_index): if len(dups) <= 1: continue - #oops, duplicate entries for a single priority + # oops, duplicate entries for a single priority dup_ids = [link['parent_id'] for link in dups] raise koji.GenericError("Inheritance priorities must be unique (pri %s: %r )" % (pri, dup_ids)) for parent_id, link in six.iteritems(data): @@ -799,8 +799,8 @@ def readFullInheritance(tag_id, event=None, reverse=False, stops=None, jumps=Non def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdepth, maxdepth, noconfig, pfilter, reverse, jumps): if maxdepth is not None and maxdepth < 1: return - #note: maxdepth is relative to where we are, but currdepth is absolute from - #the top. + # note: maxdepth is relative to where we are, but currdepth is absolute from + # the top. currdepth += 1 top = top.copy() top[tag_id] = 1 @@ -816,11 +816,11 @@ def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdept if id in jumps: id = jumps[id] if id in top: - #LOOP! + # LOOP! if event is None: # only log if the issue is current log_error("Warning: INHERITANCE LOOP detected at %s -> %s, pruning" % (tag_id, id)) - #auto prune + # auto prune continue if id in prunes: # ignore pruned tags @@ -829,16 +829,16 @@ def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdept # ignore intransitive inheritance links, except at root continue if link['priority'] < 0: - #negative priority indicates pruning, rather than inheritance + # negative priority indicates pruning, rather than inheritance prunes[id] = 1 continue if reverse: - #maxdepth logic is different in this case. no propagation + # maxdepth logic is different in this case. 
no propagation if link['maxdepth'] is not None and link['maxdepth'] < currdepth - 1: continue nextdepth = None else: - #propagate maxdepth + # propagate maxdepth nextdepth = link['maxdepth'] if nextdepth is None: if maxdepth is not None: @@ -847,7 +847,7 @@ def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdept nextdepth = min(nextdepth, maxdepth) - 1 link['nextdepth'] = nextdepth link['currdepth'] = currdepth - #propagate noconfig and pkg_filter controls + # propagate noconfig and pkg_filter controls if link['noconfig']: noconfig = True filter = list(pfilter) # copy @@ -857,10 +857,10 @@ def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdept link['filter'] = filter # check history to avoid redundant entries if id in hist: - #already been there - #BUT, options may have been different + # already been there + # BUT, options may have been different rescan = True - #since rescans are possible, we might have to consider more than one previous hit + # since rescans are possible, we might have to consider more than one previous hit for previous in hist[id]: sufficient = True # is previous sufficient? # if last depth was less than current, then previous insufficient @@ -941,8 +941,8 @@ def pkglist_add(taginfo, pkginfo, owner=None, block=None, extra_arches=None, for def _direct_pkglist_add(taginfo, pkginfo, owner, block, extra_arches, force, update, policy=False): """Like pkglist_add, but without policy or access check""" - #access control comes a little later (via an assert_policy) - #should not make any changes until after policy is checked + # access control comes a little later (via an assert_policy) + # should not make any changes until after policy is checked tag = get_tag(taginfo, strict=True) tag_id = tag['id'] pkg = lookup_package(pkginfo, strict=False) @@ -959,7 +959,7 @@ def _direct_pkglist_add(taginfo, pkginfo, owner, block, extra_arches, force, if policy: context.session.assertLogin() policy_data = {'tag' : tag_id, 'action' : action, 'package' : pkginfo, 'force' : force} - #don't check policy for admins using force + # don't check policy for admins using force if not (force and context.session.hasPerm('admin')): assert_policy('package_list', policy_data) if not pkg: @@ -981,11 +981,11 @@ def _direct_pkglist_add(taginfo, pkginfo, owner, block, extra_arches, force, if previous is None: block = bool(block) if update and not force: - #if update flag is true, require that there be a previous entry + # if update flag is true, require that there be a previous entry raise koji.GenericError("cannot update: tag %s has no data for package %s" \ % (tag['name'], pkg['name'])) else: - #already there (possibly via inheritance) + # already there (possibly via inheritance) if owner is None: owner = previous['owner_id'] changed_owner = previous['owner_id'] != owner @@ -995,14 +995,14 @@ def _direct_pkglist_add(taginfo, pkginfo, owner, block, extra_arches, force, block = bool(block) if extra_arches is None: extra_arches = previous['extra_arches'] - #see if the data is the same + # see if the data is the same for key, value in (('blocked', block), ('extra_arches', extra_arches)): if previous[key] != value: changed = True break if not changed and not changed_owner and not force: - #no point in adding it again with the same data + # no point in adding it again with the same data return if previous['blocked'] and not block and not force: raise koji.GenericError("package %s is blocked in tag %s" % (pkg['name'], tag['name'])) @@ -1038,7 +1038,7 @@ def 
_direct_pkglist_remove(taginfo, pkginfo, force=False, policy=False): if policy: context.session.assertLogin() policy_data = {'tag' : tag['id'], 'action' : 'remove', 'package' : pkg['id'], 'force' : force} - #don't check policy for admins using force + # don't check policy for admins using force if not (force and context.session.hasPerm('admin')): assert_policy('package_list', policy_data) user = get_user(context.session.user_id) @@ -1067,7 +1067,7 @@ def pkglist_unblock(taginfo, pkginfo, force=False): pkg = lookup_package(pkginfo, strict=True) context.session.assertLogin() policy_data = {'tag' : tag['id'], 'action' : 'unblock', 'package' : pkg['id'], 'force' : force} - #don't check policy for admins using force + # don't check policy for admins using force if not (force and context.session.hasPerm('admin')): assert_policy('package_list', policy_data) user = get_user(context.session.user_id) @@ -1084,10 +1084,10 @@ def pkglist_unblock(taginfo, pkginfo, force=False): if previous['tag_id'] != tag_id: _pkglist_add(tag_id, pkg_id, previous['owner_id'], False, previous['extra_arches']) else: - #just remove the blocking entry + # just remove the blocking entry _pkglist_remove(tag_id, pkg_id) - #it's possible this was the only entry in the inheritance or that the next entry - #back is also a blocked entry. if so, we need to add it back as unblocked + # it's possible this was the only entry in the inheritance or that the next entry - # back is also a blocked entry. if so, we need to add it back as unblocked pkglist = readPackageList(tag_id, pkgID=pkg_id, inherit=True) if pkg_id not in pkglist or pkglist[pkg_id]['blocked']: _pkglist_add(tag_id, pkg_id, previous['owner_id'], False, previous['extra_arches']) @@ -1174,7 +1174,7 @@ def readPackageList(tagID=None, userID=None, pkgID=None, event=None, inherit=Fal for p in _multiRow(q, locals(), [pair[1] for pair in fields]): pkgid = p['package_id'] if not with_dups and pkgid in packages: - #previous data supercedes + # previous data supersedes continue # apply package filters skip = False @@ -1279,11 +1279,11 @@ def readTaggedBuilds(tag, event=None, inherit=False, latest=False, package=None, if inherit: taglist += [link['parent_id'] for link in readFullInheritance(tag, event)] - #regardless of inherit setting, we need to use inheritance to read the - #package list + # regardless of inherit setting, we need to use inheritance to read the + # package list packages = readPackageList(tagID=tag, event=event, inherit=True, pkgID=package) - #these values are used for each iteration + # these values are used for each iteration fields = [('tag.id', 'tag_id'), ('tag.name', 'tag_name'), ('build.id', 'id'), ('build.id', 'build_id'), ('build.version', 'version'), ('build.release', 'release'), ('build.epoch', 'epoch'), ('build.state', 'state'), ('build.completion_time', 'completion_time'), @@ -1344,7 +1344,7 @@ def readTaggedBuilds(tag, event=None, inherit=False, latest=False, package=None, builds = [] seen = {} # used to enforce the 'latest' option for tagid in taglist: - #log_error(koji.db._quoteparams(q,locals())) + # log_error(koji.db._quoteparams(q,locals())) for build in _multiRow(q, locals(), [pair[1] for pair in fields]): pkgid = build['package_id'] pinfo = packages.get(pkgid, None) @@ -1377,15 +1377,15 @@ def readTaggedRPMS(tag, package=None, arch=None, event=None, inherit=False, late """ taglist = [tag] if inherit: - #XXX really should cache this - it gets called several places + # XXX really should cache this - it gets called several places # (however, it is 
fairly quick) taglist += [link['parent_id'] for link in readFullInheritance(tag, event)] builds = readTaggedBuilds(tag, event=event, inherit=inherit, latest=latest, package=package, owner=owner, type=type) - #index builds + # index builds build_idx = dict([(b['build_id'], b) for b in builds]) - #the following query is run for each tag in the inheritance + # the following query is run for each tag in the inheritance fields = [('rpminfo.name', 'name'), ('rpminfo.version', 'version'), ('rpminfo.release', 'release'), @@ -1432,17 +1432,17 @@ def readTaggedRPMS(tag, package=None, arch=None, event=None, inherit=False, late def _iter_rpms(): for tagid in taglist: if tagid in tags_seen: - #certain inheritance trees can (legitimately) have the same tag - #appear more than once (perhaps once with a package filter and once - #without). The hard part of that was already done by readTaggedBuilds. - #We only need consider each tag once. Note how we use build_idx below. - #(Without this, we could report the same rpm twice) + # certain inheritance trees can (legitimately) have the same tag + # appear more than once (perhaps once with a package filter and once + # without). The hard part of that was already done by readTaggedBuilds. + # We only need consider each tag once. Note how we use build_idx below. + # (Without this, we could report the same rpm twice) continue else: tags_seen[tagid] = 1 query.values['tagid'] = tagid for rpminfo in query.iterate(): - #note: we're checking against the build list because + # note: we're checking against the build list because # it has been filtered by the package list. The tag # tools should endeavor to keep tag_listing sane w.r.t. # the package list, but if there is disagreement the package @@ -1451,7 +1451,7 @@ def readTaggedRPMS(tag, package=None, arch=None, event=None, inherit=False, late if build is None: continue elif build['tag_id'] != tagid: - #wrong tag + # wrong tag continue yield rpminfo return [_iter_rpms(), builds] @@ -1469,16 +1469,16 @@ def readTaggedArchives(tag, package=None, event=None, inherit=False, latest=True """ taglist = [tag] if inherit: - #XXX really should cache this - it gets called several places + # XXX really should cache this - it gets called several places # (however, it is fairly quick) taglist += [link['parent_id'] for link in readFullInheritance(tag, event)] # If type == 'maven', we require that both the build *and* the archive have Maven metadata builds = readTaggedBuilds(tag, event=event, inherit=inherit, latest=latest, package=package, type=type) - #index builds + # index builds build_idx = dict([(b['build_id'], b) for b in builds]) - #the following query is run for each tag in the inheritance + # the following query is run for each tag in the inheritance fields = [('archiveinfo.id', 'id'), ('archiveinfo.type_id', 'type_id'), ('archiveinfo.btype_id', 'btype_id'), @@ -1527,17 +1527,17 @@ def readTaggedArchives(tag, package=None, event=None, inherit=False, latest=True tags_seen = {} for tagid in taglist: if tagid in tags_seen: - #certain inheritance trees can (legitimately) have the same tag - #appear more than once (perhaps once with a package filter and once - #without). The hard part of that was already done by readTaggedBuilds. - #We only need consider each tag once. Note how we use build_idx below. - #(Without this, we could report the same rpm twice) + # certain inheritance trees can (legitimately) have the same tag + # appear more than once (perhaps once with a package filter and once + # without). 
The hard part of that was already done by readTaggedBuilds. + # We only need consider each tag once. Note how we use build_idx below. + # (Without this, we could report the same rpm twice) continue else: tags_seen[tagid] = 1 query.values = {'tagid': tagid, 'package': package} for archiveinfo in query.execute(): - #note: we're checking against the build list because + # note: we're checking against the build list because # it has been filtered by the package list. The tag # tools should endeavor to keep tag_listing sane w.r.t. # the package list, but if there is disagreement the package @@ -1546,7 +1546,7 @@ def readTaggedArchives(tag, package=None, event=None, inherit=False, latest=True if build is None: continue elif build['tag_id'] != tagid: - #wrong tag + # wrong tag continue archives.append(archiveinfo) return [archives, builds] @@ -1600,7 +1600,7 @@ def _tag_build(tag, build, user_id=None, force=False): else: # use the user associated with the current session user = get_user(context.session.user_id, strict=True) - #access check + # access check assert_tag_access(tag['id'], user_id=user_id, force=force) return _direct_tag_build(tag, build, user, force) @@ -1623,19 +1623,19 @@ def _direct_tag_build(tag, build, user, force=False): query = QueryProcessor(columns=['build_id'], tables=[table], clauses=('active = TRUE',)+clauses, values=locals(), opts={'rowlock':True}) - #note: tag_listing is unique on (build_id, tag_id, active) + # note: tag_listing is unique on (build_id, tag_id, active) if query.executeOne(): - #already tagged + # already tagged if not force: raise koji.TagError("build %s already tagged (%s)" % (nvr, tag['name'])) - #otherwise we retag + # otherwise we retag retag = True if retag: - #revoke the old tag first + # revoke the old tag first update = UpdateProcessor(table, values=locals(), clauses=clauses) update.make_revoke(user_id=user_id) update.execute() - #tag the package + # tag the package insert = InsertProcessor(table) insert.set(tag_id=tag_id, build_id=build_id) insert.make_create(user_id=user_id) @@ -1687,7 +1687,7 @@ def _direct_untag_build(tag, build, user, strict=True, force=False): def grplist_add(taginfo, grpinfo, block=False, force=False, **opts): """Add to (or update) group list for tag""" - #only admins.... + # only admins.... 
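# Aside: a minimal sketch (not from the koji source) of the versioned-row
# convention visible in the _direct_tag_build hunk above: an existing tag
# entry is never UPDATEd in place; the active row is revoked and a fresh row
# inserted, so history is preserved. Field names here are simplified.
def retag_sketch(listing, tag_id, build_id, event):
    # revoke any currently-active row for this (tag, build) pair
    for row in listing:
        if row['tag_id'] == tag_id and row['build_id'] == build_id and row['active']:
            row['active'] = False
            row['revoke_event'] = event
    # insert the replacement row; the revoked rows remain as history
    listing.append({'tag_id': tag_id, 'build_id': build_id, 'active': True,
                    'create_event': event, 'revoke_event': None})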
context.session.assertPerm('tag') _grplist_add(taginfo, grpinfo, block, force, **opts) @@ -1702,13 +1702,13 @@ def _grplist_add(taginfo, grpinfo, block, force, **opts): previous = groups.get(group['id'], None) cfg_fields = ('exported', 'display_name', 'is_default', 'uservisible', 'description', 'langonly', 'biarchonly', 'blocked') - #prevent user-provided opts from doing anything strange + # prevent user-provided opts from doing anything strange opts = dslice(opts, cfg_fields, strict=False) if previous is not None: - #already there (possibly via inheritance) + # already there (possibly via inheritance) if previous['blocked'] and not force: raise koji.GenericError("group %s is blocked in tag %s" % (group['name'], tag['name'])) - #check for duplication and grab old data for defaults + # check for duplication and grab old data for defaults changed = False for field in cfg_fields: old = previous[field] @@ -1718,9 +1718,9 @@ def _grplist_add(taginfo, grpinfo, block, force, **opts): else: opts[field] = old if not changed: - #no point in adding it again with the same data + # no point in adding it again with the same data return - #provide available defaults and sanity check data + # provide available defaults and sanity check data opts.setdefault('display_name', group['name']) opts.setdefault('biarchonly', False) opts.setdefault('exported', True) @@ -1728,12 +1728,12 @@ def _grplist_add(taginfo, grpinfo, block, force, **opts): # XXX ^^^ opts['tag_id'] = tag['id'] opts['group_id'] = group['id'] - #revoke old entry (if present) + # revoke old entry (if present) update = UpdateProcessor('group_config', values=opts, clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s']) update.make_revoke() update.execute() - #add new entry + # add new entry insert = InsertProcessor('group_config', data=opts) insert.make_create() insert.execute() @@ -1755,7 +1755,7 @@ def grplist_remove(taginfo, grpinfo, force=False): Really this shouldn't be used except in special cases Most of the time you really want to use the block or unblock functions """ - #only admins.... + # only admins.... context.session.assertPerm('tag') _grplist_remove(taginfo, grpinfo, force) @@ -1826,7 +1826,7 @@ def _grplist_unblock(taginfo, grpinfo): def grp_pkg_add(taginfo, grpinfo, pkg_name, block=False, force=False, **opts): """Add package to group for tag""" - #only admins.... + # only admins.... 
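# Aside: a minimal sketch (not from the koji source) of the cfg_fields loop
# in the _grplist_add hunk above, which both detects changes and backfills
# defaults from the previous, possibly inherited, entry. The field names in
# the default tuple are illustrative only.
def merge_group_opts(previous, opts, cfg_fields=('display_name', 'uservisible')):
    changed = False
    for field in cfg_fields:
        old = previous[field]
        if field in opts:
            if opts[field] != old:
                changed = True
        else:
            opts[field] = old  # caller did not specify; keep the old value
    return changed  # False means re-adding would duplicate existing data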
context.session.assertPerm('tag') _grp_pkg_add(taginfo, grpinfo, pkg_name, block, force, **opts) @@ -1845,14 +1845,14 @@ def _grp_pkg_add(taginfo, grpinfo, pkg_name, block, force, **opts): raise koji.GenericError("group %s is blocked in tag %s" % (group['name'], tag['name'])) previous = grp_cfg['packagelist'].get(pkg_name, None) cfg_fields = ('type', 'basearchonly', 'requires') - #prevent user-provided opts from doing anything strange + # prevent user-provided opts from doing anything strange opts = dslice(opts, cfg_fields, strict=False) if previous is not None: - #already there (possibly via inheritance) + # already there (possibly via inheritance) if previous['blocked'] and not force: raise koji.GenericError("package %s blocked in group %s, tag %s" \ % (pkg_name, group['name'], tag['name'])) - #check for duplication and grab old data for defaults + # check for duplication and grab old data for defaults changed = False for field in cfg_fields: old = previous[field] @@ -1862,23 +1862,23 @@ def _grp_pkg_add(taginfo, grpinfo, pkg_name, block, force, **opts): else: opts[field] = old if block: - #from condition above, either previous is not blocked or force is on, - #either way, we should add the entry + # from condition above, either previous is not blocked or force is on, + # either way, we should add the entry changed = True if not changed and not force: - #no point in adding it again with the same data (unless force is on) + # no point in adding it again with the same data (unless force is on) return opts.setdefault('type', 'mandatory') opts['group_id'] = group['id'] opts['tag_id'] = tag['id'] opts['package'] = pkg_name opts['blocked'] = block - #revoke old entry (if present) + # revoke old entry (if present) update = UpdateProcessor('group_package_listing', values=opts, clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s', 'package=%(package)s']) update.make_revoke() update.execute() - #add new entry + # add new entry insert = InsertProcessor('group_package_listing', data=opts) insert.make_create() insert.execute() @@ -1890,7 +1890,7 @@ def grp_pkg_remove(taginfo, grpinfo, pkg_name, force=False): Really this shouldn't be used except in special cases Most of the time you really want to use the block or unblock functions """ - #only admins.... + # only admins.... context.session.assertPerm('tag') _grp_pkg_remove(taginfo, grpinfo, pkg_name, force) @@ -1949,7 +1949,7 @@ def _grp_pkg_unblock(taginfo, grpinfo, pkg_name): def grp_req_add(taginfo, grpinfo, reqinfo, block=False, force=False, **opts): """Add group requirement to group for tag""" - #only admins.... + # only admins.... 
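# Aside: dslice(opts, cfg_fields, strict=False) in the _grp_pkg_add hunk
# above whitelists the user-provided keyword options so stray keys never
# reach the insert. A rough standalone equivalent of that non-strict slice:
def dslice_sketch(data, keys):
    # keep only whitelisted keys that are actually present
    return {key: data[key] for key in keys if key in data}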
context.session.assertPerm('tag') _grp_req_add(taginfo, grpinfo, reqinfo, block, force, **opts) @@ -1969,14 +1969,14 @@ def _grp_req_add(taginfo, grpinfo, reqinfo, block, force, **opts): raise koji.GenericError("group %s is blocked in tag %s" % (group['name'], tag['name'])) previous = grp_cfg['grouplist'].get(req['id'], None) cfg_fields = ('type', 'is_metapkg') - #prevent user-provided opts from doing anything strange + # prevent user-provided opts from doing anything strange opts = dslice(opts, cfg_fields, strict=False) if previous is not None: - #already there (possibly via inheritance) + # already there (possibly via inheritance) if previous['blocked'] and not force: raise koji.GenericError("requirement on group %s blocked in group %s, tag %s" \ % (req['name'], group['name'], tag['name'])) - #check for duplication and grab old data for defaults + # check for duplication and grab old data for defaults changed = False for field in cfg_fields: old = previous[field] @@ -1986,23 +1986,23 @@ def _grp_req_add(taginfo, grpinfo, reqinfo, block, force, **opts): else: opts[field] = old if block: - #from condition above, either previous is not blocked or force is on, - #either way, we should add the entry + # from condition above, either previous is not blocked or force is on, + # either way, we should add the entry changed = True if not changed: - #no point in adding it again with the same data + # no point in adding it again with the same data return opts.setdefault('type', 'mandatory') opts['group_id'] = group['id'] opts['tag_id'] = tag['id'] opts['req_id'] = req['id'] opts['blocked'] = block - #revoke old entry (if present) + # revoke old entry (if present) update = UpdateProcessor('group_req_listing', values=opts, clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s', 'req_id=%(req_id)s']) update.make_revoke() update.execute() - #add new entry + # add new entry insert = InsertProcessor('group_req_listing', data=opts) insert.make_create() insert.execute() @@ -2014,7 +2014,7 @@ def grp_req_remove(taginfo, grpinfo, reqinfo, force=False): Really this shouldn't be used except in special cases Most of the time you really want to use the block or unblock functions """ - #only admins.... + # only admins.... 
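# Aside: as the _grp_req_add hunk above shows, blocking a group requirement
# writes a row with blocked=True rather than deleting anything, so the block
# is itself versioned and can be inherited. A toy resolver (not from the
# koji source), assuming rows arrive ordered nearest-tag-first:
def effective_reqs(rows):
    seen = {}
    for row in rows:
        seen.setdefault(row['req_id'], row)  # first (nearest) row wins
    return [req_id for req_id, row in seen.items() if not row['blocked']]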
context.session.assertPerm('tag') _grp_req_remove(taginfo, grpinfo, reqinfo, force) @@ -2111,11 +2111,11 @@ def get_tag_groups(tag, event=None, inherit=True, incl_pkgs=True, incl_reqs=True for grp_pkg in _multiRow(q, locals(), fields): grp_id = grp_pkg['group_id'] if grp_id not in groups: - #tag does not have this group + # tag does not have this group continue group = groups[grp_id] if group['blocked']: - #ignore blocked groups + # ignore blocked groups continue pkg_name = grp_pkg['package'] group['packagelist'].setdefault(pkg_name, grp_pkg) @@ -2132,18 +2132,18 @@ def get_tag_groups(tag, event=None, inherit=True, incl_pkgs=True, incl_reqs=True for grp_req in _multiRow(q, locals(), fields): grp_id = grp_req['group_id'] if grp_id not in groups: - #tag does not have this group + # tag does not have this group continue group = groups[grp_id] if group['blocked']: - #ignore blocked groups + # ignore blocked groups continue req_id = grp_req['req_id'] if req_id not in groups: - #tag does not have this group + # tag does not have this group continue elif groups[req_id]['blocked']: - #ignore blocked groups + # ignore blocked groups continue group['grouplist'].setdefault(req_id, grp_req) @@ -2159,7 +2159,7 @@ def readTagGroups(tag, event=None, inherit=True, incl_pkgs=True, incl_reqs=True, groups = get_tag_groups(tag, event, inherit, incl_pkgs, incl_reqs) groups = to_list(groups.values()) for group in groups: - #filter blocked entries and collapse to a list + # filter blocked entries and collapse to a list if 'packagelist' in group: if incl_blocked: group['packagelist'] = to_list(group['packagelist'].values()) @@ -2170,7 +2170,7 @@ def readTagGroups(tag, event=None, inherit=True, incl_pkgs=True, incl_reqs=True, group['grouplist'] = to_list(group['grouplist'].values()) else: group['grouplist'] = [x for x in group['grouplist'].values() if not x['blocked']] - #filter blocked entries and collapse to a list + # filter blocked entries and collapse to a list if incl_blocked: return groups else: @@ -2267,7 +2267,7 @@ def remove_channel(channel_name, force=False): # check for task references query = QueryProcessor(tables=['task'], clauses=['channel_id=%(channel_id)i'], values=locals(), columns=['id'], opts={'limit':1}) - #XXX slow query + # XXX slow query if query.execute(): raise koji.GenericError('channel %s has task references' % channel_name) query = QueryProcessor(tables=['host_channels'], clauses=['channel_id=%(channel_id)i'], @@ -2316,8 +2316,8 @@ def get_all_arches(): if arches is None: continue for arch in arches.split(): - #in a perfect world, this list would only include canonical - #arches, but not all admins will undertand that. + # in a perfect world, this list would only include canonical + # arches, but not all admins will understand that. 
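# Aside: the assignment that follows uses a dict purely as a set,
# deduplicating host arches after canonicalizing them. A standalone sketch
# (not from the koji source), with a hypothetical canon() standing in for
# koji.canonArch:
def all_arches_sketch(arch_strings, canon):
    seen = set()
    for arches in arch_strings:  # each entry like "i386 i686 x86_64", or None
        if arches is None:
            continue
        seen.update(canon(arch) for arch in arches.split())
    return sorted(seen)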
ret[koji.canonArch(arch)] = 1 return to_list(ret.keys()) @@ -2471,7 +2471,7 @@ def repo_init(tag, with_src=False, with_debuginfo=False, event=None, with_separa if event is None: event_id = _singleValue("SELECT get_event()") else: - #make sure event is valid + # make sure event is valid q = "SELECT time FROM events WHERE id=%(event)s" event_time = _singleValue(q, locals(), strict=True) event_id = event @@ -2490,7 +2490,7 @@ def repo_init(tag, with_src=False, with_debuginfo=False, event=None, with_separa repodir = koji.pathinfo.repo(repo_id, tinfo['name']) os.makedirs(repodir) #should not already exist - #generate comps and groups.spec + # generate comps and groups.spec groupsdir = "%s/groups" % (repodir) koji.ensuredir(groupsdir) comps = koji.generate_comps(groups, expand_groups=True) @@ -2510,13 +2510,13 @@ def repo_init(tag, with_src=False, with_debuginfo=False, event=None, with_separa with open('%s/repo.json' % repodir, 'w') as fp: json.dump(repo_info, fp, indent=2) - #get build dirs + # get build dirs relpathinfo = koji.PathInfo(topdir='toplink') builddirs = {} for build in builds: relpath = relpathinfo.build(build) builddirs[build['id']] = relpath.lstrip('/') - #generate pkglist files + # generate pkglist files pkglist = {} for repoarch in repo_arches: archdir = joinpath(repodir, repoarch) @@ -2526,7 +2526,7 @@ def repo_init(tag, with_src=False, with_debuginfo=False, event=None, with_separa top_link = joinpath(archdir, 'toplink') os.symlink(top_relpath, top_link) pkglist[repoarch] = open(joinpath(archdir, 'pkglist'), 'w') - #NOTE - rpms is now an iterator + # NOTE - rpms is now an iterator for rpminfo in rpms: if not with_debuginfo and koji.is_debuginfo(rpminfo['name']): continue @@ -2552,7 +2552,7 @@ def repo_init(tag, with_src=False, with_debuginfo=False, event=None, with_separa for repoarch in repo_arches: pkglist[repoarch].close() - #write blocked package lists + # write blocked package lists for repoarch in repo_arches: blocklist = open(joinpath(repodir, repoarch, 'blocklist'), 'w') for pkg in blocks: @@ -2730,7 +2730,7 @@ def repo_delete(repo_id): """Attempt to mark repo deleted, return number of references If the number of references is nonzero, no change is made""" - #get a row lock on the repo + # get a row lock on the repo q = """SELECT state FROM repo WHERE id = %(repo_id)s FOR UPDATE""" _singleValue(q, locals()) references = repo_references(repo_id) @@ -2768,7 +2768,7 @@ def repo_references(repo_id): clauses = ['repo_id=%(repo_id)s', 'retire_event IS NULL'] query = QueryProcessor(columns=fields, aliases=aliases, tables=['standard_buildroot'], clauses=clauses, values=values) - #check results for bad states + # check results for bad states ret = [] for data in query.execute(): if data['state'] == koji.BR_STATES['EXPIRED']: @@ -2813,14 +2813,14 @@ def tag_changed_since_event(event, taglist): Returns: True or False """ data = locals().copy() - #first check the tag_updates table + # first check the tag_updates table clauses = ['update_event > %(event)i', 'tag_id IN %(taglist)s'] query = QueryProcessor(tables=['tag_updates'], columns=['id'], clauses=clauses, values=data, opts={'limit': 1}) if query.execute(): return True - #also check these versioned tables + # also check these versioned tables tables = ( 'tag_listing', 'tag_inheritance', @@ -2890,8 +2890,8 @@ def _create_build_target(name, build_tag, dest_tag): raise koji.GenericError("destination tag '%s' does not exist" % dest_tag) dest_tag = dest_tag_object['id'] - #build targets are versioned, so if the target has previously 
been deleted, it - #is possible the name is in the system + # build targets are versioned, so if the target has previously been deleted, it + # is possible the name is in the system id = get_build_target_id(name, create=True) insert = InsertProcessor('build_target_config') @@ -3137,7 +3137,7 @@ def _create_tag(name, parent=None, arches=None, perm=None, locked=False, maven_s if not context.opts.get('EnableMaven') and (maven_support or maven_include_all): raise koji.GenericError("Maven support not enabled") - #see if there is already a tag by this name (active) + # see if there is already a tag by this name (active) if get_tag(name): raise koji.GenericError("A tag with the name '%s' already exists" % name) @@ -3150,7 +3150,7 @@ def _create_tag(name, parent=None, arches=None, perm=None, locked=False, maven_s else: parent_id = None - #there may already be an id for a deleted tag, this will reuse it + # there may already be an id for a deleted tag, this will reuse it tag_id = get_tag_id(name, create=True) insert = InsertProcessor('tag_config') @@ -3296,8 +3296,8 @@ def _edit_tag(tagInfo, **kwargs): name = kwargs.get('name') if name and tag['name'] != name: - #attempt to update tag name - #XXX - I'm not sure we should allow this sort of renaming anyway. + # attempt to update tag name + # XXX - I'm not sure we should allow this sort of renaming anyway. # while I can see the convenience, it is an untracked change (granted # a cosmetic one). The more versioning-friendly way would be to create # a new tag with duplicate data and revoke the old tag. This is more @@ -3309,7 +3309,7 @@ def _edit_tag(tagInfo, **kwargs): q = """SELECT id FROM tag WHERE name=%(name)s""" id = _singleValue(q, values, strict=False) if id is not None: - #new name is taken + # new name is taken raise koji.GenericError("Name %s already taken by tag %s" % (name, id)) update = """UPDATE tag SET name = %(name)s @@ -3321,7 +3321,7 @@ WHERE id = %(tagID)i""" if arches and tag['arches'] != arches: kwargs['arches'] = koji.parse_arches(arches, strict=True, allow_none=True) - #check for changes + # check for changes data = tag.copy() changed = False for key in ('perm_id', 'arches', 'locked', 'maven_support', 'maven_include_all'): @@ -3394,7 +3394,7 @@ def delete_tag(tagInfo): def _delete_tag(tagInfo): """Delete the specified tag.""" - #We do not ever DELETE tag data. It is versioned -- we revoke it instead. + # We do not ever DELETE tag data. It is versioned -- we revoke it instead. def _tagDelete(tableName, value, columnName='tag_id'): update = UpdateProcessor(tableName, clauses=["%s = %%(value)i" % columnName], @@ -3406,8 +3406,8 @@ def _delete_tag(tagInfo): tagID = tag['id'] _tagDelete('tag_config', tagID) - #technically, to 'delete' the tag we only have to revoke the tag_config entry - #these remaining revocations are more for cleanup. + # technically, to 'delete' the tag we only have to revoke the tag_config entry + # these remaining revocations are more for cleanup. 
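# Aside: _delete_tag above never issues a DELETE; the nested _tagDelete
# helper revokes the active rows table by table, and the calls that follow
# sweep the remaining tag data. The same shape, sketched (not from the koji
# source) over an assumed revoke(table, column, value) callable:
def delete_tag_sketch(tag_id, revoke):
    for table, column in [('tag_config', 'tag_id'),
                          ('tag_extra', 'tag_id'),
                          ('tag_inheritance', 'tag_id'),
                          ('tag_inheritance', 'parent_id')]:
        revoke(table, column, tag_id)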
_tagDelete('tag_extra', tagID) _tagDelete('tag_inheritance', tagID) _tagDelete('tag_inheritance', tagID, 'parent_id') @@ -3890,10 +3890,10 @@ def find_build_id(X, strict=False): AND build.release=%(release)s """ # contraints should ensure this is unique - #log_error(koji.db._quoteparams(q,data)) + # log_error(koji.db._quoteparams(q,data)) c.execute(q, data) r = c.fetchone() - #log_error("%r" % r ) + # log_error("%r" % r ) if not r: if strict: raise koji.GenericError('No matching build found: %r' % X) @@ -4060,7 +4060,7 @@ def _fix_rpm_row(row): row['extra'] = parse_json(row['extra'], desc='rpm extra') return row -#alias for now, may change in the future +# alias for now, may change in the future _fix_archive_row = _fix_rpm_row @@ -4138,7 +4138,7 @@ def get_rpm(rpminfo, strict=False, multi=False): data['external_repo_id'] = get_external_repo_id(data['location'], strict=True) clauses.append("""external_repo_id = %(external_repo_id)i""") elif not multi: - #try to match internal first, otherwise first matching external + # try to match internal first, otherwise first matching external retry = True #if no internal match orig_clauses = list(clauses) #copy clauses.append("""external_repo_id = 0""") @@ -4154,7 +4154,7 @@ def get_rpm(rpminfo, strict=False, multi=False): if ret: return ret if retry: - #at this point we have just an NVRA with no internal match. Open it up to externals + # at this point we have just an NVRA with no internal match. Open it up to externals query.clauses = orig_clauses ret = query.executeOne() if not ret: @@ -4339,7 +4339,7 @@ def get_build_type(buildInfo, strict=False): for (btype,) in query.execute(): ret[btype] = extra.get('typeinfo', {}).get(btype) - #deal with legacy types + # deal with legacy types l_funcs = [['maven', get_maven_build], ['win', get_win_build], ['image', get_image_build]] for ltype, func in l_funcs: @@ -4928,7 +4928,7 @@ def _singleRow(query, values, fields, strict=False): if row: return dict(zip(fields, row)) else: - #strict enforced by _fetchSingle + # strict enforced by _fetchSingle return None def _singleValue(query, values=None, strict=True): @@ -5157,7 +5157,7 @@ def get_buildroot(buildrootID, strict=False): else: return None if len(result) > 1: - #this should be impossible + # this should be impossible raise koji.GenericError("More that one buildroot with id: %i" % buildrootID) return result[0] @@ -5247,7 +5247,7 @@ def _set_build_volume(binfo, volinfo, strict=True): if strict: raise koji.GenericError("Build %(nvr)s already on volume %(volume_name)s" % binfo) else: - #nothing to do + # nothing to do return state = koji.BUILD_STATES[binfo['state']] if state not in ['COMPLETE', 'DELETED']: @@ -5256,7 +5256,7 @@ def _set_build_volume(binfo, volinfo, strict=True): if not os.path.isdir(voldir): raise koji.GenericError("Directory entry missing for volume %(name)s" % volinfo) - #more sanity checks + # more sanity checks for check_vol in list_volumes(): check_binfo = binfo.copy() check_binfo['volume_id'] = check_vol['id'] @@ -5304,7 +5304,7 @@ def _set_build_volume(binfo, volinfo, strict=True): for olddir, newdir in dir_moves: koji.util.rmtree(olddir) - #Fourth, maintain a symlink if appropriate + # Fourth, maintain a symlink if appropriate if volinfo['name'] and volinfo['name'] != 'DEFAULT': base_vol = lookup_name('volume', 'DEFAULT', strict=True) base_binfo = binfo.copy() @@ -5431,7 +5431,7 @@ def new_build(data, strict=False): if 'pkg_id' in data: data['name'] = lookup_package(data['pkg_id'], strict=True)['name'] else: - #see if there's a package 
name + # see if there's a package name name = data.get('name') if not name: raise koji.GenericError("No name or package id provided for build") @@ -5450,7 +5450,7 @@ def new_build(data, strict=False): else: data['extra'] = None - #provide a few default values + # provide a few default values data.setdefault('state', koji.BUILD_STATES['COMPLETE']) data.setdefault('start_time', 'NOW') data.setdefault('completion_time', 'NOW') @@ -5459,7 +5459,7 @@ def new_build(data, strict=False): data.setdefault('task_id', None) data.setdefault('volume_id', 0) - #check for existing build + # check for existing build old_binfo = get_build(data) if old_binfo: if strict: @@ -5469,7 +5469,7 @@ def new_build(data, strict=False): return old_binfo['id'] koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=None, new=data['state'], info=data) - #insert the new data + # insert the new data insert_data = dslice(data, ['pkg_id', 'version', 'release', 'epoch', 'state', 'volume_id', 'task_id', 'owner', 'start_time', 'completion_time', 'source', 'extra']) if 'cg_id' in data: @@ -5479,7 +5479,7 @@ def new_build(data, strict=False): insert.execute() new_binfo = get_build(data['id'], strict=True) koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=None, new=data['state'], info=new_binfo) - #return build_id + # return build_id return data['id'] @@ -5490,7 +5490,7 @@ def recycle_build(old, data): if st_desc == 'BUILDING': # check to see if this is the controlling task if data['state'] == old['state'] and data.get('task_id', '') == old['task_id']: - #the controlling task must have restarted (and called initBuild again) + # the controlling task must have restarted (and called initBuild again) return raise koji.GenericError("Build already in progress (task %(task_id)d)" % old) @@ -5606,7 +5606,7 @@ def import_build(srpm, rpms, brmap=None, task_id=None, build_id=None, logs=None) koji.plugin.run_callbacks('preImport', type='build', srpm=srpm, rpms=rpms, brmap=brmap, task_id=task_id, build_id=build_id, build=None, logs=logs) uploadpath = koji.pathinfo.work() - #verify files exist + # verify files exist for relpath in [srpm] + rpms: fn = "%s/%s" % (uploadpath, relpath) if not os.path.exists(fn): @@ -5614,13 +5614,13 @@ def import_build(srpm, rpms, brmap=None, task_id=None, build_id=None, logs=None) rpms = check_noarch_rpms(uploadpath, rpms, logs=logs) - #verify buildroot ids from brmap + # verify buildroot ids from brmap found = {} for br_id in brmap.values(): if br_id in found: continue found[br_id] = 1 - #this will raise an exception if the buildroot id is invalid + # this will raise an exception if the buildroot id is invalid BuildRoot(br_id) # get build informaton @@ -5648,7 +5648,7 @@ def import_build(srpm, rpms, brmap=None, task_id=None, build_id=None, logs=None) binfo = get_build(build_id, strict=True) new_typed_build(binfo, 'rpm') else: - #build_id was passed in - sanity check + # build_id was passed in - sanity check binfo = get_build(build_id, strict=True) st_complete = koji.BUILD_STATES['COMPLETE'] st_old = binfo['state'] @@ -5659,7 +5659,7 @@ def import_build(srpm, rpms, brmap=None, task_id=None, build_id=None, logs=None) if binfo['state'] != koji.BUILD_STATES['BUILDING']: raise koji.GenericError("Unable to complete build: state is %s" \ % koji.BUILD_STATES[binfo['state']]) - #update build state + # update build state update = UpdateProcessor('build', clauses=['id=%(id)s'], values=binfo) update.set(state=st_complete) update.rawset(completion_time='NOW()') @@ -5697,21 +5697,21 @@ 
def import_rpm(fn, buildinfo=None, brootid=None, wrapper=False, fileinfo=None): if not os.path.exists(fn): raise koji.GenericError("no such file: %s" % fn) - #read rpm info + # read rpm info hdr = koji.get_rpm_header(fn) rpminfo = koji.get_header_fields(hdr, ['name', 'version', 'release', 'epoch', 'sourcepackage', 'arch', 'buildtime', 'sourcerpm']) if rpminfo['sourcepackage'] == 1: rpminfo['arch'] = "src" - #sanity check basename + # sanity check basename basename = os.path.basename(fn) expected = "%(name)s-%(version)s-%(release)s.%(arch)s.rpm" % rpminfo if basename != expected: raise koji.GenericError("bad filename: %s (expected %s)" % (basename, expected)) if buildinfo is None: - #figure it out for ourselves + # figure it out for ourselves if rpminfo['sourcepackage'] == 1: buildinfo = get_build(rpminfo, strict=False) if not buildinfo: @@ -5720,10 +5720,10 @@ def import_rpm(fn, buildinfo=None, brootid=None, wrapper=False, fileinfo=None): # we add the rpm build type below buildinfo = get_build(build_id, strict=True) else: - #figure it out from sourcerpm string + # figure it out from sourcerpm string buildinfo = get_build(koji.parse_NVRA(rpminfo['sourcerpm'])) if buildinfo is None: - #XXX - handle case where package is not a source rpm + # XXX - handle case where package is not a source rpm # and we still need to create a new build raise koji.GenericError('No matching build') state = koji.BUILD_STATES[buildinfo['state']] @@ -5733,8 +5733,8 @@ def import_rpm(fn, buildinfo=None, brootid=None, wrapper=False, fileinfo=None): elif not wrapper: # only enforce the srpm name matching the build for non-wrapper rpms srpmname = "%(name)s-%(version)s-%(release)s.src.rpm" % buildinfo - #either the sourcerpm field should match the build, or the filename - #itself (for the srpm) + # either the sourcerpm field should match the build, or the filename + # itself (for the srpm) if rpminfo['sourcepackage'] != 1: if rpminfo['sourcerpm'] != srpmname: raise koji.GenericError("srpm mismatch for %s: %s (expected %s)" \ @@ -5747,7 +5747,7 @@ def import_rpm(fn, buildinfo=None, brootid=None, wrapper=False, fileinfo=None): # harmless if build already has this type new_typed_build(buildinfo, 'rpm') - #add rpminfo entry + # add rpminfo entry rpminfo['id'] = _singleValue("""SELECT nextval('rpminfo_id_seq')""") rpminfo['build_id'] = buildinfo['id'] rpminfo['size'] = os.path.getsize(fn) @@ -5773,7 +5773,7 @@ def import_rpm(fn, buildinfo=None, brootid=None, wrapper=False, fileinfo=None): koji.plugin.run_callbacks('postImport', type='rpm', rpm=rpminfo, build=buildinfo, filepath=fn, fileinfo=fileinfo) - #extra fields for return + # extra fields for return rpminfo['build'] = buildinfo rpminfo['brootid'] = brootid return rpminfo @@ -5967,7 +5967,7 @@ class CG_Importer(object): raise koji.GenericError("Invalid metadata, cannot encode: %r" % metadata) return metadata if metadata is None: - #default to looking for uploaded file + # default to looking for uploaded file metadata = 'metadata.json' if not isinstance(metadata, six.string_types): raise koji.GenericError("Invalid metadata value: %r" % metadata) @@ -6238,7 +6238,7 @@ class CG_Importer(object): br = BuildRoot() br.cg_new(entry['brinfo']) - #buildroot components + # buildroot components br.setList(entry['rpmlist']) br.updateArchiveList(entry['archives']) @@ -6284,8 +6284,8 @@ class CG_Importer(object): if rinfo['payloadhash'] != comp['sigmd5']: # XXX - this is a temporary workaround until we can better track external refs logger.warning("IGNORING rpm component (md5 
mismatch): %r", comp) - #nvr = "%(name)s-%(version)s-%(release)s" % rinfo - #raise koji.GenericError("md5sum mismatch for %s: %s != %s" + # nvr = "%(name)s-%(version)s-%(release)s" % rinfo + # raise koji.GenericError("md5sum mismatch for %s: %s != %s" # % (nvr, comp['sigmd5'], rinfo['payloadhash'])) # TODO - should we check the signature field? return rinfo @@ -6305,7 +6305,7 @@ class CG_Importer(object): continue if archive['checksum'] == comp['checksum']: return archive - #else + # else logger.error("Failed to match archive %(filename)s (size %(filesize)s, sum %(checksum)s", comp) if type_mismatches: logger.error("Match failed with %i type mismatches", type_mismatches) @@ -6313,7 +6313,7 @@ class CG_Importer(object): # XXX - this is a temporary workaround until we can better track external refs logger.warning("IGNORING unmatched archive: %r", comp) return None - #raise koji.GenericError("No match: %(filename)s (size %(filesize)s, sum %(checksum)s" % comp) + # raise koji.GenericError("No match: %(filename)s (size %(filesize)s, sum %(checksum)s" % comp) def match_kojifile(self, comp): """Look up the file by archive id and sanity check the other data""" @@ -6520,7 +6520,7 @@ def add_external_rpm(rpminfo, external_repo, strict=True): if field not in rpminfo: raise koji.GenericError("%s field missing: %r" % (field, rpminfo)) if not isinstance(rpminfo[field], allowed): - #this will catch unwanted NULLs + # this will catch unwanted NULLs raise koji.GenericError("Invalid value for %s: %r" % (field, rpminfo[field])) # strip extra fields rpminfo = dslice(rpminfo, [x[0] for x in dtypes]) @@ -6762,7 +6762,7 @@ def get_archive_type(filename=None, type_name=None, type_id=None, strict=False): elif len(results) > 1: # this should never happen, and is a misconfiguration in the database raise koji.GenericError('multiple matches for file extension: %s' % ext) - #otherwise + # otherwise if strict: raise koji.GenericError('unsupported file extension: %s' % ext) else: @@ -7085,7 +7085,7 @@ def _generate_maven_metadata(mavendir): def add_rpm_sig(an_rpm, sighdr): """Store a signature header for an rpm""" - #calling function should perform permission checks, if applicable + # calling function should perform permission checks, if applicable rinfo = get_rpm(an_rpm, strict=True) if rinfo['external_repo_id']: raise koji.GenericError("Not an internal rpm: %s (from %s)" \ @@ -7116,7 +7116,7 @@ def add_rpm_sig(an_rpm, sighdr): raise koji.GenericError("wrong md5 for %s: %s" % (nvra, sigmd5)) if not sigkey: sigkey = '' - #we use the sigkey='' to represent unsigned in the db (so that uniqueness works) + # we use the sigkey='' to represent unsigned in the db (so that uniqueness works) else: sigkey = koji.get_sigpacket_key_id(sigkey) sighash = hashlib.md5(sighdr).hexdigest() @@ -7125,7 +7125,7 @@ def add_rpm_sig(an_rpm, sighdr): q = """SELECT sighash FROM rpmsigs WHERE rpm_id=%(rpm_id)i AND sigkey=%(sigkey)s""" rows = _fetchMulti(q, locals()) if rows: - #TODO[?] - if sighash is the same, handle more gracefully + # TODO[?] 
- if sighash is the same, handle more gracefully nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rinfo raise koji.GenericError("Signature already exists for package %s, key %s" % (nvra, sigkey)) koji.plugin.run_callbacks('preRPMSign', sigkey=sigkey, sighash=sighash, build=binfo, rpm=rinfo) @@ -7146,24 +7146,24 @@ def _scan_sighdr(sighdr, fn): raise koji.GenericError("No such path: %s" % fn) if not os.path.isfile(fn): raise koji.GenericError("Not a regular file: %s" % fn) - #XXX should probably add an option to splice_rpm_sighdr to handle this instead + # XXX should probably add an option to splice_rpm_sighdr to handle this instead sig_start, sigsize = koji.find_rpm_sighdr(fn) hdr_start = sig_start + sigsize hdrsize = koji.rpm_hdr_size(fn, hdr_start) inp = open(fn, 'rb') outp = tempfile.TemporaryFile(mode='w+b') - #before signature + # before signature outp.write(inp.read(sig_start)) - #signature + # signature outp.write(sighdr) inp.seek(sigsize, 1) - #main header + # main header outp.write(inp.read(hdrsize)) inp.close() outp.seek(0, 0) ts = rpm.TransactionSet() ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS) - #(we have no payload, so verifies would fail otherwise) + # (we have no payload, so verifies would fail otherwise) hdr = ts.hdrFromFdno(outp.fileno()) outp.close() sig = koji.get_header_field(hdr, 'siggpg') @@ -7172,7 +7172,7 @@ def _scan_sighdr(sighdr, fn): return koji.get_header_field(hdr, 'sigmd5'), sig def check_rpm_sig(an_rpm, sigkey, sighdr): - #verify that the provided signature header matches the key and rpm + # verify that the provided signature header matches the key and rpm rinfo = get_rpm(an_rpm, strict=True) binfo = get_build(rinfo['build_id']) builddir = koji.pathinfo.build(binfo) @@ -7234,7 +7234,7 @@ def write_signed_rpm(an_rpm, sigkey, force=False): raise koji.GenericError("No such path: %s" % rpm_path) if not os.path.isfile(rpm_path): raise koji.GenericError("Not a regular file: %s" % rpm_path) - #make sure we have it in the db + # make sure we have it in the db rpm_id = rinfo['id'] q = """SELECT sighash FROM rpmsigs WHERE rpm_id=%(rpm_id)i AND sigkey=%(sigkey)s""" row = _fetchSingle(q, locals()) @@ -7244,7 +7244,7 @@ def write_signed_rpm(an_rpm, sigkey, force=False): signedpath = "%s/%s" % (builddir, koji.pathinfo.signed(rinfo, sigkey)) if os.path.exists(signedpath): if not force: - #already present + # already present return else: os.unlink(signedpath) @@ -7289,7 +7289,7 @@ def query_history(tables=None, **kwargs): cg: only relating to a content generator """ common_fields = { - #fields:aliases common to all versioned tables + # fields:aliases common to all versioned tables 'active': 'active', 'create_event': 'create_event', 'revoke_event': 'revoke_event', @@ -7329,12 +7329,12 @@ def query_history(tables=None, **kwargs): 'group_package_listing': ['group_id', 'tag_id', 'package', 'blocked', 'type', 'basearchonly', 'requires'], } name_joins = { - #joins triggered by table fields for name lookup - #field : [table, join-alias, alias] + # joins triggered by table fields for name lookup + # field : [table, join-alias, alias] 'user_id': ['users', 'users', 'user'], 'perm_id': ['permissions', 'permission'], 'cg_id': ['content_generator'], - #group_id is overloaded (special case below) + # group_id is overloaded (special case below) 'tag_id': ['tag'], 'host_id': ['host'], 'channel_id': ['channels'], @@ -7374,7 +7374,7 @@ def query_history(tables=None, **kwargs): joined[tbl] = join_as fullname = "%s.name" % join_as if len(name_join) > 2: - #apply alias + # 
apply alias fields[fullname] = "%s.name" % name_join[2] else: fields[fullname] = fullname @@ -7383,7 +7383,7 @@ def query_history(tables=None, **kwargs): else: joins.append('LEFT OUTER JOIN %s AS %s ON %s = %s.id' % (tbl, join_as, field, join_as)) elif field == 'build_id': - #special case + # special case fields.update({ 'package.name': 'name', #XXX? 'build.version': 'version', @@ -7417,7 +7417,7 @@ def query_history(tables=None, **kwargs): break data['tag_id'] = get_tag_id(value, strict=True) if table == 'tag_inheritance': - #special cased because there are two tag columns + # special cased because there are two tag columns clauses.append("tag_id = %(tag_id)i OR parent_id = %(tag_id)i") else: clauses.append("%s.id = %%(tag_id)i" % joined['tag']) @@ -7504,7 +7504,7 @@ def query_history(tables=None, **kwargs): clauses.append('ev1.time > %(after)s OR ev2.time > %(after)s') fields['ev1.time > %(after)s'] = '_created_after' fields['ev2.time > %(after)s'] = '_revoked_after' - #clauses.append('EXTRACT(EPOCH FROM ev1.time) > %(after)s OR EXTRACT(EPOCH FROM ev2.time) > %(after)s') + # clauses.append('EXTRACT(EPOCH FROM ev1.time) > %(after)s OR EXTRACT(EPOCH FROM ev2.time) > %(after)s') elif arg == 'afterEvent': data['afterEvent'] = value c_test = '%s.create_event > %%(afterEvent)i' % table @@ -7517,7 +7517,7 @@ def query_history(tables=None, **kwargs): value = datetime.datetime.fromtimestamp(value).isoformat(' ') data['before'] = value clauses.append('ev1.time < %(before)s OR ev2.time < %(before)s') - #clauses.append('EXTRACT(EPOCH FROM ev1.time) < %(before)s OR EXTRACT(EPOCH FROM ev2.time) < %(before)s') + # clauses.append('EXTRACT(EPOCH FROM ev1.time) < %(before)s OR EXTRACT(EPOCH FROM ev2.time) < %(before)s') fields['ev1.time < %(before)s'] = '_created_before' fields['ev2.time < %(before)s'] = '_revoked_before' elif arg == 'beforeEvent': @@ -7605,13 +7605,13 @@ def untagged_builds(name=None, queryOpts=None): joins.append("""LEFT OUTER JOIN tag_listing ON tag_listing.build_id = build.id AND tag_listing.active = TRUE""") clauses = ["tag_listing.tag_id IS NULL", "build.state = %(st_complete)i"] - #q = """SELECT build.id, package.name, build.version, build.release - #FROM build + # q = """SELECT build.id, package.name, build.version, build.release + # FROM build # JOIN package on package.id = build.pkg_id # LEFT OUTER JOIN tag_listing ON tag_listing.build_id = build.id # AND tag_listing.active IS TRUE - #WHERE tag_listing.tag_id IS NULL AND build.state = %(st_complete)i""" - #return _multiRow(q, locals(), aliases) + # WHERE tag_listing.tag_id IS NULL AND build.state = %(st_complete)i""" + # return _multiRow(q, locals(), aliases) query = QueryProcessor(columns=fields, aliases=aliases, tables=tables, joins=joins, clauses=clauses, values=locals(), opts=queryOpts) @@ -7640,7 +7640,7 @@ def build_references(build_id, limit=None, lazy=False): if lazy and ret['tags']: return ret - #we'll need the component rpm and archive ids for the rest + # we'll need the component rpm and archive ids for the rest q = """SELECT id FROM rpminfo WHERE build_id=%(build_id)i""" build_rpm_ids = _fetchMulti(q, locals()) q = """SELECT id FROM archiveinfo WHERE build_id=%(build_id)i""" @@ -7869,7 +7869,7 @@ def reset_build(build): context.session.assertPerm('admin') binfo = get_build(build) if not binfo: - #nothing to do + # nothing to do return st_old = binfo['state'] koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old, new=koji.BUILD_STATES['CANCELED'], info=binfo) @@ -8045,7 +8045,7 @@ def 
get_notification_recipients(build, tag_id, state): 'user_id': owner['id'], 'email': '%s@%s' % (owner['name'], email_domain) }) - #FIXME - if tag_id is None, we don't have a good way to get the package owner. + # FIXME - if tag_id is None, we don't have a good way to get the package owner. # using all package owners from all tags would be way overkill. if not recipients: @@ -8156,7 +8156,7 @@ def add_group_member(group, user, strict=True): raise koji.GenericError("Not an user: %s" % user) if uinfo['usertype'] == koji.USERTYPES['GROUP']: raise koji.GenericError("Groups cannot be members of other groups") - #check to see if user is already a member + # check to see if user is already a member data = {'user_id' : uinfo['id'], 'group_id' : ginfo['id']} table = 'user_groups' clauses = ('user_id = %(user_id)i', 'group_id = %(group_id)s') @@ -8945,8 +8945,8 @@ def policy_get_pkg(data): if 'package' in data: pkginfo = lookup_package(data['package'], strict=False) if not pkginfo: - #for some operations (e.g. adding a new package), the package - #entry may not exist yet + # for some operations (e.g. adding a new package), the package + # entry may not exist yet if isinstance(data['package'], six.string_types): return {'id' : None, 'name' : data['package']} else: @@ -8955,7 +8955,7 @@ def policy_get_pkg(data): if 'build' in data: binfo = get_build(data['build'], strict=True) return {'id' : binfo['package_id'], 'name' : binfo['name']} - #else + # else raise koji.GenericError("policy requires package data") @@ -8968,7 +8968,7 @@ def policy_get_version(data): return data['version'] if 'build' in data: return get_build(data['build'], strict=True)['version'] - #else + # else raise koji.GenericError("policy requires version data") @@ -8981,7 +8981,7 @@ def policy_get_release(data): return data['release'] if 'build' in data: return get_build(data['build'], strict=True)['release'] - #else + # else raise koji.GenericError("policy requires release data") @@ -9053,7 +9053,7 @@ class PackageTest(koji.policy.MatchTest): name = 'package' field = '_package' def run(self, data): - #we need to find the package name from the base data + # we need to find the package name from the base data data[self.field] = policy_get_pkg(data)['name'] return super(PackageTest, self).run(data) @@ -9072,7 +9072,7 @@ class ReleaseTest(koji.policy.MatchTest): name = 'release' field = '_release' def run(self, data): - #we need to find the build NVR from the base data + # we need to find the build NVR from the base data data[self.field] = policy_get_release(data) return super(ReleaseTest, self).run(data) @@ -9082,7 +9082,7 @@ class VolumeTest(koji.policy.MatchTest): name = 'volume' field = '_volume' def run(self, data): - #we need to find the volume name from the base data + # we need to find the volume name from the base data volinfo = None if 'volume' in data: volinfo = lookup_name('volume', data['volume'], strict=False) @@ -9105,7 +9105,7 @@ class CGMatchAnyTest(koji.policy.BaseSimpleTest): name = 'cg_match_any' def run(self, data): - #we need to find the volume name from the base data + # we need to find the volume name from the base data cgs = policy_get_cgs(data) patterns = self.str.split()[1:] for cg_name in cgs: @@ -9128,7 +9128,7 @@ class CGMatchAllTest(koji.policy.BaseSimpleTest): name = 'cg_match_all' def run(self, data): - #we need to find the volume name from the base data + # we need to find the volume name from the base data cgs = policy_get_cgs(data) if not cgs: return False @@ -9157,7 +9157,7 @@ class 
TagTest(koji.policy.MatchTest): return get_tag(tag, strict=False) def run(self, data): - #we need to find the tag name from the base data + # we need to find the tag name from the base data tinfo = self.get_tag(data) if tinfo is None: return False @@ -9179,13 +9179,13 @@ class HasTagTest(koji.policy.BaseSimpleTest): if 'build' not in data: return False tags = list_tags(build=data['build']) - #True if any of these tags match any of the patterns + # True if any of these tags match any of the patterns args = self.str.split()[1:] for tag in tags: for pattern in args: if fnmatch.fnmatch(tag['name'], pattern): return True - #otherwise... + # otherwise... return False class SkipTagTest(koji.policy.BaseSimpleTest): @@ -9213,7 +9213,7 @@ class BuildTagTest(koji.policy.BaseSimpleTest): continue if multi_fnmatch(tagname, args): return True - #otherwise... + # otherwise... return False @@ -9289,7 +9289,7 @@ class IsBuildOwnerTest(koji.policy.BaseSimpleTest): # owner is a group, check to see if user is a member if owner['id'] in koji.auth.get_user_groups(user['id']): return True - #otherwise... + # otherwise... return False class UserInGroupTest(koji.policy.BaseSimpleTest): @@ -9309,7 +9309,7 @@ class UserInGroupTest(koji.policy.BaseSimpleTest): for pattern in args: if fnmatch.fnmatch(group, pattern): return True - #otherwise... + # otherwise... return False class HasPermTest(koji.policy.BaseSimpleTest): @@ -9329,7 +9329,7 @@ class HasPermTest(koji.policy.BaseSimpleTest): for pattern in args: if fnmatch.fnmatch(perm, pattern): return True - #otherwise... + # otherwise... return False class SourceTest(koji.policy.MatchTest): @@ -9351,11 +9351,11 @@ class SourceTest(koji.policy.MatchTest): # no source to match against return False else: - #crack open the build task + # crack open the build task task = Task(build['task_id']) info = task.getInfo() params = task.getRequest() - #signatures: + # signatures: # build - (src, target, opts=None) # maven - (url, target, opts=None) # winbuild - (name, source_url, target, opts=None) @@ -9391,7 +9391,7 @@ class PolicyTest(koji.policy.BaseSimpleTest): def run(self, data): args = self.str.split()[1:] if self.depth != 0: - #LOOP! + # LOOP! raise koji.GenericError("encountered policy loop at %s" % self.str) ruleset = context.policy.get(args[0]) if not ruleset: @@ -9914,18 +9914,18 @@ class RootExports(object): return _singleRow(q, values, fields, strict=True) def makeTask(self, *args, **opts): - #this is mainly for debugging - #only an admin can make arbitrary tasks + # this is mainly for debugging + # only an admin can make arbitrary tasks context.session.assertPerm('admin') return make_task(*args, **opts) def uploadFile(self, path, name, size, md5sum, offset, data, volume=None): - #path: the relative path to upload to - #name: the name of the file - #size: size of contents (bytes) - #md5: md5sum (hex digest) of contents - #data: base64 encoded file contents - #offset: the offset of the chunk + # path: the relative path to upload to + # name: the name of the file + # size: size of contents (bytes) + # md5: md5sum (hex digest) of contents + # data: base64 encoded file contents + # offset: the offset of the chunk # files can be uploaded in chunks, if so the md5 and size describe # the chunk rather than the whole file. 
the offset indicates where # the chunk belongs @@ -9963,7 +9963,7 @@ class RootExports(object): if not stat.S_ISREG(st.st_mode): raise koji.GenericError("destination not a file: %s" % fn) elif offset == 0: - #first chunk, so file should not exist yet + # first chunk, so file should not exist yet if not fn.endswith('.log'): # but we allow .log files to be uploaded multiple times to support # realtime log-file viewing @@ -9972,7 +9972,7 @@ class RootExports(object): # log_error("fd=%r" %fd) try: if offset == 0 or (offset == -1 and size == len(contents)): - #truncate file + # truncate file fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB) try: os.ftruncate(fd, 0) @@ -9983,7 +9983,7 @@ class RootExports(object): os.lseek(fd, 0, 2) else: os.lseek(fd, offset, 0) - #write contents + # write contents fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB, len(contents), 0, 2) try: os.write(fd, contents) @@ -9992,7 +9992,7 @@ class RootExports(object): fcntl.lockf(fd, fcntl.LOCK_UN, len(contents), 0, 2) if offset == -1: if size is not None: - #truncate file + # truncate file fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB) try: os.ftruncate(fd, size) @@ -10000,7 +10000,7 @@ class RootExports(object): finally: fcntl.lockf(fd, fcntl.LOCK_UN) if verify is not None: - #check final digest + # check final digest chksum = sum_cls() fcntl.lockf(fd, fcntl.LOCK_SH|fcntl.LOCK_NB) try: @@ -10323,7 +10323,7 @@ class RootExports(object): The return value is the task id """ context.session.assertLogin() - #first some lookups and basic sanity checks + # first some lookups and basic sanity checks build = get_build(build, strict=True) tag = get_tag(tag, strict=True) if fromtag: @@ -10359,11 +10359,11 @@ class RootExports(object): policy_data['operation'] = 'tag' else: policy_data['operation'] = 'move' - #don't check policy for admins using force + # don't check policy for admins using force if not (force and context.session.hasPerm('admin')): assert_policy('tag', policy_data) - #XXX - we're running this check twice, here and in host.tagBuild (called by the task) - #spawn the tagging task + # XXX - we're running this check twice, here and in host.tagBuild (called by the task) + # spawn the tagging task return make_task('tagBuild', [tag_id, build_id, force, fromtag_id], priority=10) def untagBuild(self, tag, build, strict=True, force=False): @@ -10371,7 +10371,7 @@ class RootExports(object): Unlike tagBuild, this does not create a task No return value""" - #we can't staticmethod this one -- we're limiting the options + # we can't staticmethod this one -- we're limiting the options context.session.assertLogin() user_id = context.session.user_id tag_id = get_tag(tag, strict=True)['id'] @@ -10379,7 +10379,7 @@ class RootExports(object): policy_data = {'tag' : None, 'build' : build_id, 'fromtag' : tag_id} policy_data['operation'] = 'untag' try: - #don't check policy for admins using force + # don't check policy for admins using force if not (force and context.session.hasPerm('admin')): assert_policy('tag', policy_data) _untag_build(tag, build, strict=strict, force=force) @@ -10420,7 +10420,7 @@ class RootExports(object): Returns the task id of the task performing the move""" context.session.assertLogin() - #lookups and basic sanity checks + # lookups and basic sanity checks pkg_id = get_package_id(package, strict=True) tag1_id = get_tag_id(tag1, strict=True) tag2_id = get_tag_id(tag2, strict=True) @@ -10440,7 +10440,7 @@ class RootExports(object): else: raise koji.TagError(pkg_error) - #access check + # access check 
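# Aside: the access check that follows covers both the source and the
# destination tag, and the policy check below it runs once per build, so a
# single refused build aborts the whole move before any task is spawned. A
# compressed sketch of that gate (not from the koji source), with
# hypothetical helper callables:
def may_move_all(user, tag1, tag2, builds, tag_allowed, policy_allows):
    if not (tag_allowed(user, tag1) and tag_allowed(user, tag2)):
        return False
    # per-build policy: any refusal vetoes the batch
    return all(policy_allows(user, tag2, tag1, build) for build in builds)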
assert_tag_access(tag1_id, user_id=None, force=force) assert_tag_access(tag2_id, user_id=None, force=force) @@ -10448,14 +10448,14 @@ class RootExports(object): # we want 'ORDER BY tag_listing.create_event ASC' not DESC so reverse build_list.reverse() - #policy check + # policy check policy_data = {'tag' : tag2, 'fromtag' : tag1, 'operation' : 'move'} - #don't check policy for admins using force + # don't check policy for admins using force if not (force and context.session.hasPerm('admin')): for build in build_list: policy_data['build'] = build['id'] assert_policy('tag', policy_data) - #XXX - we're running this check twice, here and in host.tagBuild (called by the task) + # XXX - we're running this check twice, here and in host.tagBuild (called by the task) wait_on = [] tasklist = [] @@ -10629,13 +10629,13 @@ class RootExports(object): if not task.verifyOwner() and not task.verifyHost(): if not context.session.hasPerm('admin'): raise koji.ActionNotAllowed('Cannot cancel task, not owner') - #non-admins can also use cancelBuild + # non-admins can also use cancelBuild task.cancel(recurse=recurse) def cancelTaskFull(self, task_id, strict=True): """Cancel a task and all tasks in its group""" context.session.assertPerm('admin') - #non-admins can use cancelBuild or cancelTask + # non-admins can use cancelBuild or cancelTask Task(task_id).cancelFull(strict=strict) def cancelTaskChildren(self, task_id): @@ -10657,7 +10657,7 @@ class RootExports(object): def listTagged(self, tag, event=None, inherit=False, prefix=None, latest=False, package=None, owner=None, type=None): """List builds tagged with tag""" - #lookup tag id + # lookup tag id tag = get_tag(tag, strict=True, event=event)['id'] results = readTaggedBuilds(tag, event, inherit=inherit, latest=latest, package=package, owner=owner, type=type) if prefix: @@ -10667,7 +10667,7 @@ class RootExports(object): def listTaggedRPMS(self, tag, event=None, inherit=False, latest=False, package=None, arch=None, rpmsigs=False, owner=None, type=None): """List rpms and builds within tag""" - #lookup tag id + # lookup tag id tag = get_tag(tag, strict=True, event=event)['id'] return readTaggedRPMS(tag, event=event, inherit=inherit, latest=latest, package=package, arch=arch, rpmsigs=rpmsigs, owner=owner, type=type) @@ -10853,14 +10853,14 @@ class RootExports(object): def getLatestBuilds(self, tag, event=None, package=None, type=None): """List latest builds for tag (inheritance enabled)""" if not isinstance(tag, six.integer_types): - #lookup tag id + # lookup tag id tag = get_tag_id(tag, strict=True) return readTaggedBuilds(tag, event, inherit=True, latest=True, package=package, type=type) def getLatestRPMS(self, tag, package=None, arch=None, event=None, rpmsigs=False, type=None): """List latest RPMS for tag (inheritance enabled)""" if not isinstance(tag, six.integer_types): - #lookup tag id + # lookup tag id tag = get_tag_id(tag, strict=True) return readTaggedRPMS(tag, package=package, arch=arch, event=event, inherit=True, latest=True, rpmsigs=rpmsigs, type=type) @@ -10950,7 +10950,7 @@ class RootExports(object): if jumps is None: jumps = {} if not isinstance(tag, six.integer_types): - #lookup tag id + # lookup tag id tag = get_tag_id(tag, strict=True) for mapping in [stops, jumps]: for key in to_list(mapping.keys()): @@ -10977,7 +10977,7 @@ class RootExports(object): If no build has the given ID, or the build generated no RPMs, an empty list is returned.""" if not isinstance(build, six.integer_types): - #lookup build id + # lookup build id build = 
self.findBuildID(build, strict=True) return self.listRPMs(buildID=build) @@ -11170,7 +11170,7 @@ class RootExports(object): def writeSignedRPM(self, an_rpm, sigkey, force=False): """Write a signed copy of the rpm""" context.session.assertPerm('sign') - #XXX - still not sure if this is the right restriction + # XXX - still not sure if this is the right restriction return write_signed_rpm(an_rpm, sigkey, force) def addRPMSig(self, an_rpm, data): @@ -11287,7 +11287,7 @@ class RootExports(object): if pkg_id not in pkgs: return False else: - #still might be blocked + # still might be blocked return not pkgs[pkg_id]['blocked'] def getPackageConfig(self, tag, pkg, event=None): @@ -11380,7 +11380,7 @@ class RootExports(object): grantCGAccess = staticmethod(grant_cg_access) revokeCGAccess = staticmethod(revoke_cg_access) - #group management calls + # group management calls newGroup = staticmethod(new_group) addGroupMember = staticmethod(add_group_member) dropGroupMember = staticmethod(drop_group_member) @@ -11423,7 +11423,7 @@ class RootExports(object): """Return build configuration associated with a tag""" taginfo = get_tag(tag, strict=True, event=event) order = readFullInheritance(taginfo['id'], event=event) - #follow inheritance for arches and extra + # follow inheritance for arches and extra for link in order: if link['noconfig']: continue @@ -11742,7 +11742,7 @@ class RootExports(object): if val: try: if val.find('//task_ scratchdir = koji.pathinfo.scratch() username = get_user(task.getOwner())['name'] @@ -13168,10 +13168,10 @@ class HostExports(object): """ host = Host() host.verify() - #sanity checks + # sanity checks task = Task(data['task_id']) task.assertHost(host.id) - #prep the data + # prep the data data['owner'] = task.getOwner() data['state'] = koji.BUILD_STATES['BUILDING'] data['completion_time'] = None @@ -13182,7 +13182,7 @@ class HostExports(object): def completeBuild(self, task_id, build_id, srpm, rpms, brmap=None, logs=None): """Import final build contents into the database""" - #sanity checks + # sanity checks host = Host() host.verify() task = Task(task_id) @@ -13424,7 +13424,7 @@ class HostExports(object): raise koji.GenericError('Windows support not enabled') host = Host() host.verify() - #sanity checks + # sanity checks task = Task(task_id) task.assertHost(host.id) # build_info must contain name, version, and release @@ -13863,22 +13863,22 @@ class HostExports(object): repo_expire(repo_id) koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire) return - #else: + # else: repo_ready(repo_id) repo_expire_older(rinfo['tag_id'], rinfo['create_event'], rinfo['dist']) - #make a latest link + # make a latest link if rinfo['dist']: latestrepolink = koji.pathinfo.distrepo('latest', rinfo['tag_name']) else: latestrepolink = koji.pathinfo.repo('latest', rinfo['tag_name']) - #XXX - this is a slight abuse of pathinfo + # XXX - this is a slight abuse of pathinfo try: if os.path.lexists(latestrepolink): os.unlink(latestrepolink) os.symlink(str(repo_id), latestrepolink) except OSError: - #making this link is nonessential + # making this link is nonessential log_error("Unable to create latest link for repo: %s" % repodir) koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire) @@ -14061,7 +14061,7 @@ def handle_upload(environ): if not context.session.logged_in: raise koji.ActionNotAllowed('you must be logged-in to upload a file') args = parse_qs(environ.get('QUERY_STRING', ''), strict_parsing=True) - #XXX - already parsed by auth + # XXX - 
already parsed by auth name = args['filename'][0] path = args.get('filepath', ('',))[0] verify = args.get('fileverify', ('',))[0] diff --git a/hub/kojixmlrpc.py b/hub/kojixmlrpc.py index 8515a1ae..28ebee4f 100644 --- a/hub/kojixmlrpc.py +++ b/hub/kojixmlrpc.py @@ -64,7 +64,7 @@ class HandlerRegistry(object): def __init__(self): self.funcs = {} - #introspection functions + # introspection functions self.register_function(self.list_api, name="_listapi") self.register_function(self.system_listMethods, name="system.listMethods") self.register_function(self.system_methodSignature, name="system.methodSignature") @@ -106,7 +106,7 @@ class HandlerRegistry(object): """ for v in six.itervalues(vars(plugin)): if isinstance(v, type): - #skip classes + # skip classes continue if callable(v): if getattr(v, 'exported', False): @@ -138,8 +138,8 @@ class HandlerRegistry(object): def list_api(self): funcs = [] for name, func in self.funcs.items(): - #the keys in self.funcs determine the name of the method as seen over xmlrpc - #func.__name__ might differ (e.g. for dotted method names) + # the keys in self.funcs determine the name of the method as seen over xmlrpc + # func.__name__ might differ (e.g. for dotted method names) args = self._getFuncArgs(func) argspec = self.getargspec(func) funcs.append({'name': name, @@ -164,7 +164,7 @@ class HandlerRegistry(object): return koji.util.to_list(self.funcs.keys()) def system_methodSignature(self, method): - #it is not possible to autogenerate this data + # it is not possible to autogenerate this data return 'signatures not supported' def system_methodHelp(self, method): @@ -268,7 +268,7 @@ class ModXMLRPCRequestHandler(object): return response def handle_upload(self, environ): - #uploads can't be in a multicall + # uploads can't be in a multicall context.method = None self.check_session() self.enforce_lockout() @@ -280,13 +280,13 @@ class ModXMLRPCRequestHandler(object): def check_session(self): if not hasattr(context, "session"): - #we may be called again by one of our meta-calls (like multiCall) - #so we should only create a session if one does not already exist + # we may be called again by one of our meta-calls (like multiCall) + # so we should only create a session if one does not already exist context.session = koji.auth.Session() try: context.session.validate() except koji.AuthLockError: - #might be ok, depending on method + # might be ok, depending on method if context.method not in ('exclusiveSession', 'login', 'krbLogin', 'logout'): raise @@ -359,7 +359,7 @@ class ModXMLRPCRequestHandler(object): """Handle a single XML-RPC request""" pass - #XXX no longer used + # XXX no longer used def offline_reply(start_response, msg=None): @@ -395,13 +395,13 @@ def load_config(environ): - all PythonOptions (except ConfigFile) are now deprecated and support for them will disappear in a future version of Koji """ - #get our config file(s) + # get our config file(s) cf = environ.get('koji.hub.ConfigFile', '/etc/koji-hub/hub.conf') cfdir = environ.get('koji.hub.ConfigDir', '/etc/koji-hub/hub.conf.d') config = koji.read_config_files([cfdir, (cf, True)], raw=True) cfgmap = [ - #option, type, default + # option, type, default ['DBName', 'string', None], ['DBUser', 'string', None], ['DBHost', 'string', None], @@ -479,7 +479,7 @@ def load_config(environ): # load policies # (only from config file) if config and config.has_section('policy'): - #for the moment, we simply transfer the policy conf to opts + # for the moment, we simply transfer the policy conf to opts opts['policy'] 
= dict(config.items('policy')) else: opts['policy'] = {} @@ -504,7 +504,7 @@ def load_plugins(opts): tracker.load(name) except Exception: logger.error(''.join(traceback.format_exception(*sys.exc_info()))) - #make this non-fatal, but set ServerOffline + # make this non-fatal, but set ServerOffline opts['ServerOffline'] = True opts['OfflineMessage'] = 'configuration error' return tracker @@ -542,7 +542,7 @@ _default_policies = { def get_policy(opts, plugins): if not opts.get('policy'): return - #first find available policy tests + # first find available policy tests alltests = [koji.policy.findSimpleTests([vars(kojihub), vars(koji.policy)])] # we delay merging these to allow a test to be overridden for a specific policy for plugin_name in opts.get('Plugins', '').split(): @@ -552,7 +552,7 @@ def get_policy(opts, plugins): alltests.append(koji.policy.findSimpleTests(vars(plugin))) policy = {} for pname, text in six.iteritems(opts['policy']): - #filter/merge tests + # filter/merge tests merged = {} for tests in alltests: # tests can be limited to certain policies by setting a class variable @@ -598,7 +598,7 @@ def setup_logging1(): global log_handler logger = logging.getLogger("koji") logger.setLevel(logging.WARNING) - #stderr logging (stderr goes to httpd logs) + # stderr logging (stderr goes to httpd logs) log_handler = logging.StreamHandler() log_format = '%(asctime)s [%(levelname)s] SETUP p=%(process)s %(name)s: %(message)s' log_handler.setFormatter(HubFormatter(log_format)) @@ -608,7 +608,7 @@ def setup_logging1(): def setup_logging2(opts): global log_handler """Adjust logging based on configuration options""" - #determine log level + # determine log level level = opts['LogLevel'] valid_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL') # the config value can be a single level name or a series of @@ -624,7 +624,7 @@ def setup_logging2(opts): default = level if level not in valid_levels: raise koji.GenericError("Invalid log level: %s" % level) - #all our loggers start with koji + # all our loggers start with koji if name == '': name = 'koji' default = level @@ -639,9 +639,9 @@ def setup_logging2(opts): if opts.get('KojiDebug'): logger.setLevel(logging.DEBUG) elif default is None: - #LogLevel did not configure a default level + # LogLevel did not configure a default level logger.setLevel(logging.WARNING) - #log_handler defined in setup_logging1 + # log_handler defined in setup_logging1 log_handler.setFormatter(HubFormatter(opts['LogFormat'])) @@ -746,7 +746,7 @@ def application(environ, start_response): ] start_response('200 OK', headers) if h.traceback: - #rollback + # rollback context.cnx.rollback() elif context.commit_pending: # Currently there is not much data we can provide to the @@ -764,7 +764,7 @@ def application(environ, start_response): h.logger.debug("Returning %d bytes after %f seconds", len(response), time.time() - start) finally: - #make sure context gets cleaned up + # make sure context gets cleaned up if hasattr(context, 'cnx'): try: context.cnx.close() diff --git a/koji/__init__.py b/koji/__init__.py index 16720f57..8c3a677f 100644 --- a/koji/__init__.py +++ b/koji/__init__.py @@ -128,7 +128,7 @@ for h in ( 'RECOMMENDNAME', 'RECOMMENDVERSION', 'RECOMMENDFLAGS'): SUPPORTED_OPT_DEP_HDRS[h] = hasattr(rpm, 'RPMTAG_%s' % h) -## BEGIN kojikamid dup +# BEGIN kojikamid dup # class Enum(dict): """A simple class to track our enumerated constants @@ -167,7 +167,7 @@ class Enum(dict): # deprecated getvalue = _notImplemented - #read-only + # read-only __setitem__ = 
_notImplemented __delitem__ = _notImplemented clear = _notImplemented @@ -176,7 +176,7 @@ update = _notImplemented setdefault = _notImplemented -## END kojikamid dup +# END kojikamid dup # API_VERSION = 1 @@ -215,7 +215,7 @@ AUTHTYPE_KERB = 1 AUTHTYPE_SSL = 2 AUTHTYPE_GSSAPI = 3 -#dependency types +# dependency types DEP_REQUIRE = 0 DEP_PROVIDE = 1 DEP_OBSOLETE = 2 @@ -225,7 +225,7 @@ DEP_ENHANCE = 5 DEP_SUPPLEMENT = 6 DEP_RECOMMEND = 7 -#dependency flags +# dependency flags RPMSENSE_LESS = 2 RPMSENSE_GREATER = 4 RPMSENSE_EQUAL = 8 @@ -266,7 +266,7 @@ TAG_UPDATE_TYPES = Enum(( 'MANUAL', )) -## BEGIN kojikamid dup +# BEGIN kojikamid dup # CHECKSUM_TYPES = Enum(( 'md5', @@ -274,9 +274,9 @@ CHECKSUM_TYPES = Enum(( 'sha256', )) -## END kojikamid dup +# END kojikamid dup # -#PARAMETERS +# PARAMETERS BASEDIR = '/mnt/koji' # default task priority PRIO_DEFAULT = 20 @@ -285,9 +285,9 @@ PRIO_DEFAULT = 20 DEFAULT_REQUEST_TIMEOUT = 60 * 60 * 12 DEFAULT_AUTH_TIMEOUT = 60 -## BEGIN kojikamid dup +# BEGIN kojikamid dup # -#Exceptions +# Exceptions PythonImportError = ImportError # will be masked by koji's one class GenericError(Exception): @@ -302,7 +302,7 @@ class GenericError(Exception): return str(self.args[0]) except: return str(self.__dict__) -## END kojikamid dup +# END kojikamid dup # class LockError(GenericError): """Raised when there is a lock conflict""" @@ -320,12 +320,12 @@ class ActionNotAllowed(GenericError): """Raised when the session does not have permission to take some action""" faultCode = 1004 -## BEGIN kojikamid dup +# BEGIN kojikamid dup # class BuildError(GenericError): """Raised when a build fails""" faultCode = 1005 -## END kojikamid dup +# END kojikamid dup # class AuthLockError(AuthError): """Raised when a lock prevents authentication""" @@ -403,7 +403,7 @@ class MultiCallInProgress(object): pass -#A function to get create an exception +# A function to create an exception def convertFault(fault): """Convert a fault to the corresponding Exception type, if possible""" code = getattr(fault, 'faultCode', None) @@ -415,7 +415,7 @@ ret = v(fault.faultString) ret.fromFault = True return ret - #otherwise... + # otherwise... return fault def listFaults(): @@ -440,7 +440,7 @@ ret.sort(key=lambda x: x['faultCode']) return ret -#functions for encoding/decoding optional arguments +# functions for encoding/decoding optional arguments def encode_args(*args, **opts): """The function encodes optional arguments as regular arguments.
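
The faultCode attributes above exist so that convertFault can rebuild a typed exception from an XML-RPC fault. A stripped-down model of that round trip, with local stand-in classes; koji's real lookup scans the module namespace for GenericError subclasses rather than using a hand-built table:

class GenericError(Exception):
    faultCode = 1000

class LockError(GenericError):
    faultCode = 1001

class AuthError(GenericError):
    faultCode = 1002

# index the known fault codes once
FAULT_CLASSES = {cls.faultCode: cls for cls in (GenericError, LockError, AuthError)}

def convert_fault(fault):
    """Map fault.faultCode back to an exception type; hand the fault back
    unchanged when the code is unknown, as convertFault does."""
    cls = FAULT_CLASSES.get(getattr(fault, 'faultCode', None))
    if cls is None:
        return fault
    err = cls(fault.faultString)
    err.fromFault = True
    return err
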
@@ -481,10 +481,10 @@ def decode_int(n): """If n is not an integer, attempt to convert it""" if isinstance(n, six.integer_types): return n - #else + # else return int(n) -#commonly used functions +# commonly used functions def safe_xmlrpc_loads(s): """Load xmlrpc data from a string, but catch faults""" @@ -493,7 +493,7 @@ except Fault as f: return f -## BEGIN kojikamid dup +# BEGIN kojikamid dup # def ensuredir(directory): @@ -528,7 +528,7 @@ def ensuredir(directory): raise return directory -## END kojikamid dup +# END kojikamid dup # def daemonize(): """Detach and run in background""" @@ -537,12 +537,12 @@ os._exit(0) os.setsid() signal.signal(signal.SIGHUP, signal.SIG_IGN) - #fork again + # fork again pid = os.fork() if pid: os._exit(0) os.chdir("/") - #redirect stdin/stdout/sterr + # redirect stdin/stdout/stderr fd0 = os.open('/dev/null', os.O_RDONLY) fd1 = os.open('/dev/null', os.O_RDWR) fd2 = os.open('/dev/null', os.O_RDWR) @@ -597,7 +597,7 @@ def rpm_hdr_size(f, ofs=None): il = multibyte(data[0:4]) dl = multibyte(data[4:8]) - #this is what the section data says the size should be + # this is what the section data says the size should be hdrsize = 8 + 16 * il + dl # hdrsize rounded up to nearest 8 bytes @@ -624,7 +624,7 @@ class RawHeader(object): self._index() def version(self): - #fourth byte is the version + # fourth byte is the version return _ord(self.header[3]) def _index(self): @@ -635,7 +635,7 @@ il = multibyte(data[:4]) dl = multibyte(data[4:8]) - #read the index (starts at offset 16) + # read the index (starts at offset 16) index = {} for i in range(il): entry = [] @@ -643,30 +643,31 @@ ofs = 16 + i*16 + j*4 data = [_ord(x) for x in self.header[ofs:ofs+4]] entry.append(multibyte(data)) - #print("Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry)) + + # print("Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry)) index[entry[0]] = entry self.datalen = dl self.index = index def dump(self): print("HEADER DUMP:") - #calculate start of store + # calculate start of store il = len(self.index) store = 16 + il * 16 - #print("start is: %d" % start) - #print("index length: %d" % il) + # print("start is: %d" % start) + # print("index length: %d" % il) print("Store at offset %d (%0x)" % (store, store)) - #sort entries by offset, dtype - #also rearrange: tag, dtype, offset, count -> offset, dtype, tag, count + # sort entries by offset, dtype + # also rearrange: tag, dtype, offset, count -> offset, dtype, tag, count order = sorted([(x[2], x[1], x[0], x[3]) for x in six.itervalues(self.index)]) next = store - #map some rpmtag codes + # map some rpmtag codes tags = {} for name, code in six.iteritems(rpm.__dict__): if name.startswith('RPMTAG_') and isinstance(code, int): tags[code] = name[7:].lower() for entry in order: - #tag, dtype, offset, count = entry + # tag, dtype, offset, count = entry offset, dtype, tag, count = entry pos = store + offset if next is not None: @@ -679,17 +680,17 @@ print("Tag: %d [%s], Type: %d, Offset: %x, Count: %d" \ % (tag, tags.get(tag, '?'), dtype, offset, count)) if dtype == 0: - #null + # null print("[NULL entry]") next = pos elif dtype == 1: - #char + # char for i in range(count): print("Char: %r" % self.header[pos]) pos += 1 next = pos elif dtype >= 2 and dtype <= 5: - #integer + # integer n = 1 << (dtype - 2) for i in range(count): data = [_ord(x) for x in self.header[pos:pos+n]] @@ -738,7 +739,7 @@ class RawHeader(object): return
self._getitem(dtype, offset, count) def _getitem(self, dtype, offset, count): - #calculate start of store + # calculate start of store il = len(self.index) store = 16 + il * 16 pos = store + offset @@ -752,10 +753,10 @@ class RawHeader(object): end = self.header.find('\0', pos) return self.header[pos:end] elif dtype == 7: - #raw data + # raw data return self.header[pos:pos+count] else: - #XXX - not all valid data types are handled + # XXX - not all valid data types are handled raise GenericError("Unable to read header data type: %x" % dtype) def get(self, key, default=None): @@ -1108,7 +1109,7 @@ def is_debuginfo(name): def canonArch(arch): """Given an arch, return the "canonical" arch""" - #XXX - this could stand to be smarter, and we should probably + # XXX - this could stand to be smarter, and we should probably # have some other related arch-mangling functions. if fnmatch(arch, 'i?86') or arch == 'athlon': return 'i386' @@ -1295,12 +1296,12 @@ BuildArch: noarch #package requirements """] - #add a requires entry for all the packages in buildgroup, and in - #groups required by buildgroup + # add a requires entry for all the packages in buildgroup, and in + # groups required by buildgroup need = [buildgroup] seen_grp = {} seen_pkg = {} - #index groups + # index groups groups = dict([(g['name'], g) for g in grplist]) for group_name in need: if group_name in seen_grp: @@ -1375,7 +1376,7 @@ def generate_comps(groups, expand_groups=False): """ %s """ % boolean_text(True)) - #print grouplist, if any + # print grouplist, if any if g['grouplist'] and not expand_groups: data.append( """ @@ -1383,7 +1384,7 @@ def generate_comps(groups, expand_groups=False): grouplist = list(g['grouplist']) grouplist.sort(key=lambda x: x['name']) for x in grouplist: - #['req_id','type','is_metapkg','name'] + # ['req_id','type','is_metapkg','name'] name = x['name'] thetype = x['type'] tag = "groupreq" @@ -1401,9 +1402,9 @@ def generate_comps(groups, expand_groups=False): """ """) - #print packagelist, if any + # print packagelist, if any def package_entry(pkg): - #p['package_id','type','basearchonly','requires','name'] + # p['package_id','type','basearchonly','requires','name'] name = pkg['package'] opts = 'type="%s"' % pkg['type'] if pkg['basearchonly']: @@ -1424,7 +1425,7 @@ def generate_comps(groups, expand_groups=False): """ % package_entry(p)) # also include expanded list, if needed if expand_groups and g['grouplist']: - #add a requires entry for all packages in groups required by buildgroup + # add a requires entry for all packages in groups required by buildgroup need = [req['name'] for req in g['grouplist']] seen_grp = {g['name'] : 1} seen_pkg = {} @@ -1484,12 +1485,12 @@ def genMockConfig(name, arch, managed=False, repoid=None, tag_name=None, **opts) raise GenericError("please provide a repo and tag") topurls = opts.get('topurls') if not topurls: - #cli command still passes plain topurl + # cli command still passes plain topurl topurl = opts.get('topurl') if topurl: topurls = [topurl] if topurls: - #XXX - PathInfo isn't quite right for this, but it will do for now + # XXX - PathInfo isn't quite right for this, but it will do for now pathinfos = [PathInfo(topdir=_u) for _u in topurls] urls = ["%s/%s" % (_p.repo(repoid, tag_name), arch) for _p in pathinfos] else: @@ -1539,7 +1540,7 @@ def genMockConfig(name, arch, managed=False, repoid=None, tag_name=None, **opts) if mavenrc: files['etc/mavenrc'] = mavenrc - #generate yum.conf + # generate yum.conf yc_parts = ["[main]\n"] # HTTP proxy for yum if 
opts.get('yum_proxy'): @@ -1780,7 +1781,7 @@ def read_config(profile_name, user_config=None): result = config_defaults.copy() - #note: later config files override earlier ones + # note: later config files override earlier ones # /etc/koji.conf.d configs = ['/etc/koji.conf.d'] @@ -1807,9 +1808,9 @@ def read_config(profile_name, user_config=None): got_conf = True result['profile'] = profile_name for name, value in config.items(profile_name): - #note the config_defaults dictionary also serves to indicate which - #options *can* be set via the config file. Such options should - #not have a default value set in the option parser. + # note the config_defaults dictionary also serves to indicate which + # options *can* be set via the config file. Such options should + # not have a default value set in the option parser. if name in result: if name in ('anon_retry', 'offline_retry', 'use_fast_upload', 'krb_rdns', 'debug', @@ -1984,7 +1985,7 @@ class PathInfo(object): def volumedir(self, volume): if volume == 'DEFAULT' or volume is None: return self.topdir - #else + # else return self.topdir + ("/vol/%s" % volume) def build(self, build): @@ -2141,7 +2142,7 @@ def is_cert_error(e): 'certificate expired' in ssl_reason): return True - #otherwise + # otherwise return False @@ -2553,7 +2554,7 @@ class ClientSession(object): handler, headers, request = self._prepCall('logout', ()) self._sendCall(handler, headers, request) except AuthExpired: - #this can happen when an exclusive session is forced + # this can happen when an exclusive session is forced pass self.setSession(None) @@ -2578,10 +2579,10 @@ class ClientSession(object): return self.setSession(None) - #we've had some trouble with this method causing strange problems - #(like infinite recursion). Possibly triggered by initialization failure, - #and possibly due to some interaction with __getattr__. - #Re-enabling with a small improvement + # we've had some trouble with this method causing strange problems + # (like infinite recursion). Possibly triggered by initialization failure, + # and possibly due to some interaction with __getattr__. + # Re-enabling with a small improvement def __del__(self): if self.__dict__: try: @@ -2594,7 +2595,7 @@ class ClientSession(object): return self._callMethod(name, args, opts) def _prepCall(self, name, args, kwargs=None): - #pass named opts in a way the server can understand + # pass named opts in a way the server can understand if kwargs is None: kwargs = {} if name == 'rawUpload': @@ -2713,27 +2714,27 @@ class ClientSession(object): self.retries += 1 try: return self._sendCall(handler, headers, request) - #basically, we want to retry on most errors, with a few exceptions + # basically, we want to retry on most errors, with a few exceptions # - faults (this means the call completed and failed) # - SystemExit, KeyboardInterrupt # note that, for logged-in sessions the server should tell us (via a RetryError fault) # if the call cannot be retried. For non-logged-in sessions, all calls should be read-only # and hence retryable. except Fault as fault: - #try to convert the fault to a known exception + # try to convert the fault to a known exception err = convertFault(fault) if isinstance(err, ServerOffline): if self.opts.get('offline_retry', False): secs = self.opts.get('offline_retry_interval', interval) self.logger.debug("Server offline. 
Retrying in %i seconds", secs) time.sleep(secs) - #reset try count - this isn't a typical error, this is a running server - #correctly reporting an outage + # reset try count - this isn't a typical error, this is a running server + # correctly reporting an outage tries = 0 continue raise err except (SystemExit, KeyboardInterrupt): - #(depending on the python version, these may or may not be subclasses of Exception) + # (depending on the python version, these may or may not be subclasses of Exception) raise except Exception as e: tb_str = ''.join(traceback.format_exception(*sys.exc_info())) @@ -2744,8 +2745,8 @@ class ClientSession(object): raise if not self.logged_in: - #in the past, non-logged-in sessions did not retry. For compatibility purposes - #this behavior is governed by the anon_retry opt. + # in the past, non-logged-in sessions did not retry. For compatibility purposes + # this behavior is governed by the anon_retry opt. if not self.opts.get('anon_retry', False): raise @@ -2754,14 +2755,14 @@ class ClientSession(object): if tries > max_retries: raise - #otherwise keep retrying + # otherwise keep retrying if self.logger.isEnabledFor(logging.DEBUG): self.logger.debug(tb_str) self.logger.info("Try #%s for call %s (%s) failed: %s", tries, self.callnum, name, e) if tries > 1: # first retry is immediate, after that we honor retry_interval time.sleep(interval) - #not reached + # not reached def multiCall(self, strict=False, batch=None): """Execute a prepared multicall @@ -2816,7 +2817,7 @@ class ClientSession(object): else: ret = self._callMethod('multiCall', (calls,), {}) if strict: - #check for faults and raise first one + # check for faults and raise first one for entry in ret: if isinstance(entry, dict): fault = Fault(entry['faultCode'], entry['faultString']) @@ -2825,7 +2826,7 @@ class ClientSession(object): return ret def __getattr__(self, name): - #if name[:1] == '_': + # if name[:1] == '_': # raise AttributeError("no attribute %r" % name) if name == '_apidoc': return self.__dict__['_apidoc'] @@ -2953,7 +2954,7 @@ class ClientSession(object): start = time.time() # XXX - stick in a config or something retries = 3 - fo = open(localfile, "rb") #specify bufsize? + fo = open(localfile, "rb") # specify bufsize? 
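
The retry rules described above (faults are not retried, a reported outage resets the counter, anonymous sessions retry only with anon_retry, the first retry is immediate) condense into a small loop. A sketch under those assumptions; the invoke callable, the opts dict, and the default intervals are illustrative stand-ins:

import time

class ServerOffline(Exception):
    """Local stand-in for koji.ServerOffline, for this sketch only."""

def call_with_retries(invoke, logged_in, opts):
    interval = opts.get('retry_interval', 20)
    tries = 0
    while True:
        tries += 1
        try:
            return invoke()
        except ServerOffline:
            if not opts.get('offline_retry', False):
                raise
            time.sleep(opts.get('offline_retry_interval', interval))
            tries = 0  # a correctly reported outage is not a normal failure
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception:
            if not logged_in and not opts.get('anon_retry', False):
                raise
            if tries > opts.get('max_retries', 30):
                raise
            if tries > 1:
                time.sleep(interval)  # first retry is immediate
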
totalsize = os.path.getsize(localfile) ofs = 0 md5sum = hashlib.md5() @@ -3207,15 +3208,15 @@ class DBHandler(logging.Handler): columns.append(key) values.append("%%(%s)s" % key) data[key] = value % record.__dict__ - #values.append(_quote(value % record.__dict__)) + # values.append(_quote(value % record.__dict__)) columns = ",".join(columns) values = ",".join(values) command = "INSERT INTO %s (%s) VALUES (%s)" % (self.table, columns, values) - #note we're letting cursor.execute do the escaping + # note we're letting cursor.execute do the escaping cursor.execute(command, data) cursor.close() - #self.cnx.commit() - #XXX - committing here is most likely wrong, but we need to set commit_pending or something + # self.cnx.commit() + # XXX - committing here is most likely wrong, but we need to set commit_pending or something # ...and this is really the wrong place for that except: self.handleError(record) @@ -3328,7 +3329,7 @@ def _taskLabel(taskInfo): extra = build_target['name'] elif method == 'winbuild': if 'request' in taskInfo: - #vm = taskInfo['request'][0] + # vm = taskInfo['request'][0] url = taskInfo['request'][1] target = taskInfo['request'][2] module_info = _module_info(url) diff --git a/koji/arch.py b/koji/arch.py index 431c9bc6..a36046fc 100644 --- a/koji/arch.py +++ b/koji/arch.py @@ -33,7 +33,7 @@ arches = { "amd64": "x86_64", "ia32e": "x86_64", - #ppc64le + # ppc64le "ppc64le": "noarch", # ppc @@ -73,7 +73,7 @@ arches = { "armv5tejl": "armv5tel", "armv5tel": "noarch", - #arm hardware floating point + # arm hardware floating point "armv7hnl": "armv7hl", "armv7hl": "armv6hl", "armv6hl": "noarch", @@ -86,7 +86,7 @@ arches = { "sh4": "noarch", "sh3": "noarch", - #itanium + # itanium "ia64": "noarch", } diff --git a/koji/auth.py b/koji/auth.py index 85200698..c2a0bdd5 100644 --- a/koji/auth.py +++ b/koji/auth.py @@ -79,7 +79,7 @@ class Session(object): self._perms = None self._groups = None self._host_id = '' - #get session data from request + # get session data from request if args is None: environ = getattr(context, 'environ', {}) args = environ.get('QUERY_STRING', '') @@ -97,7 +97,7 @@ class Session(object): callnum = args['callnum'][0] except: callnum = None - #lookup the session + # lookup the session c = context.cnx.cursor() fields = { 'authtype': 'authtype', @@ -125,10 +125,10 @@ class Session(object): if not row: raise koji.AuthError('Invalid session or bad credentials') session_data = dict(zip(aliases, row)) - #check for expiration + # check for expiration if session_data['expired']: raise koji.AuthExpired('session "%i" has expired' % id) - #check for callnum sanity + # check for callnum sanity if callnum is not None: try: callnum = int(callnum) @@ -140,14 +140,14 @@ class Session(object): raise koji.SequenceError("%d > %d (session %d)" \ % (lastcall, callnum, id)) elif lastcall == callnum: - #Some explanation: - #This function is one of the few that performs its own commit. - #However, our storage of the current callnum is /after/ that - #commit. This means the the current callnum only gets committed if - #a commit happens afterward. - #We only schedule a commit for dml operations, so if we find the - #callnum in the db then a previous attempt succeeded but failed to - #return. Data was changed, so we cannot simply try the call again. + # Some explanation: + # This function is one of the few that performs its own commit. + # However, our storage of the current callnum is /after/ that + # commit. 
This means that the current callnum only gets committed if + # a commit happens afterward. + # We only schedule a commit for dml operations, so if we find the + # callnum in the db then a previous attempt succeeded but failed to + # return. Data was changed, so we cannot simply try the call again. method = getattr(context, 'method', 'UNKNOWN') if method not in RetryWhitelist: raise koji.RetryError( @@ -155,7 +155,7 @@ % (callnum, method, id)) # read user data - #historical note: + # historical note: # we used to get a row lock here as an attempt to maintain sanity of exclusive # sessions, but it was an imperfect approach and the lock could cause some # performance issues. @@ -166,25 +166,25 @@ if user_data['status'] != koji.USER_STATUS['NORMAL']: raise koji.AuthError('logins by %s are not allowed' % user_data['name']) - #check for exclusive sessions + # check for exclusive sessions if session_data['exclusive']: - #we are the exclusive session for this user + # we are the exclusive session for this user self.exclusive = True else: - #see if an exclusive session exists + # see if an exclusive session exists q = """SELECT id FROM sessions WHERE user_id=%(user_id)s AND "exclusive" = TRUE AND expired = FALSE""" - #should not return multiple rows (unique constraint) + # should not return multiple rows (unique constraint) c.execute(q, session_data) row = c.fetchone() if row: (excl_id,) = row if excl_id == session_data['master']: - #(note excl_id cannot be None) - #our master session has the lock + # (note excl_id cannot be None) + # our master session has the lock self.exclusive = True else: - #a session unrelated to us has the lock + # a session unrelated to us has the lock self.lockerror = "User locked by another session" # we don't enforce here, but rely on the dispatcher to enforce # if appropriate (otherwise it would be impossible to steal @@ -193,11 +193,11 @@ # update timestamp q = """UPDATE sessions SET update_time=NOW() WHERE id = %(id)i""" c.execute(q, locals()) - #save update time + # save update time context.cnx.commit() - #update callnum (this is deliberately after the commit) - #see earlier note near RetryError + # update callnum (this is deliberately after the commit) + # see earlier note near RetryError if callnum is not None: q = """UPDATE sessions SET callnum=%(callnum)i WHERE id = %(id)i""" c.execute(q, locals()) @@ -218,7 +218,7 @@ # grab perm and groups data on the fly if name == 'perms': if self._perms is None: - #in a dict for quicker lookup + # in a dict for quicker lookup self._perms = dict([[name, 1] for name in get_user_perms(self.user_id)]) return self._perms elif name == 'groups': @@ -254,7 +254,7 @@ return override else: hostip = context.environ['REMOTE_ADDR'] - #XXX - REMOTE_ADDR not promised by wsgi spec + # XXX - REMOTE_ADDR not promised by wsgi spec if hostip == '127.0.0.1': hostip = socket.gethostbyname(socket.gethostname()) return hostip @@ -294,7 +294,7 @@ self.checkLoginAllowed(user_id) - #create session and return + # create session and return sinfo = self.createSession(user_id, hostip, koji.AUTHTYPE_NORMAL) session_id = sinfo['session-id'] context.cnx.commit() @@ -386,7 +386,7 @@ # so get the local ip via a different method local_ip = socket.gethostbyname(context.environ['SERVER_NAME']) remote_ip = context.environ['REMOTE_ADDR'] - #XXX - REMOTE_ADDR not
promised by wsgi spec # it appears that calling setports() with *any* value results in authentication # failing with "Incorrect net address", so return 0 (which prevents @@ -466,11 +466,11 @@ class Session(object): if self.master is not None: raise koji.GenericError("subsessions cannot become exclusive") if self.exclusive: - #shouldn't happen + # shouldn't happen raise koji.GenericError("session is already exclusive") user_id = self.user_id session_id = self.id - #acquire a row lock on the user entry + # acquire a row lock on the user entry q = """SELECT id FROM users WHERE id=%(user_id)s FOR UPDATE""" c.execute(q, locals()) # check that no other sessions for this user are exclusive @@ -481,13 +481,13 @@ class Session(object): row = c.fetchone() if row: if force: - #expire the previous exclusive session and try again + # expire the previous exclusive session and try again (excl_id,) = row q = """UPDATE sessions SET expired=TRUE,"exclusive"=NULL WHERE id=%(excl_id)s""" c.execute(q, locals()) else: raise koji.AuthLockError("Cannot get exclusive session") - #mark this session exclusive + # mark this session exclusive q = """UPDATE sessions SET "exclusive"=TRUE WHERE id=%(session_id)s""" c.execute(q, locals()) context.cnx.commit() @@ -503,12 +503,12 @@ class Session(object): def logout(self): """expire a login session""" if not self.logged_in: - #XXX raise an error? + # XXX raise an error? raise koji.AuthError("Not logged in") update = """UPDATE sessions SET expired=TRUE,exclusive=NULL WHERE id = %(id)i OR master = %(id)i""" - #note we expire subsessions as well + # note we expire subsessions as well c = context.cnx.cursor() c.execute(update, {'id': self.id}) context.cnx.commit() @@ -517,7 +517,7 @@ class Session(object): def logoutChild(self, session_id): """expire a subsession""" if not self.logged_in: - #XXX raise an error? + # XXX raise an error? 
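
The exclusivity logic above is easy to misread in patch form: the row lock on users serializes competing sessions, and force is the only path that evicts the current holder. A sketch of the same flow, assuming a DB-API cursor and the table layout used in this hunk; koji raises AuthLockError where this sketch uses RuntimeError:

def make_exclusive(cursor, user_id, session_id, force=False):
    # row lock on the user entry so two sessions cannot race for exclusivity
    cursor.execute("SELECT id FROM users WHERE id=%(user_id)s FOR UPDATE",
                   {'user_id': user_id})
    cursor.execute('SELECT id FROM sessions WHERE user_id=%(user_id)s '
                   'AND "exclusive" = TRUE AND expired = FALSE',
                   {'user_id': user_id})
    row = cursor.fetchone()
    if row:
        if not force:
            raise RuntimeError("Cannot get exclusive session")
        # expire the previous exclusive session and take over
        cursor.execute('UPDATE sessions SET expired=TRUE,"exclusive"=NULL '
                       'WHERE id=%(excl_id)s', {'excl_id': row[0]})
    cursor.execute('UPDATE sessions SET "exclusive"=TRUE WHERE id=%(session_id)s',
                   {'session_id': session_id})
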
raise koji.AuthError("Not logged in") update = """UPDATE sessions SET expired=TRUE,exclusive=NULL @@ -547,7 +547,7 @@ class Session(object): (session_id,) = c.fetchone() - #add session id to database + # add session id to database q = """ INSERT INTO sessions (id, user_id, key, hostip, authtype, master) VALUES (%(session_id)i, %(user_id)i, %(key)s, %(hostip)s, %(authtype)i, %(master)s) @@ -555,7 +555,7 @@ class Session(object): c.execute(q, locals()) context.cnx.commit() - #return session info + # return session info return {'session-id' : session_id, 'session-key' : key} def subsession(self): @@ -589,7 +589,7 @@ class Session(object): def hasGroup(self, group_id): if not self.logged_in: return False - #groups indexed by id + # groups indexed by id return group_id in self.groups def isUser(self, user_id): @@ -616,7 +616,7 @@ class Session(object): return None def getHostId(self): - #for compatibility + # for compatibility return self.host_id def getUserId(self, username): @@ -805,7 +805,7 @@ def get_user_perms(user_id): FROM user_perms JOIN permissions ON perm_id = permissions.id WHERE active = TRUE AND user_id=%(user_id)s""" c.execute(q, locals()) - #return a list of permissions by name + # return a list of permissions by name return [row[0] for row in c.fetchall()] def get_user_data(user_id): diff --git a/koji/daemon.py b/koji/daemon.py index 8bb3a2e0..9f5fb56b 100644 --- a/koji/daemon.py +++ b/koji/daemon.py @@ -171,7 +171,7 @@ def log_output(session, path, args, outfile, uploadpath, cwd=None, logerror=0, a return status[1] -## BEGIN kojikamid dup +# BEGIN kojikamid dup # class SCM(object): "SCM abstraction class" @@ -397,7 +397,7 @@ class SCM(object): env = None def _run(cmd, chdir=None, fatal=False, log=True, _count=[0]): if globals().get('KOJIKAMID'): - #we've been inserted into kojikamid, use its run() + # we've been inserted into kojikamid, use its run() return run(cmd, chdir=chdir, fatal=fatal, log=log) # noqa: F821 else: append = (_count[0] > 0) @@ -546,7 +546,7 @@ class SCM(object): # just use the same url r['source'] = self.url return r -## END kojikamid dup +# END kojikamid dup # class TaskManager(object): @@ -613,7 +613,7 @@ class TaskManager(object): If nolocal is True, do not try to scan local buildroots. 
""" - #query buildroots in db that are not expired + # query buildroots in db that are not expired states = [koji.BR_STATES[x] for x in ('INIT', 'WAITING', 'BUILDING')] db_br = self.session.listBuildroots(hostID=self.host_id, state=tuple(states)) # index by id @@ -627,8 +627,8 @@ class TaskManager(object): self.logger.warn("Expiring taskless buildroot: %(id)i/%(tag_name)s/%(arch)s" % br) self.session.host.setBuildRootState(id, st_expired) elif task_id not in self.tasks: - #task not running - expire the buildroot - #TODO - consider recycling hooks here (with strong sanity checks) + # task not running - expire the buildroot + # TODO - consider recycling hooks here (with strong sanity checks) self.logger.info("Expiring buildroot: %(id)i/%(tag_name)s/%(arch)s" % br) self.logger.debug("Buildroot task: %r, Current tasks: %r" % (task_id, to_list(self.tasks.keys()))) self.session.host.setBuildRootState(id, st_expired) @@ -640,13 +640,13 @@ class TaskManager(object): local_only = [id for id in local_br if id not in db_br] if local_only: missed_br = self.session.listBuildroots(buildrootID=tuple(local_only)) - #get all the task info in one call + # get all the task info in one call tasks = [] for br in missed_br: task_id = br['task_id'] if task_id: tasks.append(task_id) - #index + # index missed_br = dict([(row['id'], row) for row in missed_br]) tasks = dict([(row['id'], row) for row in self.session.getTaskInfo(tasks)]) for id in local_only: @@ -671,7 +671,7 @@ class TaskManager(object): self.logger.warn("%s: invalid task %s" % (desc, br['task_id'])) continue if (task['state'] == koji.TASK_STATES['FAILED'] and age < self.options.failed_buildroot_lifetime): - #XXX - this could be smarter + # XXX - this could be smarter # keep buildroots for failed tasks around for a little while self.logger.debug("Keeping failed buildroot: %s" % desc) continue @@ -689,17 +689,17 @@ class TaskManager(object): continue else: age = min(age, time.time() - st.st_mtime) - #note: https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=192153) - #If rpmlib is installing in this chroot, removing it entirely - #can lead to a world of hurt. - #We remove the rootdir contents but leave the rootdir unless it - #is really old + # note: https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=192153) + # If rpmlib is installing in this chroot, removing it entirely + # can lead to a world of hurt. + # We remove the rootdir contents but leave the rootdir unless it + # is really old if age > 3600*24: - #dir untouched for a day + # dir untouched for a day self.logger.info("Removing buildroot: %s" % desc) if topdir and safe_rmtree(topdir, unmount=True, strict=False) != 0: continue - #also remove the config + # also remove the config try: os.unlink(data['cfg']) except OSError as e: @@ -726,7 +726,7 @@ class TaskManager(object): self.logger.debug("Expired/stray buildroots: %d" % len(local_only)) def _scanLocalBuildroots(self): - #XXX + # XXX configdir = '/etc/mock/koji' buildroots = {} for f in os.listdir(configdir): @@ -785,13 +785,13 @@ class TaskManager(object): # by this host. 
id = task['id'] if id not in self.pids: - #We don't have a process for this - #Expected to happen after a restart, otherwise this is an error + # We don't have a process for this + # Expected to happen after a restart, otherwise this is an error stale.append(id) continue tasks[id] = task if task.get('alert', False): - #wake up the process + # wake up the process self.logger.info("Waking up task: %r" % task) os.kill(self.pids[id], signal.SIGUSR2) if not task['waiting']: @@ -801,8 +801,8 @@ class TaskManager(object): self.tasks = tasks self.logger.debug("Current tasks: %r" % self.tasks) if len(stale) > 0: - #A stale task is one which is opened to us, but we know nothing - #about). This will happen after a daemon restart, for example. + # A stale task is one which is opened to us, but we know nothing + # about). This will happen after a daemon restart, for example. self.logger.info("freeing stale tasks: %r" % stale) self.session.host.freeTasks(stale) for id, pid in list(self.pids.items()): @@ -844,15 +844,15 @@ class TaskManager(object): self.logger.debug("Load Data:") self.logger.debug(" hosts: %r" % hosts) self.logger.debug(" tasks: %r" % tasks) - #now we organize this data into channel-arch bins + # now we organize this data into channel-arch bins bin_hosts = {} #hosts indexed by bin bins = {} #bins for this host our_avail = None for host in hosts: host['bins'] = [] if host['id'] == self.host_id: - #note: task_load reported by server might differ from what we - #sent due to precision variation + # note: task_load reported by server might differ from what we + # sent due to precision variation our_avail = host['capacity'] - host['task_load'] for chan in host['channels']: for arch in host['arches'].split() + ['noarch']: @@ -867,7 +867,7 @@ class TaskManager(object): elif not bins: self.logger.info("No bins for this host. 
Missing channel/arch config?") # Note: we may still take an assigned task below - #sort available capacities for each of our bins + # sort available capacities for each of our bins avail = {} for bin in bins: avail[bin] = [host['capacity'] - host['task_load'] for host in bin_hosts[bin]] @@ -889,7 +889,7 @@ class TaskManager(object): if task['state'] == koji.TASK_STATES['ASSIGNED']: self.logger.debug("task is assigned") if self.host_id == task['host_id']: - #assigned to us, we can take it regardless + # assigned to us, we can take it regardless if self.takeTask(task): return True elif task['state'] == koji.TASK_STATES['FREE']: @@ -897,18 +897,18 @@ class TaskManager(object): self.logger.debug("task is free, bin=%r" % bin) if bin not in bins: continue - #see where our available capacity is compared to other hosts for this bin - #(note: the hosts in this bin are exactly those that could - #accept this task) + # see where our available capacity is compared to other hosts for this bin + # (note: the hosts in this bin are exactly those that could + # accept this task) bin_avail = avail.get(bin, [0]) if self.checkAvailDelay(task, bin_avail, our_avail): # decline for now and give the upper half a chance continue - #otherwise, we attempt to open the task + # otherwise, we attempt to open the task if self.takeTask(task): return True else: - #should not happen + # should not happen raise Exception("Invalid task state reported by server") return False @@ -968,11 +968,11 @@ class TaskManager(object): try: (childpid, status) = os.waitpid(pid, os.WNOHANG) except OSError as e: - #check errno + # check errno if e.errno != errno.ECHILD: - #should not happen + # should not happen raise - #otherwise assume the process is gone + # otherwise assume the process is gone self.logger.info("%s: %s" % (prefix, e)) return True if childpid != 0: @@ -1118,7 +1118,7 @@ class TaskManager(object): if children: self._killChildren(task_id, children, sig=signal.SIGKILL, timeout=3.0) - #expire the task's subsession + # expire the task's subsession session_id = self.subsessions.get(task_id) if session_id: self.logger.info("Expiring subsession %i (task %i)" % (session_id, task_id)) @@ -1126,7 +1126,7 @@ class TaskManager(object): self.session.logoutChild(session_id) del self.subsessions[task_id] except: - #not much we can do about it + # not much we can do about it pass if wait: return self._waitTask(task_id, pid) @@ -1200,7 +1200,7 @@ class TaskManager(object): self.status = "Load average %.2f > %.2f" % (loadavgs[0], maxload) self.logger.info(self.status) return False - #XXX - add more checks + # XXX - add more checks return True def takeTask(self, task): @@ -1250,7 +1250,7 @@ class TaskManager(object): if state != 'OPEN': self.logger.warn("Task %i changed is %s", task_id, state) return False - #otherwise... + # otherwise... 
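
The channel-arch binning above decides which free tasks a host may even consider, and checkAvailDelay ranks this host's spare capacity against the other hosts serving the same bin. Two helpers restating that calculation; the bin string format and the function names are assumptions, not koji's exact internals:

def host_bins(host):
    # one bin per (channel, arch) pair this host serves; noarch always included
    return ['%s:%s' % (chan, arch)
            for chan in host['channels']
            for arch in host['arches'].split() + ['noarch']]

def available_capacities(hosts, bin):
    # sorted free capacity of every host serving this bin, the input a
    # checkAvailDelay-style policy would rank this host against
    return sorted(host['capacity'] - host['task_load']
                  for host in hosts if bin in host_bins(host))
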
raise if handler.Foreground: self.logger.info("running task in foreground") @@ -1263,27 +1263,27 @@ class TaskManager(object): return True def forkTask(self, handler): - #get the subsession before we fork + # get the subsession before we fork newhub = self.session.subsession() session_id = newhub.sinfo['session-id'] pid = os.fork() if pid: newhub._forget() return pid, session_id - #in no circumstance should we return after the fork - #nor should any exceptions propagate past here + # in no circumstance should we return after the fork + # nor should any exceptions propagate past here try: self.session._forget() - #set process group + # set process group os.setpgrp() - #use the subsession + # use the subsession self.session = newhub handler.session = self.session - #set a do-nothing handler for sigusr2 + # set a do-nothing handler for sigusr2 signal.signal(signal.SIGUSR2, lambda *args: None) self.runTask(handler) finally: - #diediedie + # diediedie try: self.session.logout() finally: @@ -1302,10 +1302,10 @@ class TaskManager(object): tb = ''.join(traceback.format_exception(*sys.exc_info())).replace(r"\n", "\n") self.logger.warn("FAULT:\n%s" % tb) except (SystemExit, koji.tasks.ServerExit, KeyboardInterrupt): - #we do not trap these + # we do not trap these raise except koji.tasks.ServerRestart: - #freeing this task will allow the pending restart to take effect + # freeing this task will allow the pending restart to take effect self.session.host.freeTasks([handler.id]) return except: @@ -1315,7 +1315,7 @@ class TaskManager(object): e_class, e = sys.exc_info()[:2] faultCode = getattr(e_class, 'faultCode', 1) if issubclass(e_class, koji.GenericError): - #just pass it through + # just pass it through tb = str(e) response = koji.xmlrpcplus.dumps(koji.xmlrpcplus.Fault(faultCode, tb)) diff --git a/koji/db.py b/koji/db.py index 8a085bfc..3d36b876 100644 --- a/koji/db.py +++ b/koji/db.py @@ -75,8 +75,8 @@ class DBWrapper: if not self.cnx: raise Exception('connection is closed') self.cnx.cursor().execute('ROLLBACK') - #We do this rather than cnx.rollback to avoid opening a new transaction - #If our connection gets recycled cnx.rollback will be called then. + # We do this rather than cnx.rollback to avoid opening a new transaction + # If our connection gets recycled cnx.rollback will be called then. self.cnx = None @@ -177,7 +177,7 @@ def connect(): return DBWrapper(conn) except psycopg2.Error: del _DBconn.conn - #create a fresh connection + # create a fresh connection opts = _DBopts if opts is None: opts = {} diff --git a/koji/plugin.py b/koji/plugin.py index d6e85554..e8183de0 100644 --- a/koji/plugin.py +++ b/koji/plugin.py @@ -62,7 +62,7 @@ class PluginTracker(object): def __init__(self, path=None, prefix='_koji_plugin__'): self.searchpath = path - #prefix should not have a '.' in it, this can cause problems. + # prefix should not have a '.' in it, this can cause problems. self.prefix = prefix self.plugins = {} @@ -71,9 +71,9 @@ class PluginTracker(object): return self.plugins[name] mod_name = name if self.prefix: - #mod_name determines how the module is named in sys.modules - #Using a prefix helps prevent overlap with other modules - #(no '.' -- it causes problems) + # mod_name determines how the module is named in sys.modules + # Using a prefix helps prevent overlap with other modules + # (no '.' 
-- it causes problems) mod_name = self.prefix + name if mod_name in sys.modules and not reload: raise koji.PluginError('module name conflict: %s' % mod_name) diff --git a/koji/policy.py b/koji/policy.py index fa1f9882..27dbdf69 100644 --- a/koji/policy.py +++ b/koji/policy.py @@ -31,7 +31,7 @@ from koji.util import to_list class BaseSimpleTest(object): """Abstract base class for simple tests""" - #Provide the name of the test + # Provide the name of the test name = None def __init__(self, str): @@ -62,12 +62,12 @@ class FalseTest(BaseSimpleTest): class AllTest(TrueTest): name = 'all' - #alias for true + # alias for true class NoneTest(FalseTest): name = 'none' - #alias for false + # alias for false class HasTest(BaseSimpleTest): @@ -233,11 +233,11 @@ class SimpleRuleSet(object): for line in lines: rule = self.parse_line(line) if rule is None: - #blank/etc + # blank/etc continue tests, negate, action = rule if action == '{': - #nested rules + # nested rules child = [] cursor.append([tests, negate, child]) stack.append(cursor) @@ -275,11 +275,11 @@ class SimpleRuleSet(object): """ line = line.split('#', 1)[0].strip() if not line: - #blank or all comment + # blank or all comment return None if line == '}': return None, False, '}' - #?? allow }} ?? + # ?? allow }} ?? negate = False pos = line.rfind('::') if pos == -1: @@ -328,7 +328,7 @@ class SimpleRuleSet(object): if not check: break else: - #all tests in current rule passed + # all tests in current rule passed value = True if negate: value = not value @@ -393,11 +393,11 @@ def findSimpleTests(namespace): if isinstance(value, type(BaseSimpleTest)) and issubclass(value, BaseSimpleTest): name = getattr(value, 'name', None) if not name: - #use the class name + # use the class name name = key - #but trim 'Test' from the end + # but trim 'Test' from the end if name.endswith('Test') and len(name) > 4: name = name[:-4] ret.setdefault(name, value) - #...so first test wins in case of name overlap + # ...so first test wins in case of name overlap return ret diff --git a/koji/rpmdiff.py b/koji/rpmdiff.py index 12f72af6..efa2dfc2 100644 --- a/koji/rpmdiff.py +++ b/koji/rpmdiff.py @@ -48,7 +48,7 @@ class Rpmdiff: PRCO = ( 'REQUIRES', 'PROVIDES', 'CONFLICTS', 'OBSOLETES') - #{fname : (size, mode, mtime, flags, dev, inode, + # {fname : (size, mode, mtime, flags, dev, inode, # nlink, state, vflags, user, group, digest)} __FILEIDX = [ ['S', 0], ['M', 1], @@ -71,7 +71,7 @@ class Rpmdiff: try: PREREQ_FLAG=rpm.RPMSENSE_PREREQ except: - #(proyvind): This seems ugly, but then again so does + # (proyvind): This seems ugly, but then again so does # this whole check as well. 
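
The naming rules in findSimpleTests above (an explicit name attribute wins, otherwise the class name with a trailing 'Test' trimmed, and the first registration is kept) fit in a few lines. A self-contained sketch of that discovery logic; the function name and base parameter are illustrative:

def find_simple_tests(namespace, base):
    found = {}
    for key, value in namespace.items():
        if isinstance(value, type) and issubclass(value, base):
            name = getattr(value, 'name', None)
            if not name:
                name = key
                # trim 'Test' from the end of the class name
                if name.endswith('Test') and len(name) > 4:
                    name = name[:-4]
            found.setdefault(name, value)  # first test wins on overlap
    return found
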
PREREQ_FLAG=False diff --git a/koji/tasks.py b/koji/tasks.py index af603a0c..a86d0a69 100644 --- a/koji/tasks.py +++ b/koji/tasks.py @@ -51,7 +51,7 @@ def scan_mounts(topdir): logger.warning('Found deleted mountpoint: %s' % path) mplist.append(path) fo.close() - #reverse sort so deeper dirs come first + # reverse sort so deeper dirs come first mplist.sort(reverse=True) return mplist @@ -64,7 +64,7 @@ def umount_all(topdir): rv = os.spawnvp(os.P_WAIT, cmd[0], cmd) if rv != 0: raise koji.GenericError('umount failed (exit code %r) for %s' % (rv, path)) - #check mounts again + # check mounts again remain = scan_mounts(topdir) if remain: raise koji.GenericError("Unmounting incomplete: %r" % remain) @@ -340,7 +340,7 @@ class BaseTaskHandler(object): if self.workdir is None: return safe_rmtree(self.workdir, unmount=False, strict=True) - #os.spawnvp(os.P_WAIT, 'rm', ['rm', '-rf', self.workdir]) + # os.spawnvp(os.P_WAIT, 'rm', ['rm', '-rf', self.workdir]) def wait(self, subtasks=None, all=False, failany=False, canfail=None, timeout=None): @@ -385,7 +385,7 @@ class BaseTaskHandler(object): while True: finished, unfinished = self.session.host.taskWait(self.id) if len(unfinished) == 0: - #all done + # all done break elif len(finished) > 0: if all: @@ -561,7 +561,7 @@ class BaseTaskHandler(object): repo_info = self.session.getRepo(tag) taginfo = self.session.getTag(tag, strict=True) if not repo_info: - #make sure there is a target + # make sure there is a target targets = self.session.getBuildTargets(buildTagID=taginfo['id']) if not targets: raise koji.BuildError('no repo (and no target) for tag %s' % taginfo['name']) @@ -666,7 +666,7 @@ class ShutdownTask(BaseTaskHandler): _taskWeight = 0.0 Foreground = True def handler(self): - #note: this is a foreground task + # note: this is a foreground task raise ServerExit @@ -677,7 +677,7 @@ class RestartTask(BaseTaskHandler): _taskWeight = 0.1 Foreground = True def handler(self, host): - #note: this is a foreground task + # note: this is a foreground task if host['id'] != self.session.host.getID(): raise koji.GenericError("Host mismatch") self.manager.restart_pending = True @@ -691,7 +691,7 @@ class RestartVerifyTask(BaseTaskHandler): _taskWeight = 0.1 Foreground = True def handler(self, task_id, host): - #note: this is a foreground task + # note: this is a foreground task tinfo = self.session.getTaskInfo(task_id) state = koji.TASK_STATES[tinfo['state']] if state != 'CLOSED': @@ -754,7 +754,7 @@ class RestartHostsTask(BaseTaskHandler): class DependantTask(BaseTaskHandler): Methods = ['dependantTask'] - #mostly just waiting on other tasks + # mostly just waiting on other tasks _taskWeight = 0.2 def handler(self, wait_list, task_list): diff --git a/koji/util.py b/koji/util.py index ec6c83d3..350c03fe 100644 --- a/koji/util.py +++ b/koji/util.py @@ -189,7 +189,7 @@ def dslice(dict_, keys, strict=True): ret = {} for key in keys: if strict or key in dict_: - #for strict we skip the has_key check and let the dict generate the KeyError + # for strict we skip the has_key check and let the dict generate the KeyError ret[key] = dict_[key] return ret @@ -639,13 +639,13 @@ def setup_rlimits(opts, logger=None): class adler32_constructor(object): - #mimicing the hashlib constructors + # mimicking the hashlib constructors def __init__(self, arg=''): if six.PY3 and isinstance(arg, str): arg = bytes(arg, 'utf-8') self._value = adler32(arg) & 0xffffffff - #the bitwise and works around a bug in some versions of python - #see: https://bugs.python.org/issue1202 + # the bitwise and
diff --git a/plugins/builder/runroot.py b/plugins/builder/runroot.py
index 1a3c0dbb..40fb5364 100644
--- a/plugins/builder/runroot.py
+++ b/plugins/builder/runroot.py
@@ -118,9 +118,10 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
         if weight is not None:
             weight = max(weight, 0.5)
         self.session.host.setTaskWeight(self.id, weight)
-        #noarch is funny
+
+        # noarch is funny
         if arch == "noarch":
-            #we need a buildroot arch. Pick one that:
+            # we need a buildroot arch. Pick one that:
             # a) this host can handle
             # b) the build tag can support
             # c) is canonical
@@ -130,16 +131,16 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
             tag_arches = self.session.getBuildConfig(root)['arches']
             if not tag_arches:
                 raise koji.BuildError("No arch list for tag: %s" % root)
-            #index canonical host arches
+            # index canonical host arches
             host_arches = set([koji.canonArch(a) for a in host_arches.split()])
-            #pick the first suitable match from tag's archlist
+            # pick the first suitable match from tag's archlist
             for br_arch in tag_arches.split():
                 br_arch = koji.canonArch(br_arch)
                 if br_arch in host_arches:
-                    #we're done
+                    # we're done
                     break
             else:
-                #no overlap
+                # no overlap
                 raise koji.BuildError("host does not match tag arches: %s (%s)" % (root, tag_arches))
         else:
             br_arch = arch
@@ -152,7 +153,7 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
         else:
             repo_info = self.session.getRepo(root)
             if not repo_info:
-                #wait for it
+                # wait for it
                 task_id = self.session.host.subtask(method='waitrepo',
                                                     arglist=[root, None, None],
                                                     parent=self.id)
@@ -163,13 +164,13 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
         broot.workdir = self.workdir
         broot.init()
         rootdir = broot.rootdir()
-        #workaround for rpm oddness
+        # workaround for rpm oddness
         os.system('rm -f "%s"/var/lib/rpm/__db.*' % rootdir)
-        #update buildroot state (so that updateBuildRootList() will work)
+        # update buildroot state (so that updateBuildRootList() will work)
         self.session.host.setBuildRootState(broot.id, 'BUILDING')
         try:
             if packages:
-                #pkglog = '%s/%s' % (broot.resultdir(), 'packages.log')
+                # pkglog = '%s/%s' % (broot.resultdir(), 'packages.log')
                 pkgcmd = ['--install'] + packages
                 status = broot.mock(pkgcmd)
                 self.session.host.updateBuildRootList(broot.id, broot.getPackageList())
@@ -179,9 +180,9 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
         if isinstance(command, str):
             cmdstr = command
         else:
-            #we were passed an arglist
-            #we still have to run this through the shell (for redirection)
-            #but we can preserve the list structure precisely with careful escaping
+            # we were passed an arglist
+            # we still have to run this through the shell (for redirection)
+            # but we can preserve the list structure precisely with careful escaping
             cmdstr = ' '.join(["'%s'" % arg.replace("'", r"'\''") for arg in command])
         # A nasty hack to put command output into its own file until mock can be
         # patched to do something more reasonable than stuff everything into build.log
@@ -198,7 +199,7 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
         elif new_chroot is False:  # None -> no option added
             mock_cmd.append('--old-chroot')
         if skip_setarch:
-            #we can't really skip it, but we can set it to the current one instead of of the chroot one
+            # we can't really skip it, but we can set it to the current one instead of the chroot one
             myarch = platform.uname()[5]
             mock_cmd.extend(['--arch', myarch])
         mock_cmd.append('--')
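For the noarch case patched above, the selection logic reduces to "first canonical
tag arch this host can also build". A hedged sketch of that loop in isolation
(pick_br_arch, the identity default for canon, and the ValueError are stand-ins
for the task method, koji.canonArch, and koji.BuildError):

    def pick_br_arch(host_arches, tag_arches, canon=lambda a: a):
        # index canonical host arches, then take the first tag arch we support
        host = set(canon(a) for a in host_arches.split())
        for br_arch in tag_arches.split():
            br_arch = canon(br_arch)
            if br_arch in host:
                return br_arch
        raise ValueError('host does not match tag arches: %s' % tag_arches)

    print(pick_br_arch('x86_64 i686', 'ppc64le x86_64'))  # -> x86_64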
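The arglist branch above relies on classic POSIX single-quote escaping: each
argument is wrapped in single quotes and any embedded quote becomes '\''. A small
self-contained check that the list structure really survives the shell
(shlex.split applies the same quoting rules a POSIX shell would):

    import shlex

    def shell_join(command):
        return ' '.join(["'%s'" % arg.replace("'", r"'\''") for arg in command])

    args = ['echo', "it's", 'two words']
    assert shlex.split(shell_join(args)) == args  # argv round-trips intact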
@@ -235,9 +236,9 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
             if mount.startswith(safe_root):
                 break
         else:
-            #no match
+            # no match
             raise koji.GenericError("read-write mount point is not safe: %s" % mount)
-        #normpath should have removed any .. dirs, but just in case...
+        # normpath should have removed any .. dirs, but just in case...
         if mount.find('/../') != -1:
             raise koji.GenericError("read-write mount point is not safe: %s" % mount)
 
@@ -266,7 +267,7 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
             else:
                 opts = opts.split(',')
             if 'bind' in opts:
-                #make sure dir exists
+                # make sure dir exists
                 if not os.path.isdir(dev):
                     error = koji.GenericError("No such directory or mount: %s" % dev)
                     break
@@ -297,7 +298,7 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
             with open(fn, 'r') as fslog:
                 for line in fslog.readlines():
                     mounts.add(line.strip())
-        #also, check /proc/mounts just in case
+        # also, check /proc/mounts just in case
         mounts |= set(scan_mounts(rootdir))
         mounts = sorted(mounts)  # deeper directories first
diff --git a/plugins/hub/runroot_hub.py b/plugins/hub/runroot_hub.py
index eacbd686..8dfda89e 100644
--- a/plugins/hub/runroot_hub.py
+++ b/plugins/hub/runroot_hub.py
@@ -1,4 +1,4 @@
-#koji hub plugin
+# koji hub plugin
 # There is a kojid plugin that goes with this hub plugin. The kojid builder
 # plugin has a config file. This hub plugin has no config file.
@@ -15,7 +15,6 @@ import kojihub
 from koji.context import context
 from koji.plugin import export
 
-
 __all__ = ('runroot',)
@@ -41,11 +40,11 @@ def runroot(tagInfo, arch, command, channel=None, **opts):
     tag = kojihub.get_tag(tagInfo, strict=True)
 
     if arch == 'noarch':
-        #not all arches can generate a proper buildroot for all tags
+        # not all arches can generate a proper buildroot for all tags
         if not tag['arches']:
             raise koji.GenericError('no arches defined for tag %s' % tag['name'])
 
-        #get all known arches for the system
+        # get all known arches for the system
         fullarches = kojihub.get_all_arches()
 
         tagarches = tag['arches'].split()
diff --git a/setup.py b/setup.py
index bbb7bcc4..a3443bcc 100644
--- a/setup.py
+++ b/setup.py
@@ -16,9 +16,9 @@ def get_install_requires():
         'requests',
         'requests-kerberos',
         'six',
-        #'libcomps',
-        #'rpm-py-installer',  # it is optional feature
-        #'rpm',
+        # 'libcomps',
+        # 'rpm-py-installer',  # it is an optional feature
+        # 'rpm',
     ]
     if sys.version_info[0] < 3:
         # optional auth library for older hubs
@@ -62,9 +62,9 @@ setup(
         'koji_cli_plugins': 'plugins/cli',
     },
     # doesn't make sense, as we have only example config
-    #data_files=[
-    #    ('/etc', ['cli/koji.conf']),
-    #],
+    # data_files=[
+    #     ('/etc', ['cli/koji.conf']),
+    # ],
     scripts=[
         'cli/koji',
         'util/koji-gc',
diff --git a/util/kojira b/util/kojira
index 23360203..fdf492b1 100755
--- a/util/kojira
+++ b/util/kojira
@@ -50,7 +50,7 @@ def getTag(session, tag, event=None):
     if (tag, event) in cache:
         ts, info = cache[(tag,event)]
         if now - ts < 600:
-            #use the cache
+            # use the cache
             return info
     info = session.getTag(tag, event=event)
     if info:
@@ -83,7 +83,7 @@ class ManagedRepo(object):
         self.first_seen = time.time()
         if self.current:
             order = self.session.getFullInheritance(self.tag_id, event=self.event_id)
-            #order may contain same tag more than once
+            # order may contain same tag more than once
             tags = {self.tag_id : 1}
             for x in order:
                 tags[x['parent_id']] = 1
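The getTag helper patched at the top of this kojira section is a plain time-based
memo: a hub answer is reused for ten minutes before getTag is called again. The
same pattern, self-contained (session stands in for an authenticated
koji.ClientSession):

    import time

    cache = {}

    def getTag(session, tag, event=None):
        now = time.time()
        if (tag, event) in cache:
            ts, info = cache[(tag, event)]
            if now - ts < 600:
                # use the cache
                return info
        info = session.getTag(tag, event=event)
        if info:
            cache[(tag, event)] = (now, info)
        return info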
@@ -156,13 +156,13 @@ class ManagedRepo(object):
           - timestamp really, really old
         """
         timeout = 36000
-        #XXX - config
+        # XXX - config
         if self.state != koji.REPO_INIT:
             return False
         age = time.time() - max(self.event_ts, self.first_seen)
-        #the first_seen timestamp is also factored in because a repo can be
-        #created from an older event and should not be expired based solely on
-        #that event's timestamp.
+        # the first_seen timestamp is also factored in because a repo can be
+        # created from an older event and should not be expired based solely on
+        # that event's timestamp.
         return age > timeout
 
     def tryDelete(self):
@@ -177,8 +177,8 @@ class ManagedRepo(object):
         lifetime = self.options.deleted_repo_lifetime
         # (should really be called expired_repo_lifetime)
         try:
-            #also check dir age. We do this because a repo can be created from an older event
-            #and should not be removed based solely on that event's timestamp.
+            # also check dir age. We do this because a repo can be created from an older event
+            # and should not be removed based solely on that event's timestamp.
             mtime = os.stat(path).st_mtime
         except OSError as e:
             if e.errno == 2:
@@ -200,7 +200,7 @@ class ManagedRepo(object):
         if self.state != koji.REPO_EXPIRED:
             raise koji.GenericError("Repo not expired")
         if self.session.repoDelete(self.repo_id) > 0:
-            #cannot delete, we are referenced by a buildroot
+            # cannot delete, we are referenced by a buildroot
             self.logger.debug("Cannot delete repo %s, still referenced" % self.repo_id)
             return False
         self.logger.info("Deleted repo %s" % self.repo_id)
@@ -299,9 +299,9 @@ class RepoManager(object):
             (childpid, status) = os.waitpid(pid, os.WNOHANG)
         except OSError as e:
             if e.errno != errno.ECHILD:
-                #should not happen
+                # should not happen
                 raise
-            #otherwise assume the process is gone
+            # otherwise assume the process is gone
             self.logger.info("%s: %s" % (prefix, e))
             return True
         if childpid != 0:
@@ -345,7 +345,7 @@ class RepoManager(object):
         repo_id = data['id']
         repo = self.repos.get(repo_id)
         if repo:
-            #we're already tracking it
+            # we're already tracking it
             if repo.state != data['state']:
                 self.logger.info('State changed for repo %s: %s -> %s'
                                  %(repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']]))
@@ -383,7 +383,7 @@ class RepoManager(object):
                 repo.current = False
                 if repo.expire_ts is None:
                     repo.expire_ts = time.time()
-                #also no point in further checking
+                # also no point in further checking
                 continue
             to_check.append(repo)
         if self.logger.isEnabledFor(logging.DEBUG):
@@ -441,7 +441,7 @@ class RepoManager(object):
         Also, warn about any oddities"""
 
         if self.delete_pids:
-            #skip
+            # skip
             return
         if not os.path.exists(topdir):
             self.logger.debug("%s doesn't exist, skipping", topdir)
@@ -466,14 +466,14 @@ class RepoManager(object):
                     self.logger.debug("%s/%s not an int, skipping", tagdir, repo_id)
                     continue
                 if repo_id in self.repos:
-                    #we're already managing it, no need to deal with it here
+                    # we're already managing it, no need to deal with it here
                     continue
                 repodir = "%s/%s" % (tagdir, repo_id)
                 try:
                     # lstat because it could be link to another volume
                     dirstat = os.lstat(repodir)
                 except OSError:
-                    #just in case something deletes the repo out from under us
+                    # just in case something deletes the repo out from under us
                     self.logger.debug("%s deleted already?!", repodir)
                     continue
                 symlink = False
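The expired() hunk above encodes exactly what the reworded comment says: age the
repo from whichever is later, its create event or the moment kojira first saw it.
Reduced to a function (the 36000-second timeout is the hardcoded value the XXX
comment wants made configurable):

    import time

    def repo_expired(event_ts, first_seen, timeout=36000):
        # a repo built from an old event is aged from when we first saw it
        age = time.time() - max(event_ts, first_seen)
        return age > timeout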
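The waitpid hunk above distinguishes three outcomes when polling a delete
subprocess, and only an errno other than ECHILD is a real failure. The same check
in isolation:

    import errno
    import os

    def proc_gone(pid):
        try:
            childpid, status = os.waitpid(pid, os.WNOHANG)
        except OSError as e:
            if e.errno != errno.ECHILD:
                # should not happen
                raise
            # otherwise assume the process is gone (reaped elsewhere)
            return True
        # nonzero childpid means it just exited; zero means still running
        return childpid != 0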
@@ -513,18 +513,18 @@ class RepoManager(object):
         stats = self.tag_use_stats.get(tag_id)
         now = time.time()
         if stats and now - stats['ts'] < 3600:
-            #use the cache
+            # use the cache
             return stats
         data = self.session.listBuildroots(tagID=tag_id,
                                            queryOpts={'order': '-create_event_id', 'limit' : 100})
-        #XXX magic number (limit)
+        # XXX magic number (limit)
         if data:
             tag_name = data[0]['tag_name']
         else:
             tag_name = "#%i" % tag_id
         stats = {'data': data, 'ts': now, 'tag_name': tag_name}
         recent = [x for x in data if now - x['create_ts'] < 3600 * 24]
-        #XXX magic number
+        # XXX magic number
         stats ['n_recent'] = len(recent)
         self.tag_use_stats[tag_id] = stats
         self.logger.debug("tag %s recent use count: %i" % (tag_name, len(recent)))
@@ -593,7 +593,7 @@ class RepoManager(object):
                 if n_deletes >= self.options.delete_batch_size:
                     break
                 if repo.expired():
-                    #try to delete
+                    # try to delete
                     if repo.tryDelete():
                         n_deletes += 1
                         del self.repos[repo.repo_id]
@@ -652,7 +652,7 @@ class RepoManager(object):
             t['build_tag'] for t in self.session.getBuildTargets()
             if not koji.util.multi_fnmatch(t['build_tag_name'], ignore)
         ])
-        #index repos by tag
+        # index repos by tag
         tag_repos = {}
         for repo in to_list(self.repos.values()):
             tag_repos.setdefault(repo.tag_id, []).append(repo)
@@ -931,7 +931,7 @@ def get_options():
         'repo_tasks_limit' : 10,
         'delete_batch_size' : 3,
         'deleted_repo_lifetime': 7*24*3600,
-        #XXX should really be called expired_repo_lifetime
+        # XXX should really be called expired_repo_lifetime
         'dist_repo_lifetime': 7*24*3600,
         'recent_tasks_lifetime': 600,
         'sleeptime' : 15,
@@ -1003,7 +1003,7 @@ if __name__ == "__main__":
             sys.stderr.write("Cannot write to logfile: %s\n" % options.logfile)
             sys.exit(1)
     koji.add_file_logger("koji", options.logfile)
-    #note we're setting logging for koji.*
+    # note we're setting logging for koji.*
     logger = logging.getLogger("koji")
     if options.debug:
         logger.setLevel(logging.DEBUG)
@@ -1024,7 +1024,7 @@ if __name__ == "__main__":
         session.login()
     elif koji.krbV and options.principal and options.keytab:
         session.krb_login(options.principal, options.keytab, options.ccache)
-    #get an exclusive session
+    # get an exclusive session
     try:
         session.exclusiveSession(force=options.force_lock)
     except koji.AuthLockError:
diff --git a/vm/fix_kojikamid.sh b/vm/fix_kojikamid.sh
index 5b0f5c17..f0063a72 100755
--- a/vm/fix_kojikamid.sh
+++ b/vm/fix_kojikamid.sh
@@ -1,10 +1,10 @@
 #!/bin/bash
 
-awk '/^## INSERT kojikamid dup/ {exit} {print $0}' kojikamid.py
+awk '/^# INSERT kojikamid dup #/ {exit} {print $0}' kojikamid.py
 
 for fn in ../koji/__init__.py ../koji/daemon.py
 do
-    awk '/^## END kojikamid dup/ {p=0} p {print $0} /^## BEGIN kojikamid dup/ {p=1}' $fn
+    awk '/^# END kojikamid dup #/ {p=0} p {print $0} /^# BEGIN kojikamid dup #/ {p=1}' $fn
 done
-awk 'p {print $0} /^## INSERT kojikamid dup/ {p=1}' kojikamid.py
+awk 'p {print $0} /^# INSERT kojikamid dup #/ {p=1}' kojikamid.py
diff --git a/vm/kojikamid.py b/vm/kojikamid.py
index 311b2e60..0911a1a9 100755
--- a/vm/kojikamid.py
+++ b/vm/kojikamid.py
@@ -54,12 +54,12 @@ MANAGER_PORT = 7000
 
 KOJIKAMID = True
 
-## INSERT kojikamid dup
+# INSERT kojikamid dup #
 
 class fakemodule(object):
     pass
 
-#make parts of the above insert accessible as koji.X
+# make parts of the above insert accessible as koji.X
 koji = fakemodule()
 koji.GenericError = GenericError  # noqa: F821
 koji.BuildError = BuildError  # noqa: F821
@@ -68,7 +68,7 @@ def encode_int(n):
     """If n is too large for a 32bit signed, convert it to a string"""
     if n <= 2147483647:
         return n
-    #else
+    # else
     return str(n)
 
 class WindowsBuild(object):
diff --git a/vm/kojivmd b/vm/kojivmd
index c0fa3ff6..983885f2 100755
--- a/vm/kojivmd
+++ b/vm/kojivmd
@@ -101,7 +101,7 @@ def get_options():
     if args:
         parser.error("incorrect number of arguments")
-        #not reached
+        # not reached
         assert False  # pragma: no cover
 
     # load local config
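The encode_int helper patched in vm/kojikamid.py above exists because XML-RPC
ints are 32-bit signed; anything larger has to travel as a string. The boundary
behavior, checked directly:

    def encode_int(n):
        """If n is too large for a 32bit signed, convert it to a string"""
        if n <= 2147483647:
            return n
        return str(n)

    assert encode_int(2 ** 31 - 1) == 2147483647
    assert encode_int(2 ** 31) == '2147483648'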
@@ -176,7 +176,7 @@ def get_options():
         if os.path.exists(fn):
             setattr(options, name, fn)
 
-    #make sure workdir exists
+    # make sure workdir exists
     if not os.path.exists(options.workdir):
         koji.ensuredir(options.workdir)
@@ -198,7 +198,7 @@ def main(options, session):
     tm = VMTaskManager(options, session)
     tm.findHandlers(globals())
     if options.plugin:
-        #load plugins
+        # load plugins
         pt = koji.plugin.PluginTracker(path=options.pluginpath.split(':'))
         for name in options.plugin:
             logger.info('Loading plugin: %s', name)
@@ -1084,7 +1084,7 @@ class VMTaskManager(TaskManager):
 
 if __name__ == "__main__":
     koji.add_file_logger("koji", "/var/log/kojivmd.log")
-    #note we're setting logging params for all of koji*
+    # note we're setting logging params for all of koji*
     options = get_options()
     if options.debug:
         logging.getLogger("koji").setLevel(logging.DEBUG)
@@ -1097,7 +1097,7 @@ if __name__ == "__main__":
     if options.admin_emails:
         koji.add_mail_logger("koji", options.admin_emails)
 
-    #start a session and login
+    # start a session and login
     session_opts = koji.grab_session_options(options)
     session = koji.ClientSession(options.server, session_opts)
     if options.cert and os.path.isfile(options.cert):
@@ -1131,14 +1131,14 @@ if __name__ == "__main__":
                 quit("Could not connect to Kerberos authentication service: '%s'" % e.args[1])
     else:
         quit("No username/password supplied and Kerberos missing or not configured")
-    #make session exclusive
+    # make session exclusive
     try:
         session.exclusiveSession(force=options.force_lock)
     except koji.AuthLockError:
         quit("Error: Unable to get lock. Trying using --force-lock")
     if not session.logged_in:
         quit("Error: Unknown login error")
-    #make sure it works
+    # make sure it works
     try:
         ret = session.echo("OK")
     except requests.exceptions.ConnectionError:
@@ -1148,7 +1148,7 @@ if __name__ == "__main__":
 
     # run main
     if options.daemon:
-        #detach
+        # detach
         koji.daemonize()
         main(options, session)
     elif not options.skip_main:
diff --git a/www/kojiweb/index.py b/www/kojiweb/index.py
index 07f911e1..19414bf0 100644
--- a/www/kojiweb/index.py
+++ b/www/kojiweb/index.py
@@ -45,7 +45,7 @@ from kojiweb.util import _genHTML, _getValidTokens, _initValues
 
 # Convenience definition of a commonly-used sort function
 _sortbyname = lambda x: x['name']
 
-#loggers
+# loggers
 authlogger = logging.getLogger('koji.auth')
 
 def _setUserCookie(environ, user):
@@ -790,7 +790,7 @@ def getfile(environ, taskID, name, volume='DEFAULT', offset=None, size=None):
         if size > (file_size - offset):
             size = file_size - offset
 
-    #environ['koji.headers'].append(['Content-Length', str(size)])
+    # environ['koji.headers'].append(['Content-Length', str(size)])
     return _chunk_file(server, environ, taskID, name, offset, size, volume)
diff --git a/www/kojiweb/wsgi_publisher.py b/www/kojiweb/wsgi_publisher.py
index 7f15a559..50e34503 100644
--- a/www/kojiweb/wsgi_publisher.py
+++ b/www/kojiweb/wsgi_publisher.py
@@ -44,7 +44,7 @@ class URLNotFound(ServerError):
 
 class Dispatcher(object):
 
     def __init__(self):
-        #we can't do much setup until we get a request
+        # we can't do much setup until we get a request
         self.firstcall = True
         self.options = {}
         self.startup_error = None
@@ -66,7 +66,7 @@ class Dispatcher(object):
         self.logger = logging.getLogger("koji.web")
 
         cfgmap = [
-            #option, type, default
+            # option, type, default
             ['SiteName', 'string', None],
             ['KojiHubURL', 'string', 'http://localhost/kojihub'],
             ['KojiFilesURL', 'string', 'http://localhost/kojifiles'],
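The getfile hunk in www/kojiweb/index.py above sits at the end of the handler's
offset/size normalization; the visible context clamps the requested size to what
remains after the offset. A hedged sketch of that clamping (the negative-offset
branch, reading from the end of the file, is an assumption not shown in the hunk):

    def clamp_range(file_size, offset=None, size=None):
        if offset is None:
            offset = 0
        elif offset < 0:
            # assumed: a negative offset reads the tail, like a negative seek
            offset = max(file_size + offset, 0)
        if size is None or size > file_size - offset:
            size = file_size - offset
        return offset, size

    print(clamp_range(1000, offset=900, size=500))  # -> (900, 100)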
@@ -156,7 +156,7 @@ class Dispatcher(object):
     def setup_logging2(self, environ):
         """Adjust logging based on configuration options"""
         opts = self.options
-        #determine log level
+        # determine log level
         level = opts['LogLevel']
         valid_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
         # the config value can be a single level name or a series of
@@ -172,7 +172,7 @@ class Dispatcher(object):
                 default = level
             if level not in valid_levels:
                 raise koji.GenericError("Invalid log level: %s" % level)
-            #all our loggers start with koji
+            # all our loggers start with koji
             if name == '':
                 name = 'koji'
                 default = level
@@ -187,7 +187,7 @@ class Dispatcher(object):
         if opts.get('KojiDebug'):
             logger.setLevel(logging.DEBUG)
         elif default is None:
-            #LogLevel did not configure a default level
+            # LogLevel did not configure a default level
             logger.setLevel(logging.WARNING)
         self.formatter = HubFormatter(opts['LogFormat'])
         self.formatter.environ = environ
@@ -213,7 +213,7 @@ class Dispatcher(object):
     def prep_handler(self, environ):
         path_info = environ['PATH_INFO']
         if not path_info:
-            #empty path info (no trailing slash) breaks our relative urls
+            # empty path info (no trailing slash) breaks our relative urls
             environ['koji.redirect'] = environ['REQUEST_URI'] + '/'
             raise ServerRedirect
         elif path_info == '/':
@@ -225,7 +225,7 @@ class Dispatcher(object):
         func = self.handler_index.get(method)
         if not func:
             raise URLNotFound
-        #parse form args
+        # parse form args
         data = {}
         fs = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ.copy(), keep_blank_values=True)
         for field in fs.list:
@@ -245,7 +245,7 @@ class Dispatcher(object):
         if not varkw:
             # remove any unexpected args
             data = dslice(data, args, strict=False)
-            #TODO (warning in header or something?)
+            # TODO (warning in header or something?)
 
         return func, data
@@ -318,7 +318,7 @@ class Dispatcher(object):
         except (NameError, AttributeError):
             tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
             self.logger.error(tb_str)
-            #fallback to simple error page
+            # fallback to simple error page
             return self.simple_error_page(message, err=tb_short)
         values = _initValues(environ, *desc)
         values['etype'] = etype
diff --git a/www/lib/kojiweb/util.py b/www/lib/kojiweb/util.py
index a318126d..f7709d8a 100644
--- a/www/lib/kojiweb/util.py
+++ b/www/lib/kojiweb/util.py
@@ -26,7 +26,7 @@ import hashlib
 import os
 import ssl
 import stat
-#a bunch of exception classes that explainError needs
+# a bunch of exception classes that explainError needs
 from socket import error as socket_error
 from xml.parsers.expat import ExpatError
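The setup_logging2 hunks in the wsgi_publisher.py section above belong to the
LogLevel parser: the option is either a bare level name, which becomes the
default for the koji logger, or space-separated logger:level pairs. A hedged
sketch of that parse (the pair syntax is inferred from the truncated context
comment and the name == '' branch; the function name is illustrative):

    def parse_log_levels(spec):
        valid_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
        default = None
        levels = {}
        for entry in spec.split():
            name, _, level = entry.rpartition(':')
            if level not in valid_levels:
                raise ValueError('Invalid log level: %s' % level)
            if name == '':
                # all our loggers start with koji
                name = 'koji'
                default = level
            levels[name] = level
        return default, levels

    print(parse_log_levels('WARNING koji.web:DEBUG'))
    # -> ('WARNING', {'koji': 'WARNING', 'koji.web': 'DEBUG'})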