flake8: apply E2 rules except E266
parent ce1f9928af
commit 05340b146b
28 changed files with 707 additions and 706 deletions
.flake8 (3 changes)
@@ -1,5 +1,6 @@
 [flake8]
-select = I,C,F,E1,E265
+select = I,C,F,E1,E2
+ignore = E266
 exclude =
     .git,
     __pycache__,
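
The E2xx pycodestyle checks selected here are all whitespace rules: E201/E202 (whitespace
inside brackets), E203 (whitespace before ',;:'), E211 (whitespace before a call's
parenthesis), E221/E222/E225/E226 (spacing around operators), E231 (missing whitespace
after ','), E241 (multiple spaces after ',;:'), E251 (spaces around a keyword '='), and
E261/E262/E265 (comment spacing). E266 ("too many leading '#' for block comment") stays
ignored. A minimal sketch of the cleanup pattern the rest of this commit applies, with
illustrative names rather than lines taken from the code:

    do_build(srpm,arch)        # E231: missing whitespace after ','
    opts = {'topurl' : url}    # E203: whitespace before ':'
    parser(version = "0.1")    # E251: unexpected spaces around keyword '='
    count +=1                  # E225: missing whitespace around operator

    # After the cleanup the same lines read:
    do_build(srpm, arch)
    opts = {'topurl': url}
    parser(version="0.1")
    count += 1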

builder/kojid (262 changes)
@@ -105,7 +105,7 @@ try:
     import pykickstart.parser as ksparser
     import pykickstart.handlers.control as kscontrol
     import pykickstart.errors as kserrors
-    import iso9660 # from pycdio
+    import iso9660  # from pycdio
     image_enabled = True
 except ImportError:  # pragma: no cover
     image_enabled = False
@@ -145,15 +145,15 @@ def main(options, session):
     def restart(*args):
         logger.warn("Initiating graceful restart")
         tm.restart_pending = True
-    signal.signal(signal.SIGTERM,shutdown)
-    signal.signal(signal.SIGUSR1,restart)
+    signal.signal(signal.SIGTERM, shutdown)
+    signal.signal(signal.SIGUSR1, restart)
     while 1:
         try:
             taken = False
             tm.updateBuildroots()
             tm.updateTasks()
             taken = tm.getNextTask()
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             logger.warn("Exiting")
             break
         except ServerRestart:
@@ -174,7 +174,7 @@ def main(options, session):
             # The load-balancing code in getNextTask() will prevent a single builder
             # from getting overloaded.
             time.sleep(options.sleeptime)
-        except (SystemExit,KeyboardInterrupt):
+        except (SystemExit, KeyboardInterrupt):
             logger.warn("Exiting")
             break
     logger.warn("Shutting down, please wait...")
@@ -185,15 +185,15 @@ def main(options, session):
 
 class BuildRoot(object):
 
-    def __init__(self,session,options,*args,**kwargs):
+    def __init__(self, session, options, *args, **kwargs):
         self.logger = logging.getLogger("koji.build.buildroot")
         self.session = session
         self.options = options
         if len(args) + len(kwargs) == 1:
             # manage an existing mock buildroot
-            self._load(*args,**kwargs)
+            self._load(*args, **kwargs)
         else:
-            self._new(*args,**kwargs)
+            self._new(*args, **kwargs)
 
     def _load(self, data):
         # manage an existing buildroot
@@ -263,7 +263,7 @@ class BuildRoot(object):
     def _writeMockConfig(self):
         # mock config
         configdir = '/etc/mock/koji'
-        configfile = "%s/%s.cfg" % (configdir,self.name)
+        configfile = "%s/%s.cfg" % (configdir, self.name)
         self.mockcfg = "koji/%s" % self.name
 
         opts = {}
@@ -296,7 +296,7 @@ class BuildRoot(object):
         output = koji.genMockConfig(self.name, self.br_arch, managed=True, **opts)
 
         # write config
-        with open(configfile,'w') as fo:
+        with open(configfile, 'w') as fo:
             fo.write(output)
 
     def _repositoryEntries(self, pi, plugin=False):
@@ -400,7 +400,7 @@ class BuildRoot(object):
 
     def mock(self, args):
         """Run mock"""
-        mockpath = getattr(self.options,"mockpath","/usr/bin/mock")
+        mockpath = getattr(self.options, "mockpath", "/usr/bin/mock")
         cmd = [mockpath, "-r", self.mockcfg]
         # if self.options.debug_mock:
         #     cmd.append('--debug')
@@ -520,13 +520,13 @@ class BuildRoot(object):
                 fd = os.open(outfile, flags, 0o666)
                 os.dup2(fd, 1)
                 os.dup2(fd, 2)
-                if os.getuid() == 0 and hasattr(self.options,"mockuser"):
+                if os.getuid() == 0 and hasattr(self.options, "mockuser"):
                     self.logger.info('Running mock as %s' % self.options.mockuser)
-                    uid,gid = pwd.getpwnam(self.options.mockuser)[2:4]
+                    uid, gid = pwd.getpwnam(self.options.mockuser)[2:4]
                     os.setgroups([grp.getgrnam('mock')[2]])
-                    os.setregid(gid,gid)
-                    os.setreuid(uid,uid)
-                os.execvp(cmd[0],cmd)
+                    os.setregid(gid, gid)
+                    os.setreuid(uid, uid)
+                os.execvp(cmd[0], cmd)
             except:
                 # diediedie
                 print("Failed to exec mock")
@@ -546,7 +546,7 @@ class BuildRoot(object):
             raise koji.BuildrootError("could not init mock buildroot, %s" % self._mockResult(rv))
         # log kernel version
         self.mock(['--chroot', 'uname -r'])
-        self.session.host.setBuildRootList(self.id,self.getPackageList())
+        self.session.host.setBuildRootList(self.id, self.getPackageList())
 
     def _mockResult(self, rv, logfile=None):
         if logfile:
@@ -559,7 +559,7 @@ class BuildRoot(object):
         return parseStatus(rv, 'mock') + msg
 
     def rebuild_srpm(self, srpm):
-        self.session.host.setBuildRootState(self.id,'BUILDING')
+        self.session.host.setBuildRootState(self.id, 'BUILDING')
 
         # unpack SRPM to tempdir
         srpm_dir = os.path.join(self.tmpdir(), 'srpm_unpacked')
@@ -592,7 +592,7 @@ class BuildRoot(object):
 
 
     def build_srpm(self, specfile, sourcedir, source_cmd):
-        self.session.host.setBuildRootState(self.id,'BUILDING')
+        self.session.host.setBuildRootState(self.id, 'BUILDING')
         if source_cmd:
             # call the command defined by source_cmd in the chroot so any required files not stored in
             # the SCM can be retrieved
@@ -619,16 +619,16 @@ class BuildRoot(object):
             self.expire()
             raise koji.BuildError("error building srpm, %s" % self._mockResult(rv))
 
-    def build(self,srpm,arch=None):
+    def build(self, srpm, arch=None):
         # run build
-        self.session.host.setBuildRootState(self.id,'BUILDING')
+        self.session.host.setBuildRootState(self.id, 'BUILDING')
         args = ['--no-clean']
         if arch:
             args.extend(['--target', arch])
         args.extend(['--rebuild', srpm])
         rv = self.mock(args)
 
-        self.session.host.updateBuildRootList(self.id,self.getPackageList())
+        self.session.host.updateBuildRootList(self.id, self.getPackageList())
         if rv:
             self.expire()
             raise koji.BuildError("error building package (arch %s), %s" % (arch, self._mockResult(rv)))
@@ -759,7 +759,7 @@ class BuildRoot(object):
         pathinfo = koji.PathInfo(topdir='')
 
         repodir = pathinfo.repo(self.repo_info['id'], self.repo_info['tag_name'])
-        opts = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])
+        opts = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])
         opts['tempdir'] = self.options.workdir
 
         # prefer librepo
@@ -792,8 +792,8 @@ class BuildRoot(object):
                 repodata = repoMDObject.RepoMD('ourrepo', fo)
             except:
                 raise koji.BuildError("Unable to parse repomd.xml file for %s" % os.path.join(repodir, self.br_arch))
-            data        = repodata.getData('origin')
-            pkgorigins  = data.location[1]
+            data = repodata.getData('origin')
+            pkgorigins = data.location[1]
         else:
             # shouldn't occur
             raise koji.GenericError("install librepo or yum")
@@ -808,11 +808,11 @@ class BuildRoot(object):
         if six.PY3:
             fo2 = io.TextIOWrapper(fo2, encoding='utf-8')
         for line in fo2:
-            parts=line.split(None, 2)
+            parts = line.split(None, 2)
             if len(parts) < 2:
                 continue
             # first field is formated by yum as [e:]n-v-r.a
-            nvra = "%(name)s-%(version)s-%(release)s.%(arch)s"  % koji.parse_NVRA(parts[0])
+            nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % koji.parse_NVRA(parts[0])
             origin_idx[nvra] = parts[1]
         fo2.close()
         # mergerepo starts from a local repo in the task workdir, so internal
@@ -872,7 +872,7 @@ class BuildRoot(object):
         return "%s%s" % (self.rootdir(), base)
 
     def expire(self):
-        self.session.host.setBuildRootState(self.id,'EXPIRED')
+        self.session.host.setBuildRootState(self.id, 'EXPIRED')
 
 
 class ChainBuildTask(BaseTaskHandler):
@@ -984,16 +984,16 @@ class BuildTask(BaseTaskHandler):
         dest_tag = taginfo['id']
         # policy checks...
         policy_data = {
-            'user_id' : task_info['owner'],
-            'source' : src,
-            'task_id' : self.id,
-            'build_tag' : build_tag, #id
-            'skip_tag' : bool(self.opts.get('skip_tag')),
+            'user_id': task_info['owner'],
+            'source': src,
+            'task_id': self.id,
+            'build_tag': build_tag,  # id
+            'skip_tag': bool(self.opts.get('skip_tag')),
         }
         if target_info:
             policy_data['target'] = target_info['id'],
         if not self.opts.get('skip_tag'):
-            policy_data['tag'] = dest_tag #id
+            policy_data['tag'] = dest_tag  # id
         if not SCM.is_scm_url(src) and not opts.get('scratch'):
             # let hub policy decide
             self.session.host.assertPolicy('build_from_srpm', policy_data)
@@ -1002,11 +1002,11 @@ class BuildTask(BaseTaskHandler):
             self.session.host.assertPolicy('build_from_repo_id', policy_data)
         if not repo_info:
             repo_info = self.getRepo(build_tag, builds=opts.get('wait_builds'),
-                                     wait=opts.get('wait_repo')) #(subtask)
+                                     wait=opts.get('wait_repo'))  # (subtask)
         self.event_id = self.session.getLastEvent()['id']
         srpm = self.getSRPM(src, build_tag, repo_info['id'])
         h = self.readSRPMHeader(srpm)
-        data = koji.get_header_fields(h, ['name','version','release','epoch'])
+        data = koji.get_header_fields(h, ['name', 'version', 'release', 'epoch'])
         data['task_id'] = self.id
         if getattr(self, 'source', False):
             data['source'] = self.source['source']
@@ -1014,7 +1014,7 @@ class BuildTask(BaseTaskHandler):
 
         extra_arches = None
         self.logger.info("Reading package config for %(name)s" % data)
-        pkg_cfg = self.session.getPackageConfig(dest_tag,data['name'],event=self.event_id)
+        pkg_cfg = self.session.getPackageConfig(dest_tag, data['name'], event=self.event_id)
         self.logger.debug("%r" % pkg_cfg)
         if pkg_cfg is not None:
             extra_arches = pkg_cfg.get('extra_arches')
@@ -1036,16 +1036,16 @@ class BuildTask(BaseTaskHandler):
         failany = (self.opts.get('fail_fast', False)
                    or not getattr(self.options, 'build_arch_can_fail', False))
         try:
-            self.extra_information = { "src": src, "data": data, "target": target }
-            srpm,rpms,brmap,logs = self.runBuilds(srpm, build_tag, archlist,
-                                                  repo_info['id'], failany=failany)
+            self.extra_information = {"src": src, "data": data, "target": target}
+            srpm, rpms, brmap, logs = self.runBuilds(srpm, build_tag, archlist,
+                                                     repo_info['id'], failany=failany)
 
             if opts.get('scratch'):
                 # scratch builds do not get imported
-                self.session.host.moveBuildToScratch(self.id,srpm,rpms,logs=logs)
+                self.session.host.moveBuildToScratch(self.id, srpm, rpms, logs=logs)
             else:
-                self.session.host.completeBuild(self.id,build_id,srpm,rpms,brmap,logs=logs)
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+                self.session.host.completeBuild(self.id, build_id, srpm, rpms, brmap, logs=logs)
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             # we do not trap these
             raise
         except:
@@ -1055,11 +1055,11 @@ class BuildTask(BaseTaskHandler):
             # reraise the exception
             raise
         if not self.opts.get('skip_tag') and not self.opts.get('scratch'):
-            self.tagBuild(build_id,dest_tag)
+            self.tagBuild(build_id, dest_tag)
 
     def getSRPM(self, src, build_tag, repo_id):
         """Get srpm from src"""
-        if isinstance(src,str):
+        if isinstance(src, str):
             if SCM.is_scm_url(src):
                 return self.getSRPMFromSCM(src, build_tag, repo_id)
             else:
@@ -1107,7 +1107,7 @@ class BuildTask(BaseTaskHandler):
         # srpm arg should be a path relative to <BASEDIR>/work
         self.logger.debug("Reading SRPM")
         relpath = "work/%s" % srpm
-        opts = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])
+        opts = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])
         opts['tempdir'] = self.workdir
         with koji.openRemoteFile(relpath, **opts) as fo:
             koji.check_rpm_file(fo)
@@ -1127,7 +1127,7 @@ class BuildTask(BaseTaskHandler):
         self.logger.debug('arches: %s' % arches)
         if extra:
             self.logger.debug('Got extra arches: %s' % extra)
-            arches = "%s %s" % (arches,extra)
+            arches = "%s %s" % (arches, extra)
         archlist = arches.split()
         self.logger.debug('base archlist: %r' % archlist)
         # - adjust arch list based on srpm macros
@@ -1138,14 +1138,14 @@ class BuildTask(BaseTaskHandler):
             archlist = buildarchs
             self.logger.debug('archlist after buildarchs: %r' % archlist)
         if exclusivearch:
-            archlist = [ a for a in archlist if a in exclusivearch ]
+            archlist = [a for a in archlist if a in exclusivearch]
             self.logger.debug('archlist after exclusivearch: %r' % archlist)
         if excludearch:
-            archlist = [ a for a in archlist if a not in excludearch ]
+            archlist = [a for a in archlist if a not in excludearch]
             self.logger.debug('archlist after excludearch: %r' % archlist)
         # noarch is funny
         if 'noarch' not in excludearch and \
-           ( 'noarch' in buildarchs or 'noarch' in exclusivearch ):
+           ('noarch' in buildarchs or 'noarch' in exclusivearch):
             archlist.append('noarch')
         override = self.opts.get('arch_override')
         if self.opts.get('scratch') and override:
@@ -1187,9 +1187,9 @@ class BuildTask(BaseTaskHandler):
         excludearch = [koji.canonArch(a) for a in excludearch]
         archlist = list(tag_arches)
         if exclusivearch:
-            archlist = [ a for a in archlist if a in exclusivearch ]
+            archlist = [a for a in archlist if a in exclusivearch]
         if excludearch:
-            archlist = [ a for a in archlist if a not in excludearch ]
+            archlist = [a for a in archlist if a not in excludearch]
         if not archlist:
             raise koji.BuildError("No valid arches were found. tag %r, "
                                   "exclusive %r, exclude %r" % (tag_arches,
@@ -1231,13 +1231,13 @@ class BuildTask(BaseTaskHandler):
         built_srpm = None
         for (arch, task_id) in six.iteritems(subtasks):
             result = results[task_id]
-            self.logger.debug("DEBUG: %r : %r " % (arch,result,))
+            self.logger.debug("DEBUG: %r : %r " % (arch, result,))
             brootid = result['brootid']
             for fn in result['rpms']:
                 rpms.append(fn)
                 brmap[fn] = brootid
             for fn in result['logs']:
-                logs.setdefault(arch,[]).append(fn)
+                logs.setdefault(arch, []).append(fn)
             if result['srpms']:
                 if built_srpm:
                     raise koji.BuildError("multiple builds returned a srpm. task %i" % self.id)
@@ -1249,14 +1249,14 @@ class BuildTask(BaseTaskHandler):
         else:
             raise koji.BuildError("could not find a built srpm")
 
-        return srpm,rpms,brmap,logs
+        return srpm, rpms, brmap, logs
 
-    def tagBuild(self,build_id,dest_tag):
+    def tagBuild(self, build_id, dest_tag):
         # XXX - need options to skip tagging and to force tagging
         # create the tagBuild subtask
         # this will handle the "post tests"
         task_id = self.session.host.subtask(method='tagBuild',
-                                            arglist=[dest_tag,build_id,False,None,True],
+                                            arglist=[dest_tag, build_id, False, None, True],
                                             label='tag',
                                             parent=self.id,
                                             arch='noarch')
@@ -1377,7 +1377,7 @@ class BuildArchTask(BaseBuildTask):
 
         # run build
         self.logger.debug("Running build")
-        broot.build(fn,arch)
+        broot.build(fn, arch)
 
         # extract results
         resultdir = broot.resultdir()
@@ -1424,7 +1424,7 @@ class BuildArchTask(BaseBuildTask):
         # upload files to storage server
         uploadpath = broot.getUploadPath()
         for f in rpm_files:
-            self.uploadFile("%s/%s" % (resultdir,f))
+            self.uploadFile("%s/%s" % (resultdir, f))
         self.logger.debug("keep srpm %i %s %s" % (self.id, keep_srpm, opts))
         if keep_srpm:
             if len(srpm_files) == 0:
@@ -1433,19 +1433,19 @@ class BuildArchTask(BaseBuildTask):
                 raise koji.BuildError("multiple srpm files found for task %i: %s" % (self.id, srpm_files))
 
             # Run sanity checks. Any failures will throw a BuildError
-            self.srpm_sanity_checks("%s/%s" % (resultdir,srpm_files[0]))
+            self.srpm_sanity_checks("%s/%s" % (resultdir, srpm_files[0]))
 
-            self.logger.debug("uploading %s/%s to %s" % (resultdir,srpm_files[0], uploadpath))
-            self.uploadFile("%s/%s" % (resultdir,srpm_files[0]))
+            self.logger.debug("uploading %s/%s to %s" % (resultdir, srpm_files[0], uploadpath))
+            self.uploadFile("%s/%s" % (resultdir, srpm_files[0]))
         if rpm_files:
-            ret['rpms'] = [ "%s/%s" % (uploadpath,f) for f in rpm_files ]
+            ret['rpms'] = ["%s/%s" % (uploadpath, f) for f in rpm_files]
         else:
             ret['rpms'] = []
         if keep_srpm:
-            ret['srpms'] = [ "%s/%s" % (uploadpath,f) for f in srpm_files ]
+            ret['srpms'] = ["%s/%s" % (uploadpath, f) for f in srpm_files]
         else:
            ret['srpms'] = []
-        ret['logs'] = [ "%s/%s" % (uploadpath,f) for f in log_files ]
+        ret['logs'] = ["%s/%s" % (uploadpath, f) for f in log_files]
         if rpmdiff_hash[self.id]:
             self.uploadFile(noarch_hash_path)
 
@@ -1563,7 +1563,7 @@ class BuildMavenTask(BaseBuildTask):
             st = os.lstat(filepath)
             mtime = time.localtime(st.st_mtime)
             info = zipfile.ZipInfo(filepath[roottrim:])
-            info.external_attr |= 0o120000 << 16 # symlink file type
+            info.external_attr |= 0o120000 << 16  # symlink file type
             info.compress_type = zipfile.ZIP_STORED
             info.date_time = mtime[:6]
             zfo.writestr(info, content)
@@ -2345,7 +2345,7 @@ class TagBuildTask(BaseTaskHandler):
                 # computationally expensive 'post' tests.
 
                 # XXX - add more post tests
-                self.session.host.tagBuild(self.id,tag_id,build_id,force=force,fromtag=fromtag)
+                self.session.host.tagBuild(self.id, tag_id, build_id, force=force, fromtag=fromtag)
                 self.session.host.tagNotification(True, tag_id, fromtag, build_id, user_id, ignore_success)
             except Exception as e:
                 exctype, value = sys.exc_info()[:2]
@@ -2478,7 +2478,7 @@ class BuildBaseImageTask(BuildImageTask):
                 self.session.host.completeImageBuild(self.id, bld_info['id'],
                                                      results)
 
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             # we do not trap these
             raise
         except:
@@ -2564,7 +2564,7 @@ class BuildApplianceTask(BuildImageTask):
             else:
                 self.session.host.moveImageBuildToScratch(self.id, results)
 
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             # we do not trap these
             raise
         except:
@@ -2648,7 +2648,7 @@ class BuildLiveCDTask(BuildImageTask):
             else:
                 self.session.host.moveImageBuildToScratch(self.id, results)
 
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             # we do not trap these
             raise
         except:
@@ -2822,7 +2822,7 @@ class ImageTask(BaseTaskHandler):
     Methods = []
     # default to bind mounting /dev, but allow subclasses to change
     # this
-    bind_opts = {'dirs' : {'/dev' : '/dev',}}
+    bind_opts = {'dirs': {'/dev': '/dev', }}
 
     def makeImgBuildRoot(self, buildtag, repoinfo, arch, inst_group):
         """
@@ -2887,8 +2887,8 @@ class ImageTask(BaseTaskHandler):
         else:
             kspath = self.localPath("work/%s" % ksfile)
 
-        self.uploadFile(kspath) # upload the original ks file
-        return kspath # full absolute path to the file in the chroot
+        self.uploadFile(kspath)  # upload the original ks file
+        return kspath  # full absolute path to the file in the chroot
 
     def readKickstart(self, kspath, opts):
         """
@@ -2941,7 +2941,7 @@ class ImageTask(BaseTaskHandler):
         # in the kickstart file. If --repo wasn't specified, then we use the
         # repo associated with the target passed in initially.
         repo_class = kscontrol.dataMap[self.ks.version]['RepoData']
-        self.ks.handler.repo.repoList = [] # delete whatever the ks file told us
+        self.ks.handler.repo.repoList = []  # delete whatever the ks file told us
         if opts.get('repo'):
             user_repos = opts['repo']
             if isinstance(user_repos, six.string_types):
@@ -2973,7 +2973,7 @@ class ImageTask(BaseTaskHandler):
         if not os.path.exists(kskoji):
             raise koji.LiveCDError("KS file missing: %s" % kskoji)
         self.uploadFile(kskoji)
-        return broot.path_without_to_within(kskoji) # absolute path within chroot
+        return broot.path_without_to_within(kskoji)  # absolute path within chroot
 
     def getImagePackages(self, cachepath):
         """
@@ -3281,7 +3281,7 @@ class LiveCDTask(ImageTask):
         if not opts.get('scratch'):
             hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),
                                                          cachedir[1:]))
-            imgdata ['rpmlist'] = hdrlist
+            imgdata['rpmlist'] = hdrlist
             broot.markExternalRPMs(hdrlist)
 
         broot.expire()
@@ -3432,7 +3432,7 @@ class LiveMediaTask(ImageTask):
                     '--iso-only',
                     ])
 
-        isoname='%s-%s-%s-%s.iso' % (name, arch, version, release)
+        isoname = '%s-%s-%s-%s.iso' % (name, arch, version, release)
         cmd.extend(['--iso-name', isoname,
                     '--releasever', version,
                     ])
@@ -3514,7 +3514,7 @@ class LiveMediaTask(ImageTask):
             # (getImagePackages doesn't work here)
             # hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),
             #                                              cachedir[1:]))
-            imgdata ['rpmlist'] = []
+            imgdata['rpmlist'] = []
             # broot.markExternalRPMs(hdrlist)
 
         broot.expire()
@@ -3557,15 +3557,15 @@ class OzImageTask(BaseTaskHandler):
                                      srcdir=scmsrcdir)
             kspath = os.path.join(scmsrcdir, os.path.basename(ksfile))
         else:
-            tops = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])
+            tops = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])
             tops['tempdir'] = self.workdir
             with koji.openRemoteFile(ksfile, **tops) as ks_src:
                 kspath = os.path.join(self.workdir, os.path.basename(ksfile))
                 with open(kspath, 'wb') as ks_dest:
                     ks_dest.write(ks_src.read())
         self.logger.debug('uploading kickstart from here: %s' % kspath)
-        self.uploadFile(kspath) # upload the original ks file
-        return kspath # absolute path to the ks file
+        self.uploadFile(kspath)  # upload the original ks file
+        return kspath  # absolute path to the ks file
 
     def readKickstart(self, kspath):
         """
@@ -3611,7 +3611,7 @@ class OzImageTask(BaseTaskHandler):
         # url with --repo, then we substitute that in for the repo(s) specified
         # in the kickstart file. If --repo wasn't specified, then we use the
         # repo associated with the target passed in initially.
-        ks.handler.repo.repoList = [] # delete whatever the ks file told us
+        ks.handler.repo.repoList = []  # delete whatever the ks file told us
         repo_class = kscontrol.dataMap[ks.version]['RepoData']
         # TODO: sensibly use "url" and "repo" commands in kickstart
         if self.opts.get('repo'):
@@ -3654,7 +3654,7 @@ class OzImageTask(BaseTaskHandler):
         # put the new ksfile in the output directory
         if not os.path.exists(kspath):
             raise koji.BuildError("KS file missing: %s" % kspath)
-        self.uploadFile(kspath) # upload the modified ks file
+        self.uploadFile(kspath)  # upload the modified ks file
         return kspath
 
     def makeConfig(self):
@@ -3704,7 +3704,7 @@ class OzImageTask(BaseTaskHandler):
         # image and attempt to ssh in. This breaks docker image creation.
         # TODO: intelligently guess the distro based on the install tree URL
         distname, distver = self.parseDistro(self.opts.get('distro'))
-        if self.arch in ['armhfp','armv7hnl','armv7hl']:
+        if self.arch in ['armhfp', 'armv7hnl', 'armv7hl']:
             arch = 'armv7l'
         else:
             arch = self.arch
@@ -3841,22 +3841,22 @@ class BaseImageTask(OzImageTask):
         Call out to ImageFactory to build the image(s) we want. Returns a dict
         of details for each image type we had to ask ImageFactory to build
         """
-        fcalls = {'raw':    self._buildBase,
+        fcalls = {'raw': self._buildBase,
                   'raw-xz': self._buildXZ,
                   'tar-gz': self._buildTarGZ,
                   'liveimg-squashfs': self._buildSquashfs,
-                  'vmdk':   self._buildConvert,
-                  'vdi':    self._buildConvert,
-                  'qcow':   self._buildConvert,
+                  'vmdk': self._buildConvert,
+                  'vdi': self._buildConvert,
+                  'qcow': self._buildConvert,
                   'qcow2': self._buildConvert,
-                  'vpc':    self._buildConvert,
-                  'rhevm-ova':   self._buildOVA,
+                  'vpc': self._buildConvert,
+                  'rhevm-ova': self._buildOVA,
                   'vsphere-ova': self._buildOVA,
                   'vagrant-virtualbox': self._buildOVA,
-                  'vagrant-libvirt':  self._buildOVA,
+                  'vagrant-libvirt': self._buildOVA,
                   'vagrant-vmware-fusion': self._buildOVA,
                   'vagrant-hyperv': self._buildOVA,
-                  'docker':  self._buildDocker
+                  'docker': self._buildDocker
                   }
         # add a handler to the logger so that we capture ImageFactory's logging
         self.fhandler = logging.FileHandler(self.ozlog)
@@ -3865,7 +3865,7 @@ class BaseImageTask(OzImageTask):
         self.tlog.setLevel(logging.DEBUG)
         self.tlog.addHandler(self.fhandler)
         images = {}
-        random.seed() # necessary to ensure a unique mac address
+        random.seed()  # necessary to ensure a unique mac address
         params = {'install_script': str(ks.handler),
                   'offline_icicle': True}
         # build the base (raw) image
@@ -3925,7 +3925,7 @@ class BaseImageTask(OzImageTask):
             if scrnshot:
                 ext = scrnshot[-3:]
                 self.uploadFile(scrnshot, remoteName='screenshot.%s' % ext)
-            image.os_plugin.abort() # forcibly tear down the VM
+            image.os_plugin.abort()  # forcibly tear down the VM
             # TODO abort when a task is CANCELLED
             if not self.session.checkUpload('', os.path.basename(self.ozlog)):
                 self.tlog.removeHandler(self.fhandler)
@@ -3972,7 +3972,7 @@ class BaseImageTask(OzImageTask):
         self.logger.debug('templates: %s' % template)
         self.logger.debug('pre-merge params: %s' % params)
         # We enforce various things related to the ks file - do not allow override
-        self._mergeFactoryParams(params, [ 'install_script' ])
+        self._mergeFactoryParams(params, ['install_script'])
         self.logger.debug('post-merge params: %s' % params)
         base = self.bd.builder_for_base_image(template, parameters=params)
         if wait:
@@ -4088,22 +4088,22 @@ class BaseImageTask(OzImageTask):
         if format == 'vagrant-virtualbox':
             format = 'vsphere-ova'
             img_opts['vsphere_ova_format'] = 'vagrant-virtualbox'
-            fixed_params = [ 'vsphere_ova_format' ]
+            fixed_params = ['vsphere_ova_format']
         if format == 'vagrant-libvirt':
             format = 'rhevm-ova'
             img_opts['rhevm_ova_format'] = 'vagrant-libvirt'
-            fixed_params = [ 'rhevm_ova_format' ]
+            fixed_params = ['rhevm_ova_format']
         if format == 'vagrant-vmware-fusion':
             format = 'vsphere-ova'
             img_opts['vsphere_ova_format'] = 'vagrant-vmware-fusion'
             # The initial disk image transform for VMWare Fusion/Workstation requires a "standard" VMDK
             # not the stream oriented format used for VirtualBox or regular VMWare OVAs
             img_opts['vsphere_vmdk_format'] = 'standard'
-            fixed_params = [ 'vsphere_ova_format', 'vsphere_vmdk_format' ]
+            fixed_params = ['vsphere_ova_format', 'vsphere_vmdk_format']
         if format == 'vagrant-hyperv':
             format = 'hyperv-ova'
             img_opts['hyperv_ova_format'] = 'hyperv-vagrant'
-            fixed_params = [ 'hyperv_ova_format' ]
+            fixed_params = ['hyperv_ova_format']
         targ = self._do_target_image(self.base_img.base_image.identifier,
                                      format.replace('-ova', ''), img_opts=img_opts, fixed_params=fixed_params)
         targ2 = self._do_target_image(targ.target_image.identifier, 'OVA',
@@ -4177,7 +4177,7 @@ class BaseImageTask(OzImageTask):
         cmd = ['/usr/bin/qemu-img', 'convert', '-f', 'raw', '-O',
                format, self.base_img.base_image.data, newimg]
         if format == 'qcow':
-            cmd.insert(2, '-c') # enable compression for qcow images
+            cmd.insert(2, '-c')  # enable compression for qcow images
         if format == 'qcow2':
             # qemu-img changed its default behavior at some point to generate a
             # v3 image when the requested output format is qcow2. We don't
@@ -4262,14 +4262,14 @@ class BaseImageTask(OzImageTask):
         for p in icicle.getElementsByTagName('extra'):
             bits = p.firstChild.nodeValue.split(',')
             rpm = {
-                'name':         bits[0],
-                'version':      bits[1],
-                'release':      bits[2],
-                'arch':         bits[3],
+                'name': bits[0],
+                'version': bits[1],
+                'release': bits[2],
+                'arch': bits[3],
                 # epoch is a special case, as usual
-                'size':         int(bits[5]),
-                'payloadhash':  bits[6],
-                'buildtime':    int(bits[7])
+                'size': int(bits[5]),
+                'payloadhash': bits[6],
+                'buildtime': int(bits[7])
             }
             if rpm['name'] in ['buildsys-build', 'gpg-pubkey']:
                 continue
@@ -4369,15 +4369,15 @@ class BuildIndirectionImageTask(OzImageTask):
                                      srcdir=scmsrcdir)
             final_path = os.path.join(scmsrcdir, os.path.basename(filepath))
         else:
-            tops = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])
+            tops = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])
             tops['tempdir'] = self.workdir
             final_path = os.path.join(self.workdir, os.path.basename(filepath))
             with koji.openRemoteFile(filepath, **tops) as remote_fileobj:
                 with open(final_path, 'w') as final_fileobj:
                     shutil.copyfileobj(remote_fileobj, final_fileobj)
         self.logger.debug('uploading retrieved file from here: %s' % final_path)
-        self.uploadFile(final_path) # upload the original ks file
-        return final_path # absolute path to the ks file
+        self.uploadFile(final_path)  # upload the original ks file
+        return final_path  # absolute path to the ks file
 
     def handler(self, opts):
         """Governing task for building an image with two other images using Factory Indirection"""
@@ -4406,7 +4406,7 @@ class BuildIndirectionImageTask(OzImageTask):
         task_diskimage = _match_name(result['files'], ".*qcow2$")
         task_tdl = _match_name(result['files'], "tdl.*xml")
 
-        task_dir = os.path.join(koji.pathinfo.work(),koji.pathinfo.taskrelpath(task_id))
+        task_dir = os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(task_id))
         diskimage_full = os.path.join(task_dir, task_diskimage)
         tdl_full = os.path.join(task_dir, task_tdl)
 
@@ -4443,7 +4443,7 @@ class BuildIndirectionImageTask(OzImageTask):
             raise koji.BuildError("Could not retrieve archives for build (%s) from NVR (%s)" %
                                   (build['id'], nvr))
 
-        buildfiles = [ x['filename'] for x in buildarchives ]
+        buildfiles = [x['filename'] for x in buildarchives]
         builddir = koji.pathinfo.imagebuild(build)
 
         def _match_name(inlist, namere):
@@ -4509,7 +4509,7 @@ class BuildIndirectionImageTask(OzImageTask):
         release = self.getRelease(name, version)
         if '-' in version:
             raise koji.ApplianceError('The Version may not have a hyphen')
-        if '-' in  release:
+        if '-' in release:
             raise koji.ApplianceError('The Release may not have a hyphen')
 
         indirection_template = self.fetchHubOrSCM(opts.get('indirection_template'),
@@ -4570,8 +4570,8 @@ class BuildIndirectionImageTask(OzImageTask):
             results_loc = "/" + results_loc
         params = {'utility_image': str(utility_factory_image.identifier),
                   'utility_customizations': utility_customizations,
-                  'results_location': results_loc }
-        random.seed() # necessary to ensure a unique mac address
+                  'results_location': results_loc}
+        random.seed()  # necessary to ensure a unique mac address
         try:
             try:
                 # Embedded deep debug option - if template is just the string MOCK
@@ -4584,7 +4584,7 @@ class BuildIndirectionImageTask(OzImageTask):
                     target.target_image = target_image
                     with open(target_image.data, "w") as f:
                         f.write("Mock build from task ID: %s" % self.id)
-                    target_image.status='COMPLETE'
+                    target_image.status = 'COMPLETE'
                 else:
                     target = bd.builder_for_target_image('indirection',
                                                          image_id=base_factory_image.identifier,
@@ -4609,18 +4609,18 @@ class BuildIndirectionImageTask(OzImageTask):
 
             self.uploadFile(target.target_image.data, remoteName=os.path.basename(results_loc))
 
-            myresults = { }
+            myresults = {}
             myresults['task_id'] = self.id
-            myresults['files'] = [ os.path.basename(results_loc) ]
-            myresults['logs'] = [ os.path.basename(ozlog) ]
+            myresults['files'] = [os.path.basename(results_loc)]
+            myresults['logs'] = [os.path.basename(ozlog)]
             myresults['arch'] = opts['arch']
             # TODO: This should instead track the two input images: base and utility
-            myresults['rpmlist'] = [ ]
+            myresults['rpmlist'] = []
 
             # This is compatible with some helper methods originally implemented for the base
             # image build. In the original usage, the dict contains an entry per build arch
             # TODO: If adding multiarch support, keep this in mind
-            results = { str(self.id): myresults }
+            results = {str(self.id): myresults}
             self.logger.debug('Image Results for hub: %s' % results)
 
             if opts['scratch']:
|
@ -4640,7 +4640,7 @@ class BuildIndirectionImageTask(OzImageTask):
|
|||
report = ''
|
||||
if opts.get('scratch'):
|
||||
respath = ', '.join(
|
||||
[os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(tid)) for tid in [self.id] ])
|
||||
[os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(tid)) for tid in [self.id]])
|
||||
report += 'Scratch '
|
||||
else:
|
||||
respath = koji.pathinfo.imagebuild(bld_info)
|
||||
|
|
@@ -4773,7 +4773,7 @@ class BuildSRPMFromSCMTask(BaseBuildTask):
                     'setup_dns': True,
                     'repo_id': repo_id}
         if self.options.scm_credentials_dir is not None and os.path.isdir(self.options.scm_credentials_dir):
-            rootopts['bind_opts'] = {'dirs' : {self.options.scm_credentials_dir : '/credentials',}}
+            rootopts['bind_opts'] = {'dirs': {self.options.scm_credentials_dir: '/credentials', }}
             # Force internal_dev_setup back to true because bind_opts is used to turn it off
             rootopts['internal_dev_setup'] = True
         br_arch = self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id))
@@ -5092,9 +5092,9 @@ Build Info: %(weburl)s/buildinfo?buildID=%(build_id)i\r
 
         failure = failure_info or cancel_info or ''
 
-        tasks = {'failed' : [task for task in task_data.values() if task['state'] == 'failed'],
-                 'canceled' : [task for task in task_data.values() if task['state'] == 'canceled'],
-                 'closed' : [task for task in task_data.values() if task['state'] == 'closed']}
+        tasks = {'failed': [task for task in task_data.values() if task['state'] == 'failed'],
+                 'canceled': [task for task in task_data.values() if task['state'] == 'canceled'],
+                 'closed': [task for task in task_data.values() if task['state'] == 'closed']}
 
         srpms = []
         for taskinfo in task_data.values():
@@ -5143,7 +5143,7 @@ Build Info: %(weburl)s/buildinfo?buildID=%(build_id)i\r
         output += "\r\n"
         output += "\r\n"
 
-        changelog = koji.util.formatChangelog(self.session.getChangelogEntries(build_id, queryOpts={'limit': 3})).replace("\n","\r\n")
+        changelog = koji.util.formatChangelog(self.session.getChangelogEntries(build_id, queryOpts={'limit': 3})).replace("\n", "\r\n")
         if changelog:
             changelog = "Changelog:\r\n%s" % changelog
 
@@ -5230,7 +5230,7 @@ class NewRepoTask(BaseTaskHandler):
         results = self.wait(to_list(subtasks.values()), all=True, failany=True)
         for (arch, task_id) in six.iteritems(subtasks):
             data[arch] = results[task_id]
-            self.logger.debug("DEBUG: %r : %r " % (arch,data[arch],))
+            self.logger.debug("DEBUG: %r : %r " % (arch, data[arch],))
 
         # finalize
         kwargs = {}
@@ -5448,7 +5448,7 @@ class createDistRepoTask(BaseTaskHandler):
         "ppc64": ("ppc64p7", "ppc64pseries", "ppc64iseries", "ppc64", "noarch"),
         "ppc64le": ("ppc64le", "noarch"),
         "s390": ("s390", "noarch"),
-        "s390x":  ("s390x", "noarch"),
+        "s390x": ("s390x", "noarch"),
         "sparc": ("sparcv9v", "sparcv9", "sparcv8", "sparc", "noarch"),
         "sparc64": ("sparc64v", "sparc64", "noarch"),
         "alpha": ("alphaev6", "alphaev56", "alphaev5", "alpha", "noarch"),
@@ -5703,7 +5703,7 @@ enabled=1
         self.session.uploadWrapper(yconfig_path, self.uploadpath,
                                    os.path.basename(yconfig_path))
         conf = dnf.conf.Conf()
-        conf.reposdir = [] # don't use system repos at all
+        conf.reposdir = []  # don't use system repos at all
         conf.read(yconfig_path)
         dnfbase = dnf.Base(conf)
         if hasattr(koji.arch, 'ArchStorage'):
@@ -6243,7 +6243,7 @@ def get_options():
                 defaults[name] = int(value)
             except ValueError:
                 quit("value for %s option must be a valid integer" % name)
-        elif name in ['offline_retry', 'use_createrepo_c',  'createrepo_skip_stat',
+        elif name in ['offline_retry', 'use_createrepo_c', 'createrepo_skip_stat',
                       'createrepo_update', 'use_fast_upload', 'support_rpm_source_layout',
                       'krb_rdns', 'krb_canon_host', 'build_arch_can_fail', 'no_ssl_verify',
                       'log_timestamps']:

builder/mergerepos
@@ -47,7 +47,7 @@ EXPAND_ARCHES = {
     'alpha': ['alphaev4', 'alphaev45', 'alphaev5', 'alphaev56',
               'alphapca56', 'alphaev6', 'alphaev67', 'alphaev68', 'alphaev7'],
     'armhfp': ['armv7hl', 'armv7hnl', 'armv6hl', 'armv6hnl'],
-    'arm': ['armv5tel', 'armv5tejl', 'armv6l','armv7l'],
+    'arm': ['armv5tel', 'armv5tejl', 'armv6l', 'armv7l'],
     'sh4': ['sh4a']
 }
 
@@ -64,7 +64,7 @@ def parse_args(args):
 
     mergerepos --repo=url --repo=url --outputdir=/some/path"""
 
-    parser = OptionParser(version = "mergerepos 0.1", usage=usage)
+    parser = OptionParser(version="mergerepos 0.1", usage=usage)
     # query options
     parser.add_option("-r", "--repo", dest="repos", default=[], action="append",
                       help="repo url")
@@ -158,7 +158,7 @@ class RepoMerge(object):
         # in the repolist
         count = 0
         for r in self.repolist:
-            count +=1
+            count += 1
             rid = 'repo%s' % count
             sys.stderr.write('Adding repo: %s\n' % r)
             n = self.yumbase.add_enable_repo(rid, baseurls=[r])

cli/koji (24 changes)
@@ -193,9 +193,9 @@ def get_options():
         return options, '_list_commands', [0, '']
 
     aliases = {
-        'cancel-task' : 'cancel',
-        'cxl' : 'cancel',
-        'list-commands' : 'help',
+        'cancel-task': 'cancel',
+        'cxl': 'cancel',
+        'list-commands': 'help',
         'move-pkg': 'move-build',
         'move': 'move-build',
         'latest-pkg': 'latest-build',
@@ -252,7 +252,7 @@ def fix_pyver(options, logger):
     pyver = getattr(options, 'pyver', None)
     if not pyver:
         return
-    if pyver not in [2,3]:
+    if pyver not in [2, 3]:
         logger.warning('Invalid python version requested: %s', pyver)
     if sys.version_info[0] == pyver:
         return
@@ -278,20 +278,20 @@ def list_commands(categories_chosen=None):
         categories_chosen = list(categories_chosen)
     categories_chosen.sort()
     handlers = []
-    for name,value in globals().items():
+    for name, value in globals().items():
         if name.startswith('handle_'):
-            alias = name.replace('handle_','')
-            alias = alias.replace('_','-')
-            handlers.append((alias,value))
+            alias = name.replace('handle_', '')
+            alias = alias.replace('_', '-')
+            handlers.append((alias, value))
         elif name.startswith('anon_handle_'):
-            alias = name.replace('anon_handle_','')
-            alias = alias.replace('_','-')
-            handlers.append((alias,value))
+            alias = name.replace('anon_handle_', '')
+            alias = alias.replace('_', '-')
+            handlers.append((alias, value))
     handlers.sort()
     print(_("Available commands:"))
     for category in categories_chosen:
         print(_("\n%s:" % categories[category]))
-        for alias,handler in handlers:
+        for alias, handler in handlers:
             desc = handler.__doc__ or ''
             if desc.startswith('[%s] ' % category):
                 desc = desc[len('[%s] ' % category):]

cli/koji_cli/commands.py
@@ -58,7 +58,7 @@ except ImportError:  # pragma: no cover
 def _printable_unicode(s):
     if six.PY2:
         return s.encode('utf-8')
-    else: # no cover: 2.x
+    else:  # no cover: 2.x
         return s
 
 
@@ -354,7 +354,7 @@ def handle_add_pkg(goptions, session, args):
     opts['force'] = options.force
     opts['block'] = False
     # check if list of packages exists for that tag already
-    dsttag=session.getTag(tag)
+    dsttag = session.getTag(tag)
     if dsttag is None:
         print("No such tag: %s" % tag)
         sys.exit(1)
@@ -388,7 +388,7 @@ def handle_block_pkg(goptions, session, args):
     activate_session(session, goptions)
     tag = args[0]
     # check if list of packages exists for that tag already
-    dsttag=session.getTag(tag)
+    dsttag = session.getTag(tag)
     if dsttag is None:
         print("No such tag: %s" % tag)
         return 1
@@ -425,7 +425,7 @@ def handle_remove_pkg(goptions, session, args):
     opts = {}
     opts['force'] = options.force
     # check if list of packages exists for that tag already
-    dsttag=session.getTag(tag)
+    dsttag = session.getTag(tag)
     if dsttag is None:
         print("No such tag: %s" % tag)
         return 1
@@ -717,7 +717,7 @@ def handle_wrapper_rpm(options, session, args):
 
     (build_opts, args) = parser.parse_args(args)
     if build_opts.inis:
-        if len(args)!= 1:
+        if len(args) != 1:
             parser.error(_("Exactly one argument (a build target) is required"))
     else:
         if len(args) < 3:
@@ -1153,14 +1153,14 @@ def handle_import(goptions, session, args):
     activate_session(session, goptions)
     to_import = {}
     for path in args:
-        data = koji.get_header_fields(path, ('name','version','release','epoch',
-                                             'arch','sigmd5','sourcepackage','sourcerpm'))
+        data = koji.get_header_fields(path, ('name', 'version', 'release', 'epoch',
+                                             'arch', 'sigmd5', 'sourcepackage', 'sourcerpm'))
         if data['sourcepackage']:
             data['arch'] = 'src'
             nvr = "%(name)s-%(version)s-%(release)s" % data
         else:
             nvr = "%(name)s-%(version)s-%(release)s" % koji.parse_NVRA(data['sourcerpm'])
-        to_import.setdefault(nvr,[]).append((path,data))
+        to_import.setdefault(nvr, []).append((path, data))
     builds_missing = False
     nvrs = to_list(to_import.keys())
     nvrs.sort()
@@ -1181,7 +1181,7 @@ def handle_import(goptions, session, args):
 
     # local function to help us out below
     def do_import(path, data):
-        rinfo = dict([(k,data[k]) for k in ('name','version','release','arch')])
+        rinfo = dict([(k, data[k]) for k in ('name', 'version', 'release', 'arch')])
         prev = session.getRPM(rinfo)
         if prev and not prev.get('external_repo_id', 0):
             if prev['payloadhash'] == koji.hex_string(data['sigmd5']):
@@ -1247,7 +1247,7 @@ def handle_import(goptions, session, args):
 
         if need_build:
             # if we're doing this here, we weren't given the matching srpm
-            if not options.create_build: # pragma: no cover
+            if not options.create_build:  # pragma: no cover
                 if binfo:
                     # should have caught this earlier, but just in case...
                     b_state = koji.BUILD_STATES[binfo['state']]
@@ -1362,11 +1362,11 @@ def _import_comps(session, filename, tag, options):
     comps.fromxml_f(filename)
     force = options.force
     ptypes = {
-        libcomps.PACKAGE_TYPE_DEFAULT : 'default',
-        libcomps.PACKAGE_TYPE_OPTIONAL : 'optional',
-        libcomps.PACKAGE_TYPE_CONDITIONAL : 'conditional',
-        libcomps.PACKAGE_TYPE_MANDATORY : 'mandatory',
-        libcomps.PACKAGE_TYPE_UNKNOWN : 'unknown',
+        libcomps.PACKAGE_TYPE_DEFAULT: 'default',
+        libcomps.PACKAGE_TYPE_OPTIONAL: 'optional',
+        libcomps.PACKAGE_TYPE_CONDITIONAL: 'conditional',
+        libcomps.PACKAGE_TYPE_MANDATORY: 'mandatory',
+        libcomps.PACKAGE_TYPE_UNKNOWN: 'unknown',
     }
     for group in comps.groups:
         print("Group: %s (%s)" % (group.id, group.name))
@@ -1378,8 +1378,8 @@ def _import_comps(session, filename, tag, options):
                               langonly=group.lang_only,
                               biarchonly=bool(group.biarchonly))
         for pkg in group.packages:
-            pkgopts = {'type' : ptypes[pkg.type],
-                       'basearchonly' : bool(pkg.basearchonly),
+            pkgopts = {'type': ptypes[pkg.type],
+                       'basearchonly': bool(pkg.basearchonly),
                        }
             if pkg.type == libcomps.PACKAGE_TYPE_CONDITIONAL:
                 pkgopts['requires'] = pkg.requires
@@ -1393,7 +1393,7 @@ def _import_comps(session, filename, tag, options):
         # libcomps does not support metapkgs
 
 
-def _import_comps_alt(session, filename, tag, options): # no cover 3.x
+def _import_comps_alt(session, filename, tag, options):  # no cover 3.x
     """Import comps data using yum.comps module"""
     print('WARN: yum.comps does not support the biarchonly of group and basearchonly of package')
     comps = yumcomps.Comps()
@@ -1413,7 +1413,7 @@ def _import_comps_alt(session, filename, tag, options):  # no cover 3.x
                               ('conditional', group.conditional_packages)]:
             for pkg in pdata:
                 # yum.comps does not support basearchonly
-                pkgopts = {'type' : ptype}
+                pkgopts = {'type': ptype}
                 if ptype == 'conditional':
                     pkgopts['requires'] = pdata[pkg]
                 for k in pkgopts.keys():
@@ -1444,7 +1444,7 @@ def handle_import_sig(goptions, session, args):
             parser.error(_("No such file: %s") % path)
     activate_session(session, goptions)
     for path in args:
-        data = koji.get_header_fields(path, ('name','version','release','arch','siggpg','sigpgp','sourcepackage'))
+        data = koji.get_header_fields(path, ('name', 'version', 'release', 'arch', 'siggpg', 'sigpgp', 'sourcepackage'))
         if data['sourcepackage']:
             data['arch'] = 'src'
         sigkey = data['siggpg']
@@ -1524,7 +1524,7 @@ def handle_write_signed_rpm(goptions, session, args):
         rpms.extend(session.listRPMs(buildID=build['id']))
     for i, rpminfo in enumerate(rpms):
         nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rpminfo
-        print("[%d/%d] %s" % (i+1, len(rpms), nvra))
+        print("[%d/%d] %s" % (i + 1, len(rpms), nvra))
         session.writeSignedRPM(rpminfo['id'], key)
 
 
@@ -1573,7 +1573,7 @@ def handle_prune_signed_copies(options, session, args):
     if options.verbose:
         print("Getting builds...")
     qopts = {
-        'state' : koji.BUILD_STATES['COMPLETE'],
+        'state': koji.BUILD_STATES['COMPLETE'],
         'queryOpts': {
             'limit': 50000,
             'offset': 0,
@@ -1665,7 +1665,7 @@ def handle_prune_signed_copies(options, session, args):
         timeline.sort(key=lambda entry: entry[:2])
         # find most recent creation entry for our build and crop there
         latest_ts = None
-        for i in range(len(timeline)-1, -1, -1):
+        for i in range(len(timeline) - 1, -1, -1):
             # searching in reverse cronological order
             event_id, is_create, entry = timeline[i]
             if entry['build_id'] == binfo['id'] and is_create:
@@ -1678,7 +1678,7 @@ def handle_prune_signed_copies(options, session, args):
         if options.debug:
             print(_histline(event_id, our_entry))
         # now go through the events since most recent creation entry
-        timeline = timeline[i+1:]
+        timeline = timeline[i + 1:]
         if not timeline:
             is_latest = True
             if options.debug:
@@ -1831,7 +1831,7 @@ def handle_prune_signed_copies(options, session, args):
                     print("Error removing %s: %s" % (signedpath, e))
                     print("This script needs write access to %s" % koji.BASEDIR)
                     continue
-                mycount +=1
+                mycount += 1
                 build_files += 1
                 build_space += st.st_size
                 # XXX - this makes some layout assumptions, but
@@ -1894,7 +1894,7 @@ def handle_set_build_volume(goptions, session, args):
         if not binfo:
             print("No such build: %s" % nvr)
         elif binfo['volume_id'] == volinfo['id']:
-            print("Build %s already on volume %s" %(nvr, volinfo['name']))
+            print("Build %s already on volume %s" % (nvr, volinfo['name']))
         else:
             builds.append(binfo)
     if not builds:
@@ -2352,13 +2352,13 @@ def anon_handle_latest_build(goptions, session, args):
     if not options.quiet:
         if options.type == 'maven':
             print("%-40s %-20s %-20s %-20s %s" % ("Build", "Tag", "Group Id", "Artifact Id", "Built by"))
-            print("%s %s %s %s %s" % ("-"*40, "-"*20, "-"*20, "-"*20, "-"*16))
+            print("%s %s %s %s %s" % ("-" * 40, "-" * 20, "-" * 20, "-" * 20, "-" * 16))
         else:
-            print("%-40s %-20s %s" % ("Build","Tag","Built by"))
-            print("%s %s %s" % ("-"*40, "-"*20, "-"*16))
+            print("%-40s %-20s %s" % ("Build", "Tag", "Built by"))
+            print("%s %s %s" % ("-" * 40, "-" * 20, "-" * 16))
         options.quiet = True
 
-    output = [ fmt % x for x in data]
+    output = [fmt % x for x in data]
     output.sort()
     for line in output:
         print(line)
@@ -2452,7 +2452,7 @@ def anon_handle_list_tagged(goptions, session, args):
         rpms, builds = session.listTaggedRPMS(tag, **opts)
         data = rpms
         if options.paths:
-            build_idx = dict([(b['id'],b) for b in builds])
+            build_idx = dict([(b['id'], b) for b in builds])
             for rinfo in data:
                 build = build_idx[rinfo['build_id']]
                 builddir = pathinfo.build(build)
@@ -2488,12 +2488,12 @@ def anon_handle_list_tagged(goptions, session, args):
     if not options.quiet:
         if options.type == 'maven':
             print("%-40s %-20s %-20s %-20s %s" % ("Build", "Tag", "Group Id", "Artifact Id", "Built by"))
-            print("%s %s %s %s %s" % ("-"*40, "-"*20, "-"*20, "-"*20, "-"*16))
+            print("%s %s %s %s %s" % ("-" * 40, "-" * 20, "-" * 20, "-" * 20, "-" * 16))
         else:
-            print("%-40s %-20s %s" % ("Build","Tag","Built by"))
-            print("%s %s %s" % ("-"*40, "-"*20, "-"*16))
+            print("%-40s %-20s %s" % ("Build", "Tag", "Built by"))
+            print("%s %s %s" % ("-" * 40, "-" * 20, "-" * 16))
 
-    output = [ fmt % x for x in data]
+    output = [fmt % x for x in data]
     output.sort()
     for line in output:
         print(line)
@@ -2573,7 +2573,7 @@ def anon_handle_list_untagged(goptions, session, args):
     if options.show_references:
         fmt = fmt + " %(refs)s"
 
-    output = [ fmt % x for x in data]
+    output = [fmt % x for x in data]
     output.sort()
     for line in output:
         print(line)
@@ -2904,21 +2904,21 @@ def anon_handle_list_pkgs(goptions, session, args):
     if not options.quiet:
         if allpkgs:
             print("Package")
-            print('-'*23)
+            print('-' * 23)
         else:
-            print("%-23s %-23s %-16s %-15s" % ('Package','Tag','Extra Arches','Owner'))
-            print("%s %s %s %s" % ('-'*23,'-'*23,'-'*16,'-'*15))
+            print("%-23s %-23s %-16s %-15s" % ('Package', 'Tag', 'Extra Arches', 'Owner'))
+            print("%s %s %s %s" % ('-' * 23, '-' * 23, '-' * 16, '-' * 15))
     for pkg in data:
         if allpkgs:
             print(pkg['package_name'])
         else:
-            if not options.show_blocked and pkg.get('blocked',False):
+            if not options.show_blocked and pkg.get('blocked', False):
                 continue
             if 'tag_id' in pkg:
                 if pkg['extra_arches'] is None:
                     pkg['extra_arches'] = ""
                 fmt = "%(package_name)-23s %(tag_name)-23s %(extra_arches)-16s %(owner_name)-15s"
-                if pkg.get('blocked',False):
+                if pkg.get('blocked', False):
                     fmt += " [BLOCKED]"
             else:
                 fmt = "%(package_name)s"
@@ -3038,7 +3038,7 @@ def anon_handle_list_builds(goptions, session, args):
     fmt = "%(nvr)-55s %(owner_name)-16s %(state)s"
     if not options.quiet:
         print("%-55s %-16s %s" % ("Build", "Built by", "State"))
-        print("%s %s %s" % ("-"*55, "-"*16, "-"*16))
+        print("%s %s %s" % ("-" * 55, "-" * 16, "-" * 16))
 
     for build in data:
         print(fmt % build)
@@ -3102,11 +3102,11 @@ def anon_handle_rpminfo(goptions, session, args):
         if info.get('extra'):
             print("Extra: %(extra)r" % info)
         if options.buildroots:
-            br_list = session.listBuildroots(rpmID=info['id'], queryOpts={'order':'buildroot.id'})
+            br_list = session.listBuildroots(rpmID=info['id'], queryOpts={'order': 'buildroot.id'})
             print("Used in %i buildroots:" % len(br_list))
             if len(br_list):
-                print("  %8s %-28s %-8s %-29s" % ('id','build tag','arch','build host'))
-                print("  %s %s %s %s" % ('-'*8, '-'*28, '-'*8, '-'*29))
+                print("  %8s %-28s %-8s %-29s" % ('id', 'build tag', 'arch', 'build host'))
+                print("  %s %s %s %s" % ('-' * 8, '-' * 28, '-' * 8, '-' * 29))
             for br_info in br_list:
                 print("  %(id)8i %(tag_name)-28s %(arch)-8s %(host_name)-29s" % br_info)
 
@@ -3226,14 +3226,14 @@ def anon_handle_hostinfo(goptions, session, args):
         description = info['description'].splitlines()
         print("Description: %s" % description[0])
         for line in description[1:]:
-            print("%s%s" % (" "*13, line))
+            print("%s%s" % (" " * 13, line))
     else:
         print("Description:")
     if info['comment']:
         comment = info['comment'].splitlines()
         print("Comment: %s" % comment[0])
         for line in comment[1:]:
-            print("%s%s" % (" "*9, line))
+            print("%s%s" % (" " * 9, line))
     else:
         print("Comment:")
     print("Enabled: %s" % (info['enabled'] and 'yes' or 'no'))
@@ -3246,9 +3246,9 @@ def anon_handle_hostinfo(goptions, session, args):
     print("Last Update: %s" % update)
     print("Channels: %s" % ' '.join([c['name'] for c in session.listChannels(hostID=info['id'])]))
     print("Active Buildroots:")
-    states = {0:"INIT", 1:"WAITING", 2:"BUILDING"}
+    states = {0: "INIT", 1: "WAITING", 2: "BUILDING"}
     rows = [('NAME', 'STATE', 'CREATION TIME')]
-    for s in range(0,3):
+    for s in range(0, 3):
         for b in session.listBuildroots(hostID=info['id'], state=s):
             rows.append((("%s-%s-%s" % (b['tag_name'], b['id'], b['repo_id'])), states[s],
                          b['create_event_time'][:b['create_event_time'].find('.')]))
@@ -3327,9 +3327,9 @@ def handle_clone_tag(goptions, session, args):
                          "Please use --force if this is what you really want to do."))
 
     # init debug lists.
-    chgpkglist=[]
-    chgbldlist=[]
-    chggrplist=[]
+    chgpkglist = []
+    chgbldlist = []
+    chggrplist = []
     # case of brand new dst-tag.
     if not dsttag:
         if not options.config:
@ -3500,7 +3500,7 @@ def handle_clone_tag(goptions, session, args):
bdellist.extend(dblds)
baddlist.sort(key=lambda x: x['package_name'])
bdellist.sort(key=lambda x: x['package_name'])
gaddlist = [] # list containing new groups to be added from src tag
gaddlist = []  # list containing new groups to be added from src tag
for (grpname, group) in six.iteritems(srcgroups):
if grpname not in dstgroups:
gaddlist.append(group)

@ -3517,8 +3517,8 @@ def handle_clone_tag(goptions, session, args):
grpchanges[grpname]['inherited'] = False
if dstgroup['tag_id'] != dsttag['id']:
grpchanges[grpname]['inherited'] = True
srcgrppkglist=[]
dstgrppkglist=[]
srcgrppkglist = []
dstgrppkglist = []
for pkg in group['packagelist']:
srcgrppkglist.append(pkg['package'])
for pkg in dstgroups[grpname]['packagelist']:

@ -3717,22 +3717,22 @@ def handle_clone_tag(goptions, session, args):
session.multiCall(batch=options.batch)
# print final list of actions.
if options.verbose:
pfmt=' %-7s %-28s %-10s %-10s %-10s\n'
bfmt=' %-7s %-28s %-40s %-10s %-10s %-10s\n'
gfmt=' %-7s %-28s %-28s\n'
pfmt = ' %-7s %-28s %-10s %-10s %-10s\n'
bfmt = ' %-7s %-28s %-40s %-10s %-10s %-10s\n'
gfmt = ' %-7s %-28s %-28s\n'
sys.stdout.write('\nList of changes:\n\n')
sys.stdout.write(pfmt % ('Action', 'Package', 'Blocked', 'Owner', 'From Tag'))
sys.stdout.write(pfmt % ('-'*7, '-'*28, '-'*10, '-'*10, '-'*10))
sys.stdout.write(pfmt % ('-' * 7, '-' * 28, '-' * 10, '-' * 10, '-' * 10))
for changes in chgpkglist:
sys.stdout.write(pfmt % changes)
sys.stdout.write('\n')
sys.stdout.write(bfmt % ('Action', 'From/To Package', 'Build(s)', 'State', 'Owner', 'From Tag'))
sys.stdout.write(bfmt % ('-'*7, '-'*28, '-'*40, '-'*10, '-'*10, '-'*10))
sys.stdout.write(bfmt % ('-' * 7, '-' * 28, '-' * 40, '-' * 10, '-' * 10, '-' * 10))
for changes in chgbldlist:
sys.stdout.write(bfmt % changes)
sys.stdout.write('\n')
sys.stdout.write(gfmt % ('Action', 'Package', 'Group'))
sys.stdout.write(gfmt % ('-'*7, '-'*28, '-'*28))
sys.stdout.write(gfmt % ('-' * 7, '-' * 28, '-' * 28))
for changes in chggrplist:
sys.stdout.write(gfmt % changes)

@ -3874,7 +3874,7 @@ def anon_handle_list_targets(goptions, session, args):

fmt = "%(name)-30s %(build_tag_name)-30s %(dest_tag_name)-30s"
if not options.quiet:
print("%-30s %-30s %-30s" % ('Name','Buildroot','Destination'))
print("%-30s %-30s %-30s" % ('Name', 'Buildroot', 'Destination'))
print("-" * 93)
tmp_list = [(x['name'], x) for x in session.getBuildTargets(options.name)]
tmp_list.sort()

@ -4005,7 +4005,7 @@ def anon_handle_list_tags(goptions, session, args):
if not buildinfo:
parser.error(_("Invalid build %s" % options.build))

tags = session.listTags(buildinfo.get('id',None), pkginfo.get('id',None))
tags = session.listTags(buildinfo.get('id', None), pkginfo.get('id', None))
tags.sort(key=lambda x: x['name'])
# if options.verbose:
# fmt = "%(name)s [%(id)i] %(perm)s %(locked)s %(arches)s"

@ -4107,7 +4107,7 @@ def _print_histline(entry, **kwargs):
del x['.related']
bad_edit = None
if len(edit) != 1:
bad_edit = "%i elements" % (len(edit)+1)
bad_edit = "%i elements" % (len(edit) + 1)
other = edit[0]
# check edit for sanity
if create or not other[2]:

@ -4256,7 +4256,7 @@ def _print_histline(entry, **kwargs):
else:
fmt = "%s entry revoked" % table
time_str = time.asctime(time.localtime(ts))
parts = [time_str, fmt % x]
parts = [time_str, fmt % x]
if options.events or options.verbose:
parts.insert(1, "(eid %i)" % event_id)
if who:

@ -4312,23 +4312,23 @@ def _print_histline(entry, **kwargs):
print(" %s: %s" % (dkey, x[key]))

_table_keys = {
'user_perms' : ['user_id', 'perm_id'],
'user_groups' : ['user_id', 'group_id'],
'cg_users' : ['user_id', 'cg_id'],
'tag_inheritance' : ['tag_id', 'parent_id'],
'tag_config' : ['tag_id'],
'tag_extra' : ['tag_id', 'key'],
'build_target_config' : ['build_target_id'],
'external_repo_config' : ['external_repo_id'],
'user_perms': ['user_id', 'perm_id'],
'user_groups': ['user_id', 'group_id'],
'cg_users': ['user_id', 'cg_id'],
'tag_inheritance': ['tag_id', 'parent_id'],
'tag_config': ['tag_id'],
'tag_extra': ['tag_id', 'key'],
'build_target_config': ['build_target_id'],
'external_repo_config': ['external_repo_id'],
'host_config': ['host_id'],
'host_channels': ['host_id', 'channel_id'],
'tag_external_repos' : ['tag_id', 'external_repo_id'],
'tag_listing' : ['build_id', 'tag_id'],
'tag_packages' : ['package_id', 'tag_id'],
'tag_package_owners' : ['package_id', 'tag_id'],
'group_config' : ['group_id', 'tag_id'],
'group_req_listing' : ['group_id', 'tag_id', 'req_id'],
'group_package_listing' : ['group_id', 'tag_id', 'package'],
'tag_external_repos': ['tag_id', 'external_repo_id'],
'tag_listing': ['build_id', 'tag_id'],
'tag_packages': ['package_id', 'tag_id'],
'tag_package_owners': ['package_id', 'tag_id'],
'group_config': ['group_id', 'tag_id'],
'group_req_listing': ['group_id', 'tag_id', 'req_id'],
'group_package_listing': ['group_id', 'tag_id', 'package'],
}
@ -4640,7 +4640,7 @@ def _printTaskInfo(session, task_id, topdir, level=0, recurse=True, verbose=True
and its children."""

BUILDDIR = '/var/lib/mock'
indent = " "*2*level
indent = " " * 2 * level

info = session.getTaskInfo(task_id)

@ -4762,8 +4762,8 @@ def anon_handle_taginfo(goptions, session, args):
for n, info in enumerate(tags):
if n > 0:
print('')
print("Tag: %(name)s [%(id)d]" %info)
print("Arches: %(arches)s" %info)
print("Tag: %(name)s [%(id)d]" % info)
print("Arches: %(arches)s" % info)
group_list = [x['name'] for x in session.getTagGroups(info['id'], **event_opts)]
group_list.sort()
print("Groups: " + ', '.join(group_list))

@ -4851,7 +4851,7 @@ def handle_add_tag(goptions, session, args):
value = arg_filter(value)
extra[key] = value
opts['extra'] = extra
session.createTag(args[0],**opts)
session.createTag(args[0], **opts)


def handle_edit_tag(goptions, session, args):

@ -5119,7 +5119,7 @@ def handle_edit_tag_inheritance(goptions, session, args):
return 1

new_data = data.copy()
if options.priority is not None and options.priority.isdigit():
if options.priority is not None and options.priority.isdigit():
new_data['priority'] = int(options.priority)
if options.maxdepth is not None:
if options.maxdepth.isdigit():

@ -5226,7 +5226,7 @@ def anon_handle_show_groups(goptions, session, args):
if options.comps:
print(koji.generate_comps(groups, expand_groups=options.expand))
elif options.spec:
print(koji.make_groups_spec(groups,name='buildgroups',buildgroup='build'))
print(koji.make_groups_spec(groups, name='buildgroups', buildgroup='build'))
else:
pprint.pprint(groups)

@ -5235,8 +5235,8 @@ def anon_handle_list_external_repos(goptions, session, args):
"[info] List external repos"
usage = _("usage: %prog list-external-repos [options]")
parser = OptionParser(usage=get_usage_str(usage))
parser.add_option("--url", help=_("Select by url"))
parser.add_option("--name", help=_("Select by name"))
parser.add_option("--url", help=_("Select by url"))
parser.add_option("--name", help=_("Select by name"))
parser.add_option("--id", type="int", help=_("Select by id"))
parser.add_option("--tag", help=_("Select by tag"))
parser.add_option("--used", action='store_true', help=_("List which tags use the repo(s)"))

@ -5278,7 +5278,7 @@ def anon_handle_list_external_repos(goptions, session, args):
format = "basic"
opts['info'] = options.id or options.name or None
opts['url'] = options.url or None
data = session.listExternalRepos (**opts)
data = session.listExternalRepos(**opts)

# There are three different output formats
# 1) Listing just repo data (name, url)

@ -5287,15 +5287,15 @@ def anon_handle_list_external_repos(goptions, session, args):
if format == "basic":
format = "%(name)-25s %(url)s"
header1 = "%-25s %s" % ("External repo name", "URL")
header2 = "%s %s" % ("-"*25, "-"*40)
header2 = "%s %s" % ("-" * 25, "-" * 40)
elif format == "tag":
format = "%(priority)-3i %(external_repo_name)-25s %(merge_mode)-10s %(url)s"
header1 = "%-3s %-25s %-10s URL" % ("Pri", "External repo name", "Mode")
header2 = "%s %s %s %s" % ("-"*3, "-"*25, "-"*10, "-"*40)
header2 = "%s %s %s %s" % ("-" * 3, "-" * 25, "-" * 10, "-" * 40)
elif format == "multitag":
format = "%(tag_name)-20s %(priority)-3i %(merge_mode)-10s %(external_repo_name)s"
header1 = "%-20s %-3s %-10s %s" % ("Tag", "Pri", "Mode", "External repo name")
header2 = "%s %s %s %s" % ("-"*20, "-"*3, "-"*10, "-"*25)
header2 = "%s %s %s %s" % ("-" * 20, "-" * 3, "-" * 10, "-" * 25)
if not options.quiet:
print(header1)
print(header2)

@ -5379,8 +5379,8 @@ def handle_edit_external_repo(goptions, session, args):
"[admin] Edit data for an external repo"
usage = _("usage: %prog edit-external-repo <name>")
parser = OptionParser(usage=get_usage_str(usage))
parser.add_option("--url", help=_("Change the url"))
parser.add_option("--name", help=_("Change the name"))
parser.add_option("--url", help=_("Change the url"))
parser.add_option("--name", help=_("Change the name"))
(options, args) = parser.parse_args(args)
if len(args) != 1:
parser.error(_("Incorrect number of arguments"))
@ -5654,17 +5654,17 @@ def _build_image_indirection(options, task_opts, session, args):
bool(task_opts.base_image_build)):
raise koji.GenericError(_("You must specify either a base-image task or build ID/NVR"))

required_opts = [ 'name', 'version', 'arch', 'target', 'indirection_template', 'results_loc' ]
optional_opts = [ 'indirection_template_url', 'scratch', 'utility_image_task', 'utility_image_build',
'base_image_task', 'base_image_build', 'release', 'skip_tag' ]
required_opts = ['name', 'version', 'arch', 'target', 'indirection_template', 'results_loc']
optional_opts = ['indirection_template_url', 'scratch', 'utility_image_task', 'utility_image_build',
'base_image_task', 'base_image_build', 'release', 'skip_tag']

missing = [ ]
missing = []
for opt in required_opts:
if not getattr(task_opts, opt, None):
missing.append(opt)

if len(missing) > 0:
print("Missing the following required options: %s" % ' '.join(['--%s' % o.replace('_','-') for o in missing]))
print("Missing the following required options: %s" % ' '.join(['--%s' % o.replace('_', '-') for o in missing]))
raise koji.GenericError(_("Missing required options specified above"))

activate_session(session, options)

@ -5708,7 +5708,7 @@ def _build_image_indirection(options, task_opts, session, args):
os.path.basename(templatefile))
print('')

hub_opts = { }
hub_opts = {}
# Just pass everything in as opts. No posiitonal arguments at all. Why not?
for opt in required_opts + optional_opts:
val = getattr(task_opts, opt, None)

@ -5829,10 +5829,10 @@ def handle_image_build(options, session, args):
# as do factory-parameters
section = 'factory-parameters'
if config.has_section(section):
task_options.factory_parameter = [ ]
task_options.factory_parameter = []
for k, v in config.items(section):
# We do this, rather than a dict, to match what argparse spits out
task_options.factory_parameter.append( (k, v) )
task_options.factory_parameter.append((k, v))

else:
if len(args) < 5:

@ -6198,7 +6198,7 @@ def handle_set_pkg_arches(goptions, session, args):
tag = args[1]
with session.multicall(strict=True) as m:
for package in args[2:]:
m.packageListSetArches(tag,package,arches,force=options.force)
m.packageListSetArches(tag, package, arches, force=options.force)


def handle_set_pkg_owner(goptions, session, args):

@ -6214,7 +6214,7 @@ def handle_set_pkg_owner(goptions, session, args):
tag = args[1]
with session.multicall(strict=True) as m:
for package in args[2:]:
m.packageListSetOwner(tag,package,owner,force=options.force)
m.packageListSetOwner(tag, package, owner, force=options.force)


def handle_set_pkg_owner_global(goptions, session, args):

@ -6237,7 +6237,7 @@ def handle_set_pkg_owner_global(goptions, session, args):
if not user:
print("No such user: %s" % owner)
return 1
opts = {'with_dups' : True}
opts = {'with_dups': True}
old_user = None
if options.old_user:
old_user = session.getUser(options.old_user)

@ -6262,7 +6262,7 @@ def handle_set_pkg_owner_global(goptions, session, args):
if user['id'] == entry['owner_id']:
if options.verbose:
print("Preserving owner=%s for package %s in tag %s" \
% (user['name'], package, entry['tag_name']))
% (user['name'], package, entry['tag_name']))
else:
if options.test:
print("Would have changed owner for %s in tag %s: %s -> %s" \

@ -6527,7 +6527,7 @@ def handle_unblock_pkg(goptions, session, args):
tag = args[0]
with session.multicall(strict=True) as m:
for package in args[1:]:
m.packageListUnblock(tag,package)
m.packageListUnblock(tag, package)


def anon_handle_download_build(options, session, args):

@ -6729,7 +6729,7 @@ def anon_handle_download_logs(options, session, args):
if task_info is None:
error(_("No such task id: %i" % task_id))
files = list_task_output_all_volumes(session, task_id)
logs = [] # list of tuples (filename, volume)
logs = []  # list of tuples (filename, volume)
for filename in files:
if not filename.endswith(".log"):
continue

@ -6741,7 +6741,7 @@ def anon_handle_download_logs(options, session, args):
"%s-%s" % (task_info["arch"], task_id))

count = 0
state = koji.TASK_STATES[task_info['state']]
state = koji.TASK_STATES[task_info['state']]
if state == 'FAILED':
if not match or koji.util.multi_fnmatch(FAIL_LOG, match):
write_fail_log(task_log_dir, task_id)

@ -7022,7 +7022,7 @@ def handle_dist_repo(options, session, args):
parser.add_option("--split-debuginfo", action='store_true', default=False,
help='Split debuginfo info a separate repo for each arch')
parser.add_option('--comps', help='Include a comps file in the repodata')
parser.add_option('--delta-rpms', metavar='REPO',default=[],
parser.add_option('--delta-rpms', metavar='REPO', default=[],
action='append',
help=_('Create delta rpms. REPO can be the id of another dist repo '
'or the name of a tag that has a dist repo. May be specified '

@ -7178,7 +7178,7 @@ def handle_moshimoshi(options, session, args):
u = session.getLoggedInUser()
if not u:
print("Not authenticated")
u = {'name' : 'anonymous user'}
u = {'name': 'anonymous user'}
print("%s, %s!" % (_printable_unicode(random.choice(greetings)), u["name"]))
print("")
print("You are using the hub at %s" % session.baseurl)
@ -77,21 +77,21 @@ def arg_filter(arg):


categories = {
'admin' : 'admin commands',
'build' : 'build commands',
'search' : 'search commands',
'download' : 'download commands',
'monitor' : 'monitor commands',
'info' : 'info commands',
'bind' : 'bind commands',
'misc' : 'miscellaneous commands',
'admin': 'admin commands',
'build': 'build commands',
'search': 'search commands',
'download': 'download commands',
'monitor': 'monitor commands',
'info': 'info commands',
'bind': 'bind commands',
'misc': 'miscellaneous commands',
}


def get_epilog_str(progname=None):
if progname is None:
progname = os.path.basename(sys.argv[0]) or 'koji'
categories_ordered=', '.join(sorted(['all'] + to_list(categories.keys())))
categories_ordered = ', '.join(sorted(['all'] + to_list(categories.keys())))
epilog_str = '''
Try "%(progname)s --help" for help about global options
Try "%(progname)s help" to get all available commands

@ -119,29 +119,29 @@ def print_task_headers():
print("ID Pri Owner State Arch Name")


def print_task(task,depth=0):
def print_task(task, depth=0):
"""Print a task"""
task = task.copy()
task['state'] = koji.TASK_STATES.get(task['state'],'BADSTATE')
task['state'] = koji.TASK_STATES.get(task['state'], 'BADSTATE')
fmt = "%(id)-8s %(priority)-4s %(owner_name)-20s %(state)-8s %(arch)-10s "
if depth:
indent = " "*(depth-1) + " +"
indent = " " * (depth - 1) + " +"
else:
indent = ''
label = koji.taskLabel(task)
print(''.join([fmt % task, indent, label]))


def print_task_recurse(task,depth=0):
def print_task_recurse(task, depth=0):
"""Print a task and its children"""
print_task(task,depth)
for child in task.get('children',()):
print_task_recurse(child,depth+1)
print_task(task, depth)
for child in task.get('children', ()):
print_task_recurse(child, depth + 1)


class TaskWatcher(object):

def __init__(self,task_id,session,level=0,quiet=False):
def __init__(self, task_id, session, level=0, quiet=False):
self.id = task_id
self.session = session
self.info = None

@ -167,7 +167,7 @@ class TaskWatcher(object):
error = None
try:
self.session.getTaskResult(self.id)
except (six.moves.xmlrpc_client.Fault,koji.GenericError) as e:
except (six.moves.xmlrpc_client.Fault, koji.GenericError) as e:
error = e
if error is None:
# print("%s: complete" % self.str())

@ -206,7 +206,7 @@ class TaskWatcher(object):
if self.info is None:
return False
state = koji.TASK_STATES[self.info['state']]
return (state in ['CLOSED','CANCELED','FAILED'])
return (state in ['CLOSED', 'CANCELED', 'FAILED'])

def is_success(self):
if self.info is None:

@ -339,7 +339,7 @@ def watch_logs(session, tasklist, opts, poll_interval):
print("No such task id: %i" % taskId)
sys.exit(1)
state = koji.TASK_STATES[info['state']]
return (state in ['CLOSED','CANCELED','FAILED'])
return (state in ['CLOSED', 'CANCELED', 'FAILED'])

offsets = {}
for task_id in tasklist:

@ -422,7 +422,7 @@ def _format_size(size):
return "%0.2f GiB" % (size / 1073741824.0)
if (size / 1048576 >= 1):
return "%0.2f MiB" % (size / 1048576.0)
if (size / 1024 >=1):
if (size / 1024 >= 1):
return "%0.2f KiB" % (size / 1024.0)
return "%0.2f B" % (size)

@ -439,7 +439,7 @@ def _progress_callback(uploaded, total, piece, time, total_time):
if total == 0:
percent_done = 0.0
else:
percent_done = float(uploaded)/float(total)
percent_done = float(uploaded) / float(total)
percent_done_str = "%02d%%" % (percent_done * 100)
data_done = _format_size(uploaded)
elapsed = _format_secs(total_time)

@ -447,12 +447,12 @@ def _progress_callback(uploaded, total, piece, time, total_time):
speed = "- B/sec"
if (time):
if (uploaded != total):
speed = _format_size(float(piece)/float(time)) + "/sec"
speed = _format_size(float(piece) / float(time)) + "/sec"
else:
speed = _format_size(float(total)/float(total_time)) + "/sec"
speed = _format_size(float(total) / float(total_time)) + "/sec"

# write formated string and flush
sys.stdout.write("[% -36s] % 4s % 8s % 10s % 14s\r" % ('='*(int(percent_done*36)), percent_done_str, elapsed, data_done, speed))
sys.stdout.write("[% -36s] % 4s % 8s % 10s % 14s\r" % ('=' * (int(percent_done * 36)), percent_done_str, elapsed, data_done, speed))
sys.stdout.flush()
@ -587,8 +587,8 @@ def _list_tasks(options, session):
"Retrieve a list of tasks"

callopts = {
'state' : [koji.TASK_STATES[s] for s in ('FREE', 'OPEN', 'ASSIGNED')],
'decode' : True,
'state': [koji.TASK_STATES[s] for s in ('FREE', 'OPEN', 'ASSIGNED')],
'decode': True,
}

if getattr(options, 'mine', False):

@ -622,7 +622,7 @@ def _list_tasks(options, session):
sys.exit(1)
callopts['host_id'] = host['id']

qopts = {'order' : 'priority,create_time'}
qopts = {'order': 'priority,create_time'}
tasklist = session.listTasks(callopts, qopts)
tasks = dict([(x['id'], x) for x in tasklist])

@ -631,7 +631,7 @@ def _list_tasks(options, session):
if t['parent'] is not None:
parent = tasks.get(t['parent'])
if parent:
parent.setdefault('children',[])
parent.setdefault('children', [])
parent['children'].append(t)
t['sub'] = True

@ -641,7 +641,7 @@ def _list_tasks(options, session):
def format_inheritance_flags(parent):
"""Return a human readable string of inheritance flags"""
flags = ''
for code,expr in (
for code, expr in (
('M', parent['maxdepth'] is not None),
('F', parent['pkg_filter']),
('I', parent['intransitive']),

156
hub/kojihub.py
156
hub/kojihub.py
@ -401,7 +401,7 @@ class Task(object):
if strict:
raise koji.GenericError("Task %d is not top-level (parent=%d)" % (task_id, parent))
# otherwise, find the top-level task and go from there
seen = {task_id:1}
seen = {task_id: 1}
while parent is not None:
if parent in seen:
raise koji.GenericError("Task LOOP at task %i" % task_id)

@ -891,7 +891,7 @@ def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdept
continue
else:
hist[id] = []
hist[id].append(link) #record history
hist[id].append(link)  # record history
order.append(link)
if link['intransitive'] and reverse:
# add link, but don't follow it

@ -910,20 +910,20 @@ def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdept
def _pkglist_remove(tag_id, pkg_id):
clauses = ('package_id=%(pkg_id)i', 'tag_id=%(tag_id)i')
update = UpdateProcessor('tag_packages', values=locals(), clauses=clauses)
update.make_revoke() #XXX user_id?
update.make_revoke()  # XXX user_id?
update.execute()

def _pkglist_owner_remove(tag_id, pkg_id):
clauses = ('package_id=%(pkg_id)i', 'tag_id=%(tag_id)i')
update = UpdateProcessor('tag_package_owners', values=locals(), clauses=clauses)
update.make_revoke() #XXX user_id?
update.make_revoke()  # XXX user_id?
update.execute()

def _pkglist_owner_add(tag_id, pkg_id, owner):
_pkglist_owner_remove(tag_id, pkg_id)
data = {'tag_id': tag_id, 'package_id': pkg_id, 'owner': owner}
insert = InsertProcessor('tag_package_owners', data=data)
insert.make_create() #XXX user_id?
insert.make_create()  # XXX user_id?
insert.execute()

def _pkglist_add(tag_id, pkg_id, owner, block, extra_arches):

@ -936,7 +936,7 @@ def _pkglist_add(tag_id, pkg_id, owner, block, extra_arches):
'extra_arches': koji.parse_arches(extra_arches, strict=True, allow_none=True)
}
insert = InsertProcessor('tag_packages', data=data)
insert.make_create() #XXX user_id?
insert.make_create()  # XXX user_id?
insert.execute()
_pkglist_owner_add(tag_id, pkg_id, owner)

@ -966,7 +966,7 @@ def _direct_pkglist_add(taginfo, pkginfo, owner, block, extra_arches, force,
action = 'block'
if policy:
context.session.assertLogin()
policy_data = {'tag' : tag_id, 'action' : action, 'package' : pkginfo, 'force' : force}
policy_data = {'tag': tag_id, 'action': action, 'package': pkginfo, 'force': force}
# don't check policy for admins using force
if not (force and context.session.hasPerm('admin')):
assert_policy('package_list', policy_data)

@ -1045,7 +1045,7 @@ def _direct_pkglist_remove(taginfo, pkginfo, force=False, policy=False):
pkg = lookup_package(pkginfo, strict=True)
if policy:
context.session.assertLogin()
policy_data = {'tag' : tag['id'], 'action' : 'remove', 'package' : pkg['id'], 'force' : force}
policy_data = {'tag': tag['id'], 'action': 'remove', 'package': pkg['id'], 'force': force}
# don't check policy for admins using force
if not (force and context.session.hasPerm('admin')):
assert_policy('package_list', policy_data)

@ -1074,7 +1074,7 @@ def pkglist_unblock(taginfo, pkginfo, force=False):
tag = get_tag(taginfo, strict=True)
pkg = lookup_package(pkginfo, strict=True)
context.session.assertLogin()
policy_data = {'tag' : tag['id'], 'action' : 'unblock', 'package' : pkg['id'], 'force' : force}
policy_data = {'tag': tag['id'], 'action': 'unblock', 'package': pkg['id'], 'force': force}
# don't check policy for admins using force
if not (force and context.session.hasPerm('admin')):
assert_policy('package_list', policy_data)

@ -1411,7 +1411,7 @@ def readTaggedRPMS(tag, package=None, arch=None, event=None, inherit=False, late
tables = ['rpminfo']
joins = ['tag_listing ON rpminfo.build_id = tag_listing.build_id']
clauses = [eventCondition(event, 'tag_listing'), 'tag_id=%(tagid)s']
data = {} #tagid added later
data = {}  # tagid added later
if package:
joins.append('build ON rpminfo.build_id = build.id')
joins.append('package ON package.id = build.pkg_id')

@ -1629,8 +1629,8 @@ def _direct_tag_build(tag, build, user, force=False):
table = 'tag_listing'
clauses = ('tag_id=%(tag_id)i', 'build_id=%(build_id)i')
query = QueryProcessor(columns=['build_id'], tables=[table],
clauses=('active = TRUE',)+clauses,
values=locals(), opts={'rowlock':True})
clauses=('active = TRUE',) + clauses,
values=locals(), opts={'rowlock': True})
# note: tag_listing is unique on (build_id, tag_id, active)
if query.executeOne():
# already tagged

@ -1814,8 +1814,8 @@ def _grplist_unblock(taginfo, grpinfo):
table = 'group_config'
clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s')
query = QueryProcessor(columns=['blocked'], tables=[table],
clauses=('active = TRUE',)+clauses,
values=locals(), opts={'rowlock':True})
clauses=('active = TRUE',) + clauses,
values=locals(), opts={'rowlock': True})
blocked = query.singleValue(strict=False)
if not blocked:
raise koji.GenericError("group %s is NOT blocked in tag %s" % (group['name'], tag['name']))

@ -1936,8 +1936,8 @@ def _grp_pkg_unblock(taginfo, grpinfo, pkg_name):
grp_id = get_group_id(grpinfo, strict=True)
clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'package = %(pkg_name)s')
query = QueryProcessor(columns=['blocked'], tables=[table],
clauses=('active = TRUE',)+clauses,
values=locals(), opts={'rowlock':True})
clauses=('active = TRUE',) + clauses,
values=locals(), opts={'rowlock': True})
blocked = query.singleValue(strict=False)
if not blocked:
raise koji.GenericError("package %s is NOT blocked in group %s, tag %s" \

@ -2063,8 +2063,8 @@ def _grp_req_unblock(taginfo, grpinfo, reqinfo):

clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'req_id = %(req_id)s')
query = QueryProcessor(columns=['blocked'], tables=[table],
clauses=('active = TRUE',)+clauses,
values=locals(), opts={'rowlock':True})
clauses=('active = TRUE',) + clauses,
values=locals(), opts={'rowlock': True})
blocked = query.singleValue(strict=False)
if not blocked:
raise koji.GenericError("group req %s is NOT blocked in group %s, tag %s" \

@ -2274,12 +2274,12 @@ def remove_channel(channel_name, force=False):
channel_id = get_channel_id(channel_name, strict=True)
# check for task references
query = QueryProcessor(tables=['task'], clauses=['channel_id=%(channel_id)i'],
values=locals(), columns=['id'], opts={'limit':1})
values=locals(), columns=['id'], opts={'limit': 1})
# XXX slow query
if query.execute():
raise koji.GenericError('channel %s has task references' % channel_name)
query = QueryProcessor(tables=['host_channels'], clauses=['channel_id=%(channel_id)i'],
values=locals(), columns=['host_id'], opts={'limit':1})
values=locals(), columns=['host_id'], opts={'limit': 1})
if query.execute():
if not force:
raise koji.GenericError('channel %s has host references' % channel_name)

@ -2344,7 +2344,7 @@ AND channel_id IN %(channels)s)'''
clauses = [clause]
else:
clauses = ['state IN (%(FREE)i,%(ASSIGNED)i)']
queryOpts = {'limit' : 100, 'order' : 'priority,create_time'}
queryOpts = {'limit': 100, 'order': 'priority,create_time'}
query = QueryProcessor(columns=fields, tables=['task'], clauses=clauses,
values=values, opts=queryOpts)
return query.execute()

@ -2496,7 +2496,7 @@ def repo_init(tag, with_src=False, with_debuginfo=False, event=None, with_separa
blocks = [pkg for pkg in readPackageList(tag_id, event=event_id, inherit=True).values() \
if pkg['blocked']]
repodir = koji.pathinfo.repo(repo_id, tinfo['name'])
os.makedirs(repodir) #should not already exist
os.makedirs(repodir)  # should not already exist

# generate comps and groups.spec
groupsdir = "%s/groups" % (repodir)

@ -2710,7 +2710,7 @@ def repo_info(repo_id, strict=False):
('repo.id', 'id'),
('repo.state', 'state'),
('repo.create_event', 'create_event'),
('events.time', 'creation_time'), #for compatibility with getRepo
('events.time', 'creation_time'),  # for compatibility with getRepo
('EXTRACT(EPOCH FROM events.time)', 'create_ts'),
('repo.tag_id', 'tag_id'),
('tag.name', 'tag_name'),

@ -2753,9 +2753,9 @@ def repo_expire_older(tag_id, event_id, dist=None):
If dist is not None, then only expire repos with the given dist value
"""
st_ready = koji.REPO_READY
clauses=['tag_id = %(tag_id)s',
'create_event < %(event_id)s',
'state = %(st_ready)s']
clauses = ['tag_id = %(tag_id)s',
'create_event < %(event_id)s',
'state = %(st_ready)s']
if dist is not None:
dist = bool(dist)
clauses.append('dist = %(dist)s')
@ -3406,7 +3406,7 @@ def _delete_tag(tagInfo):

def _tagDelete(tableName, value, columnName='tag_id'):
update = UpdateProcessor(tableName, clauses=["%s = %%(value)i" % columnName],
values={'value':value})
values={'value': value})
update.make_revoke()
update.execute()

@ -4147,8 +4147,8 @@ def get_rpm(rpminfo, strict=False, multi=False):
clauses.append("""external_repo_id = %(external_repo_id)i""")
elif not multi:
# try to match internal first, otherwise first matching external
retry = True #if no internal match
orig_clauses = list(clauses) #copy
retry = True  # if no internal match
orig_clauses = list(clauses)  # copy
clauses.append("""external_repo_id = 0""")

joins = ['external_repo ON rpminfo.external_repo_id = external_repo.id']

@ -4339,7 +4339,7 @@ def get_build_type(buildInfo, strict=False):
joins=['build_types ON btype_id=btype.id'],
clauses=['build_id = %(id)i'],
values=binfo,
opts={'asList':True},
opts={'asList': True},
)

ret = {}

@ -5098,7 +5098,7 @@ def query_buildroots(hostID=None, tagID=None, state=None, rpmID=None, archiveID=
('content_generator.name', 'cg_name'),
('buildroot.cg_version', 'cg_version'),
('buildroot.container_arch', 'container_arch'),
('buildroot.container_arch', 'arch'), #alias for back compat
('buildroot.container_arch', 'arch'),  # alias for back compat
('buildroot.container_type', 'container_type'),
('buildroot.host_os', 'host_os'),
('buildroot.host_arch', 'host_arch'),

@ -5231,7 +5231,7 @@ def remove_volume(volume):
context.session.assertPerm('admin')
volinfo = lookup_name('volume', volume, strict=True)
query = QueryProcessor(tables=['build'], clauses=['volume_id=%(id)i'],
values=volinfo, columns=['id'], opts={'limit':1})
values=volinfo, columns=['id'], opts={'limit': 1})
if query.execute():
raise koji.GenericError('volume %(name)s has build references' % volinfo)
delete = """DELETE FROM volume WHERE id=%(id)i"""

@ -5510,19 +5510,19 @@ def recycle_build(old, data):

# check for evidence of tag activity
query = QueryProcessor(columns=['tag_id'], tables=['tag_listing'],
clauses = ['build_id = %(id)s'], values=old)
clauses=['build_id = %(id)s'], values=old)
if query.execute():
raise koji.GenericError("Build already exists. Unable to recycle, "
"has tag history")

# check for rpms or archives
query = QueryProcessor(columns=['id'], tables=['rpminfo'],
clauses = ['build_id = %(id)s'], values=old)
clauses=['build_id = %(id)s'], values=old)
if query.execute():
raise koji.GenericError("Build already exists. Unable to recycle, "
"has rpm data")
query = QueryProcessor(columns=['id'], tables=['archiveinfo'],
clauses = ['build_id = %(id)s'], values=old)
clauses=['build_id = %(id)s'], values=old)
if query.execute():
raise koji.GenericError("Build already exists. Unable to recycle, "
"has archive data")

@ -6760,7 +6760,7 @@ def get_archive_type(filename=None, type_name=None, type_id=None, strict=False):
columns=['id', 'name', 'description', 'extensions'],
clauses=['extensions ~* %(pattern)s'],
)
for start in range(len(parts)-1, -1, -1):
for start in range(len(parts) - 1, -1, -1):
ext = '.'.join(parts[start:])
query.values['pattern'] = r'(\s|^)%s(\s|$)' % ext
results = query.execute()

@ -6874,7 +6874,7 @@ def new_image_build(build_info):
def new_typed_build(build_info, btype):
"""Mark build as a given btype"""

btype_id=lookup_name('btype', btype, strict=True)['id']
btype_id = lookup_name('btype', btype, strict=True)['id']
query = QueryProcessor(tables=('build_types',), columns=('build_id',),
clauses=('build_id = %(build_id)i',
'btype_id = %(btype_id)i',),

@ -7170,7 +7170,7 @@ def _scan_sighdr(sighdr, fn):
inp.close()
outp.seek(0, 0)
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)
# (we have no payload, so verifies would fail otherwise)
hdr = ts.hdrFromFdno(outp.fileno())
outp.close()

@ -7194,7 +7194,7 @@ def check_rpm_sig(an_rpm, sigkey, sighdr):
try:
koji.splice_rpm_sighdr(sighdr, rpm_path, temp)
ts = rpm.TransactionSet()
ts.setVSFlags(0) #full verify
ts.setVSFlags(0)  # full verify
with open(temp, 'rb') as fo:
hdr = ts.hdrFromFdno(fo.fileno())
except:

@ -7393,7 +7393,7 @@ def query_history(tables=None, **kwargs):
elif field == 'build_id':
# special case
fields.update({
'package.name': 'name', #XXX?
'package.name': 'name',  # XXX?
'build.version': 'version',
'build.release': 'release',
'build.epoch': 'epoch',

@ -8165,12 +8165,12 @@ def add_group_member(group, user, strict=True):
if uinfo['usertype'] == koji.USERTYPES['GROUP']:
raise koji.GenericError("Groups cannot be members of other groups")
# check to see if user is already a member
data = {'user_id' : uinfo['id'], 'group_id' : ginfo['id']}
data = {'user_id': uinfo['id'], 'group_id': ginfo['id']}
table = 'user_groups'
clauses = ('user_id = %(user_id)i', 'group_id = %(group_id)s')
query = QueryProcessor(columns=['user_id'], tables=[table],
clauses=('active = TRUE',)+clauses,
values=data, opts={'rowlock':True})
clauses=('active = TRUE',) + clauses,
values=data, opts={'rowlock': True})
row = query.executeOne()
if row:
if not strict:

@ -8189,7 +8189,7 @@ def drop_group_member(group, user):
raise koji.GenericError("No such group: %s" % group)
if user['id'] not in [u['id'] for u in get_group_members(group)]:
raise koji.GenericError("No such user in group: %s" % group)
data = {'user_id' : user['id'], 'group_id' : ginfo['id']}
data = {'user_id': user['id'], 'group_id': ginfo['id']}
clauses = ["user_id = %(user_id)i", "group_id = %(group_id)i"]
update = UpdateProcessor('user_groups', values=data, clauses=clauses)
update.make_revoke()

@ -8305,7 +8305,7 @@ def revoke_cg_access(user, cg):
context.session.assertPerm('admin')
user = get_user(user, strict=True)
cg = lookup_name('content_generator', cg, strict=True)
data = {'user_id': user['id'], 'cg_id' : cg['id']}
data = {'user_id': user['id'], 'cg_id': cg['id']}
update = UpdateProcessor('cg_users', values=data,
clauses=["user_id = %(user_id)i", "cg_id = %(cg_id)i"])
update.make_revoke()

@ -8320,7 +8320,7 @@ def assert_cg(cg, user=None):
user = context.session.user_id
user = get_user(user, strict=True)
clauses = ['active = TRUE', 'user_id = %(user_id)s', 'cg_id = %(cg_id)s']
data = {'user_id' : user['id'], 'cg_id' : cg['id']}
data = {'user_id': user['id'], 'cg_id': cg['id']}
query = QueryProcessor(tables=['cg_users'], columns=['cg_id'], clauses=clauses, values=data)
if not query.execute():
raise koji.AuthError("Content generator access required (%s)" % cg['name'])

@ -8461,7 +8461,7 @@ class BulkInsertProcessor(object):
self._one_insert(self.data)
else:
for i in range(0, len(self.data), self.batch):
data = self.data[i:i+self.batch]
data = self.data[i:i + self.batch]
self._one_insert(data)

def _one_insert(self, data):

@ -8592,7 +8592,7 @@ class UpdateProcessor(object):
ret = {}
ret.update(self.values)
for key in self.data:
ret["data."+key] = self.data[key]
ret["data." + key] = self.data[key]
return ret

def set(self, **kwargs):

@ -8956,13 +8956,13 @@ def policy_get_pkg(data):
# for some operations (e.g. adding a new package), the package
# entry may not exist yet
if isinstance(data['package'], six.string_types):
return {'id' : None, 'name' : data['package']}
return {'id': None, 'name': data['package']}
else:
raise koji.GenericError("Invalid package: %s" % data['package'])
return pkginfo
if 'build' in data:
binfo = get_build(data['build'], strict=True)
return {'id' : binfo['package_id'], 'name' : binfo['name']}
return {'id': binfo['package_id'], 'name': binfo['name']}
# else
raise koji.GenericError("policy requires package data")
@ -9981,7 +9981,7 @@ class RootExports(object):
try:
if offset == 0 or (offset == -1 and size == len(contents)):
# truncate file
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
try:
os.ftruncate(fd, 0)
# log_error("truncating fd %r to 0" %fd)

@ -9992,7 +9992,7 @@ class RootExports(object):
else:
os.lseek(fd, offset, 0)
# write contents
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB, len(contents), 0, 2)
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, len(contents), 0, 2)
try:
os.write(fd, contents)
# log_error("wrote contents")

@ -10001,7 +10001,7 @@ class RootExports(object):
if offset == -1:
if size is not None:
# truncate file
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
try:
os.ftruncate(fd, size)
# log_error("truncating fd %r to size %r" % (fd,size))

@ -10010,7 +10010,7 @@ class RootExports(object):
if verify is not None:
# check final digest
chksum = sum_cls()
fcntl.lockf(fd, fcntl.LOCK_SH|fcntl.LOCK_NB)
fcntl.lockf(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)
try:
os.lseek(fd, 0, 0)
while True:

@ -10038,7 +10038,7 @@ class RootExports(object):
raise
try:
try:
fcntl.lockf(fd, fcntl.LOCK_SH|fcntl.LOCK_NB)
fcntl.lockf(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)
except IOError as e:
raise koji.LockError(e)
st = os.fstat(fd)

@ -10188,8 +10188,8 @@ class RootExports(object):

def createEmptyBuild(self, name, version, release, epoch, owner=None):
context.session.assertPerm('admin')
data = {'name' : name, 'version' : version, 'release' : release,
'epoch' : epoch}
data = {'name': name, 'version': version, 'release': release,
'epoch': epoch}
if owner is not None:
data['owner'] = owner
return new_build(data)

@ -10243,7 +10243,7 @@ class RootExports(object):
"""
context.session.assertPerm('admin')
uploadpath = koji.pathinfo.work()
fn = "%s/%s/%s" %(uploadpath, path, basename)
fn = "%s/%s/%s" % (uploadpath, path, basename)
if not os.path.exists(fn):
raise koji.GenericError("No such file: %s" % fn)
rpminfo = import_rpm(fn)

@ -10306,7 +10306,7 @@ class RootExports(object):
context.session.assertPerm('tag')
tag_id = get_tag(tag, strict=True)['id']
build_id = get_build(build, strict=True)['id']
policy_data = {'tag' : tag_id, 'build' : build_id, 'fromtag' : None, 'operation' : 'tag'}
policy_data = {'tag': tag_id, 'build': build_id, 'fromtag': None, 'operation': 'tag'}
assert_policy('tag', policy_data)
_tag_build(tag, build, force=force)
if notify:

@ -10362,7 +10362,7 @@ class RootExports(object):
else:
raise koji.TagError(pkg_error)
# tag policy check
policy_data = {'tag' : tag_id, 'build' : build_id, 'fromtag' : fromtag_id}
policy_data = {'tag': tag_id, 'build': build_id, 'fromtag': fromtag_id}
if fromtag is None:
policy_data['operation'] = 'tag'
else:

@ -10384,7 +10384,7 @@ class RootExports(object):
user_id = context.session.user_id
tag_id = get_tag(tag, strict=True)['id']
build_id = get_build(build, strict=True)['id']
policy_data = {'tag' : None, 'build' : build_id, 'fromtag' : tag_id}
policy_data = {'tag': None, 'build': build_id, 'fromtag': tag_id}
policy_data['operation'] = 'untag'
try:
# don't check policy for admins using force

@ -10410,7 +10410,7 @@ class RootExports(object):
context.session.assertPerm('tag')
tag_id = get_tag(tag, strict=True)['id']
build_id = get_build(build, strict=True)['id']
policy_data = {'tag' : None, 'build' : build_id, 'fromtag' : tag_id, 'operation' : 'untag'}
policy_data = {'tag': None, 'build': build_id, 'fromtag': tag_id, 'operation': 'untag'}
assert_policy('tag', policy_data)
_untag_build(tag, build, strict=strict, force=force)
if notify:

@ -10457,7 +10457,7 @@ class RootExports(object):
build_list.reverse()

# policy check
policy_data = {'tag' : tag2, 'fromtag' : tag1, 'operation' : 'move'}
policy_data = {'tag': tag2, 'fromtag': tag1, 'operation': 'move'}
# don't check policy for admins using force
if not (force and context.session.hasPerm('admin')):
for build in build_list:

@ -10468,7 +10468,7 @@ class RootExports(object):
wait_on = []
tasklist = []
for build in build_list:
task_id = make_task('dependantTask', [wait_on, [['tagBuild', [tag2_id, build['id'], force, tag1_id], {'priority':15}]]])
task_id = make_task('dependantTask', [wait_on, [['tagBuild', [tag2_id, build['id'], force, tag1_id], {'priority': 15}]]])
wait_on = [task_id]
log_error("\nMade Task: %s\n" % task_id)
tasklist.append(task_id)

@ -12161,10 +12161,10 @@ class RootExports(object):
raise GenericError, else return None.
"""
query = QueryProcessor(tables=['build_notifications'],
columns = ('id', 'user_id', 'package_id', 'tag_id',
'success_only', 'email'),
clauses = ['id = %(id)i'],
values = locals())
columns=('id', 'user_id', 'package_id', 'tag_id',
'success_only', 'email'),
clauses=['id = %(id)i'],
values=locals())
result = query.executeOne()
if strict and not result:
raise koji.GenericError("No notification with ID %i found" % id)

@ -12184,9 +12184,9 @@ class RootExports(object):
raise GenericError, else return None.
"""
query = QueryProcessor(tables=['build_notifications_block'],
columns = ('id', 'user_id', 'package_id', 'tag_id'),
clauses = ['id = %(id)i'],
values = locals())
columns=('id', 'user_id', 'package_id', 'tag_id'),
clauses=['id = %(id)i'],
values=locals())
result = query.executeOne()
if strict and not result:
raise koji.GenericError("No notification block with ID %i found" % id)

@ -12554,7 +12554,7 @@ class BuildRoot(object):
raise koji.GenericError("Cannot change buildroot state to INIT")
query = QueryProcessor(columns=['state', 'retire_event'], values=self.data,
tables=['standard_buildroot'], clauses=['buildroot_id=%(id)s'],
opts={'rowlock':True})
opts={'rowlock': True})
row = query.executeOne()
if not row:
raise koji.GenericError("Unable to get state for buildroot %s" % self.id)

@ -12805,7 +12805,7 @@ class Host(object):
if tasks is None:
# Query all finished subtasks
states = tuple([koji.TASK_STATES[s]
for s in ['CLOSED', 'FAILED','CANCELED']])
for s in ['CLOSED', 'FAILED', 'CANCELED']])
query = QueryProcessor(tables=['task'], columns=['id'],
clauses=['parent=%(parent)s', 'state in %(states)s'],
values=locals(), opts={'asList': True})

@ -12838,7 +12838,7 @@ class Host(object):
q = """
SELECT %s FROM task
WHERE host_id = %%(host_id)s AND state = %%(st_open)s
""" % (",".join(fields))
""" % (",".join(fields))
c.execute(q, locals())
tasks = [dict(zip(fields, x)) for x in c.fetchall()]
for task in tasks:

@ -12899,7 +12899,7 @@ class Host(object):
WHERE (state = %%(st_free)s)
OR (state = %%(st_assigned)s AND host_id = %%(id)s)
ORDER BY priority,create_time
""" % (",".join(fields))
""" % (",".join(fields))
c.execute(q, locals())
for data in c.fetchall():
data = dict(zip(fields, data))

@ -13568,7 +13568,7 @@ class HostExports(object):
pkg_id = build['package_id']
tag_id = get_tag(tag, strict=True)['id']
user_id = task.getOwner()
policy_data = {'tag' : tag, 'build' : build, 'fromtag' : fromtag}
policy_data = {'tag': tag, 'build': build, 'fromtag': fromtag}
policy_data['user_id'] = user_id
if fromtag is None:
policy_data['operation'] = 'tag'

@ -14090,7 +14090,7 @@ def handle_upload(environ):
fd = os.open(fn, os.O_RDWR | os.O_CREAT, 0o666)
try:
try:
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as e:
raise koji.LockError(e)
if offset == -1:

@ -14120,5 +14120,5 @@ def handle_upload(environ):
logger.debug("Upload result: %r", ret)
logger.info("Completed upload for session %s (#%s): %f seconds, %i bytes, %s",
context.session.id, context.session.callnum,
time.time()-start, size, fn)
time.time() - start, size, fn)
return ret
@ -93,7 +93,7 @@ class HandlerRegistry(object):
if not callable(function):
continue
if prefix is not None:
name = "%s.%s" %(prefix, name)
name = "%s.%s" % (prefix, name)
self.register_function(function, name=name)

def register_instance(self, instance):

@ -128,7 +128,7 @@ class HandlerRegistry(object):
# bound method, remove first arg
args, varargs, varkw, defaults = ret
if args:
aname = args[0] #generally "self"
aname = args[0]  # generally "self"
del args[0]
if defaults and aname in defaults:
# shouldn't happen, but...

@ -202,7 +202,7 @@ class ModXMLRPCRequestHandler(object):

def __init__(self, handlers):
self.traceback = False
self.handlers = handlers #expecting HandlerRegistry instance
self.handlers = handlers  # expecting HandlerRegistry instance
self.logger = logging.getLogger('koji.xmlrpc')

def _get_handler(self, name):

@ -319,7 +319,7 @@ class ModXMLRPCRequestHandler(object):
rusage = resource.getrusage(resource.RUSAGE_SELF)
self.logger.info("Completed method %s for session %s (#%s): %f seconds, rss %s, stime %f",
method, context.session.id, context.session.callnum,
time.time()-start,
time.time() - start,
rusage.ru_maxrss, rusage.ru_stime)

return ret

@ -771,7 +771,7 @@ def application(environ, start_response):
except Exception:
pass
context._threadclear()
return [response] #XXX
return [response]  # XXX


def get_registry(opts, plugins):

100
koji/__init__.py
100
koji/__init__.py
@ -108,13 +108,13 @@ RPM_FILEDIGESTALGO_IDS = {
# Taken from RFC 4880
# A missing algo ID means md5
None: 'MD5',
1: 'MD5',
2: 'SHA1',
3: 'RIPEMD160',
8: 'SHA256',
9: 'SHA384',
10: 'SHA512',
11: 'SHA224'
1: 'MD5',
2: 'SHA1',
3: 'RIPEMD160',
8: 'SHA256',
9: 'SHA384',
10: 'SHA512',
11: 'SHA224'
}

# rpm 4.12 introduces optional deps, but they can also be backported in some

@ -288,7 +288,7 @@ DEFAULT_AUTH_TIMEOUT = 60
# BEGIN kojikamid dup #

# Exceptions
PythonImportError = ImportError # will be masked by koji's one
PythonImportError = ImportError  # will be masked by koji's one

class GenericError(Exception):
"""Base class for our custom exceptions"""

@ -640,8 +640,8 @@ class RawHeader(object):
for i in range(il):
entry = []
for j in range(4):
ofs = 16 + i*16 + j*4
data = [_ord(x) for x in self.header[ofs:ofs+4]]
ofs = 16 + i * 16 + j * 4
data = [_ord(x) for x in self.header[ofs:ofs + 4]]
entry.append(multibyte(data))

# print("Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry))

@ -693,7 +693,7 @@ class RawHeader(object):
# integer
n = 1 << (dtype - 2)
for i in range(count):
data = [_ord(x) for x in self.header[pos:pos+n]]
data = [_ord(x) for x in self.header[pos:pos + n]]
print("%r" % data)
num = multibyte(data)
print("Int(%d): %d" % (n, num))

@ -702,23 +702,23 @@ class RawHeader(object):
elif dtype == 6:
# string (null terminated)
end = self.header.find(six.b('\0'), pos)
print("String(%d): %r" % (end-pos, self.header[pos:end]))
print("String(%d): %r" % (end - pos, self.header[pos:end]))
next = end + 1
elif dtype == 7:
print("Data: %s" % hex_string(self.header[pos:pos+count]))
next = pos+count
print("Data: %s" % hex_string(self.header[pos:pos + count]))
next = pos + count
elif dtype == 8:
# string array
for i in range(count):
end = self.header.find(six.b('\0'), pos)
print("String(%d): %r" % (end-pos, self.header[pos:end]))
print("String(%d): %r" % (end - pos, self.header[pos:end]))
pos = end + 1
next = pos
elif dtype == 9:
# unicode string array
for i in range(count):
end = self.header.find(six.b('\0'), pos)
print("i18n(%d): %r" % (end-pos, self.header[pos:end]))
print("i18n(%d): %r" % (end - pos, self.header[pos:end]))
pos = end + 1
next = pos
else:

@ -746,7 +746,7 @@ class RawHeader(object):
if dtype >= 2 and dtype <= 5:
n = 1 << (dtype - 2)
# n-byte integer
data = [_ord(x) for x in self.header[pos:pos+n]]
data = [_ord(x) for x in self.header[pos:pos + n]]
return multibyte(data)
elif dtype == 6:
# string (null terminated)

@ -754,7 +754,7 @@ class RawHeader(object):
return self.header[pos:end]
elif dtype == 7:
# raw data
return self.header[pos:pos+count]
return self.header[pos:pos + count]
else:
# XXX - not all valid data types are handled
raise GenericError("Unable to read header data type: %x" % dtype)

@ -806,7 +806,7 @@ def __parse_packet_header(pgp_packet):
offset = 1
length = len(pgp_packet) - offset
else:
(fmt, offset) = {0:('>B', 2), 1:('>H', 3), 2:('>I', 5)}[len_type]
(fmt, offset) = {0: ('>B', 2), 1: ('>H', 3), 2: ('>I', 5)}[len_type]
length = struct.unpack(fmt, pgp_packet[1:offset])[0]
else:
tag = byte0 & 0x3F

@ -843,8 +843,8 @@ def __subpacket_key_ids(subs):
length = struct.unpack('>I', subs[1:5])[0]
off = 5
if _ord(subs[off]) == 16:
res.append(subs[off+1 : off+length])
subs = subs[off+length:]
res.append(subs[off + 1: off + length])
subs = subs[off + length:]
return res

def get_sigpacket_key_id(sigpacket):

@ -858,9 +858,9 @@ def get_sigpacket_key_id(sigpacket):
sub_len = struct.unpack('>H', sigpacket[4:6])[0]
off = 6 + sub_len
key_ids = __subpacket_key_ids(sigpacket[6:off])
sub_len = struct.unpack('>H', sigpacket[off : off+2])[0]
sub_len = struct.unpack('>H', sigpacket[off: off + 2])[0]
off += 2
key_ids += __subpacket_key_ids(sigpacket[off : off+sub_len])
key_ids += __subpacket_key_ids(sigpacket[off: off + sub_len])
if len(key_ids) != 1:
raise NotImplementedError(
'Unexpected number of key IDs: %s' % len(key_ids))

@ -907,7 +907,7 @@ def get_rpm_header(f, ts=None):
raise GenericError("rpm's python bindings are not installed")
if ts is None:
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)
if isinstance(f, six.string_types):
fo = open(f, "rb")
else:

@ -1007,8 +1007,8 @@ def parse_NVR(nvr):
p1 = nvr.rfind("-", 0, p2)
if p1 == -1 or p1 == p2 - 1:
raise GenericError("invalid format: %s" % nvr)
ret['release'] = nvr[p2+1:]
ret['version'] = nvr[p1+1:p2]
ret['release'] = nvr[p2 + 1:]
ret['version'] = nvr[p1 + 1:p2]
ret['name'] = nvr[:p1]
epochIndex = ret['name'].find(':')
if epochIndex == -1:

@ -1031,7 +1031,7 @@ def parse_NVRA(nvra):
p3 = nvra.rfind(".")
if p3 == -1 or p3 == len(nvra) - 1:
raise GenericError("invalid format: %s" % nvra)
arch = nvra[p3+1:]
arch = nvra[p3 + 1:]
ret = parse_NVR(nvra[:p3])
ret['arch'] = arch
if arch == 'src':

@ -1427,7 +1427,7 @@ def generate_comps(groups, expand_groups=False):
if expand_groups and g['grouplist']:
# add a requires entry for all packages in groups required by buildgroup
need = [req['name'] for req in g['grouplist']]
seen_grp = {g['name'] : 1}
seen_grp = {g['name']: 1}
seen_pkg = {}
for p in g['packagelist']:
seen_pkg[p['package']] = 1

@ -1503,9 +1503,9 @@ def genMockConfig(name, arch, managed=False, repoid=None, tag_name=None, **opts)
# rely on the mock defaults being correct
# and only includes changes from the defaults here
config_opts = {
'root' : name,
'basedir' : mockdir,
'target_arch' : opts.get('target_arch', arch),
'root': name,
'basedir': mockdir,
'target_arch': opts.get('target_arch', arch),
'chroothome': '/builddir',
# Use the group data rather than a generated rpm
'chroot_setup_cmd': 'groupinstall %s' % opts.get('install_group', 'build'),

@ -1578,9 +1578,9 @@ name=build
}

macros = {
'%_rpmfilename' : '%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm',
'%vendor' : opts.get('vendor', 'Koji'),
'%packager' : opts.get('packager', 'Koji'),
'%_rpmfilename': '%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm',
'%vendor': opts.get('vendor', 'Koji'),
'%packager': opts.get('packager', 'Koji'),
'%distribution': opts.get('distribution', 'Unknown')
}

@ -1747,18 +1747,18 @@ def config_directory_contents(dir_name, strict=False):

def read_config(profile_name, user_config=None):
config_defaults = {
'server' : 'http://localhost/kojihub',
'weburl' : 'http://localhost/koji',
'topurl' : None,
'pkgurl' : None,
'topdir' : '/mnt/koji',
|
||||
'max_retries' : None,
|
||||
'server': 'http://localhost/kojihub',
|
||||
'weburl': 'http://localhost/koji',
|
||||
'topurl': None,
|
||||
'pkgurl': None,
|
||||
'topdir': '/mnt/koji',
|
||||
'max_retries': None,
|
||||
'retry_interval': None,
|
||||
'anon_retry' : None,
|
||||
'offline_retry' : None,
|
||||
'offline_retry_interval' : None,
|
||||
'timeout' : DEFAULT_REQUEST_TIMEOUT,
|
||||
'auth_timeout' : DEFAULT_AUTH_TIMEOUT,
|
||||
'anon_retry': None,
|
||||
'offline_retry': None,
|
||||
'offline_retry_interval': None,
|
||||
'timeout': DEFAULT_REQUEST_TIMEOUT,
|
||||
'auth_timeout': DEFAULT_AUTH_TIMEOUT,
|
||||
'use_fast_upload': False,
|
||||
'upload_blocksize': 1048576,
|
||||
'poll_interval': 6,
|
||||
|
|
@ -2109,7 +2109,7 @@ def is_requests_cert_error(e):
|
|||
def is_cert_error(e):
|
||||
"""Determine if an OpenSSL error is due to a bad cert"""
|
||||
|
||||
if SSL_Error is None: #pragma: no cover
|
||||
if SSL_Error is None: # pragma: no cover
|
||||
# import failed, so we can't determine
|
||||
raise Exception("OpenSSL library did not load")
|
||||
if not isinstance(e, SSL_Error):
|
||||
|
|
@ -2980,14 +2980,14 @@ class ClientSession(object):
|
|||
tries = 0
|
||||
while True:
|
||||
if debug:
|
||||
self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" %(path, name, sz, digest, offset))
|
||||
self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" % (path, name, sz, digest, offset))
|
||||
if self.callMethod('uploadFile', path, name, sz, digest, offset, data, **volopts):
|
||||
break
|
||||
if tries <= retries:
|
||||
tries += 1
|
||||
continue
|
||||
else:
|
||||
raise GenericError("Error uploading file %s, offset %d" %(path, offset))
|
||||
raise GenericError("Error uploading file %s, offset %d" % (path, offset))
|
||||
if size == 0:
|
||||
break
|
||||
ofs += size
|
||||
|
|
@ -3127,7 +3127,7 @@ class MultiCallSession(object):
|
|||
self._session.logger.debug(
|
||||
"MultiCall with batch size %i, calls/groups(%i/%i)",
|
||||
batch, len(calls), round(len(calls) // batch))
|
||||
batches = [calls[i:i+batch] for i in range(0, len(calls), batch)]
|
||||
batches = [calls[i:i + batch] for i in range(0, len(calls), batch)]
|
||||
else:
|
||||
batches = [calls]
|
||||
results = []
|
||||
|
|
@ -3502,7 +3502,7 @@ def add_file_logger(logger, fn):
|
|||
return
|
||||
if not os.access(fn, os.W_OK):
|
||||
return
|
||||
handler = logging.handlers.RotatingFileHandler(fn, maxBytes=1024*1024*10, backupCount=5)
|
||||
handler = logging.handlers.RotatingFileHandler(fn, maxBytes=1024 * 1024 * 10, backupCount=5)
|
||||
handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))
|
||||
logging.getLogger(logger).addHandler(handler)
|
||||
|
||||
|
|
|
|||
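Most of the koji/__init__.py hunks above are pycodestyle E225/E226 fixes (missing whitespace around operators, including arithmetic inside slice bounds). A minimal sketch of the before/after pattern, illustrative only and not taken from the koji source:

    header = b'example'
    pos, n = 2, 3
    chunk = header[pos:pos+n]    # E226: arithmetic '+' without surrounding spaces
    chunk = header[pos:pos + n]  # accepted: spaces around '+', none around the slice ':'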
52
koji/arch.py
@ -10,14 +10,14 @@ import rpm
_ppc64_native_is_best = True
# dict mapping arch -> ( multicompat, best personality, biarch personality )
multilibArches = { "x86_64": ( "athlon", "x86_64", "athlon" ),
"sparc64v": ( "sparcv9v", "sparcv9v", "sparc64v" ),
"sparc64": ( "sparcv9", "sparcv9", "sparc64" ),
"ppc64": ( "ppc", "ppc", "ppc64" ),
"s390x": ( "s390", "s390x", "s390" ),
}
multilibArches = {"x86_64": ("athlon", "x86_64", "athlon"),
"sparc64v": ("sparcv9v", "sparcv9v", "sparc64v"),
"sparc64": ("sparcv9", "sparcv9", "sparc64"),
"ppc64": ("ppc", "ppc", "ppc64"),
"s390x": ("s390", "s390x", "s390"),
}
if _ppc64_native_is_best:
multilibArches["ppc64"] = ( "ppc", "ppc64", "ppc64" )
multilibArches["ppc64"] = ("ppc", "ppc64", "ppc64")
arches = {
# ia32
@ -34,7 +34,7 @@ arches = {
"ia32e": "x86_64",
# ppc64le
"ppc64le": "noarch",
"ppc64le": "noarch",
# ppc
"ppc64p7": "ppc64",
@ -56,16 +56,16 @@ arches = {
"sparc": "noarch",
# alpha
"alphaev7": "alphaev68",
"alphaev68": "alphaev67",
"alphaev67": "alphaev6",
"alphaev6": "alphapca56",
"alphaev7": "alphaev68",
"alphaev68": "alphaev67",
"alphaev67": "alphaev6",
"alphaev6": "alphapca56",
"alphapca56": "alphaev56",
"alphaev56": "alphaev5",
"alphaev5": "alphaev45",
"alphaev45": "alphaev4",
"alphaev4": "alpha",
"alpha": "noarch",
"alphaev56": "alphaev5",
"alphaev5": "alphaev45",
"alphaev45": "alphaev4",
"alphaev4": "alpha",
"alpha": "noarch",
# arm
"armv7l": "armv6l",
@ -124,7 +124,7 @@ def canCoinstall(arch1, arch2):
# if both are a multlibarch then we can't coinstall (x86_64, ia32e)
# if both are not multilibarches then we can't coinstall (i386, i686)
if 'noarch' in [arch1, arch2]: # noarch can never coinstall
if 'noarch' in [arch1, arch2]: # noarch can never coinstall
return False
if isMultiLibArch(arch=arch1) == isMultiLibArch(arch=arch2):
@ -153,7 +153,7 @@ def isMultiLibArch(arch=None):
if arch is None:
arch = canonArch
if arch not in arches: # or we could check if it is noarch
if arch not in arches: # or we could check if it is noarch
return 0
if arch in multilibArches:
@ -219,7 +219,7 @@ def getArchList(thisarch=None):
# hack hack hack
# sparc64v is also sparc64 compat
if archlist[0] == "sparc64v":
archlist.insert(1,"sparc64")
archlist.insert(1, "sparc64")
# if we're a weirdo arch - add noarch on there.
if len(archlist) == 1 and archlist[0] == thisarch:
@ -366,7 +366,7 @@ def getCanonX86_64Arch(arch):
return "ia32e"
return arch
def getCanonArch(skipRpmPlatform = 0):
def getCanonArch(skipRpmPlatform=0):
if not skipRpmPlatform and os.access("/etc/rpm/platform", os.R_OK):
try:
f = open("/etc/rpm/platform", "r")
@ -398,11 +398,11 @@ def getCanonArch(skipRpmPlatform = 0):
canonArch = getCanonArch()
# this gets you the "compat" arch of a biarch pair
def getMultiArchInfo(arch = canonArch):
def getMultiArchInfo(arch=canonArch):
if arch in multilibArches:
return multilibArches[arch]
if arch in arches and arches[arch] != "noarch":
return getMultiArchInfo(arch = arches[arch])
return getMultiArchInfo(arch=arches[arch])
return None
# get the best usual userspace arch for the arch we're on. this is
@ -430,7 +430,7 @@ def getBaseArch(myarch=None):
if not myarch:
myarch = canonArch
if myarch not in arches: # this is dumb, but <shrug>
if myarch not in arches: # this is dumb, but <shrug>
return myarch
if myarch.startswith("sparc64"):
@ -485,7 +485,7 @@ class ArchStorage(object):
self.basearch = getBaseArch(myarch=self.canonarch)
self.archlist = getArchList(thisarch=self.canonarch)
if not archlist_includes_compat_arch: # - do we bother including i686 and below on x86_64
if not archlist_includes_compat_arch: # - do we bother including i686 and below on x86_64
limit_archlist = []
for a in self.archlist:
if isMultiLibArch(a) or a == 'noarch':
@ -495,7 +495,7 @@ class ArchStorage(object):
self.bestarch = getBestArch(myarch=self.canonarch)
self.compatarches = getMultiArchInfo(arch=self.canonarch)
self.multilib = isMultiLibArch(arch=self.canonarch)
self.legit_multi_arches = legitMultiArchesInSameLib(arch = self.canonarch)
self.legit_multi_arches = legitMultiArchesInSameLib(arch=self.canonarch)
def get_best_arch_from_list(self, archlist, fromarch=None):
if not fromarch:
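The getCanonArch and getMultiArchInfo hunks above apply E251 (unexpected spaces around keyword / parameter equals): a default value or keyword argument uses '=' with no surrounding whitespace. An illustrative sketch with hypothetical names, not from the commit:

    def get_canon_arch(skip_platform=0):  # E251-clean: no spaces around '='
        return 'noarch' if skip_platform else 'x86_64'

    arch = get_canon_arch(skip_platform=1)  # the same rule applies at call sites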
10
koji/auth.py
@ -321,7 +321,7 @@ class Session(object):
srvkt = krbV.Keytab(name=context.opts.get('AuthKeytab'), context=ctx)
ac = krbV.AuthContext(context=ctx)
ac.flags = krbV.KRB5_AUTH_CONTEXT_DO_SEQUENCE|krbV.KRB5_AUTH_CONTEXT_DO_TIME
ac.flags = krbV.KRB5_AUTH_CONTEXT_DO_SEQUENCE | krbV.KRB5_AUTH_CONTEXT_DO_TIME
conninfo = self.getConnInfo()
ac.addrs = conninfo
@ -537,8 +537,8 @@ class Session(object):
# generate a random key
alnum = string.ascii_letters + string.digits
key = "%s-%s" %(user_id,
''.join([random.choice(alnum) for x in range(1, 20)]))
key = "%s-%s" % (user_id,
''.join([random.choice(alnum) for x in range(1, 20)]))
# use sha? sha.new(phrase).hexdigest()
# get a session id
@ -556,7 +556,7 @@ class Session(object):
context.cnx.commit()
# return session info
return {'session-id' : session_id, 'session-key' : key}
return {'session-id': session_id, 'session-key': key}
def subsession(self):
"Create a subsession"
@ -607,7 +607,7 @@ class Session(object):
return None
c = context.cnx.cursor()
q = """SELECT id FROM host WHERE user_id = %(uid)d"""
c.execute(q, {'uid' : self.user_id})
c.execute(q, {'uid': self.user_id})
r = c.fetchone()
c.close()
if r:
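The dict-literal changes above fall under E203 (whitespace before ':'): the colon hugs the key, with a single space after it. A minimal illustration, not from the commit:

    info = {'session-id' : 42}  # E203: whitespace before ':'
    info = {'session-id': 42}   # fixed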
@ -67,8 +67,8 @@ class ThreadLocal(object):
def __str__(self):
id = six.moves._thread.get_ident()
tdict = object.__getattribute__(self, '_tdict')
return "(current thread: %s) {" % id + \
", ".join(["%s : %s" %(k, v.__dict__) for (k, v) in six.iteritems(tdict)]) + \
return "(current thread: %s) {" % id + \
", ".join(["%s : %s" % (k, v.__dict__) for (k, v) in six.iteritems(tdict)]) + \
"}"
def _threadclear(self):
@ -307,7 +307,7 @@ class SCM(object):
# check for validity: params should be empty, query may be empty, everything else should be populated
if params:
raise koji.GenericError('Unable to parse SCM URL: %s . Params element %s should be empty.' % (self.url, params))
if not scheme: #pragma: no cover
if not scheme: # pragma: no cover
# should not happen because of is_scm_url check earlier
raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the scheme element.' % self.url)
if not fragment:
@ -699,7 +699,7 @@ class TaskManager(object):
# can lead to a world of hurt.
# We remove the rootdir contents but leave the rootdir unless it
# is really old
if age > 3600*24:
if age > 3600 * 24:
# dir untouched for a day
self.logger.info("Removing buildroot: %s" % desc)
if topdir and safe_rmtree(topdir, unmount=True, strict=False) != 0:
@ -850,8 +850,8 @@ class TaskManager(object):
self.logger.debug(" hosts: %r" % hosts)
self.logger.debug(" tasks: %r" % tasks)
# now we organize this data into channel-arch bins
bin_hosts = {} #hosts indexed by bin
bins = {} #bins for this host
bin_hosts = {} # hosts indexed by bin
bins = {} # bins for this host
our_avail = None
for host in hosts:
host['bins'] = []
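The comment changes in the hunk above apply E261 and E262: an inline comment needs at least two spaces before the '#', and the '#' must be followed by a space. Illustrative only:

    bins = {} #bins for this host
    # the line above fails E261 (one space before '#') and E262 (no space after it)
    bins = {}  # bins for this host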
@ -34,28 +34,28 @@ from koji.util import encode_datetime_recurse
# the available callback hooks and a list
# of functions to be called for each event
callbacks = {
'prePackageListChange': [],
'postPackageListChange': [],
'preTaskStateChange': [],
'postTaskStateChange': [],
'preBuildStateChange': [],
'postBuildStateChange': [],
'preImport': [],
'postImport': [],
'preRPMSign': [],
'postRPMSign': [],
'preTag': [],
'postTag': [],
'preUntag': [],
'postUntag': [],
'preRepoInit': [],
'postRepoInit': [],
'preRepoDone': [],
'postRepoDone': [],
'preCommit': [],
'postCommit': [],
'preSCMCheckout': [],
'postSCMCheckout': [],
'prePackageListChange': [],
'postPackageListChange': [],
'preTaskStateChange': [],
'postTaskStateChange': [],
'preBuildStateChange': [],
'postBuildStateChange': [],
'preImport': [],
'postImport': [],
'preRPMSign': [],
'postRPMSign': [],
'preTag': [],
'postTag': [],
'preUntag': [],
'postUntag': [],
'preRepoInit': [],
'postRepoInit': [],
'preRepoDone': [],
'postRepoDone': [],
'preCommit': [],
'postCommit': [],
'preSCMCheckout': [],
'postSCMCheckout': [],
}
class PluginTracker(object):
@ -288,7 +288,7 @@ class SimpleRuleSet(object):
raise Exception("bad policy line: %s" % line)
negate = True
tests = line[:pos]
action = line[pos+2:]
action = line[pos + 2:]
tests = [self.get_test_handler(x) for x in tests.split('&&')]
action = action.strip()
# just return action = { for nested rules
@ -40,53 +40,53 @@ class Rpmdiff:
# constants
TAGS = ( rpm.RPMTAG_NAME, rpm.RPMTAG_SUMMARY,
rpm.RPMTAG_DESCRIPTION, rpm.RPMTAG_GROUP,
rpm.RPMTAG_LICENSE, rpm.RPMTAG_URL,
rpm.RPMTAG_PREIN, rpm.RPMTAG_POSTIN,
rpm.RPMTAG_PREUN, rpm.RPMTAG_POSTUN)
TAGS = (rpm.RPMTAG_NAME, rpm.RPMTAG_SUMMARY,
rpm.RPMTAG_DESCRIPTION, rpm.RPMTAG_GROUP,
rpm.RPMTAG_LICENSE, rpm.RPMTAG_URL,
rpm.RPMTAG_PREIN, rpm.RPMTAG_POSTIN,
rpm.RPMTAG_PREUN, rpm.RPMTAG_POSTUN)
PRCO = ( 'REQUIRES', 'PROVIDES', 'CONFLICTS', 'OBSOLETES')
PRCO = ('REQUIRES', 'PROVIDES', 'CONFLICTS', 'OBSOLETES')
# {fname : (size, mode, mtime, flags, dev, inode,
# nlink, state, vflags, user, group, digest)}
__FILEIDX = [ ['S', 0],
['M', 1],
['5', 11],
['D', 4],
['N', 6],
['L', 7],
['V', 8],
['U', 9],
['G', 10],
['F', 3],
['T', 2] ]
__FILEIDX = [['S', 0],
['M', 1],
['5', 11],
['D', 4],
['N', 6],
['L', 7],
['V', 8],
['U', 9],
['G', 10],
['F', 3],
['T', 2]]
try:
if rpm.RPMSENSE_SCRIPT_PRE:
PREREQ_FLAG=rpm.RPMSENSE_PREREQ|rpm.RPMSENSE_SCRIPT_PRE|\
rpm.RPMSENSE_SCRIPT_POST|rpm.RPMSENSE_SCRIPT_PREUN|\
PREREQ_FLAG = rpm.RPMSENSE_PREREQ | rpm.RPMSENSE_SCRIPT_PRE |\
rpm.RPMSENSE_SCRIPT_POST | rpm.RPMSENSE_SCRIPT_PREUN |\
rpm.RPMSENSE_SCRIPT_POSTUN
except AttributeError:
try:
PREREQ_FLAG=rpm.RPMSENSE_PREREQ
PREREQ_FLAG = rpm.RPMSENSE_PREREQ
except:
# (proyvind): This seems ugly, but then again so does
# this whole check as well.
PREREQ_FLAG=False
PREREQ_FLAG = False
DEPFORMAT = '%-12s%s %s %s %s'
FORMAT = '%-12s%s'
ADDED = 'added'
ADDED = 'added'
REMOVED = 'removed'
# code starts here
def __init__(self, old, new, ignore=None):
self.result = []
self.old_data = { 'tags': {}, 'ignore': ignore }
self.new_data = { 'tags': {}, 'ignore': ignore }
self.old_data = {'tags': {}, 'ignore': ignore}
self.new_data = {'tags': {}, 'ignore': ignore}
if ignore is None:
ignore = set()
else:
@ -111,7 +111,7 @@ class Rpmdiff:
self.__add(self.FORMAT, ('S.5........', tagname))
# compare Provides, Requires, ...
for tag in self.PRCO:
for tag in self.PRCO:
self.__comparePRCOs(old, new, tag)
# compare the files
@ -183,16 +183,16 @@ class Rpmdiff:
# compare Provides, Requires, Conflicts, Obsoletes
def __comparePRCOs(self, old, new, name):
oldflags = old[name[:-1]+'FLAGS']
newflags = new[name[:-1]+'FLAGS']
oldflags = old[name[:-1] + 'FLAGS']
newflags = new[name[:-1] + 'FLAGS']
# fix buggy rpm binding not returning list for single entries
if not isinstance(oldflags, list): oldflags = [ oldflags ]
if not isinstance(newflags, list): newflags = [ newflags ]
if not isinstance(oldflags, list): oldflags = [oldflags]
if not isinstance(newflags, list): newflags = [newflags]
o = list(zip(old[name], oldflags, old[name[:-1]+'VERSION']))
n = list(zip(new[name], newflags, new[name[:-1]+'VERSION']))
o = list(zip(old[name], oldflags, old[name[:-1] + 'VERSION']))
n = list(zip(new[name], newflags, new[name[:-1] + 'VERSION']))
if name == 'PROVIDES': # filter our self provide
if name == 'PROVIDES': # filter our self provide
oldNV = (old['name'], rpm.RPMSENSE_EQUAL,
"%s-%s" % (old['version'], old['release']))
newNV = (new['name'], rpm.RPMSENSE_EQUAL,
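The Rpmdiff hunks combine several E2 codes: E201/E202 (whitespace just inside brackets) and E225 (missing whitespace around '='). A short sketch of both, illustrative only:

    flags = [ 1, 2 ]  # E201/E202: padding just inside the brackets
    flags = [1, 2]    # fixed
    MASK=1 | 2        # E225: no whitespace around '='
    MASK = 1 | 2      # fixed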
@ -144,121 +144,121 @@ def parse_task_params(method, params):
LEGACY_SIGNATURES = {
# key is method name, value is list of possible signatures
# signatures are like getargspec -- args, varargs, keywords, defaults
'chainbuild' : [
'chainbuild': [
[['srcs', 'target', 'opts'], None, None, (None,)],
],
'waitrepo' : [
'waitrepo': [
[['tag', 'newer_than', 'nvrs'], None, None, (None, None)],
],
'createLiveMedia' : [
'createLiveMedia': [
[['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],
],
'createAppliance' : [
'createAppliance': [
[['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],
],
'livecd' : [
'livecd': [
[['name', 'version', 'arch', 'target', 'ksfile', 'opts'], None, None, (None,)],
],
'buildNotification' : [
'buildNotification': [
[['recipients', 'build', 'target', 'weburl'], None, None, None],
],
'buildMaven' : [
'buildMaven': [
[['url', 'build_tag', 'opts'], None, None, (None,)],
],
'build' : [
'build': [
[['src', 'target', 'opts'], None, None, (None,)],
],
'buildSRPMFromSCM' : [
'buildSRPMFromSCM': [
[['url', 'build_tag', 'opts'], None, None, (None,)],
],
'rebuildSRPM' : [
'rebuildSRPM': [
[['srpm', 'build_tag', 'opts'], None, None, (None,)],
],
'createrepo' : [
'createrepo': [
[['repo_id', 'arch', 'oldrepo'], None, None, None],
],
'livemedia' : [
'livemedia': [
[['name', 'version', 'arches', 'target', 'ksfile', 'opts'], None, None, (None,)],
],
'indirectionimage' : [
'indirectionimage': [
[['opts'], None, None, None],
],
'wrapperRPM' : [
'wrapperRPM': [
[['spec_url', 'build_target', 'build', 'task', 'opts'], None, None, (None,)],
],
'createLiveCD' : [
'createLiveCD': [
[['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],
],
'appliance' : [
'appliance': [
[['name', 'version', 'arch', 'target', 'ksfile', 'opts'], None, None, (None,)],
],
'image' : [
'image': [
[['name', 'version', 'arches', 'target', 'inst_tree', 'opts'], None, None, (None,)],
],
'tagBuild' : [
'tagBuild': [
[['tag_id', 'build_id', 'force', 'fromtag', 'ignore_success'], None, None, (False, None, False)],
],
'chainmaven' : [
'chainmaven': [
[['builds', 'target', 'opts'], None, None, (None,)],
],
'newRepo' : [
'newRepo': [
[['tag', 'event', 'src', 'debuginfo', 'separate_src'], None, None, (None, False, False, False)],
],
'createImage' : [
'createImage': [
[['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'inst_tree', 'opts'], None, None, (None,)],
],
'tagNotification' : [
'tagNotification': [
[['recipients', 'is_successful', 'tag_info', 'from_info', 'build_info', 'user_info', 'ignore_success', 'failure_msg'], None, None, (None, '')],
],
'buildArch' : [
'buildArch': [
[['pkg', 'root', 'arch', 'keep_srpm', 'opts'], None, None, (None,)],
],
'maven' : [
'maven': [
[['url', 'target', 'opts'], None, None, (None,)],
],
'waittest' : [
'waittest': [
[['count', 'seconds'], None, None, (10,)],
],
'default' : [
'default': [
[[], 'args', 'opts', None],
],
'shutdown' : [
'shutdown': [
[[], None, None, None],
],
'restartVerify' : [
'restartVerify': [
[['task_id', 'host'], None, None, None],
],
'someMethod' : [
'someMethod': [
[[], 'args', None, None],
],
'restart' : [
'restart': [
[['host'], None, None, None],
],
'fork' : [
'fork': [
[['n', 'm'], None, None, (5, 37)],
],
'sleep' : [
'sleep': [
[['n'], None, None, None],
],
'dependantTask' : [
'dependantTask': [
[['wait_list', 'task_list'], None, None, None],
],
'subtask' : [
'subtask': [
[['n'], None, None, (4,)],
],
'restartHosts' : [
'restartHosts': [
[['options'], None, None, (None,)],
],
'runroot' : [
'runroot': [
[['root', 'arch', 'command', 'keep', 'packages', 'mounts', 'repo_id', 'skip_setarch', 'weight', 'upload_logs', 'new_chroot'], None, None, (False, [], [], None, False, None, None, False)],
],
'distRepo' : [
'distRepo': [
[['tag', 'repo_id', 'keys', 'task_opts'], None, None, None],
],
'createdistrepo' : [
'createdistrepo': [
[['tag', 'repo_id', 'arch', 'keys', 'opts'], None, None, None],
],
'saveFailedTree' : [
'saveFailedTree': [
[['buildrootID', 'full'], None, None, (False,)],
],
}
@ -278,7 +278,7 @@ class BaseTaskHandler(object):
Foreground = False
def __init__(self, id, method, params, session, options, workdir=None):
self.id = id #task id
self.id = id # task id
if method not in self.Methods:
raise koji.GenericError('method "%s" is not supported' % method)
self.method = method
@ -641,7 +641,7 @@ class SubtaskTask(BaseTaskHandler):
def handler(self, n=4):
if n > 0:
task_id = self.session.host.subtask(method='subtask',
arglist=[n-1],
arglist=[n - 1],
label='foo',
parent=self.id)
self.wait(task_id)
@ -715,14 +715,14 @@ class RestartHostsTask(BaseTaskHandler):
hostquery = {'enabled': True}
if 'channel' in options:
chan = self.session.getChannel(options['channel'], strict=True)
hostquery['channelID']= chan['id']
hostquery['channelID'] = chan['id']
if 'arches' in options:
hostquery['arches'] = options['arches']
hosts = self.session.listHosts(**hostquery)
if not hosts:
raise koji.GenericError("No matching hosts")
timeout = options.get('timeout', 3600*24)
timeout = options.get('timeout', 3600 * 24)
# fire off the subtasks
this_host = self.session.host.getID()
@ -532,8 +532,8 @@ def eventFromOpts(session, opts):
if repo:
rinfo = session.repoInfo(repo)
if rinfo:
return {'id' : rinfo['create_event'],
'ts' : rinfo['create_ts']}
return {'id': rinfo['create_event'],
'ts': rinfo['create_ts']}
return None
@ -664,7 +664,7 @@ class adler32_constructor(object):
return dup
digest_size = 4
block_size = 1 #I think
block_size = 1 # I think
def tsort(parts):
@ -196,7 +196,7 @@ class RunRootTask(koji.tasks.BaseTaskHandler):
mock_cmd = ['chroot']
if new_chroot:
mock_cmd.append('--new-chroot')
elif new_chroot is False: # None -> no option added
elif new_chroot is False: # None -> no option added
mock_cmd.append('--old-chroot')
if skip_setarch:
# we can't really skip it, but we can set it to the current one instead of of the chroot one
@ -58,12 +58,12 @@ def handle_runroot(options, session, args):
else:
command = args[2:]
try:
kwargs = { 'channel': opts.channel_override,
'packages': opts.package,
'mounts': opts.mount,
'repo_id': opts.repo_id,
'skip_setarch': opts.skip_setarch,
'weight': opts.weight }
kwargs = {'channel': opts.channel_override,
'packages': opts.package,
'mounts': opts.mount,
'repo_id': opts.repo_id,
'skip_setarch': opts.skip_setarch,
'weight': opts.weight}
# Only pass this kwarg if it is true - this prevents confusing older
# builders with a different function signature
if opts.new_chroot is not None:
@ -40,7 +40,7 @@ def handle_add_sidetag(options, session, args):
parser.error(_("Policy violation"))
if not opts.quiet:
print (tag["name"])
print(tag["name"])
if opts.wait:
args = ["--target", tag["name"]]
@ -88,4 +88,4 @@ def handle_list_sidetags(options, session, args):
user = opts.user
for tag in session.listSideTags(basetag=opts.basetag, user=user):
print (tag["name"])
print(tag["name"])
4
setup.py
@ -41,8 +41,8 @@ setup(
" interface."),
license="LGPLv2 and GPLv2+",
url="http://pagure.io/koji/",
author = 'Koji developers',
author_email = 'koji-devel@lists.fedorahosted.org',
author='Koji developers',
author_email='koji-devel@lists.fedorahosted.org',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
52
util/koji-gc
@ -81,7 +81,7 @@ def get_options():
parser.add_option("--smtp-user", dest="smtp_user", metavar="USER",
help=_("specify smtp username for notifications"))
parser.add_option("--smtp-pass", dest="smtp_pass", metavar="PASSWORD",
help=optparse.SUPPRESS_HELP) # do not allow passwords on a command line
help=optparse.SUPPRESS_HELP) # do not allow passwords on a command line
parser.add_option("--no-mail", action='store_false', default=True, dest="mail",
help=_("don't send notifications"))
parser.add_option("--send-mail", action='store_true', dest="mail",
@ -93,7 +93,7 @@ def get_options():
parser.add_option("--email-template", default="/etc/koji-gc/email.tpl",
help=_("notification template"))
parser.add_option("--action", help=_("action(s) to take"))
parser.add_option("--delay", metavar="INTERVAL", default = '5 days',
parser.add_option("--delay", metavar="INTERVAL", default='5 days',
help="time before eligible builds are placed in trashcan")
parser.add_option("--grace-period", default='4 weeks', metavar="INTERVAL",
help="time that builds are held in trashcan")
@ -184,7 +184,7 @@ def get_options():
# figure out actions
actions = ('prune', 'trash', 'delete', 'salvage')
if options.action:
options.action = options.action.lower().replace(',',' ').split()
options.action = options.action.lower().replace(',', ' ').split()
for x in options.action:
if x not in actions:
parser.error(_("Invalid action: %s") % x)
@ -193,7 +193,7 @@ def get_options():
# split patterns for unprotected keys
if options.unprotected_keys:
options.unprotected_key_patterns = options.unprotected_keys.replace(',',' ').split()
options.unprotected_key_patterns = options.unprotected_keys.replace(',', ' ').split()
else:
options.unprotected_key_patterns = []
@ -208,7 +208,7 @@ def get_options():
options.key_aliases[parts[0].upper()] = parts[1]
except ValueError as e:
print(e)
parser.error(_("Invalid key alias data in config: %s") % config.get('main','key_aliases'))
parser.error(_("Invalid key alias data in config: %s") % config.get('main', 'key_aliases'))
# parse time intervals
for key in ('delay', 'grace_period'):
@ -274,11 +274,11 @@ def check_package(name):
return True
time_units = {
'second' : 1,
'minute' : 60,
'hour' : 3600,
'day' : 86400,
'week' : 604800,
'second': 1,
'minute': 60,
'hour': 3600,
'day': 86400,
'week': 604800,
}
time_unit_aliases = [
# [unit, alias, alias, ...]
@ -405,14 +405,14 @@ def send_warning_notice(owner_name, builds):
with open(options.email_template, 'r') as f:
tpl = Template(f.read())
fmt="""\
fmt = """\
Build: %%(name)s-%%(version)s-%%(release)s
%s/buildinfo?buildID=%%(id)i""" % options.weburl
middle = '\n\n'.join([fmt % b for b in builds])
msg = MIMEText.MIMEText(tpl.safe_substitute(
owner = owner_name,
builds = middle,
owner=owner_name,
builds=middle,
))
if len(builds) == 1:
@ -420,7 +420,7 @@ Build: %%(name)s-%%(version)s-%%(release)s
else:
msg['Subject'] = "%i builds marked for deletion" % len(builds)
msg['From'] = options.from_addr
msg['To'] = "%s@%s" % (owner_name, options.email_domain) #XXX!
msg['To'] = "%s@%s" % (owner_name, options.email_domain) # XXX!
msg['X-Koji-Builder'] = owner_name
if options.test:
if options.debug:
@ -528,7 +528,7 @@ def handle_trash():
else:
age = time.time() - ts
else:
history = [(h['revoke_event'],h) for h in history]
history = [(h['revoke_event'], h) for h in history]
last = max(history)[1]
if not last['revoke_event']:
# this might happen if the build was tagged just now
@ -690,7 +690,7 @@ def handle_delete(just_salvage=False):
pprint.pprint(binfo)
pprint.pprint(history)
continue
assert len(current) == 1 #see db constraint
assert len(current) == 1 # see db constraint
current = current[0]
age = time.time() - current['create_ts']
if age < grace_period:
@ -705,8 +705,8 @@ def handle_delete(just_salvage=False):
if options.test:
print("Would have deleted build from trashcan: %s" % binfo['nvr'])
else:
print("Deleting build: %s" % binfo['nvr'])
mcall.untagBuildBypass(trashcan_tag, binfo['id'])
print("Deleting build: %s" % binfo['nvr'])
mcall.untagBuildBypass(trashcan_tag, binfo['id'])
mcall.deleteBuild(binfo['id'])
for binfo, result in six.moves.zip(continuing, mcall.call_all()):
@ -898,14 +898,14 @@ def handle_prune():
# get sig data
nvr = "%(name)s-%(version)s-%(release)s" % entry
data = {
'tagname' : tagname,
'pkgname' : pkg,
'tagname': tagname,
'pkgname': pkg,
'order': order - skipped,
'ts' : entry['create_ts'],
'nvr' : nvr,
'ts': entry['create_ts'],
'nvr': nvr,
}
data = LazyDict(data)
data['keys'] = LazyValue(get_build_sigs, (entry['build_id'],), {'cache':True})
data['keys'] = LazyValue(get_build_sigs, (entry['build_id'],), {'cache': True})
data['volname'] = LazyValue(lambda x: session.getBuild(x).get('volume_name'),
(entry['build_id'],), cache=True)
build_ids[nvr] = entry['build_id']
@ -923,7 +923,7 @@ def handle_prune():
print("Would have untagged %s from %s" % (nvr, tagname))
untagged.setdefault(nvr, {})[tagname] = 1
else:
print("Untagging build %s from %s" % (nvr, tagname))
print("Untagging build %s from %s" % (nvr, tagname))
try:
session.untagBuildBypass(taginfo['id'], entry['build_id'], force=bypass)
untagged.setdefault(nvr, {})[tagname] = 1
@ -935,7 +935,7 @@ def handle_prune():
print("Attempting to purge %i builds" % len(untagged))
for nvr in untagged:
build_id = build_ids[nvr]
tags = [t['name'] for t in session.listTags(build_id, perms=False)]
tags = [t['name'] for t in session.listTags(build_id, perms=False)]
if options.test:
# filted out the tags we would have dropped above
tags = [t for t in tags if t not in untagged[nvr]]
@ -958,7 +958,7 @@ def handle_prune():
if options.test:
print("Would have deleted build: %s" % nvr)
else:
print("Deleting untagged build: %s" % nvr)
print("Deleting untagged build: %s" % nvr)
try:
session.deleteBuild(build_id, strict=False)
except (six.moves.xmlrpc_client.Fault, koji.GenericError) as e:
|||
136
util/koji-shadow
136
util/koji-shadow
|
|
@ -51,7 +51,7 @@ except ImportError: # pragma: no cover
|
|||
|
||||
# koji.fp.o keeps stalling, probably network errors...
|
||||
# better to time out than to stall
|
||||
socket.setdefaulttimeout(180) #XXX - too short?
|
||||
socket.setdefaulttimeout(180) # XXX - too short?
|
||||
|
||||
logfile = None
|
||||
|
||||
|
|
@ -190,11 +190,11 @@ def get_options():
|
|||
return options, args
|
||||
|
||||
time_units = {
|
||||
'second' : 1,
|
||||
'minute' : 60,
|
||||
'hour' : 3600,
|
||||
'day' : 86400,
|
||||
'week' : 604800,
|
||||
'second': 1,
|
||||
'minute': 60,
|
||||
'hour': 3600,
|
||||
'day': 86400,
|
||||
'week': 604800,
|
||||
}
|
||||
time_unit_aliases = [
|
||||
# [unit, alias, alias, ...]
|
||||
|
|
@ -282,8 +282,8 @@ def activate_session(session):
|
|||
pass
|
||||
elif options.auth_cert and options.serverca:
|
||||
# convert to absolute paths
|
||||
options.auth_cert = os.path.expanduser(options.auth_cert)
|
||||
options.serverca = os.path.expanduser(options.serverca)
|
||||
options.auth_cert = os.path.expanduser(options.auth_cert)
|
||||
options.serverca = os.path.expanduser(options.serverca)
|
||||
|
||||
if os.path.isfile(options.auth_cert):
|
||||
# authenticate using SSL client cert
|
||||
|
|
@ -358,7 +358,7 @@ class TrackedBuild(object):
|
|||
if rinfo['arch'] == 'src':
|
||||
self.srpm = rinfo
|
||||
self.getExtraArches()
|
||||
self.getDeps() #sets deps, br_tag, base, order, (maybe state)
|
||||
self.getDeps() # sets deps, br_tag, base, order, (maybe state)
|
||||
|
||||
def updateState(self):
|
||||
"""Update state from local hub
|
||||
|
|
@ -475,10 +475,10 @@ class TrackedBuild(object):
|
|||
return
|
||||
buildroots.sort()
|
||||
self.order = buildroots[-1]
|
||||
seen = {} #used to avoid scanning the same buildroot twice
|
||||
builds = {} #track which builds we need for a rebuild
|
||||
bases = {} #track base install for buildroots
|
||||
tags = {} #track buildroot tag(s)
|
||||
seen = {} # used to avoid scanning the same buildroot twice
|
||||
builds = {} # track which builds we need for a rebuild
|
||||
bases = {} # track base install for buildroots
|
||||
tags = {} # track buildroot tag(s)
|
||||
remote.multicall = True
|
||||
unpack = []
|
||||
for br_id in buildroots:
|
||||
|
|
@ -534,7 +534,7 @@ class TrackedBuild(object):
|
|||
if len(builds) == 0:
|
||||
self.setState("noroot")
|
||||
self.deps = builds
|
||||
self.revised_deps = None #BuildTracker will set this later
|
||||
self.revised_deps = None # BuildTracker will set this later
|
||||
self.br_tag = tag
|
||||
self.base = base
|
||||
|
||||
|
|
@ -714,20 +714,20 @@ class BuildTracker(object):
|
|||
head = " " * depth
|
||||
for ignored in self.ignorelist:
|
||||
if (build.name == ignored) or fnmatch.fnmatch(build.name, ignored):
|
||||
log ("%sIgnored Build: %s%s" % (head, build.nvr, tail))
|
||||
log("%sIgnored Build: %s%s" % (head, build.nvr, tail))
|
||||
build.setState('ignore')
|
||||
return build
|
||||
check = self.checkFilter(build, grey=None)
|
||||
if check is None:
|
||||
# greylisted builds are ok as deps, but not primary builds
|
||||
if depth == 0:
|
||||
log ("%sGreylisted build %s%s" % (head, build.nvr, tail))
|
||||
log("%sGreylisted build %s%s" % (head, build.nvr, tail))
|
||||
build.setState('grey')
|
||||
return build
|
||||
# get rid of 'grey' state (filter will not be checked again)
|
||||
build.updateState()
|
||||
elif not check:
|
||||
log ("%sBlocked build %s%s" % (head, build.nvr, tail))
|
||||
log("%sBlocked build %s%s" % (head, build.nvr, tail))
|
||||
build.setState('blocked')
|
||||
return build
|
||||
# make sure we dont have the build name protected
|
||||
|
|
@ -737,29 +737,29 @@ class BuildTracker(object):
|
|||
if replace:
|
||||
build.substitute = replace
|
||||
if depth > 0:
|
||||
log ("%sDep replaced: %s->%s" % (head, build.nvr, replace))
|
||||
log("%sDep replaced: %s->%s" % (head, build.nvr, replace))
|
||||
return build
|
||||
if options.prefer_new and (depth > 0) and (tag is not None) and not (build.state == "common"):
|
||||
latestBuild = self.newerBuild(build, tag)
|
||||
if latestBuild != None:
|
||||
build.substitute = latestBuild.nvr
|
||||
log ("%sNewer build replaced: %s->%s" % (head, build.nvr, latestBuild.nvr))
|
||||
log("%sNewer build replaced: %s->%s" % (head, build.nvr, latestBuild.nvr))
|
||||
return build
|
||||
else:
|
||||
log ("%sProtected Build: %s" % (head, build.nvr))
|
||||
log("%sProtected Build: %s" % (head, build.nvr))
|
||||
if build.state == "common":
|
||||
# we're good
|
||||
if build.rebuilt:
|
||||
log ("%sCommon build (rebuilt) %s%s" % (head, build.nvr, tail))
|
||||
log("%sCommon build (rebuilt) %s%s" % (head, build.nvr, tail))
|
||||
else:
|
||||
log ("%sCommon build %s%s" % (head, build.nvr, tail))
|
||||
log("%sCommon build %s%s" % (head, build.nvr, tail))
|
||||
elif build.state == 'pending':
|
||||
log ("%sRebuild in progress: %s%s" % (head, build.nvr, tail))
|
||||
log("%sRebuild in progress: %s%s" % (head, build.nvr, tail))
|
||||
elif build.state == "broken":
|
||||
# The build already exists locally, but is somehow invalid.
|
||||
# We should not replace it automatically. An admin can reset it
|
||||
# if that is the correct thing. A substitution might also be in order
|
||||
log ("%sWarning: build exists, but is invalid: %s%s" % (head, build.nvr, tail))
|
||||
log("%sWarning: build exists, but is invalid: %s%s" % (head, build.nvr, tail))
|
||||
#
|
||||
# !! Cases where importing a noarch is /not/ ok must occur
|
||||
# before this point
|
||||
|
|
@ -767,30 +767,30 @@ class BuildTracker(object):
|
|||
elif (options.import_noarch or options.import_noarch_only) and build.isNoarch():
|
||||
self.importBuild(build, tag)
|
||||
elif options.import_noarch_only and not build.isNoarch():
|
||||
log ("%sSkipping archful build: %s" % (head, build.nvr))
|
||||
log("%sSkipping archful build: %s" % (head, build.nvr))
|
||||
elif build.state == "noroot":
|
||||
# Can't rebuild it, this is what substitutions are for
|
||||
log ("%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail))
|
||||
log("%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail))
|
||||
elif build.state == 'brokendeps':
|
||||
# should not be possible at this point
|
||||
log ("Error: build reports brokendeps state before dep scan")
|
||||
log("Error: build reports brokendeps state before dep scan")
|
||||
elif build.state == "missing":
|
||||
# scan its deps
|
||||
log ("%sMissing build %s%s. Scanning deps..." % (head, build.nvr, tail))
|
||||
log("%sMissing build %s%s. Scanning deps..." % (head, build.nvr, tail))
|
||||
newdeps = []
|
||||
# include extra local builds as deps.
|
||||
if self.includelist:
|
||||
for dep in self.includelist:
|
||||
info = session.getBuild(dep)
|
||||
if info:
|
||||
log ("%s Adding local Dep %s%s" % (head, dep, tail))
|
||||
log("%s Adding local Dep %s%s" % (head, dep, tail))
|
||||
extradep = LocalBuild(info)
|
||||
newdeps.append(extradep)
|
||||
else:
|
||||
log ("%s Warning: could not find build for %s" % (head, dep))
|
||||
log("%s Warning: could not find build for %s" % (head, dep))
|
||||
# don't actually set build.revised_deps until we finish the dep scan
|
||||
for dep_id in build.deps:
|
||||
dep = self.scanBuild(dep_id, from_build=build, depth=depth+1, tag=tag)
|
||||
dep = self.scanBuild(dep_id, from_build=build, depth=depth + 1, tag=tag)
|
||||
if dep.name in self.ignorelist:
|
||||
# we are not done dep solving yet. but we dont want this dep in our buildroot
|
||||
continue
|
||||
|
|
@ -798,10 +798,10 @@ class BuildTracker(object):
|
|||
if dep.substitute:
|
||||
dep2 = self.getSubstitute(dep.substitute)
|
||||
if isinstance(dep2, TrackedBuild):
|
||||
self.scanBuild(dep2.id, from_build=build, depth=depth+1, tag=tag)
|
||||
self.scanBuild(dep2.id, from_build=build, depth=depth + 1, tag=tag)
|
||||
elif dep2 is None:
|
||||
# dep is missing on both local and remote
|
||||
log ("%sSubstitute dep unavailable: %s" % (head, dep2.nvr))
|
||||
log("%sSubstitute dep unavailable: %s" % (head, dep2.nvr))
|
||||
# no point in continuing
|
||||
break
|
||||
# otherwise dep2 should be LocalBuild instance
|
||||
|
|
@ -809,7 +809,7 @@ class BuildTracker(object):
|
|||
elif dep.state in ('broken', 'brokendeps', 'noroot', 'blocked'):
|
||||
# no point in continuing
|
||||
build.setState('brokendeps')
|
||||
log ("%sCan't rebuild %s, %s is %s" % (head, build.nvr, dep.nvr, dep.state))
|
||||
log("%sCan't rebuild %s, %s is %s" % (head, build.nvr, dep.nvr, dep.state))
|
||||
newdeps = None
|
||||
break
|
||||
else:
|
||||
|
|
@ -837,11 +837,11 @@ class BuildTracker(object):
|
|||
if options.first_one:
|
||||
return
|
||||
except (socket.timeout, socket.error):
|
||||
log ("retry")
|
||||
log("retry")
|
||||
continue
|
||||
break
|
||||
else:
|
||||
log ("Error: unable to scan %(name)s-%(version)s-%(release)s" % build)
|
||||
log("Error: unable to scan %(name)s-%(version)s-%(release)s" % build)
|
||||
continue
|
||||
|
||||
def _importURL(self, url, fn):
|
||||
|
|
@ -853,8 +853,8 @@ class BuildTracker(object):
|
|||
old_umask = os.umask(0o02)
|
||||
try:
|
||||
koji.ensuredir(os.path.dirname(dst))
|
||||
os.chown(os.path.dirname(dst), 48, 48) #XXX - hack
|
||||
log ("Downloading %s to %s" % (url, dst))
|
||||
os.chown(os.path.dirname(dst), 48, 48) # XXX - hack
|
||||
log("Downloading %s to %s" % (url, dst))
|
||||
fsrc = urllib2.urlopen(url)
|
||||
fdst = open(fn, 'w')
|
||||
shutil.copyfileobj(fsrc, fdst)
|
||||
|
|
@ -867,24 +867,24 @@ class BuildTracker(object):
|
|||
# for now, though, just use uploadWrapper
|
||||
koji.ensuredir(options.workpath)
|
||||
dst = "%s/%s" % (options.workpath, fn)
|
||||
log ("Downloading %s to %s..." % (url, dst))
|
||||
log("Downloading %s to %s..." % (url, dst))
|
||||
fsrc = urllib2.urlopen(url)
|
||||
fdst = open(dst, 'w')
|
||||
shutil.copyfileobj(fsrc, fdst)
|
||||
fsrc.close()
|
||||
fdst.close()
|
||||
log ("Uploading %s..." % dst)
|
||||
log("Uploading %s..." % dst)
|
||||
session.uploadWrapper(dst, serverdir, blocksize=65536)
|
||||
session.importRPM(serverdir, fn)
|
||||
|
||||
def importBuild(self, build, tag=None):
|
||||
'''import a build from remote hub'''
|
||||
if not build.srpm:
|
||||
log ("No srpm for build %s, skipping import" % build.nvr)
|
||||
log("No srpm for build %s, skipping import" % build.nvr)
|
||||
# TODO - support no-src imports here
|
||||
return False
|
||||
if not options.remote_topurl:
|
||||
log ("Skipping import of %s, remote_topurl not specified" % build.nvr)
|
||||
log("Skipping import of %s, remote_topurl not specified" % build.nvr)
|
||||
return False
|
||||
pathinfo = koji.PathInfo(options.remote_topurl)
|
||||
build_url = pathinfo.build(build.info)
|
||||
|
|
@ -954,7 +954,7 @@ class BuildTracker(object):
|
|||
else:
|
||||
parents = session.getInheritanceData(taginfo['id'])
|
||||
if parents:
|
||||
log ("Warning: shadow build tag has inheritance")
|
||||
log("Warning: shadow build tag has inheritance")
|
||||
# check package list
|
||||
pkgs = {}
|
||||
for pkg in session.listPackages(tagID=taginfo['id']):
|
||||
|
|
@ -1010,7 +1010,7 @@ class BuildTracker(object):
|
|||
build_group = group
|
||||
else:
|
||||
# we should have no other groups but build
|
||||
log ("Warning: found stray group: %s" % group)
|
||||
log("Warning: found stray group: %s" % group)
|
||||
drop_groups.append(group['name'])
|
||||
if build_group:
|
||||
# fix build group package list based on base of build to shadow
|
||||
|
|
@ -1021,7 +1021,7 @@ class BuildTracker(object):
|
|||
# no group deps needed/allowed
|
||||
drop_deps = [(g['name'], 1) for g in build_group['grouplist']]
|
||||
if drop_deps:
|
||||
log ("Warning: build group had deps: %r" % build_group)
|
||||
log("Warning: build group had deps: %r" % build_group)
|
||||
else:
|
||||
add_pkgs = build.base
|
||||
drop_pkgs = []
|
||||
|
|
@ -1061,17 +1061,17 @@ class BuildTracker(object):
|
|||
# [?] use remote SCM url (if avail)?
|
||||
src = build.getSource()
|
||||
if not src:
|
||||
log ("Couldn't get source for %s" % build.nvr)
|
||||
log("Couldn't get source for %s" % build.nvr)
|
||||
return None
|
||||
# wait for repo task
|
||||
log ("Waiting on newRepo task %i" % task_id)
|
||||
log("Waiting on newRepo task %i" % task_id)
|
||||
while True:
|
||||
tinfo = session.getTaskInfo(task_id)
|
||||
tstate = koji.TASK_STATES[tinfo['state']]
|
||||
if tstate == 'CLOSED':
|
||||
break
|
||||
elif tstate in ('CANCELED', 'FAILED'):
|
||||
log ("Error: failed to generate repo")
|
||||
log("Error: failed to generate repo")
|
||||
return None
|
||||
# add a timeout?
|
||||
# TODO ...and verify repo
|
||||
|
|
@ -1127,12 +1127,12 @@ class BuildTracker(object):
|
|||
states = sorted(self.state_idx.keys())
|
||||
parts = ["%s: %i" % (s, len(self.state_idx[s])) for s in states]
|
||||
parts.append("total: %i" % N)
|
||||
log (' '.join(parts))
|
||||
log(' '.join(parts))
|
||||
|
||||
def _print_builds(self, mylist):
|
||||
"""small helper function for output"""
|
||||
for build in mylist:
|
||||
log (" %s (%s)" % (build.nvr, build.state))
|
||||
log(" %s (%s)" % (build.nvr, build.state))
|
||||
|
||||
def checkJobs(self, tag=None):
|
||||
"""Check outstanding jobs. Return true if anything changes"""
|
||||
|
|
@ -1140,31 +1140,31 @@ class BuildTracker(object):
|
|||
for build_id, build in self.state_idx['pending'].items():
|
||||
# check pending builds
|
||||
if not build.task_id:
|
||||
log ("No task id recorded for %s" % build.nvr)
|
||||
log("No task id recorded for %s" % build.nvr)
|
||||
build.updateState()
|
||||
ret = True
|
||||
info = session.getTaskInfo(build.task_id)
|
||||
if not info:
|
||||
log ("No such task: %i (build %s)" % (build.task_id, build.nvr))
|
||||
log("No such task: %i (build %s)" % (build.task_id, build.nvr))
|
||||
build.updateState()
|
||||
ret = True
|
||||
continue
|
||||
state = koji.TASK_STATES[info['state']]
|
||||
if state in ('CANCELED', 'FAILED'):
|
||||
log ("Task %i is %s (build %s)" % (build.task_id, state, build.nvr))
|
||||
log("Task %i is %s (build %s)" % (build.task_id, state, build.nvr))
|
||||
# we have to set the state to broken manually (updateState will mark
|
||||
# a failed build as missing)
|
||||
build.setState('broken')
|
||||
ret = True
|
||||
elif state == 'CLOSED':
|
||||
log ("Task %i complete (build %s)" % (build.task_id, build.nvr))
|
||||
log("Task %i complete (build %s)" % (build.task_id, build.nvr))
|
||||
if options.tag_build and not tag == None:
|
||||
self.tagSuccessful(build.nvr, tag)
|
||||
build.updateState()
|
||||
ret = True
|
||||
if build.state != 'common':
|
||||
log ("Task %i finished, but %s still missing" \
|
||||
% (build.task_id, build.nvr))
|
||||
log("Task %i finished, but %s still missing" \
|
||||
% (build.task_id, build.nvr))
|
||||
return ret
|
||||
|
||||
def checkBuildDeps(self, build):
|
||||
|
|
@ -1175,7 +1175,7 @@ class BuildTracker(object):
|
|||
problem = [x for x in build.revised_deps
|
||||
if x.state in ('broken', 'brokendeps', 'noroot', 'blocked')]
|
||||
if problem:
|
||||
log ("Can't rebuild %s, missing %i deps" % (build.nvr, len(problem)))
|
||||
log("Can't rebuild %s, missing %i deps" % (build.nvr, len(problem)))
|
||||
build.setState('brokendeps')
|
||||
self._print_builds(problem)
|
||||
return False
|
||||
|
|
@ -1201,7 +1201,7 @@ class BuildTracker(object):
|
|||
if not self.checkBuildDeps(build):
|
||||
continue
|
||||
# otherwise, we should be good to rebuild
|
||||
log ("rebuild: %s" % build.nvr)
|
||||
log("rebuild: %s" % build.nvr)
|
||||
task_id = self.rebuild(build)
|
||||
ret = True
|
||||
if options.test:
|
||||
|
|
@ -1209,7 +1209,7 @@ class BuildTracker(object):
|
|||
build.setState('common')
|
||||
elif not task_id:
|
||||
# something went wrong setting up the rebuild
|
||||
log ("Did not get a task for %s" % build.nvr)
|
||||
log("Did not get a task for %s" % build.nvr)
|
||||
build.setState('broken')
|
||||
else:
|
||||
# build might not show up as 'BUILDING' immediately, so we
|
||||
|
|
@ -1218,13 +1218,13 @@ class BuildTracker(object):
|
|||
build.setState('pending')
|
||||
if options.max_jobs and len(self.state_idx['pending']) >= options.max_jobs:
|
||||
if options.debug:
|
||||
log ("Maximum number of jobs reached.")
|
||||
log("Maximum number of jobs reached.")
|
||||
break
|
||||
return ret
|
||||
|
||||
def runRebuilds(self, tag=None):
|
||||
"""Rebuild missing builds"""
|
||||
log ("Determining rebuild order")
|
||||
log("Determining rebuild order")
|
||||
# using self.state_idx to track build states
|
||||
# make sure state_idx has at least these states
|
||||
initial_avail = len(self.state_idx['common'])
|
||||
|
|
@ -1240,16 +1240,16 @@ class BuildTracker(object):
|
|||
time.sleep(30)
|
||||
continue
|
||||
self.report_brief()
|
||||
log ("Rebuilt %i builds" % (len(self.state_idx['common']) - initial_avail))
|
||||
log("Rebuilt %i builds" % (len(self.state_idx['common']) - initial_avail))
|
||||
|
||||
def tagSuccessful(self, nvr, tag):
|
||||
"""tag completed builds into final tags"""
|
||||
# TODO: check if there are other reasons why tagging may fail and handle them
|
||||
try:
|
||||
session.tagBuildBypass(tag, nvr)
|
||||
log ("tagged %s to %s" % (nvr, tag))
|
||||
log("tagged %s to %s" % (nvr, tag))
|
||||
except koji.TagError:
|
||||
log ("NOTICE: %s already tagged in %s" % (nvr, tag))
|
||||
log("NOTICE: %s already tagged in %s" % (nvr, tag))
|
||||
|
||||
|
||||
def main(args):
|
||||
|
|
@ -1263,11 +1263,11 @@ def main(args):
|
|||
if options.logfile:
|
||||
filename = options.logfile
|
||||
try:
|
||||
logfile = os.open(filename,os.O_CREAT|os.O_RDWR|os.O_APPEND, 0o777)
|
||||
logfile = os.open(filename, os.O_CREAT | os.O_RDWR | os.O_APPEND, 0o777)
|
||||
except:
|
||||
logfile = None
|
||||
if logfile is not None:
|
||||
log ("logging to %s" % filename)
|
||||
log("logging to %s" % filename)
|
||||
os.write(logfile, "\n\n========================================================================\n")
|
||||
|
||||
if options.build:
|
||||
|
|
@ -1275,10 +1275,10 @@ def main(args):
|
|||
tracker.scanBuild(binfo['id'], tag=tag)
|
||||
else:
|
||||
if tag is None:
|
||||
log ("Tag is required")
|
||||
log("Tag is required")
|
||||
return
|
||||
else:
|
||||
log ("Working on tag %s" % (tag))
|
||||
log("Working on tag %s" % (tag))
|
||||
tracker.scanTag(tag)
|
||||
tracker.report()
|
||||
tracker.runRebuilds(tag)
|
||||
|
|
|
|||
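Nearly all of the koji-shadow churn above is E211 (whitespace before parenthesis): 'log (...)' becomes 'log(...)'. A runnable sketch of the rule, illustrative only:

    def log(msg):
        print(msg)

    log ("retry")  # E211: whitespace between the callable and '('
    log("retry")   # fixed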
46
util/kojira
@ -48,7 +48,7 @@ def getTag(session, tag, event=None):
|
|||
cache = tag_cache
|
||||
now = time.time()
|
||||
if (tag, event) in cache:
|
||||
ts, info = cache[(tag,event)]
|
||||
ts, info = cache[(tag, event)]
|
||||
if now - ts < 600:
|
||||
# use the cache
|
||||
return info
|
||||
|
|
@ -84,7 +84,7 @@ class ManagedRepo(object):
|
|||
if self.current:
|
||||
order = self.session.getFullInheritance(self.tag_id, event=self.event_id)
|
||||
# order may contain same tag more than once
|
||||
tags = {self.tag_id : 1}
|
||||
tags = {self.tag_id: 1}
|
||||
for x in order:
|
||||
tags[x['parent_id']] = 1
|
||||
self.taglist = to_list(tags.keys())
|
||||
|
|
@@ -348,11 +348,11 @@ class RepoManager(object):
             # we're already tracking it
             if repo.state != data['state']:
                 self.logger.info('State changed for repo %s: %s -> %s'
-                                 %(repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']]))
+                                 % (repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']]))
                 repo.state = data['state']
         else:
             self.logger.info('Found repo %s, state=%s'
-                             %(repo_id, koji.REPO_STATES[data['state']]))
+                             % (repo_id, koji.REPO_STATES[data['state']]))
             repo = ManagedRepo(self, data)
             self.repos[repo_id] = repo
         if not getTag(self.session, repo.tag_id) and not repo.expired():
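
The `%(...)` → `% (...)` changes here are E228 (missing whitespace around the modulo operator): for string formatting, `%` is a binary operator and gets a space on each side. A minimal sketch with made-up values standing in for the repo id and states:

    # E228: '%' is a binary operator, so it gets one space on each side
    msg = 'State changed for repo %s: %s -> %s' % (42, 'INIT', 'READY')
    print(msg)
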
@@ -516,7 +516,7 @@ class RepoManager(object):
             # use the cache
             return stats
         data = self.session.listBuildroots(tagID=tag_id,
-                                           queryOpts={'order': '-create_event_id', 'limit' : 100})
+                                           queryOpts={'order': '-create_event_id', 'limit': 100})
         # XXX magic number (limit)
         if data:
             tag_name = data[0]['tag_name']
@@ -525,7 +525,7 @@ class RepoManager(object):
         stats = {'data': data, 'ts': now, 'tag_name': tag_name}
         recent = [x for x in data if now - x['create_ts'] < 3600 * 24]
         # XXX magic number
-        stats ['n_recent'] = len(recent)
+        stats['n_recent'] = len(recent)
         self.tag_use_stats[tag_id] = stats
         self.logger.debug("tag %s recent use count: %i" % (tag_name, len(recent)))
         return stats
@@ -633,8 +633,8 @@ class RepoManager(object):
         # TODO: implement a timeout

         # also check other newRepo tasks
-        repo_tasks = self.session.listTasks(opts={'method':'newRepo',
-                                                  'state':([koji.TASK_STATES[s] for s in ('FREE', 'OPEN')])})
+        repo_tasks = self.session.listTasks(opts={'method': 'newRepo',
+                                                  'state': ([koji.TASK_STATES[s] for s in ('FREE', 'OPEN')])})
         others = [t for t in repo_tasks if t['id'] not in self.tasks]
         for tinfo in others:
             if tinfo['id'] not in self.other_tasks:
@@ -701,7 +701,7 @@ class RepoManager(object):
         entry = {
             'taginfo': taginfo,
             'expire_ts': ts,
-            'needed_since' : time.time(),
+            'needed_since': time.time(),
         }
         self.setTagScore(entry)
         self.needed_tags[tag_id] = entry
@@ -824,7 +824,7 @@ def main(options, session):
     repomgr.readCurrentRepos()
     def shutdown(*args):
         raise SystemExit
-    signal.signal(signal.SIGTERM,shutdown)
+    signal.signal(signal.SIGTERM, shutdown)
     curr_chk_thread = start_currency_checker(session, repomgr)
     regen_thread = start_regen_loop(session, repomgr)
     # TODO also move rmtree jobs to threads
@@ -926,15 +926,15 @@ def get_options():
         'offline_retry_interval': 120,
         'no_ssl_verify': False,
         'max_delete_processes': 4,
-        'max_repo_tasks' : 4,
-        'max_repo_tasks_maven' : 2,
-        'repo_tasks_limit' : 10,
-        'delete_batch_size' : 3,
-        'deleted_repo_lifetime': 7*24*3600,
+        'max_repo_tasks': 4,
+        'max_repo_tasks_maven': 2,
+        'repo_tasks_limit': 10,
+        'delete_batch_size': 3,
+        'deleted_repo_lifetime': 7 * 24 * 3600,
         # XXX should really be called expired_repo_lifetime
-        'dist_repo_lifetime': 7*24*3600,
+        'dist_repo_lifetime': 7 * 24 * 3600,
         'recent_tasks_lifetime': 600,
-        'sleeptime' : 15,
+        'sleeptime': 15,
         'cert': None,
         'ca': '',  # FIXME: unused, remove in next major release
         'serverca': None,
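
The defaults dict above exercises two more rules: E203 (no whitespace before the ':' in a dict literal) and E226 (missing whitespace around an arithmetic operator, here the `*` in the lifetime computation). A standalone sketch reusing two of the options from the hunk:

    # E203-clean: no space before ':'; E226-clean: spaces around '*'
    defaults = {
        'sleeptime': 15,
        'deleted_repo_lifetime': 7 * 24 * 3600,  # one week, in seconds
    }
    assert defaults['deleted_repo_lifetime'] == 604800
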
@@ -948,7 +948,7 @@ def get_options():
     str_opts = ('topdir', 'server', 'user', 'password', 'logfile', 'principal', 'keytab', 'krbservice',
                 'cert', 'ca', 'serverca', 'debuginfo_tags',
                 'source_tags', 'separate_source_tags', 'ignore_tags')  # FIXME: remove ca here
-    bool_opts = ('verbose','debug','ignore_stray_repos', 'offline_retry',
+    bool_opts = ('verbose', 'debug', 'ignore_stray_repos', 'offline_retry',
                 'krb_rdns', 'krb_canon_host', 'no_ssl_verify')
     for name in config.options(section):
         if name in int_opts:
@@ -962,7 +962,7 @@ def get_options():
     for name, value in defaults.items():
         if getattr(options, name, None) is None:
             setattr(options, name, value)
-    if options.logfile in ('','None','none'):
+    if options.logfile in ('', 'None', 'none'):
         options.logfile = None
     # special handling for cert defaults
     cert_defaults = {
@@ -983,10 +983,10 @@ def quit(msg=None, code=1):
     sys.stderr.flush()
     sys.exit(code)

-if __name__ == "__main__":
+if __name__ == "__main__":

     options = get_options()
-    topdir = getattr(options,'topdir',None)
+    topdir = getattr(options, 'topdir', None)
     pathinfo = koji.PathInfo(topdir)
     if options.show_config:
         pprint.pprint(options.__dict__)
@@ -999,7 +999,7 @@ if __name__ == "__main__":
         except:
             sys.stderr.write("Cannot create logfile: %s\n" % options.logfile)
             sys.exit(1)
-        if not os.access(options.logfile,os.W_OK):
+        if not os.access(options.logfile, os.W_OK):
             sys.stderr.write("Cannot write to logfile: %s\n" % options.logfile)
             sys.exit(1)
     koji.add_file_logger("koji", options.logfile)
@@ -1015,7 +1015,7 @@ if __name__ == "__main__":
         logger.setLevel(logging.WARNING)

     session_opts = koji.grab_session_options(options)
-    session = koji.ClientSession(options.server,session_opts)
+    session = koji.ClientSession(options.server, session_opts)
     if options.cert is not None and os.path.isfile(options.cert):
         # authenticate using SSL client certificates
         session.ssl_login(options.cert, None, options.serverca)

vm/kojivmd (22 changed lines)

@@ -214,8 +214,8 @@ def main(options, session):
     def restart(*args):
         logger.warn("Initiating graceful restart")
         tm.restart_pending = True
-    signal.signal(signal.SIGTERM,shutdown)
-    signal.signal(signal.SIGUSR1,restart)
+    signal.signal(signal.SIGTERM, shutdown)
+    signal.signal(signal.SIGUSR1, restart)
     taken = False
     tm.cleanupAllVMs()
     while True:
@@ -224,7 +224,7 @@ def main(options, session):
             tm.updateTasks()
             taken = tm.getNextTask()
             tm.cleanupExpiredVMs()
-        except (SystemExit,ServerExit,KeyboardInterrupt):
+        except (SystemExit, ServerExit, KeyboardInterrupt):
             logger.warn("Exiting")
             break
         except ServerRestart:
@@ -245,7 +245,7 @@ def main(options, session):
             # The load-balancing code in getNextTask() will prevent a single builder
             # from getting overloaded.
             time.sleep(options.sleeptime)
-        except (SystemExit,KeyboardInterrupt):
+        except (SystemExit, KeyboardInterrupt):
             logger.warn("Exiting")
             break
     logger.warn("Shutting down, please wait...")
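
The `except (SystemExit,ServerExit,KeyboardInterrupt)` fixes are E231 again: commas inside an exception tuple need a trailing space like any other. A runnable sketch:

    # E231 applies inside exception tuples too: a space after each comma
    try:
        raise KeyboardInterrupt
    except (SystemExit, KeyboardInterrupt):
        print("Exiting")
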
@@ -293,7 +293,7 @@ class DaemonXMLRPCServer(six.moves.xmlrpc_server.SimpleXMLRPCServer):
     if sys.version_info[:2] <= (2, 4):
         # Copy and paste from SimpleXMLRPCServer, with the addition of passing
         # allow_none=True to xmlrpclib.dumps()
-        def _marshaled_dispatch(self, data, dispatch_method = None):
+        def _marshaled_dispatch(self, data, dispatch_method=None):
             params, method = six.moves.xmlrpc_client.loads(data)
             try:
                 if dispatch_method is not None:
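
`dispatch_method = None` → `dispatch_method=None` is E251: no spaces around '=' when it introduces a keyword default or keyword argument. A minimal sketch with a hypothetical dispatcher, not the XML-RPC one above:

    # E251: keyword defaults and keyword arguments use '=' with no spaces
    def dispatch(data, dispatch_method=None):
        if dispatch_method is not None:
            return dispatch_method(data)
        return data

    print(dispatch("ping", dispatch_method=str.upper))  # -> PING
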
@@ -344,11 +344,11 @@ class WinBuildTask(MultiPlatformTask):
         if not repo_info:
             raise koji.BuildError('invalid repo ID: %s' % repo_id)
         policy_data = {
-            'user_id' : task_info['owner'],
-            'source' : source_url,
-            'task_id' : self.id,
-            'build_tag' : build_tag['id'],
-            'skip_tag' : bool(opts.get('skip_tag')),
+            'user_id': task_info['owner'],
+            'source': source_url,
+            'task_id': self.id,
+            'build_tag': build_tag['id'],
+            'skip_tag': bool(opts.get('skip_tag')),
             'target': target_info['id']
         }
         if not opts.get('skip_tag'):
@@ -840,7 +840,7 @@ class VMExecTask(BaseTaskHandler):

         conn = libvirt.open(None)
         clone_name = self.clone(conn, name, opts)
-        self.logger.debug('Cloned VM %s to %s',name, clone_name)
+        self.logger.debug('Cloned VM %s to %s', name, clone_name)
         try:
             vm = conn.lookupByName(clone_name)
             macaddr = self.macAddr(vm)
@@ -62,7 +62,7 @@ def _setUserCookie(environ, user):
     value = "%s:%s" % (shasum.hexdigest(), value)
     cookies = six.moves.http_cookies.SimpleCookie()
     cookies['user'] = value
-    c = cookies['user'] #morsel instance
+    c = cookies['user']  # morsel instance
     c['secure'] = True
     c['path'] = os.path.dirname(environ['SCRIPT_NAME'])
     # the Cookie module treats integer expire times as relative seconds
@@ -75,7 +75,7 @@ def _setUserCookie(environ, user):
 def _clearUserCookie(environ):
     cookies = six.moves.http_cookies.SimpleCookie()
     cookies['user'] = ''
-    c = cookies['user'] #morsel instance
+    c = cookies['user']  # morsel instance
     c['path'] = os.path.dirname(environ['SCRIPT_NAME'])
     c['expires'] = 0
     out = c.OutputString()
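
The `#morsel instance` fixes combine E262 (an inline comment must start with '# ', a hash followed by a space) and E261 (at least two spaces between code and an inline comment). A one-line sketch:

    c = {}  # morsel instance - two spaces before the '#', one space after it
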
@@ -697,7 +697,7 @@ def taskinfo(environ, taskID):
     pathinfo = koji.PathInfo(topdir=topurl)
     values['pathinfo'] = pathinfo

-    paths = [] # (volume, relpath) tuples
+    paths = []  # (volume, relpath) tuples
     for relname, volumes in six.iteritems(server.listTaskOutput(task['id'], all_volumes=True)):
         paths += [(volume, relname) for volume in volumes]
     values['output'] = sorted(paths, key=_sortByExtAndName)
@@ -1071,7 +1071,7 @@ def tagparent(environ, tagID, parentID, action):
         if datum['priority'] > maxPriority:
             maxPriority = datum['priority']
     values['maxPriority'] = maxPriority
-    inheritanceData = [datum for datum in inheritanceData \
+    inheritanceData = [datum for datum in inheritanceData \
                        if datum['parent_id'] == parent['id']]
     if len(inheritanceData) == 0:
         values['inheritanceData'] = None
@@ -1446,8 +1446,8 @@ def archiveinfo(environ, archiveID, fileOrder='name', fileStart=None, buildrootO
     values['wininfo'] = wininfo
     values['builtInRoot'] = builtInRoot
     values['buildroots'] = buildroots
-    values['show_rpm_components'] = server.listRPMs(imageID=archive['id'], queryOpts={'limit':1})
-    values['show_archive_components'] = server.listArchives(imageID=archive['id'], queryOpts={'limit':1})
+    values['show_rpm_components'] = server.listRPMs(imageID=archive['id'], queryOpts={'limit': 1})
+    values['show_archive_components'] = server.listArchives(imageID=archive['id'], queryOpts={'limit': 1})

     return _genHTML(environ, 'archiveinfo.chtml')

@@ -448,7 +448,7 @@ def formatDep(name, version, flags):
     if flags & koji.RPMSENSE_EQUAL:
         s = s + "="
     if version:
-        s = "%s %s" %(s, version)
+        s = "%s %s" % (s, version)
     return s

 def formatMode(mode):
@@ -706,7 +706,7 @@ class TaskResultLine(object):
             return composer(self, length, postscript)

         self.composer = composer_wrapper
-        self.size=self._size()
+        self.size = self._size()

     def default_composer(self, length=None, postscript=None):
         line_text = ''
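
`self.size=self._size()` → `self.size = self._size()` is E225 (missing whitespace around an operator). A final standalone sketch:

    # E225: binary operators like '=' and '==' get one space on each side
    size = len('abc')
    assert size == 3
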