flake8: apply E501 with max-line-length=99
parent f1ba6cefd7
commit c5db34a8e1
28 changed files with 1574 additions and 811 deletions
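E501 is flake8's line-length check; the 99-character limit named in the commit message only takes effect if flake8 is configured for it, otherwise lines are flagged at the default of 79. A minimal sketch of such a configuration (the file name and placement are assumptions, not taken from this commit):

    # setup.cfg (or tox.ini / .flake8) -- hypothetical placement
    [flake8]
    max-line-length = 99

With that in place, running `flake8 koji/` reports E501 only for lines longer than 99 characters.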
@@ -75,7 +75,7 @@ try:
     from OpenSSL.SSL import Error as SSL_Error
 except Exception: # pragma: no cover
     # the hub imports koji, and sometimes this import fails there
-    # see: https://cryptography.io/en/latest/faq/#starting-cryptography-using-mod-wsgi-produces-an-internalerror-during-a-call-in-register-osrandom-engine
+    # see: https://cryptography.io/en/latest/faq/#starting-cryptography-using-mod-wsgi-produces-an-internalerror-during-a-call-in-register-osrandom-engine # noqa: E501
     # unfortunately the workaround at the above link does not always work, so
     # we ignore it here
     pass
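The hunk above shows the commit's other remediation: when a line cannot reasonably be wrapped (here, a long URL in a comment), a trailing `# noqa: E501` comment suppresses just that check on just that line. A minimal sketch (hypothetical module, not part of this commit):

    # hypothetical example
    FAQ_URL = 'https://cryptography.io/en/latest/faq/#starting-cryptography-using-mod-wsgi-produces-an-internalerror-during-a-call-in-register-osrandom-engine'  # noqa: E501

The code-qualified form `# noqa: E501` is preferred over a bare `# noqa`, which would silence every flake8 check on that line.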
@@ -1270,7 +1270,8 @@ def parse_pom(path=None, contents=None):
         fd.close()

     if not contents:
-        raise GenericError('either a path to a pom file or the contents of a pom file must be specified')
+        raise GenericError(
+            'either a path to a pom file or the contents of a pom file must be specified')

     # A common problem is non-UTF8 characters in XML files, so we'll convert the string first

@@ -1287,7 +1288,8 @@ def parse_pom(path=None, contents=None):

     for field in fields:
         if field not in util.to_list(values.keys()):
-            raise GenericError('could not extract %s from POM: %s' % (field, (path or '<contents>')))
+            raise GenericError('could not extract %s from POM: %s' %
+                               (field, (path or '<contents>')))
     return values


@@ -1649,7 +1651,8 @@ name=build
     # The following macro values cannot be overridden by tag options
     macros['%_topdir'] = '%s/build' % config_opts['chroothome']
     macros['%_host_cpu'] = opts.get('target_arch', arch)
-    macros['%_host'] = '%s-%s' % (opts.get('target_arch', arch), opts.get('mockhost', 'koji-linux-gnu'))
+    macros['%_host'] = '%s-%s' % (opts.get('target_arch', arch),
+                                  opts.get('mockhost', 'koji-linux-gnu'))

     parts = ["""# Auto-generated by the Koji build system
 """]
@@ -1681,7 +1684,9 @@ name=build
     if bind_opts:
         for key in bind_opts.keys():
             for mnt_src, mnt_dest in six.iteritems(bind_opts.get(key)):
-                parts.append("config_opts['plugin_conf']['bind_mount_opts'][%r].append((%r, %r))\n" % (key, mnt_src, mnt_dest))
+                parts.append(
+                    "config_opts['plugin_conf']['bind_mount_opts'][%r].append((%r, %r))\n" %
+                    (key, mnt_src, mnt_dest))
         parts.append("\n")

     for key in sorted(macros):
@@ -1886,7 +1891,8 @@ def read_config(profile_name, user_config=None):
             try:
                 result[name] = int(value)
             except ValueError:
-                raise ConfigurationError("value for %s config option must be a valid integer" % name)
+                raise ConfigurationError(
+                    "value for %s config option must be a valid integer" % name)
             else:
                 result[name] = value

@@ -2030,7 +2036,8 @@ def read_config_files(config_files, raw=False):

 class PathInfo(object):
     # ASCII numbers and upper- and lower-case letter for use in tmpdir()
-    ASCII_CHARS = [chr(i) for i in list(range(48, 58)) + list(range(65, 91)) + list(range(97, 123))]
+    ASCII_CHARS = [chr(i)
+                   for i in list(range(48, 58)) + list(range(65, 91)) + list(range(97, 123))]

     def __init__(self, topdir=None):
         self._topdir = topdir
@@ -2053,10 +2060,12 @@ class PathInfo(object):

     def build(self, build):
         """Return the directory where a build belongs"""
-        return self.volumedir(build.get('volume_name')) + ("/packages/%(name)s/%(version)s/%(release)s" % build)
+        return self.volumedir(build.get('volume_name')) + \
+            ("/packages/%(name)s/%(version)s/%(release)s" % build)

     def mavenbuild(self, build):
-        """Return the directory where the Maven build exists in the global store (/mnt/koji/packages)"""
+        """Return the directory where the Maven build exists in the global store
+        (/mnt/koji/packages)"""
         return self.build(build) + '/maven'

     def mavenrepo(self, maveninfo):
@@ -2137,7 +2146,8 @@ class PathInfo(object):
         """Return a path to a unique directory under work()/tmp/"""
         tmp = None
         while tmp is None or os.path.exists(tmp):
-            tmp = self.work(volume) + '/tmp/' + ''.join([random.choice(self.ASCII_CHARS) for dummy in '123456'])
+            tmp = self.work(volume) + '/tmp/' + ''.join([random.choice(self.ASCII_CHARS)
+                                                         for dummy in '123456'])
         return tmp

     def scratch(self):
@@ -2781,9 +2791,9 @@ class ClientSession(object):
             # basically, we want to retry on most errors, with a few exceptions
             # - faults (this means the call completed and failed)
             # - SystemExit, KeyboardInterrupt
-            # note that, for logged-in sessions the server should tell us (via a RetryError fault)
-            # if the call cannot be retried. For non-logged-in sessions, all calls should be read-only
-            # and hence retryable.
+            # note that, for logged-in sessions the server should tell us (via a RetryError
+            # fault) if the call cannot be retried. For non-logged-in sessions, all calls
+            # should be read-only and hence retryable.
             except Fault as fault:
                 # try to convert the fault to a known exception
                 err = convertFault(fault)
@@ -2792,13 +2802,14 @@ class ClientSession(object):
                         secs = self.opts.get('offline_retry_interval', interval)
                         self.logger.debug("Server offline. Retrying in %i seconds", secs)
                         time.sleep(secs)
-                        # reset try count - this isn't a typical error, this is a running server
-                        # correctly reporting an outage
+                        # reset try count - this isn't a typical error, this is a running
+                        # server correctly reporting an outage
                         tries = 0
                         continue
                 raise err
             except (SystemExit, KeyboardInterrupt):
-                # (depending on the python version, these may or may not be subclasses of Exception)
+                # (depending on the python version, these may or may not be subclasses of
+                # Exception)
                 raise
             except Exception as e:
                 tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
@@ -2809,8 +2820,9 @@ class ClientSession(object):
                     raise

                 if not self.logged_in:
-                    # in the past, non-logged-in sessions did not retry. For compatibility purposes
-                    # this behavior is governed by the anon_retry opt.
+                    # in the past, non-logged-in sessions did not retry.
+                    # For compatibility purposes this behavior is governed by the anon_retry
+                    # opt.
                     if not self.opts.get('anon_retry', False):
                         raise

@@ -2822,7 +2834,8 @@ class ClientSession(object):
                 # otherwise keep retrying
                 if self.logger.isEnabledFor(logging.DEBUG):
                     self.logger.debug(tb_str)
-                self.logger.info("Try #%s for call %s (%s) failed: %s", tries, self.callnum, name, e)
+                self.logger.info("Try #%s for call %s (%s) failed: %s",
+                                 tries, self.callnum, name, e)
                 if tries > 1:
                     # first retry is immediate, after that we honor retry_interval
                     time.sleep(interval)
@@ -2864,7 +2877,8 @@ class ClientSession(object):
         transaction.
         """
         if not self.multicall:
-            raise GenericError('ClientSession.multicall must be set to True before calling multiCall()')
+            raise GenericError(
+                'ClientSession.multicall must be set to True before calling multiCall()')
         self.multicall = False
         if len(self._calls) == 0:
             return []
@@ -2896,7 +2910,8 @@ class ClientSession(object):
             return self.__dict__['_apidoc']
         return VirtualMethod(self._callMethod, name, self)

-    def fastUpload(self, localfile, path, name=None, callback=None, blocksize=None, overwrite=False, volume=None):
+    def fastUpload(self, localfile, path, name=None, callback=None, blocksize=None,
+                   overwrite=False, volume=None):
         if blocksize is None:
             blocksize = self.opts.get('upload_blocksize', 1048576)

@@ -2930,7 +2945,8 @@ class ClientSession(object):
             hexdigest = util.adler32_constructor(chunk).hexdigest()
             full_chksum.update(chunk)
             if result['size'] != len(chunk):
-                raise GenericError("server returned wrong chunk size: %s != %s" % (result['size'], len(chunk)))
+                raise GenericError("server returned wrong chunk size: %s != %s" %
+                                   (result['size'], len(chunk)))
             if result['hexdigest'] != hexdigest:
                 raise GenericError('upload checksum failed: %s != %s'
                                    % (result['hexdigest'], hexdigest))
@@ -2957,9 +2973,11 @@ class ClientSession(object):
         if problems and result['hexdigest'] != full_chksum.hexdigest():
             raise GenericError("Uploaded file has wrong checksum: %s/%s, %s != %s"
                                % (path, name, result['hexdigest'], full_chksum.hexdigest()))
-        self.logger.debug("Fast upload: %s complete. %i bytes in %.1f seconds", localfile, size, t2)
+        self.logger.debug("Fast upload: %s complete. %i bytes in %.1f seconds",
+                          localfile, size, t2)

-    def _prepUpload(self, chunk, offset, path, name, verify="adler32", overwrite=False, volume=None):
+    def _prepUpload(self, chunk, offset, path, name, verify="adler32", overwrite=False,
+                    volume=None):
         """prep a rawUpload call"""
         if not self.logged_in:
             raise ActionNotAllowed("you must be logged in to upload")
@@ -2989,7 +3007,8 @@ class ClientSession(object):
             request = chunk
         return handler, headers, request

-    def uploadWrapper(self, localfile, path, name=None, callback=None, blocksize=None, overwrite=True, volume=None):
+    def uploadWrapper(self, localfile, path, name=None, callback=None, blocksize=None,
+                      overwrite=True, volume=None):
         """upload a file in chunks using the uploadFile call"""
         if blocksize is None:
             blocksize = self.opts.get('upload_blocksize', 1048576)
@@ -3044,7 +3063,8 @@ class ClientSession(object):
             tries = 0
             while True:
                 if debug:
-                    self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" % (path, name, sz, digest, offset))
+                    self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" %
+                                      (path, name, sz, digest, offset))
                 if self.callMethod('uploadFile', path, name, sz, digest, offset, data, **volopts):
                     break
                 if tries <= retries:
@@ -3063,9 +3083,11 @@ class ClientSession(object):
                 if t2 <= 0:
                     t2 = 1
                 if debug:
-                    self.logger.debug("Uploaded %d bytes in %f seconds (%f kbytes/sec)" % (size, t1, size / t1 / 1024.0))
+                    self.logger.debug("Uploaded %d bytes in %f seconds (%f kbytes/sec)" %
+                                      (size, t1, size / t1 / 1024.0))
                 if debug:
-                    self.logger.debug("Total: %d bytes in %f seconds (%f kbytes/sec)" % (ofs, t2, ofs / t2 / 1024.0))
+                    self.logger.debug("Total: %d bytes in %f seconds (%f kbytes/sec)" %
+                                      (ofs, t2, ofs / t2 / 1024.0))
                 if callback:
                     callback(ofs, totalsize, size, t1, t2)
             fo.close()
@@ -3281,8 +3303,8 @@ class DBHandler(logging.Handler):
             cursor.execute(command, data)
             cursor.close()
             # self.cnx.commit()
-            # XXX - committing here is most likely wrong, but we need to set commit_pending or something
-            # ...and this is really the wrong place for that
+            # XXX - committing here is most likely wrong, but we need to set commit_pending or
+            # something...and this is really the wrong place for that
         except BaseException:
             self.handleError(record)

@@ -3583,7 +3605,9 @@ def add_file_logger(logger, fn):

 def add_stderr_logger(logger):
     handler = logging.StreamHandler()
-    handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] {%(process)d} %(name)s:%(lineno)d %(message)s'))
+    handler.setFormatter(
+        logging.Formatter(
+            '%(asctime)s [%(levelname)s] {%(process)d} %(name)s:%(lineno)d %(message)s'))
     handler.setLevel(logging.DEBUG)
     logging.getLogger(logger).addHandler(handler)

@@ -3612,7 +3636,8 @@ def add_mail_logger(logger, addr):
         return
     addresses = addr.split(',')
     handler = logging.handlers.SMTPHandler("localhost",
-                                           "%s@%s" % (pwd.getpwuid(os.getuid())[0], socket.getfqdn()),
+                                           "%s@%s" % (pwd.getpwuid(os.getuid())[0],
+                                                      socket.getfqdn()),
                                            addresses,
                                            "%s: error notice" % socket.getfqdn())
     handler.setFormatter(logging.Formatter('%(pathname)s:%(lineno)d [%(levelname)s] %(message)s'))

koji/auth.py (10 changed lines)

@@ -334,7 +334,8 @@ class Session(object):

         # Successfully authenticated via Kerberos, now log in
         if proxyuser:
-            proxyprincs = [princ.strip() for princ in context.opts.get('ProxyPrincipals', '').split(',')]
+            proxyprincs = [princ.strip()
+                           for princ in context.opts.get('ProxyPrincipals', '').split(',')]
             if cprinc.name in proxyprincs:
                 login_principal = proxyuser
             else:
@@ -408,12 +409,15 @@ class Session(object):
             authtype = koji.AUTHTYPE_GSSAPI
         else:
             if context.environ.get('SSL_CLIENT_VERIFY') != 'SUCCESS':
-                raise koji.AuthError('could not verify client: %s' % context.environ.get('SSL_CLIENT_VERIFY'))
+                raise koji.AuthError('could not verify client: %s' %
+                                     context.environ.get('SSL_CLIENT_VERIFY'))

             name_dn_component = context.opts.get('DNUsernameComponent', 'CN')
             username = context.environ.get('SSL_CLIENT_S_DN_%s' % name_dn_component)
             if not username:
-                raise koji.AuthError('unable to get user information (%s) from client certificate' % name_dn_component)
+                raise koji.AuthError(
+                    'unable to get user information (%s) from client certificate' %
+                    name_dn_component)
             client_dn = context.environ.get('SSL_CLIENT_S_DN')
             authtype = koji.AUTHTYPE_SSL


koji/daemon.py (102 changed lines)

@@ -110,8 +110,9 @@ def fast_incremental_upload(session, fname, fd, path, retries, logger):
             break


-def log_output(session, path, args, outfile, uploadpath, cwd=None, logerror=0, append=0, chroot=None, env=None):
-    """Run command with output redirected. If chroot is not None, chroot to the directory specified
+def log_output(session, path, args, outfile, uploadpath, cwd=None, logerror=0, append=0,
+               chroot=None, env=None):
+    """Run command with output redirected. If chroot is not None, chroot to the directory specified
     before running the command."""
     pid = os.fork()
     fd = None
@@ -287,11 +288,13 @@ class SCM(object):
         elif len(userhost) > 2:
             raise koji.GenericError('Invalid username@hostname specified: %s' % netloc)
         if not netloc:
-            raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the netloc element.' % self.url)
+            raise koji.GenericError(
+                'Unable to parse SCM URL: %s . Could not find the netloc element.' % self.url)

         # check for empty path before we apply normpath
         if not path:
-            raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the path element.' % self.url)
+            raise koji.GenericError(
+                'Unable to parse SCM URL: %s . Could not find the path element.' % self.url)

         path = os.path.normpath(path)

@@ -306,14 +309,19 @@ class SCM(object):
             # any such url should have already been caught by is_scm_url
             raise koji.GenericError('Invalid SCM URL. Path should begin with /: %s) ')

-        # check for validity: params should be empty, query may be empty, everything else should be populated
+        # check for validity: params should be empty, query may be empty, everything else should be
+        # populated
         if params:
-            raise koji.GenericError('Unable to parse SCM URL: %s . Params element %s should be empty.' % (self.url, params))
+            raise koji.GenericError(
+                'Unable to parse SCM URL: %s . Params element %s should be empty.' %
+                (self.url, params))
         if not scheme: # pragma: no cover
             # should not happen because of is_scm_url check earlier
-            raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the scheme element.' % self.url)
+            raise koji.GenericError(
+                'Unable to parse SCM URL: %s . Could not find the scheme element.' % self.url)
         if not fragment:
-            raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the fragment element.' % self.url)
+            raise koji.GenericError(
+                'Unable to parse SCM URL: %s . Could not find the fragment element.' % self.url)

         # return parsed values
         return (scheme, user, netloc, path, query, fragment)
@@ -356,7 +364,8 @@ class SCM(object):
         for allowed_scm in allowed.split():
             scm_tuple = allowed_scm.split(':')
             if len(scm_tuple) < 2:
-                self.logger.warn('Ignoring incorrectly formatted SCM host:repository: %s' % allowed_scm)
+                self.logger.warn('Ignoring incorrectly formatted SCM host:repository: %s' %
+                                 allowed_scm)
                 continue
             host_pat = scm_tuple[0]
             repo_pat = scm_tuple[1]
@@ -378,11 +387,13 @@ class SCM(object):
                     if scm_tuple[3]:
                         self.source_cmd = scm_tuple[3].split(',')
                     else:
-                        # there was nothing after the trailing :, so they don't want to run a source_cmd at all
+                        # there was nothing after the trailing :,
+                        # so they don't want to run a source_cmd at all
                         self.source_cmd = None
                 break
         if not is_allowed:
-            raise koji.BuildError('%s:%s is not in the list of allowed SCMs' % (self.host, self.repository))
+            raise koji.BuildError(
+                '%s:%s is not in the list of allowed SCMs' % (self.host, self.repository))

     def checkout(self, scmdir, session=None, uploadpath=None, logfile=None):
         """
@@ -416,16 +427,20 @@ class SCM(object):
                  (self.scmtype, ' '.join(cmd), os.path.basename(logfile)))

         if self.scmtype == 'CVS':
-            pserver = ':pserver:%s@%s:%s' % ((self.user or 'anonymous'), self.host, self.repository)
-            module_checkout_cmd = ['cvs', '-d', pserver, 'checkout', '-r', self.revision, self.module]
+            pserver = ':pserver:%s@%s:%s' % ((self.user or 'anonymous'), self.host,
+                                             self.repository)
+            module_checkout_cmd = ['cvs', '-d', pserver, 'checkout', '-r', self.revision,
+                                   self.module]
             common_checkout_cmd = ['cvs', '-d', pserver, 'checkout', 'common']

         elif self.scmtype == 'CVS+SSH':
             if not self.user:
-                raise koji.BuildError('No user specified for repository access scheme: %s' % self.scheme)
+                raise koji.BuildError(
+                    'No user specified for repository access scheme: %s' % self.scheme)

             cvsserver = ':ext:%s@%s:%s' % (self.user, self.host, self.repository)
-            module_checkout_cmd = ['cvs', '-d', cvsserver, 'checkout', '-r', self.revision, self.module]
+            module_checkout_cmd = ['cvs', '-d', cvsserver, 'checkout', '-r', self.revision,
+                                   self.module]
             common_checkout_cmd = ['cvs', '-d', cvsserver, 'checkout', 'common']
             env = {'CVS_RSH': 'ssh'}

@@ -453,14 +468,16 @@ class SCM(object):
             update_checkout_cmd = ['git', 'reset', '--hard', self.revision]
             update_checkout_dir = sourcedir

-            # self.module may be empty, in which case the specfile should be in the top-level directory
+            # self.module may be empty, in which case the specfile should be in the top-level
+            # directory
             if self.module:
                 # Treat the module as a directory inside the git repository
                 sourcedir = '%s/%s' % (sourcedir, self.module)

         elif self.scmtype == 'GIT+SSH':
             if not self.user:
-                raise koji.BuildError('No user specified for repository access scheme: %s' % self.scheme)
+                raise koji.BuildError(
+                    'No user specified for repository access scheme: %s' % self.scheme)
             gitrepo = 'git+ssh://%s@%s%s' % (self.user, self.host, self.repository)
             commonrepo = os.path.dirname(gitrepo) + '/common'
             checkout_path = os.path.basename(self.repository)
@@ -481,7 +498,8 @@ class SCM(object):
             update_checkout_cmd = ['git', 'reset', '--hard', self.revision]
             update_checkout_dir = sourcedir

-            # self.module may be empty, in which case the specfile should be in the top-level directory
+            # self.module may be empty, in which case the specfile should be in the top-level
+            # directory
             if self.module:
                 # Treat the module as a directory inside the git repository
                 sourcedir = '%s/%s' % (sourcedir, self.module)
@@ -492,15 +510,18 @@ class SCM(object):
                 scheme = scheme.split('+')[1]

             svnserver = '%s%s%s' % (scheme, self.host, self.repository)
-            module_checkout_cmd = ['svn', 'checkout', '-r', self.revision, '%s/%s' % (svnserver, self.module), self.module]
+            module_checkout_cmd = ['svn', 'checkout', '-r', self.revision,
+                                   '%s/%s' % (svnserver, self.module), self.module]
             common_checkout_cmd = ['svn', 'checkout', '%s/common' % svnserver]

         elif self.scmtype == 'SVN+SSH':
             if not self.user:
-                raise koji.BuildError('No user specified for repository access scheme: %s' % self.scheme)
+                raise koji.BuildError(
+                    'No user specified for repository access scheme: %s' % self.scheme)

             svnserver = 'svn+ssh://%s@%s%s' % (self.user, self.host, self.repository)
-            module_checkout_cmd = ['svn', 'checkout', '-r', self.revision, '%s/%s' % (svnserver, self.module), self.module]
+            module_checkout_cmd = ['svn', 'checkout', '-r', self.revision,
+                                   '%s/%s' % (svnserver, self.module), self.module]
             common_checkout_cmd = ['svn', 'checkout', '%s/common' % svnserver]

         else:
@@ -513,8 +534,10 @@ class SCM(object):
         # Currently only required for GIT checkouts
         # Run the command in the directory the source was checked out into
        if self.scmtype.startswith('GIT') and globals().get('KOJIKAMID'):
-            _run(['git', 'config', 'core.autocrlf', 'true'], chdir=update_checkout_dir, fatal=True)
-            _run(['git', 'config', 'core.safecrlf', 'true'], chdir=update_checkout_dir, fatal=True)
+            _run(['git', 'config', 'core.autocrlf', 'true'],
+                 chdir=update_checkout_dir, fatal=True)
+            _run(['git', 'config', 'core.safecrlf', 'true'],
+                 chdir=update_checkout_dir, fatal=True)
         _run(update_checkout_cmd, chdir=update_checkout_dir, fatal=True)

         if self.use_common and not globals().get('KOJIKAMID'):
@@ -583,7 +606,8 @@ class TaskManager(object):

     def registerHandler(self, entry):
         """register and index task handler"""
-        if isinstance(entry, type(koji.tasks.BaseTaskHandler)) and issubclass(entry, koji.tasks.BaseTaskHandler):
+        if isinstance(entry, type(koji.tasks.BaseTaskHandler)) and \
+                issubclass(entry, koji.tasks.BaseTaskHandler):
             for method in entry.Methods:
                 self.handlers[method] = entry

@@ -638,7 +662,9 @@ class TaskManager(object):
                 # task not running - expire the buildroot
                 # TODO - consider recycling hooks here (with strong sanity checks)
                 self.logger.info("Expiring buildroot: %(id)i/%(tag_name)s/%(arch)s" % br)
-                self.logger.debug("Buildroot task: %r, Current tasks: %r" % (task_id, to_list(self.tasks.keys())))
+                self.logger.debug(
+                    "Buildroot task: %r, Current tasks: %r" %
+                    (task_id, to_list(self.tasks.keys())))
                 self.session.host.setBuildRootState(id, st_expired)
                 continue
             if nolocal:
@@ -678,7 +704,8 @@ class TaskManager(object):
             if not task:
                 self.logger.warn("%s: invalid task %s" % (desc, br['task_id']))
                 continue
-            if (task['state'] == koji.TASK_STATES['FAILED'] and age < self.options.failed_buildroot_lifetime):
+            if task['state'] == koji.TASK_STATES['FAILED'] and \
+                    age < self.options.failed_buildroot_lifetime:
                 # XXX - this could be smarter
                 # keep buildroots for failed tasks around for a little while
                 self.logger.debug("Keeping failed buildroot: %s" % desc)
@@ -1004,7 +1031,9 @@ class TaskManager(object):
             self.logger.info('%s (pid %i, taskID %i) is running' % (execname, pid, task_id))
         else:
             if signaled:
-                self.logger.info('%s (pid %i, taskID %i) was killed by signal %i' % (execname, pid, task_id, sig))
+                self.logger.info(
+                    '%s (pid %i, taskID %i) was killed by signal %i' %
+                    (execname, pid, task_id, sig))
             else:
                 self.logger.info('%s (pid %i, taskID %i) exited' % (execname, pid, task_id))
             return True
@@ -1041,7 +1070,8 @@ class TaskManager(object):
         if not os.path.isfile(proc_path):
             return None
         proc_file = open(proc_path)
-        procstats = [not field.isdigit() and field or int(field) for field in proc_file.read().split()]
+        procstats = [not field.isdigit() and field or int(field)
+                     for field in proc_file.read().split()]
         proc_file.close()

         cmd_path = '/proc/%i/cmdline' % pid
@@ -1084,9 +1114,9 @@ class TaskManager(object):
         while parents:
             for ppid in parents[:]:
                 for procstats in statsByPPID.get(ppid, []):
-                    # get the /proc entries with ppid as their parent, and append their pid to the list,
-                    # then recheck for their children
-                    # pid is the 0th field, ppid is the 3rd field
+                    # get the /proc entries with ppid as their parent, and append their pid to the
+                    # list, then recheck for their children pid is the 0th field, ppid is the 3rd
+                    # field
                     pids.append((procstats[0], procstats[1]))
                     parents.append(procstats[0])
                 parents.remove(ppid)
@@ -1154,7 +1184,8 @@ class TaskManager(object):
             availableMB = available // 1024 // 1024
             self.logger.debug("disk space available in '%s': %i MB", br_path, availableMB)
             if availableMB < self.options.minspace:
-                self.status = "Insufficient disk space at %s: %i MB, %i MB required" % (br_path, availableMB, self.options.minspace)
+                self.status = "Insufficient disk space at %s: %i MB, %i MB required" % \
+                              (br_path, availableMB, self.options.minspace)
                 self.logger.warn(self.status)
                 return False
         return True
@@ -1189,7 +1220,9 @@ class TaskManager(object):
             return False
         if self.task_load > self.hostdata['capacity']:
             self.status = "Over capacity"
-            self.logger.info("Task load (%.2f) exceeds capacity (%.2f)" % (self.task_load, self.hostdata['capacity']))
+            self.logger.info(
+                "Task load (%.2f) exceeds capacity (%.2f)" %
+                (self.task_load, self.hostdata['capacity']))
             return False
         if len(self.tasks) >= self.options.maxjobs:
             # This serves as a backup to the capacity check and prevents
@@ -1238,7 +1271,8 @@ class TaskManager(object):
                 self.logger.warn('Error during host check')
                 self.logger.warn(''.join(traceback.format_exception(*sys.exc_info())))
             if not valid_host:
-                self.logger.info('Skipping task %s (%s) due to host check', task['id'], task['method'])
+                self.logger.info(
+                    'Skipping task %s (%s) due to host check', task['id'], task['method'])
                 return False
         data = self.session.host.openTask(task['id'])
         if data is None:

koji/db.py

@@ -110,7 +110,8 @@ class CursorWrapper:
         try:
             return quote(operation, parameters)
         except Exception:
-            self.logger.exception('Unable to quote query:\n%s\nParameters: %s', operation, parameters)
+            self.logger.exception(
+                'Unable to quote query:\n%s\nParameters: %s', operation, parameters)
             return "INVALID QUERY"

     def preformat(self, sql, params):

koji/tasks.py

@@ -154,10 +154,14 @@ LEGACY_SIGNATURES = {
         [['tag', 'newer_than', 'nvrs'], None, None, (None, None)],
     ],
     'createLiveMedia': [
-        [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],
+        [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile',
+          'opts'],
+         None, None, (None,)],
     ],
     'createAppliance': [
-        [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],
+        [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile',
+          'opts'],
+         None, None, (None,)],
     ],
     'livecd': [
         [['name', 'version', 'arch', 'target', 'ksfile', 'opts'], None, None, (None,)],
@@ -190,7 +194,9 @@ LEGACY_SIGNATURES = {
         [['spec_url', 'build_target', 'build', 'task', 'opts'], None, None, (None,)],
     ],
     'createLiveCD': [
-        [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],
+        [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile',
+          'opts'],
+         None, None, (None,)],
     ],
     'appliance': [
         [['name', 'version', 'arch', 'target', 'ksfile', 'opts'], None, None, (None,)],
@@ -199,19 +205,25 @@ LEGACY_SIGNATURES = {
         [['name', 'version', 'arches', 'target', 'inst_tree', 'opts'], None, None, (None,)],
     ],
     'tagBuild': [
-        [['tag_id', 'build_id', 'force', 'fromtag', 'ignore_success'], None, None, (False, None, False)],
+        [['tag_id', 'build_id', 'force', 'fromtag', 'ignore_success'],
+         None, None, (False, None, False)],
     ],
     'chainmaven': [
         [['builds', 'target', 'opts'], None, None, (None,)],
     ],
     'newRepo': [
-        [['tag', 'event', 'src', 'debuginfo', 'separate_src'], None, None, (None, False, False, False)],
+        [['tag', 'event', 'src', 'debuginfo', 'separate_src'],
+         None, None, (None, False, False, False)],
     ],
     'createImage': [
-        [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'inst_tree', 'opts'], None, None, (None,)],
+        [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info',
+          'inst_tree', 'opts'],
+         None, None, (None,)],
     ],
     'tagNotification': [
-        [['recipients', 'is_successful', 'tag_info', 'from_info', 'build_info', 'user_info', 'ignore_success', 'failure_msg'], None, None, (None, '')],
+        [['recipients', 'is_successful', 'tag_info', 'from_info', 'build_info', 'user_info',
+          'ignore_success', 'failure_msg'],
+         None, None, (None, '')],
     ],
     'buildArch': [
         [['pkg', 'root', 'arch', 'keep_srpm', 'opts'], None, None, (None,)],
@@ -253,7 +265,9 @@ LEGACY_SIGNATURES = {
         [['options'], None, None, (None,)],
     ],
     'runroot': [
-        [['root', 'arch', 'command', 'keep', 'packages', 'mounts', 'repo_id', 'skip_setarch', 'weight', 'upload_logs', 'new_chroot'], None, None, (False, [], [], None, False, None, None, False)],
+        [['root', 'arch', 'command', 'keep', 'packages', 'mounts', 'repo_id', 'skip_setarch',
+          'weight', 'upload_logs', 'new_chroot'],
+         None, None, (False, [], [], None, False, None, None, False)],
     ],
     'distRepo': [
         [['tag', 'repo_id', 'keys', 'task_opts'], None, None, None],
@@ -400,7 +414,9 @@ class BaseTaskHandler(object):
                     self.session.getTaskResult(task)
                     checked.add(task)
                 except (koji.GenericError, six.moves.xmlrpc_client.Fault):
-                    self.logger.info("task %s failed or was canceled, cancelling unfinished tasks" % task)
+                    self.logger.info(
+                        "task %s failed or was canceled, cancelling unfinished tasks" %
+                        task)
                     self.session.cancelTaskChildren(self.id)
                     # reraise the original error now, rather than waiting for
                     # an error in taskWaitResults()
@@ -743,8 +759,10 @@ class RestartHostsTask(BaseTaskHandler):
         my_tasks = None
         for host in hosts:
             # note: currently task assignments bypass channel restrictions
-            task1 = self.subtask('restart', [host], assign=host['id'], label="restart %i" % host['id'])
-            task2 = self.subtask('restartVerify', [task1, host], assign=host['id'], label="sleep %i" % host['id'])
+            task1 = self.subtask('restart', [host],
+                                 assign=host['id'], label="restart %i" % host['id'])
+            task2 = self.subtask('restartVerify', [task1, host],
+                                 assign=host['id'], label="sleep %i" % host['id'])
             subtasks.append(task1)
             subtasks.append(task2)
             if host['id'] == this_host:
@@ -790,8 +808,10 @@ class DependantTask(BaseTaskHandler):

         subtasks = []
         for task in task_list:
-            # **((len(task)>2 and task[2]) or {}) expands task[2] into opts if it exists, allows for things like 'priority=15'
-            task_id = self.session.host.subtask(method=task[0], arglist=task[1], parent=self.id, **((len(task) > 2 and task[2]) or {}))
+            # **((len(task)>2 and task[2]) or {}) expands task[2] into opts if it exists, allows
+            # for things like 'priority=15'
+            task_id = self.session.host.subtask(method=task[0], arglist=task[1], parent=self.id,
+                                                **((len(task) > 2 and task[2]) or {}))
             if task_id:
                 subtasks.append(task_id)
         if subtasks:

koji/util.py

@@ -54,7 +54,8 @@ def deprecated(message):


 def _changelogDate(cldate):
-    return time.strftime('%a %b %d %Y', time.strptime(koji.formatTime(cldate), '%Y-%m-%d %H:%M:%S'))
+    return time.strftime('%a %b %d %Y',
+                         time.strptime(koji.formatTime(cldate), '%Y-%m-%d %H:%M:%S'))


 def formatChangelog(entries):
@@ -813,7 +814,8 @@ def parse_maven_param(confs, chain=False, scratch=False, section=None):
         else:
             raise ValueError("Section %s does not exist in: %s" % (section, ', '.join(confs)))
     elif len(builds) > 1:
-        raise ValueError("Multiple sections in: %s, you must specify the section" % ', '.join(confs))
+        raise ValueError(
+            "Multiple sections in: %s, you must specify the section" % ', '.join(confs))
     return builds

