flake8: apply E501 with max-line-length=99

This commit is contained in:
Yuming Zhu 2020-02-27 22:10:11 +08:00
parent f1ba6cefd7
commit c5db34a8e1
28 changed files with 1574 additions and 811 deletions

View file

@@ -75,7 +75,7 @@ try:
from OpenSSL.SSL import Error as SSL_Error
except Exception: # pragma: no cover
# the hub imports koji, and sometimes this import fails there
# see: https://cryptography.io/en/latest/faq/#starting-cryptography-using-mod-wsgi-produces-an-internalerror-during-a-call-in-register-osrandom-engine
# see: https://cryptography.io/en/latest/faq/#starting-cryptography-using-mod-wsgi-produces-an-internalerror-during-a-call-in-register-osrandom-engine # noqa: E501
# unfortunately the workaround at the above link does not always work, so
# we ignore it here
pass
@@ -1270,7 +1270,8 @@ def parse_pom(path=None, contents=None):
fd.close()
if not contents:
raise GenericError('either a path to a pom file or the contents of a pom file must be specified')
raise GenericError(
'either a path to a pom file or the contents of a pom file must be specified')
# A common problem is non-UTF8 characters in XML files, so we'll convert the string first
@@ -1287,7 +1288,8 @@ def parse_pom(path=None, contents=None):
for field in fields:
if field not in util.to_list(values.keys()):
raise GenericError('could not extract %s from POM: %s' % (field, (path or '<contents>')))
raise GenericError('could not extract %s from POM: %s' %
(field, (path or '<contents>')))
return values
@@ -1649,7 +1651,8 @@ name=build
# The following macro values cannot be overridden by tag options
macros['%_topdir'] = '%s/build' % config_opts['chroothome']
macros['%_host_cpu'] = opts.get('target_arch', arch)
macros['%_host'] = '%s-%s' % (opts.get('target_arch', arch), opts.get('mockhost', 'koji-linux-gnu'))
macros['%_host'] = '%s-%s' % (opts.get('target_arch', arch),
opts.get('mockhost', 'koji-linux-gnu'))
parts = ["""# Auto-generated by the Koji build system
"""]
@@ -1681,7 +1684,9 @@ name=build
if bind_opts:
for key in bind_opts.keys():
for mnt_src, mnt_dest in six.iteritems(bind_opts.get(key)):
parts.append("config_opts['plugin_conf']['bind_mount_opts'][%r].append((%r, %r))\n" % (key, mnt_src, mnt_dest))
parts.append(
"config_opts['plugin_conf']['bind_mount_opts'][%r].append((%r, %r))\n" %
(key, mnt_src, mnt_dest))
parts.append("\n")
for key in sorted(macros):
@@ -1886,7 +1891,8 @@ def read_config(profile_name, user_config=None):
try:
result[name] = int(value)
except ValueError:
raise ConfigurationError("value for %s config option must be a valid integer" % name)
raise ConfigurationError(
"value for %s config option must be a valid integer" % name)
else:
result[name] = value
@@ -2030,7 +2036,8 @@ def read_config_files(config_files, raw=False):
class PathInfo(object):
# ASCII numbers and upper- and lower-case letter for use in tmpdir()
ASCII_CHARS = [chr(i) for i in list(range(48, 58)) + list(range(65, 91)) + list(range(97, 123))]
ASCII_CHARS = [chr(i)
for i in list(range(48, 58)) + list(range(65, 91)) + list(range(97, 123))]
def __init__(self, topdir=None):
self._topdir = topdir
@@ -2053,10 +2060,12 @@ class PathInfo(object):
def build(self, build):
"""Return the directory where a build belongs"""
return self.volumedir(build.get('volume_name')) + ("/packages/%(name)s/%(version)s/%(release)s" % build)
return self.volumedir(build.get('volume_name')) + \
("/packages/%(name)s/%(version)s/%(release)s" % build)
def mavenbuild(self, build):
"""Return the directory where the Maven build exists in the global store (/mnt/koji/packages)"""
"""Return the directory where the Maven build exists in the global store
(/mnt/koji/packages)"""
return self.build(build) + '/maven'
def mavenrepo(self, maveninfo):
@@ -2137,7 +2146,8 @@ class PathInfo(object):
"""Return a path to a unique directory under work()/tmp/"""
tmp = None
while tmp is None or os.path.exists(tmp):
tmp = self.work(volume) + '/tmp/' + ''.join([random.choice(self.ASCII_CHARS) for dummy in '123456'])
tmp = self.work(volume) + '/tmp/' + ''.join([random.choice(self.ASCII_CHARS)
for dummy in '123456'])
return tmp
def scratch(self):
@@ -2781,9 +2791,9 @@ class ClientSession(object):
# basically, we want to retry on most errors, with a few exceptions
# - faults (this means the call completed and failed)
# - SystemExit, KeyboardInterrupt
# note that, for logged-in sessions the server should tell us (via a RetryError fault)
# if the call cannot be retried. For non-logged-in sessions, all calls should be read-only
# and hence retryable.
# note that, for logged-in sessions the server should tell us (via a RetryError
# fault) if the call cannot be retried. For non-logged-in sessions, all calls
# should be read-only and hence retryable.
except Fault as fault:
# try to convert the fault to a known exception
err = convertFault(fault)
@@ -2792,13 +2802,14 @@ class ClientSession(object):
secs = self.opts.get('offline_retry_interval', interval)
self.logger.debug("Server offline. Retrying in %i seconds", secs)
time.sleep(secs)
# reset try count - this isn't a typical error, this is a running server
# correctly reporting an outage
# reset try count - this isn't a typical error, this is a running
# server correctly reporting an outage
tries = 0
continue
raise err
except (SystemExit, KeyboardInterrupt):
# (depending on the python version, these may or may not be subclasses of Exception)
# (depending on the python version, these may or may not be subclasses of
# Exception)
raise
except Exception as e:
tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
@@ -2809,8 +2820,9 @@ class ClientSession(object):
raise
if not self.logged_in:
# in the past, non-logged-in sessions did not retry. For compatibility purposes
# this behavior is governed by the anon_retry opt.
# in the past, non-logged-in sessions did not retry.
# For compatibility purposes this behavior is governed by the anon_retry
# opt.
if not self.opts.get('anon_retry', False):
raise
@@ -2822,7 +2834,8 @@ class ClientSession(object):
# otherwise keep retrying
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(tb_str)
self.logger.info("Try #%s for call %s (%s) failed: %s", tries, self.callnum, name, e)
self.logger.info("Try #%s for call %s (%s) failed: %s",
tries, self.callnum, name, e)
if tries > 1:
# first retry is immediate, after that we honor retry_interval
time.sleep(interval)
@@ -2864,7 +2877,8 @@ class ClientSession(object):
transaction.
"""
if not self.multicall:
raise GenericError('ClientSession.multicall must be set to True before calling multiCall()')
raise GenericError(
'ClientSession.multicall must be set to True before calling multiCall()')
self.multicall = False
if len(self._calls) == 0:
return []
@@ -2896,7 +2910,8 @@ class ClientSession(object):
return self.__dict__['_apidoc']
return VirtualMethod(self._callMethod, name, self)
def fastUpload(self, localfile, path, name=None, callback=None, blocksize=None, overwrite=False, volume=None):
def fastUpload(self, localfile, path, name=None, callback=None, blocksize=None,
overwrite=False, volume=None):
if blocksize is None:
blocksize = self.opts.get('upload_blocksize', 1048576)
@@ -2930,7 +2945,8 @@ class ClientSession(object):
hexdigest = util.adler32_constructor(chunk).hexdigest()
full_chksum.update(chunk)
if result['size'] != len(chunk):
raise GenericError("server returned wrong chunk size: %s != %s" % (result['size'], len(chunk)))
raise GenericError("server returned wrong chunk size: %s != %s" %
(result['size'], len(chunk)))
if result['hexdigest'] != hexdigest:
raise GenericError('upload checksum failed: %s != %s'
% (result['hexdigest'], hexdigest))
@@ -2957,9 +2973,11 @@ class ClientSession(object):
if problems and result['hexdigest'] != full_chksum.hexdigest():
raise GenericError("Uploaded file has wrong checksum: %s/%s, %s != %s"
% (path, name, result['hexdigest'], full_chksum.hexdigest()))
self.logger.debug("Fast upload: %s complete. %i bytes in %.1f seconds", localfile, size, t2)
self.logger.debug("Fast upload: %s complete. %i bytes in %.1f seconds",
localfile, size, t2)
def _prepUpload(self, chunk, offset, path, name, verify="adler32", overwrite=False, volume=None):
def _prepUpload(self, chunk, offset, path, name, verify="adler32", overwrite=False,
volume=None):
"""prep a rawUpload call"""
if not self.logged_in:
raise ActionNotAllowed("you must be logged in to upload")
@@ -2989,7 +3007,8 @@ class ClientSession(object):
request = chunk
return handler, headers, request
def uploadWrapper(self, localfile, path, name=None, callback=None, blocksize=None, overwrite=True, volume=None):
def uploadWrapper(self, localfile, path, name=None, callback=None, blocksize=None,
overwrite=True, volume=None):
"""upload a file in chunks using the uploadFile call"""
if blocksize is None:
blocksize = self.opts.get('upload_blocksize', 1048576)
@@ -3044,7 +3063,8 @@ class ClientSession(object):
tries = 0
while True:
if debug:
self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" % (path, name, sz, digest, offset))
self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" %
(path, name, sz, digest, offset))
if self.callMethod('uploadFile', path, name, sz, digest, offset, data, **volopts):
break
if tries <= retries:
@@ -3063,9 +3083,11 @@ class ClientSession(object):
if t2 <= 0:
t2 = 1
if debug:
self.logger.debug("Uploaded %d bytes in %f seconds (%f kbytes/sec)" % (size, t1, size / t1 / 1024.0))
self.logger.debug("Uploaded %d bytes in %f seconds (%f kbytes/sec)" %
(size, t1, size / t1 / 1024.0))
if debug:
self.logger.debug("Total: %d bytes in %f seconds (%f kbytes/sec)" % (ofs, t2, ofs / t2 / 1024.0))
self.logger.debug("Total: %d bytes in %f seconds (%f kbytes/sec)" %
(ofs, t2, ofs / t2 / 1024.0))
if callback:
callback(ofs, totalsize, size, t1, t2)
fo.close()
@@ -3281,8 +3303,8 @@ class DBHandler(logging.Handler):
cursor.execute(command, data)
cursor.close()
# self.cnx.commit()
# XXX - committing here is most likely wrong, but we need to set commit_pending or something
# ...and this is really the wrong place for that
# XXX - committing here is most likely wrong, but we need to set commit_pending or
# something...and this is really the wrong place for that
except BaseException:
self.handleError(record)
@@ -3583,7 +3605,9 @@ def add_file_logger(logger, fn):
def add_stderr_logger(logger):
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] {%(process)d} %(name)s:%(lineno)d %(message)s'))
handler.setFormatter(
logging.Formatter(
'%(asctime)s [%(levelname)s] {%(process)d} %(name)s:%(lineno)d %(message)s'))
handler.setLevel(logging.DEBUG)
logging.getLogger(logger).addHandler(handler)
@@ -3612,7 +3636,8 @@ def add_mail_logger(logger, addr):
return
addresses = addr.split(',')
handler = logging.handlers.SMTPHandler("localhost",
"%s@%s" % (pwd.getpwuid(os.getuid())[0], socket.getfqdn()),
"%s@%s" % (pwd.getpwuid(os.getuid())[0],
socket.getfqdn()),
addresses,
"%s: error notice" % socket.getfqdn())
handler.setFormatter(logging.Formatter('%(pathname)s:%(lineno)d [%(levelname)s] %(message)s'))