autopep8: Update with changes to make autopep8 -a -a -a happy

This commit is contained in:
Brian C. Lane 2023-08-07 10:08:32 -07:00 committed by Simon de Vlieger
parent a7541ad8b4
commit 44c28c8c16
26 changed files with 66 additions and 59 deletions

View file

@@ -127,7 +127,7 @@ class LVService(devices.DeviceService):
if res.returncode == 5:
if count == 10:
raise RuntimeError("Could not find parent device")
time.sleep(1*count)
time.sleep(1 * count)
count += 1
continue

View file

@@ -57,7 +57,7 @@ class LoopConfig(ctypes.Structure):
('fd', ctypes.c_uint32),
('block_size', ctypes.c_uint32),
('info', LoopInfo),
('__reserved', ctypes.c_uint64*8),
('__reserved', ctypes.c_uint64 * 8),
]
@@ -652,7 +652,7 @@ class LoopControl:
if callable(setup):
try:
setup(lo)
except:
except BaseException:
lo.close()
raise

View file

@@ -69,8 +69,13 @@ def parse_arguments(sys_argv):
help="directory containing stages, assemblers, and the osbuild library")
parser.add_argument("--cache-max-size", metavar="SIZE", type=parse_size, default=None,
help="maximum size of the cache (bytes) or 'unlimited' for no restriction")
parser.add_argument("--checkpoint", metavar="ID", action="append", type=str, default=None,
help="stage to commit to the object store during build (can be passed multiple times), accepts globs")
parser.add_argument(
"--checkpoint",
metavar="ID",
action="append",
type=str,
default=None,
help="stage to commit to the object store during build (can be passed multiple times), accepts globs")
parser.add_argument("--export", metavar="ID", action="append", type=str, default=[],
help="object to export, can be passed multiple times")
parser.add_argument("--json", action="store_true",

View file

@@ -519,11 +519,11 @@ class RunnerInfo:
while i > 0 and name[i].isdigit():
i -= 1
vstr = name[i+1:]
vstr = name[i + 1:]
if vstr:
version = int(vstr)
return name[:i+1], version
return name[:i + 1], version
class Index:

View file

@@ -575,7 +575,7 @@ class FsCache(contextlib.AbstractContextManager, os.PathLike):
raise
return (name, lockfd)
except:
except BaseException:
# On error, we might have already created the directory or even
# linked the lock-file. Try unlinking both, but ignore errors if
# they do not exist. Due to using UUIDs as names we cannot conflict
@@ -703,7 +703,7 @@ class FsCache(contextlib.AbstractContextManager, os.PathLike):
self._active = True
return self
except:
except BaseException:
self.__exit__(None, None, None)
raise

View file

@@ -216,7 +216,7 @@ class Socket(contextlib.AbstractContextManager):
# default destination for send operations.
if connect_to is not None:
sock.connect(os.fspath(connect_to))
except:
except BaseException:
if sock is not None:
sock.close()
raise
@@ -255,7 +255,7 @@ class Socket(contextlib.AbstractContextManager):
sock.bind(os.fspath(bind_to))
unlink = os.open(os.path.join(".", path[0]), os.O_CLOEXEC | os.O_PATH)
sock.setblocking(False)
except:
except BaseException:
if unlink is not None:
os.close(unlink)
if sock is not None:

View file

@@ -177,9 +177,9 @@ def brace_expand(s):
result = []
right = s.find('}')
left = s[:right].rfind('{')
prefix, choices, suffix = s[:left], s[left+1:right], s[right+1:]
prefix, choices, suffix = s[:left], s[left + 1:right], s[right + 1:]
for choice in choices.split(','):
result.extend(brace_expand(prefix+choice+suffix))
result.extend(brace_expand(prefix + choice + suffix))
return result

View file

@@ -286,7 +286,7 @@ class MDAHeader(Header):
"magic": "16s", # int8_t[16] // Allows to scan for metadata
"version": "L", # uint32_t
"start": "Q", # uint64_t // Absolute start byte of itself
"size": "Q" # uint64_t // Size of metadata area
"size": "Q" # uint64_t // Size of metadata area
})
# followed by a null termiated list of type `RawLocN`
@@ -419,7 +419,7 @@ class Metadata:
r"\]": " ]",
r'"': ' " ',
r"[=,]": "",
r"\s+": " ",
r"\s+": " ",
r"\0$": "",
}
@@ -537,7 +537,7 @@ class Disk:
try:
self._init_headers()
except: # pylint: disable=broad-except
except BaseException: # pylint: disable=broad-except
self.fp.close()
raise

View file

@@ -25,7 +25,7 @@ SCHEMA = """
def main(tree):
cmd = [
"/usr/sbin/chroot", tree,
"/usr/sbin/authconfig", "--nostart", "--updateall"
"/usr/sbin/authconfig", "--nostart", "--updateall"
]
subprocess.run(cmd, check=True)

View file

@@ -408,7 +408,7 @@ def main(inputs, root, options, workdir, loop_client):
liveos = os.path.join(liveos_work, "LiveOS")
os.makedirs(liveos)
rootfs_size = rootfs.get("size", 3072) * 1024*1024
rootfs_size = rootfs.get("size", 3072) * 1024 * 1024
compression = rootfs.get("compression", {})
rootfs = os.path.join(liveos, "rootfs.img")

View file

@@ -142,7 +142,7 @@ def make_dnf_config(tree, config_options):
for section, items in config_options.items():
make_section(dnf_config, section, items)
with open(dnf_config_path, "w", encoding="utf8") as f:
with open(dnf_config_path, "w", encoding="utf8") as f:
os.fchmod(f.fileno(), 0o644)
dnf_config.write(f)

View file

@@ -67,9 +67,11 @@ SCHEMA = """
"""
# Corresponds to https://github.com/coreos/rpm-ostree/blob/7b9a20b20ecd5a2ceb11ca9edf86984dc3065183/rust/src/composepost.rs#L58
# Corresponds to
# https://github.com/coreos/rpm-ostree/blob/7b9a20b20ecd5a2ceb11ca9edf86984dc3065183/rust/src/composepost.rs#L58
TOPLEVEL_DIRS = ["dev", "proc", "run", "sys", "sysroot", "var"]
# Corresponds to https://github.com/coreos/rpm-ostree/blob/7b9a20b20ecd5a2ceb11ca9edf86984dc3065183/rust/src/composepost.rs#L123
# Corresponds to
# https://github.com/coreos/rpm-ostree/blob/7b9a20b20ecd5a2ceb11ca9edf86984dc3065183/rust/src/composepost.rs#L123
TOPLEVEL_LINKS = {
"home": "var/home",
"media": "run/media",

View file

@@ -129,10 +129,10 @@ class PartitionTable:
]
if p.bootable:
commands += ["set", str(i+1), "boot", "on"]
commands += ["set", str(i + 1), "boot", "on"]
if p.type:
commands += ["set", str(i+1), p.type, "on"]
commands += ["set", str(i + 1), p.type, "on"]
subprocess.run(["parted", "-a", "none", "-s",
target, "--"] + commands,

View file

@@ -347,7 +347,7 @@ def main(tree, inputs, options):
]
with tempfile.NamedTemporaryFile(prefix="manifest.", mode='w') as manifest:
manifest.writelines(c+'\n' for c in packages)
manifest.writelines(c + '\n' for c in packages)
manifest.flush()
subprocess.run([
"rpm",

View file

@@ -129,7 +129,7 @@ class PartitionTable:
command += ["-U", self.uuid]
for i, part in enumerate(self.partitions):
idx = i+1 # partitions are 1-indexed
idx = i + 1 # partitions are 1-indexed
# format is 'partnum:start:end'
size = "0"

View file

@@ -62,7 +62,7 @@ def main(tree, options):
cfg = options["config"]
# ensure the unit name + ".d" does not exceed maximum filename length
if len(unit+".d") > 255:
if len(unit + ".d") > 255:
raise ValueError(f"Error: the {unit} unit drop-in directory exceeds the maximum filename length.")
unit_dropins_dir = f"{tree}/usr/lib/systemd/system/{unit}.d"

View file

@@ -10,7 +10,7 @@ def skipcpio(fd):
pos = 0
while True:
os.lseek(fd, pos, os.SEEK_SET)
data = os.read(fd, 2*cpio_len)
data = os.read(fd, 2 * cpio_len)
if data == b'':
# end of file, cpio_end not found, cat it all
pos = 0
@@ -24,7 +24,7 @@ def skipcpio(fd):
if pos == 0:
return pos
# skip zeros
n = 2*cpio_len
n = 2 * cpio_len
while True:
data = os.read(fd, n)
if data == b'':

View file

@@ -158,7 +158,7 @@ def test_clear_fd_wait(tempdir):
thread.start()
barrier.wait()
lo.clear_fd_wait(f.fileno(), 4*delay_time, delay_time/10)
lo.clear_fd_wait(f.fileno(), 4 * delay_time, delay_time / 10)
# no timeout exception has occurred and thus the device
# must not be be bound to the original file anymore

View file

@@ -85,12 +85,12 @@ def test_runner_detection(tempdir):
},
"rhel": {
"base": 90,
"versions": [91, 92, 93],
"versions": [91, 92, 93],
"check": {94: 93},
},
"future": {
"base": 100,
"versions": [101, 102, 103],
"versions": [101, 102, 103],
"check": {110: 103},
}
}

View file

@@ -38,7 +38,7 @@ def store_fixture():
def test_basic(object_store):
object_store.maximum_size = 1024*1024*1024
object_store.maximum_size = 1024 * 1024 * 1024
# No objects or references should be in the store
assert len(os.listdir(object_store.objects)) == 0
@@ -85,7 +85,7 @@ def test_basic(object_store):
def test_cleanup(tmpdir):
with objectstore.ObjectStore(tmpdir) as object_store:
object_store.maximum_size = 1024*1024*1024
object_store.maximum_size = 1024 * 1024 * 1024
stage = os.path.join(object_store, "stage")
tree = object_store.new("a")
@@ -156,7 +156,7 @@ def test_metadata(tmpdir):
# use tmpdir fixture from here on
with objectstore.ObjectStore(tmpdir) as store:
store.maximum_size = 1024*1024*1024
store.maximum_size = 1024 * 1024 * 1024
obj = store.new("a")
p = Path(obj, "A")
p.touch()

View file

@@ -56,13 +56,13 @@ def test_pathlike(tmpdir):
dir_str: str = os.fspath(tmpdir)
cache1 = fscache.FsCache("osbuild-test-appid", dir_str)
assert os.fspath(cache1) == tmpdir
assert os.path.join(cache1, "foobar") == os.path.join(tmpdir, "foobar")
assert os.path.join(cache1, "foobar") == os.path.join(tmpdir, "foobar")
# Test with a wrapper-type as argument
dir_pathlike: Wrapper = Wrapper(os.fspath(tmpdir))
cache2 = fscache.FsCache("osbuild-test-appid", dir_pathlike)
assert os.fspath(cache2) == tmpdir
assert os.path.join(cache2, "foobar") == os.path.join(tmpdir, "foobar")
assert os.path.join(cache2, "foobar") == os.path.join(tmpdir, "foobar")
def test_path(tmpdir):
@@ -235,19 +235,19 @@ def test_cache_info(tmpdir):
cache.info = fscache.FsCacheInfo(maximum_size=1024)
assert cache.info.maximum_size == 1024
assert cache.info.creation_boot_id is None
cache.info = fscache.FsCacheInfo(creation_boot_id="0"*32)
cache.info = fscache.FsCacheInfo(creation_boot_id="0" * 32)
assert cache.info.maximum_size == 1024
assert cache.info.creation_boot_id == "0"*32
cache.info = fscache.FsCacheInfo(maximum_size=2048, creation_boot_id="1"*32)
assert cache.info.creation_boot_id == "0" * 32
cache.info = fscache.FsCacheInfo(maximum_size=2048, creation_boot_id="1" * 32)
assert cache.info.maximum_size == 2048
assert cache.info.creation_boot_id == "1"*32
assert cache.info.creation_boot_id == "1" * 32
assert not fscache.FsCacheInfo().to_json()
assert fscache.FsCacheInfo(creation_boot_id="0"*32).to_json() == {
"creation-boot-id": "0"*32,
assert fscache.FsCacheInfo(creation_boot_id="0" * 32).to_json() == {
"creation-boot-id": "0" * 32,
}
assert fscache.FsCacheInfo(creation_boot_id="0"*32, maximum_size=1024).to_json() == {
"creation-boot-id": "0"*32,
assert fscache.FsCacheInfo(creation_boot_id="0" * 32, maximum_size=1024).to_json() == {
"creation-boot-id": "0" * 32,
"maximum-size": 1024,
}
@@ -255,21 +255,21 @@ def test_cache_info(tmpdir):
assert fscache.FsCacheInfo.from_json(None) == fscache.FsCacheInfo()
assert fscache.FsCacheInfo.from_json("foobar") == fscache.FsCacheInfo()
assert fscache.FsCacheInfo.from_json({
"creation-boot-id": "0"*32,
}) == fscache.FsCacheInfo(creation_boot_id="0"*32)
"creation-boot-id": "0" * 32,
}) == fscache.FsCacheInfo(creation_boot_id="0" * 32)
assert fscache.FsCacheInfo.from_json({
"creation-boot-id": "0"*32,
"creation-boot-id": "0" * 32,
"maximum-size": 1024,
}) == fscache.FsCacheInfo(creation_boot_id="0"*32, maximum_size=1024)
}) == fscache.FsCacheInfo(creation_boot_id="0" * 32, maximum_size=1024)
assert fscache.FsCacheInfo.from_json({
"creation-boot-id": "0"*32,
"creation-boot-id": "0" * 32,
"maximum-size": 1024,
}) == fscache.FsCacheInfo(creation_boot_id="0"*32, maximum_size=1024)
}) == fscache.FsCacheInfo(creation_boot_id="0" * 32, maximum_size=1024)
assert fscache.FsCacheInfo.from_json({
"creation-boot-id": "0"*32,
"creation-boot-id": "0" * 32,
"unknown0": "foobar",
"unknown1": ["foo", "bar"],
}) == fscache.FsCacheInfo(creation_boot_id="0"*32)
}) == fscache.FsCacheInfo(creation_boot_id="0" * 32)
def test_store(tmpdir):
@@ -317,7 +317,7 @@ def test_store_tree(tmpdir):
cache.store_tree("foobar", "invalid/dir")
with cache:
cache.info = cache.info._replace(maximum_size=1024*1024*1024)
cache.info = cache.info._replace(maximum_size=1024 * 1024 * 1024)
with pytest.raises(ValueError):
cache.store_tree("", "invalid/dir")

View file

@@ -156,7 +156,7 @@ def test_rename_vg_group(tempdir):
vg = find_vg(vgs, new_name)
if vg:
break
time.sleep(0.250 * (i+1))
time.sleep(0.250 * (i + 1))
if not vg:
raise RuntimeError(f"Could not find vg {new_name}")
finally:

View file

@@ -73,7 +73,7 @@ def create_image(tmpdir):
env = os.environ.copy()
env["PYTHONPATH"] = os.curdir
subprocess.run(
[os.path.join(os.curdir, "stages", "org.osbuild.mkfs.fat")],
[os.path.join(os.curdir, "stages", "org.osbuild.mkfs.fat")],
env=env,
check=True,
stdout=sys.stdout,

View file

@@ -66,7 +66,7 @@ def can_setup_netns() -> bool:
try:
with netns():
return True
except: # pylint: disable=bare-except
except BaseException: # pylint: disable=bare-except
return False

View file

@@ -77,7 +77,7 @@ def mapping_is_subset(subset, other):
"""
if isinstance(subset, Mapping) and isinstance(other, Mapping):
for key, value in subset.items():
if not key in other:
if key not in other:
return False
other_value = other[key]

View file

@@ -590,7 +590,7 @@ LocalFileSigLevel = Optional
pkginfo = self._pacman("-Sii", "--sysroot", self._cachedir, pkg["name"])
pkgdata = self.parse_pkg_info(pkginfo)
p = PkgInfo(
"sha256:"+pkgdata["SHA-256 Sum"],
"sha256:" + pkgdata["SHA-256 Sum"],
pkg["name"],
pkg["version"],
pkgdata["Architecture"],
@@ -644,7 +644,7 @@ class DepSolver:
if not result.scheme:
path = basedir.joinpath(baseurl)
return path.resolve().as_uri()
except: # pylint: disable=bare-except
except BaseException: # pylint: disable=bare-except
pass
return baseurl