fscache: add new FsCache._last_used() helper
This helper can be used to implement a strategy that finds the oldest cache entries and evicts them when the cache is full. The implementation uses the `atime` of the per-object `cache.lock` file and ensures in `load()` that the `atime` is actually updated.
This commit is contained in:
parent
f52cabc3c1
commit
caddf0adfb
2 changed files with 92 additions and 7 deletions
|
|
@ -9,6 +9,7 @@ the cache under a given limit.
|
|||
# pylint: disable=too-many-lines
|
||||
|
||||
import contextlib
|
||||
import ctypes
|
||||
import errno
|
||||
import json
|
||||
import os
|
||||
|
|
@ -1016,14 +1017,12 @@ class FsCache(contextlib.AbstractContextManager, os.PathLike):
|
|||
# Use an ExitStack so we can catch exceptions raised by the
|
||||
# `__enter__()` call on the context-manager. We want to catch
|
||||
# `OSError` exceptions and convert them to cache-misses.
|
||||
obj_lock_path = os.path.join(
|
||||
self._dirname_objects, name, self._filename_object_lock)
|
||||
try:
|
||||
es.enter_context(
|
||||
lock_fd = es.enter_context(
|
||||
self._atomic_open(
|
||||
os.path.join(
|
||||
self._dirname_objects,
|
||||
name,
|
||||
self._filename_object_lock,
|
||||
),
|
||||
obj_lock_path,
|
||||
write=False,
|
||||
wait=False,
|
||||
)
|
||||
|
|
@ -1033,12 +1032,33 @@ class FsCache(contextlib.AbstractContextManager, os.PathLike):
|
|||
raise self.MissError() from None
|
||||
raise e
|
||||
|
||||
libc = linux.Libc.default()
|
||||
libc.futimens(lock_fd, ctypes.byref(linux.c_timespec_times2(
|
||||
atime=linux.c_timespec(tv_sec=0, tv_nsec=libc.UTIME_NOW),
|
||||
mtime=linux.c_timespec(tv_sec=0, tv_nsec=libc.UTIME_OMIT),
|
||||
)))
|
||||
|
||||
yield os.path.join(
|
||||
self._dirname_objects,
|
||||
name,
|
||||
self._dirname_data,
|
||||
)
|
||||
|
||||
def _last_used(self, name: str) -> float:
|
||||
"""Return the last time the given object was last used.
|
||||
|
||||
Note that the resolution is only as good as what the filesystem "atime"
|
||||
gives us.
|
||||
"""
|
||||
obj_lock_path = os.path.join(
|
||||
self._dirname_objects, name, self._filename_object_lock)
|
||||
try:
|
||||
return os.stat(self._path(obj_lock_path)).st_atime
|
||||
except OSError as e:
|
||||
if e.errno in [errno.EAGAIN, errno.ENOENT, errno.ENOTDIR]:
|
||||
raise self.MissError() from None
|
||||
raise e
|
||||
|
||||
@property
|
||||
def info(self) -> FsCacheInfo:
|
||||
"""Query Cache Information
|
||||
|
|
|
|||
|
|
@ -4,10 +4,14 @@
|
|||
|
||||
# pylint: disable=protected-access
|
||||
|
||||
import contextlib
|
||||
import json
|
||||
import os
|
||||
import pathlib
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
|
|
@ -20,6 +24,22 @@ def tmpdir_fixture():
|
|||
yield tmp
|
||||
|
||||
|
||||
def sleep_for_fs():
    """Pause briefly so atime/mtime updates become observable in the fs."""
    # 50ms is comfortably above the timestamp granularity of filesystems
    # that record precise (sub-second) timestamps.
    time.sleep(0.05)
|
||||
|
||||
|
||||
def has_precise_fs_timestamps():
    """Probe whether the filesystem under /var/tmp records mtimes precisely
    enough that two writes separated by `sleep_for_fs()` yield strictly
    increasing timestamps."""
    with tempfile.TemporaryDirectory(dir="/var/tmp") as workdir:
        probe = pathlib.Path(workdir) / "stamp"
        probe.write_bytes(b"m1")
        first = probe.stat().st_mtime
        sleep_for_fs()
        probe.write_bytes(b"m2")
        second = probe.stat().st_mtime
        return second > first
|
||||
|
||||
|
||||
def test_calculate_space(tmpdir):
|
||||
#
|
||||
# Test the `_calculate_space()` helper and verify it only includes file
|
||||
|
|
@ -398,6 +418,51 @@ def test_size_discard(tmpdir):
|
|||
with cache.load("foo") as rpath:
|
||||
pass
|
||||
|
||||
def test_cache_last_used_noent(tmpdir):
    """`_last_used()` must report a cache miss for an unknown object."""
    cache = fscache.FsCache("osbuild-test-appid", tmpdir)
    with pytest.raises(fscache.FsCache.MissError):
        # fix typo in test data: "existant" -> "existent"
        cache._last_used("non-existent-entry")
|
||||
|
||||
|
||||
@pytest.mark.skipif(not has_precise_fs_timestamps(), reason="need precise fs timestamps")
def test_cache_load_updates_last_used(tmpdir):
    """Loading an object bumps its atime-based last-used timestamp while
    leaving the lock-file mtime untouched."""
    cache = fscache.FsCache("osbuild-test-appid", tmpdir)
    with cache:
        cache.info = cache.info._replace(maximum_size=1024*1024)
        with cache.store("foo") as rpath:
            pass
        with cache.load("foo") as rpath:
            pass
        first_used = cache._last_used("foo")
        assert first_used > 0
        # would be nice to have a helper for this in cache
        lock_relpath = os.path.join(
            cache._dirname_objects, "foo", cache._filename_object_lock)
        lock_abspath = cache._path(lock_relpath)
        mtime_before = os.stat(lock_abspath).st_mtime
        sleep_for_fs()
        with cache.load("foo") as rpath:
            pass
        # the load above must have advanced the last-used time ...
        assert cache._last_used("foo") > first_used
        # ... without modifying the lock-file's mtime
        assert os.stat(lock_abspath).st_mtime == mtime_before
|
||||
|
||||
|
||||
@pytest.mark.skipif(os.getuid() != 0, reason="needs root")
def test_cache_load_updates_last_used_on_noatime(tmp_path):
    """The last-used timestamp must advance even on a filesystem mounted
    with "noatime" (load() updates the atime explicitly)."""
    mountpoint = tmp_path / "mnt"
    mountpoint.mkdir()
    mnt = os.fspath(mountpoint)
    with contextlib.ExitStack() as stack:
        subprocess.check_call(
            ["mount", "-t", "tmpfs", "-o", "noatime", "none", mnt],
            stdout=sys.stdout,
            stderr=sys.stderr,
        )
        # make sure the tmpfs is unmounted even if the test below fails
        stack.callback(
            subprocess.check_call, ["umount", mnt],
            stdout=sys.stdout, stderr=sys.stderr)
        test_cache_load_updates_last_used(mountpoint)
|
||||
|
||||
|
||||
def test_cache_full_behavior(tmp_path):
|
||||
cache = fscache.FsCache("osbuild-cache-evict", tmp_path)
|
||||
|
|
@ -422,7 +487,7 @@ def test_cache_full_behavior(tmp_path):
|
|||
assert cache._calculate_space(tmp_path) < 192 * 1024
|
||||
with cache.load("o2") as o:
|
||||
assert o != ""
|
||||
# adding a third one will (silently) fail
|
||||
# adding a third one will (silently) fail because the cache is full
|
||||
with cache.store("o3") as rpath:
|
||||
rpath_f3 = os.path.join(tmp_path, rpath, "f3")
|
||||
with open(rpath_f3, "wb") as fp:
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue