[arvados] updated: 2.1.0-2965-g356fa1d4d
git repository hosting
git at public.arvados.org
Thu Oct 20 21:42:57 UTC 2022
Summary of changes:
sdk/python/arvados/diskcache.py | 38 +++++++++++++++++++++++++++++++++++++-
sdk/python/arvados/keep.py | 20 +++++++++++++++++---
2 files changed, 54 insertions(+), 4 deletions(-)
via 356fa1d4d17c402470a6b49ae943e7ec278b1a46 (commit)
from 2a16e128ab63f87aa4656bf860ae3f6b8633c4ca (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
commit 356fa1d4d17c402470a6b49ae943e7ec278b1a46
Author: Peter Amstutz <peter.amstutz at curii.com>
Date: Thu Oct 20 17:37:12 2022 -0400
18842: put the file locking back
Strictly speaking, we don't need file locking for correctness, but it
does prevent a buildup of ghost files that take up space but are no
longer visible on the file system.
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>
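For context, here is a minimal standalone sketch of the flock(2) pattern
this commit uses: readers hold a shared lock while a block is in use, and
a deleter only unlinks the file if it can take a non-blocking exclusive
lock. The function names and paths are illustrative stand-ins, not names
from the commit.

    import fcntl
    import os

    def read_block(path):
        # Illustrative helper, not part of the commit.
        # Reader: hold a shared lock for as long as the block is in
        # use; a flock lock lasts until the handle is closed.
        f = open(path, "rb")
        fcntl.flock(f, fcntl.LOCK_SH)
        return f

    def try_delete_block(path):
        # Deleter: unlink only if no other process holds a shared
        # lock; LOCK_NB makes the attempt fail fast instead of waiting.
        try:
            with open(path, "rb") as f:
                fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                os.remove(path)
                return True
        except OSError:
            # Either the file is already gone or someone holds a lock.
            return False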
diff --git a/sdk/python/arvados/diskcache.py b/sdk/python/arvados/diskcache.py
index c2afd3bfc..6f1ccb97e 100644
--- a/sdk/python/arvados/diskcache.py
+++ b/sdk/python/arvados/diskcache.py
@@ -8,6 +8,7 @@ import os
import traceback
import stat
import tempfile
+import fcntl
class DiskCacheSlot(object):
__slots__ = ("locator", "ready", "content", "cachedir")
@@ -46,6 +47,10 @@ class DiskCacheSlot(object):
tmpfile = f.name
os.chmod(tmpfile, stat.S_IRUSR | stat.S_IWUSR)
+ # acquire a shared lock; this tells other processes that
+ # we're using this block and to please not delete it.
+ fcntl.flock(f, fcntl.LOCK_SH)
+
f.write(value)
f.flush()
os.rename(tmpfile, final)
@@ -86,9 +91,36 @@ class DiskCacheSlot(object):
blockdir = os.path.join(self.cachedir, self.locator[0:3])
final = os.path.join(blockdir, self.locator)
try:
- os.remove(final)
+ with open(final, "rb") as f:
+ # drop our shared lock before trying to upgrade
+ # to an exclusive one,
+ fcntl.flock(f, fcntl.LOCK_UN)
+
+ # try to get an exclusive lock, this ensures other
+ # processes are not using the block. It is
+ # nonblocking and will throw an exception if we
+ # can't get it, which is fine because that means
+ # we just won't try to delete it.
+ #
+ # Note that the file locking is not strictly
+ # necessary: we could just remove the file and
+ # the kernel would ensure that the underlying
+ # inode remains available as long as other
+ # processes still have the file open. However, if
+ # you have multiple processes sharing the cache
+ # and deleting each other's files, you'll end up
+ # with a bunch of ghost files that don't show up
+ # in the file system but are still taking up
+ # space, which isn't particularly user friendly.
+ # The locking strategy ensures that cache blocks
+ # in use remain visible.
+ #
+ fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+ os.remove(final)
+ return True
except OSError:
pass
+ return False
@staticmethod
def get_from_disk(locator, cachedir):
@@ -98,6 +130,10 @@ class DiskCacheSlot(object):
try:
filehandle = open(final, "rb")
+ # acquire a shared lock; this tells other processes that
+ # we're using this block and to please not delete it.
+ fcntl.flock(filehandle, fcntl.LOCK_SH)
+
content = mmap.mmap(filehandle.fileno(), 0, access=mmap.ACCESS_READ)
dc = DiskCacheSlot(locator, cachedir)
dc.content = content
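Assembled from the hunks above, the write path looks roughly like the
following sketch. write_block and its parameters are stand-ins, not names
from the source; the key point is that the returned handle must stay
open, because a flock lock is released when its descriptor is closed.

    import fcntl
    import os
    import stat
    import tempfile

    def write_block(cachedir, locator, value):
        # Illustrative condensation of the diff, not the actual method.
        # Write the block under a temporary name, lock it shared, then
        # atomically rename it to its final name.
        blockdir = os.path.join(cachedir, locator[0:3])
        os.makedirs(blockdir, exist_ok=True)
        final = os.path.join(blockdir, locator)
        f = tempfile.NamedTemporaryFile(dir=blockdir, delete=False)
        os.chmod(f.name, stat.S_IRUSR | stat.S_IWUSR)
        fcntl.flock(f, fcntl.LOCK_SH)
        f.write(value)
        f.flush()
        os.rename(f.name, final)
        # Keep f open: closing it would drop the shared lock.
        return f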
diff --git a/sdk/python/arvados/keep.py b/sdk/python/arvados/keep.py
index 8d95b2dc7..b6c98d143 100644
--- a/sdk/python/arvados/keep.py
+++ b/sdk/python/arvados/keep.py
@@ -235,7 +235,7 @@ class KeepBlockCache(object):
return len(self.content)
def evict(self):
- pass
+ return True
def cap_cache(self):
'''Cap the cache size to self.cache_max'''
@@ -246,11 +246,25 @@ class KeepBlockCache(object):
sm = sum([slot.size() for slot in self._cache])
while len(self._cache) > 0 and (sm > self.cache_max or len(self._cache) > self._max_slots):
for i in range(len(self._cache)-1, -1, -1):
+ # start from the back and find a candidate slot to evict
if self._cache[i].ready.is_set():
- self._cache[i].evict()
+ sz = self._cache[i].size()
+
+ # If evict returns false, it means the
+ # underlying disk cache couldn't lock the file
+ # for deletion because another process was
+ # using it. Don't count it as reducing the
+ # amount of data in the cache; find something
+ # else to throw out.
+ if self._cache[i].evict():
+ sm -= sz
+
+ # Either way, we forget about it: either the
+ # other process will delete it, or, if we need
+ # it again and it is still there, we'll find
+ # it on disk.
del self._cache[i]
break
- sm = sum([slot.size() for slot in self._cache])
def _get(self, locator):
# Test if the locator is already in the cache
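Read in isolation, the eviction accounting in cap_cache() amounts to the
sketch below. Here slots, cache_max, and max_slots stand in for
self._cache, self.cache_max, and self._max_slots, and the for/else guard
is an addition of this sketch, not something in the diff: it subtracts a
slot's size only when evict() actually deleted the file, but drops the
in-memory reference either way.

    def cap_cache(slots, cache_max, max_slots):
        # Illustrative sketch; parameter names are stand-ins for the
        # KeepBlockCache attributes. Walk the LRU list from the back,
        # evicting ready slots until the cache fits both limits.
        sm = sum(slot.size() for slot in slots)
        while slots and (sm > cache_max or len(slots) > max_slots):
            for i in range(len(slots) - 1, -1, -1):
                if slots[i].ready.is_set():
                    sz = slots[i].size()
                    # Only count the space as freed if the underlying
                    # file could actually be locked and deleted.
                    if slots[i].evict():
                        sm -= sz
                    # Forget the slot either way; if the block is still
                    # on disk we can find it again later.
                    del slots[i]
                    break
            else:
                # Guard added in this sketch: no slot was ready to
                # evict, so stop rather than spin forever.
                break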
-----------------------------------------------------------------------
hooks/post-receive
--
More information about the arvados-commits mailing list