Building on https://stackoverflow.com/a/33672499, we can avoid replacing the class-level method with an instance attribute on first execution by using a WeakKeyDictionary:
from weakref import ref, WeakKeyDictionary
from functools import lru_cache, wraps
def lru_cache_method(*lru_args, **lru_kwargs):
    """Decorator factory: per-instance LRU caching for instance methods.

    One ``lru_cache``-wrapped closure is kept per instance in a
    ``WeakKeyDictionary``, so a cache entry disappears as soon as its
    instance is garbage-collected, and the closure itself only holds a
    weak reference to the instance.
    """
    def decorator(method):
        # instance -> memoized bound closure; entries vanish with the instance.
        per_instance = WeakKeyDictionary()

        @wraps(method)
        def cached_method(self, *args, **kwargs):
            memoized = per_instance.get(self)
            if memoized is None:
                # Weak ref avoids the closure keeping `self` alive.
                weak_self = ref(self)

                @wraps(method)
                @lru_cache(*lru_args, **lru_kwargs)
                def memoized(*inner_args, **inner_kwargs):
                    return method(weak_self(), *inner_args, **inner_kwargs)

                per_instance[self] = memoized
            return memoized(*args, **kwargs)

        return cached_method
    return decorator
Doing so addresses the following limitation of the original implementation: it only handles hashable instances, and it cannot be applied to the __hash__ method itself. That limitation can in turn be circumvented (see also: Python WeakKeyDictionary for unhashable types):
from weakref import WeakKeyDictionary, WeakValueDictionary, ref
from functools import lru_cache, wraps
class IdKey:
    """Hashable stand-in for an arbitrary (possibly unhashable) object.

    Only the object's ``id()`` is stored, so the key never references
    the object itself; two IdKeys compare equal iff they were built from
    objects sharing the same id.
    """

    def __init__(self, value):
        self._id = id(value)

    def __hash__(self):
        return self._id

    def __eq__(self, other):
        return self._id == other._id

    def __repr__(self):
        return f"<IdKey(_id={self._id})>"
def lru_cache_method(*lru_args, **lru_kwargs):
    """Per-instance LRU caching that also works for unhashable instances
    (including ``__hash__`` itself) by keying on an id-based surrogate.
    """
    def decorator(method):
        # IdKey -> instance, weak on the VALUE side: the dict keeps the
        # IdKey alive exactly as long as the instance it stands for.
        instances = WeakValueDictionary()
        # IdKey -> memoized closure, weak on the KEY side: once the IdKey
        # dies (i.e. the instance died), the closure entry is dropped too.
        methods = WeakKeyDictionary()

        @wraps(method)
        def cached_method(self, *args, **kwargs):
            key = IdKey(self)
            memoized = methods.get(key)
            if memoized is None:
                # Tie the key's lifetime to the instance's lifetime.
                instances[key] = self
                # Weak ref so the closure cannot keep `self` alive,
                # avoiding a mutual strong dependency.
                weak_self = ref(self)

                @wraps(method)
                @lru_cache(*lru_args, **lru_kwargs)
                def memoized(*inner_args, **inner_kwargs):
                    return method(weak_self(), *inner_args, **inner_kwargs)

                # Entry is collectable as soon as `self` (hence `key`) dies.
                methods[key] = memoized
            return memoized(*args, **kwargs)

        return cached_method
    return decorator
which you can test as follows:
from time import sleep
class X:
    """Demo class whose ``__hash__`` is memoized via lru_cache_method."""

    def __init__(self, *args, **kwargs):
        self.args = tuple(args)
        self.kwargs = tuple(kwargs.items())

    @lru_cache_method(maxsize=1)
    def __hash__(self):
        # Hash of the constructor arguments captured at __init__ time.
        return hash((self.args, self.kwargs))
class Y(dict):
    """Demo dict subclass (instances are unhashable) with a cached slow method."""

    def __init__(self):
        # Deliberately skip dict initialization to keep the demo minimal.
        pass

    @lru_cache_method(maxsize=1)
    def sleep(self):
        # Only the first call pays the 1-second cost; later calls are cached.
        sleep(1)
if __name__ == '__main__':
    x1 = X(*[1, 2, 3], **dict(a=1, b=2))
    x2 = X(*[1, 2], **dict(a=2, b=1))

    # First hashes: computed once, then served from each instance's cache.
    print(hash(x1))
    print(hash(x2))

    # Mutate x1: __hash__ takes no arguments, so with maxsize=1 the stale
    # cached value is still served for x1.
    x1.args = (1, 2)
    print(hash(x1))
    print(hash(x2))

    x2.kwargs = dict(a=1, b=2)
    print(hash(x1))
    print(hash(x2))

    # Inspect which IdKey/X objects are still tracked before deletion...
    import gc
    gc.collect()
    print([obj for obj in gc.get_objects() if isinstance(obj, IdKey)])
    print([obj for obj in gc.get_objects() if isinstance(obj, X)])

    # ...and after: both lists should be empty once the instances die.
    del x1
    del x2
    print([obj for obj in gc.get_objects() if isinstance(obj, IdKey)])
    print([obj for obj in gc.get_objects() if isinstance(obj, X)])

    # Timing: only the first of the 100 calls actually sleeps.
    from timeit import timeit
    print(timeit('y1.sleep()', 'from main import Y; y1 = Y()', number=100))

    # NOTE(review): dict subclasses are unhashable, so this is expected to
    # raise TypeError — confirm whether that is the intended demonstration.
    hash(Y())
Note that linters flag this pattern as B019: "Use of functools.lru_cache or functools.cache on class methods can lead to memory leaks. The cache may retain instance references, preventing garbage collection." One might ask: why is lru_cache holding on to the instance at all — doesn't the cache use some hash rather than the actual object? It does not: lru_cache stores strong references to the actual argument objects (including self) as cache keys, which is precisely what the weakref machinery above is designed to avoid.