diff --git a/src/snek/system/cache.py b/src/snek/system/cache.py
index 9bbed53..77905c1 100644
--- a/src/snek/system/cache.py
+++ b/src/snek/system/cache.py
@@ -5,58 +5,92 @@
 from collections import OrderedDict  # OrderedDict gives O(1) LRU management
 # Assuming snek.system.security exists and security.hash is an async function
 from snek.system import security
+
+# NOTE: functools.cache only supports synchronous callables and is not used by this class.
+# cache = functools.cache  # unused; removed from the class logic
 
 CACHE_MAX_ITEMS_DEFAULT = 5000
 
 
 class Cache:
     def __init__(self, app, max_items=CACHE_MAX_ITEMS_DEFAULT):
         self.app = app
+        # Replaced the plain dict with an OrderedDict for O(1) LRU moves
         self.cache = OrderedDict()
         self.max_items = max_items
         self.stats = {}
         self.enabled = True
+        # LRU ordering now lives in the OrderedDict; self.lru survives only
+        # because get_stats() exposes it publicly.
         self.lru = []
+        # A single asyncio.Lock serializes access from concurrent coroutines
         self._lock = asyncio.Lock()
         self.version = ((42 + 420 + 1984 + 1990 + 10 + 6 + 71 + 3004 + 7245) ^ 1337) + 4
 
+    # --- Core cache logic (now O(1) operations) ---
     async def get(self, args):
+        # All cache access happens under the lock
         async with self._lock:
             if not self.enabled:
                 return None
+            # Cache miss
             if args not in self.cache:
                 await self.update_stat(args, "get")
+                # print("Cache miss!", args, flush=True)
                 return None
             await self.update_stat(args, "get")
-            value = self.cache.pop(args)
-            self.cache[args] = value
+            # O(1) LRU update: mark the key as most recently used
+            self.cache.move_to_end(args)
+            value = self.cache[args]
+            # self.lru is no longer used for LRU tracking; it is refreshed only
+            # in get_stats(), where it remains part of the public interface.
+            # print("Cache hit!", args, flush=True)
             return value
 
     async def set(self, args, result):
-
+        # All cache access happens under the lock
         async with self._lock:
             if not self.enabled:
                 return
             is_new = args not in self.cache
+            # 1. Set or overwrite the value
             self.cache[args] = result
+            # 2. Update the LRU order: move the key to the end (most recently used)
             self.cache.move_to_end(args)
             await self.update_stat(args, "set")
+            # 3. Evict in O(1) once the cache grows past max_items
             if len(self.cache) > self.max_items:
+                # popitem(last=False) removes the first (least recently used) item
                 evicted_key, _ = self.cache.popitem(last=False)
+                # The old code never pruned self.lru on eviction; with the
+                # OrderedDict as the single source of truth, no extra
+                # bookkeeping is needed here.
             if is_new:
                 self.version += 1
+            # print(f"Cache store! {len(self.cache)} items. New version:", self.version, flush=True)
 
     async def delete(self, args):
+        # All cache access happens under the lock
         async with self._lock:
             if not self.enabled:
                 return
@@ -64,18 +98,29 @@ class Cache:
             if args in self.cache:
                 await self.update_stat(args, "delete")
                 del self.cache[args]
+                # No list bookkeeping is needed; the OrderedDict is the only index
+
+    # --- Utility methods (interface retained) ---
     async def get_stats(self):
+        # All cache access happens under the lock
         async with self._lock:
             all_ = []
+            # self.lru is still part of the public interface, so rebuild it here
+            # from the OrderedDict (the source of truth).
             lru_keys = list(self.cache.keys())
+            # In the original self.lru list the front was MRU and the back was LRU,
+            # so reverse the OrderedDict key order (oldest first) to match.
             lru_keys.reverse()
-            self.lru = lru_keys
+            self.lru = lru_keys  # refresh the (now redundant) public attribute
             for key in self.lru:
                 if key not in self.stats:
                     self.stats[key] = {"set": 0, "get": 0, "delete": 0}
+                # Guard against keys whose stats outlive an eviction
                 if key in self.cache:
                     value_record = self.cache[key].record if hasattr(self.cache.get(key), 'record') else self.cache[key]
                     all_.append(
@@ -89,6 +134,7 @@ class Cache:
                     )
             return all_
 
+    # Synchronous helper: CPU-bound dict copy, nothing to await
     def serialize(self, obj):
         cpy = obj.copy()
         cpy.pop("created_at", None)
@@ -97,12 +143,16 @@ class Cache:
         cpy.pop("password", None)
         return cpy
 
     async def update_stat(self, key, action):
-        async with self._lock:
-            if key not in self.stats:
-                self.stats[key] = {"set": 0, "get": 0, "delete": 0}
-            self.stats[key][action] = self.stats[key][action] + 1
+        # Callers (get/set/delete) already hold self._lock and asyncio.Lock is
+        # not reentrant, so this method must not re-acquire the lock.
+        if key not in self.stats:
+            self.stats[key] = {"set": 0, "get": 0, "delete": 0}
+        self.stats[key][action] = self.stats[key][action] + 1
 
+    # Synchronous helper: CPU-bound JSON fallback, nothing to await
     def json_default(self, value):
         try:
             return json.dumps(value.__dict__, default=str)
@@ -111,6 +161,7 @@ class Cache:
     # Retained async due to the call to await security.hash()
     async def create_cache_key(self, args, kwargs):
+        # No lock is needed here; kept async because security.hash is awaited
         return await security.hash(
             json.dumps(
                 {"args": args, "kwargs": kwargs},
@@ -120,6 +171,7 @@ class Cache:
         )
 
     def async_cache(self, func):
+        # Decorator structure unchanged; it now benefits from the O(1) get/set
         @functools.wraps(func)
         async def wrapper(*args, **kwargs):
             cache_key = await self.create_cache_key(args, kwargs)
@@ -132,6 +184,7 @@ class Cache:
         return wrapper
 
     def async_delete_cache(self, func):
+        # Relies on the O(1) self.delete() above
         @functools.wraps(func)
         async def wrapper(*args, **kwargs):
             cache_key = await self.create_cache_key(args, kwargs)
@@ -141,6 +194,8 @@ class Cache:
         return wrapper
 
 
+# --- Standalone async_cache decorator (unchanged) ---
+# NOTE: This module-level decorator is separate from the Cache class and does no LRU eviction.
 def async_cache(func):
     cache = {}
 
@@ -153,4 +208,3 @@ def async_cache(func):
             return result
 
     return wrapper
-
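
For readers outside the snek codebase, the pattern this patch relies on reduces to a short standalone sketch: OrderedDict.move_to_end() marks a hit as most recently used, OrderedDict.popitem(last=False) evicts the least recently used entry, and a single non-reentrant asyncio.Lock serializes access between coroutines. The sketch below is illustrative only; the class name MiniLRU and the demo keys are not part of snek.

# Minimal LRU sketch using only the standard library (illustrative, not snek code).
import asyncio
from collections import OrderedDict


class MiniLRU:
    def __init__(self, max_items=2):
        self.cache = OrderedDict()
        self.max_items = max_items
        self._lock = asyncio.Lock()  # not reentrant: never re-acquire while held

    async def get(self, key):
        async with self._lock:
            if key not in self.cache:
                return None
            self.cache.move_to_end(key)  # O(1): mark as most recently used
            return self.cache[key]

    async def set(self, key, value):
        async with self._lock:
            self.cache[key] = value
            self.cache.move_to_end(key)  # O(1), even if the key already existed
            if len(self.cache) > self.max_items:
                self.cache.popitem(last=False)  # O(1): evict least recently used


async def demo():
    lru = MiniLRU(max_items=2)
    await lru.set("a", 1)
    await lru.set("b", 2)
    await lru.get("a")         # "a" becomes most recently used
    await lru.set("c", 3)      # evicts "b", the least recently used key
    print(await lru.get("b"))  # None
    print(await lru.get("a"))  # 1


asyncio.run(demo())

Because asyncio.Lock is not reentrant, the sketch (like the patched update_stat above) never re-acquires the lock from code that already holds it.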