LRU Cache Implementation and Real-World Applications

An LRU (Least Recently Used) cache evicts the least recently accessed item when the cache is full. It is the most common eviction policy for in-memory caches because it approximates optimal eviction for workloads with temporal locality.

Introduction#

An LRU (Least Recently Used) cache evicts the least recently accessed item when the cache is full. It is the most common eviction policy for in-memory caches because it approximates optimal eviction for workloads with temporal locality. Understanding the implementation helps you use caching correctly and design efficient systems.

Data Structure#

The canonical LRU cache uses a doubly linked list combined with a hash map:

  • Hash map: O(1) lookup by key
  • Doubly linked list: O(1) move to head and remove from tail
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
from collections import OrderedDict
from threading import RLock
from typing import TypeVar, Generic, Optional

K = TypeVar("K")
V = TypeVar("V")

class LRUCache(Generic[K, V]):
    """Thread-safe LRU cache backed by an OrderedDict.

    The OrderedDict keeps insertion order: the most recently used key is
    kept at the *end*, so eviction pops from the front.
    """

    def __init__(self, capacity: int):
        # NOTE(review): capacity is assumed to be >= 1 — not validated here.
        self.capacity = capacity
        self._cache: OrderedDict[K, V] = OrderedDict()
        # RLock (re-entrant) so subclasses can take the lock and still call
        # these locked methods.
        self._lock = RLock()

    def get(self, key: K) -> Optional[V]:
        """Return the cached value for *key*, or None on a miss.

        Note: a stored value of None is indistinguishable from a miss.
        """
        with self._lock:
            if key not in self._cache:
                return None
            # Move to end (most recently used)
            self._cache.move_to_end(key)
            return self._cache[key]

    def put(self, key: K, value: V) -> None:
        """Insert or update *key*, evicting the LRU entry when over capacity."""
        with self._lock:
            if key in self._cache:
                self._cache.move_to_end(key)
            self._cache[key] = value
            if len(self._cache) > self.capacity:
                # Remove least recently used (first item)
                self._cache.popitem(last=False)

    def __len__(self) -> int:
        # Fix: take the lock so len() is consistent with concurrent put/get;
        # the original read the dict unlocked, unlike every other accessor.
        with self._lock:
            return len(self._cache)

Manual Linked List Implementation#

For completeness and interview preparation:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
class Node:
    """Doubly linked list node holding one cache entry."""

    def __init__(self, key=0, value=0):
        self.key = key
        self.value = value
        self.prev = None
        self.next = None

class LRUCacheManual:
    """LRU cache built from a dict plus a hand-rolled doubly linked list.

    head and tail are sentinel nodes: real entries live between them,
    most recently used immediately after head, least recently used
    immediately before tail.
    """

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.cache: dict[int, Node] = {}
        # Sentinels mean _remove/_insert never deal with None neighbours.
        self.head = Node()  # MRU end
        self.tail = Node()  # LRU end
        self.head.next = self.tail
        self.tail.prev = self.head

    def _remove(self, node: Node) -> None:
        """Unlink *node* from wherever it sits in the list."""
        before, after = node.prev, node.next
        before.next = after
        after.prev = before

    def _insert_at_head(self, node: Node) -> None:
        """Splice *node* in right after the head sentinel (MRU position)."""
        first = self.head.next
        node.prev, node.next = self.head, first
        first.prev = node
        self.head.next = node

    def get(self, key: int) -> int:
        """Return the value for *key*, promoting it to MRU; -1 on a miss."""
        node = self.cache.get(key)
        if node is None:
            return -1
        self._remove(node)
        self._insert_at_head(node)
        return node.value

    def put(self, key: int, value: int) -> None:
        """Insert or update *key*; evict the LRU node when over capacity."""
        existing = self.cache.get(key)
        if existing is not None:
            self._remove(existing)
        fresh = Node(key, value)
        self.cache[key] = fresh
        self._insert_at_head(fresh)
        if len(self.cache) > self.capacity:
            victim = self.tail.prev  # node just before the tail sentinel
            self._remove(victim)
            del self.cache[victim.key]

TTL-Aware LRU Cache#

Production caches need TTL (time-to-live) in addition to capacity-based eviction.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import time
from dataclasses import dataclass
from typing import Any

@dataclass
class CacheEntry:
    # value holds the cached payload; expires_at is a time.monotonic()
    # deadline, after which the entry is treated as absent.
    value: Any
    expires_at: float

class TTLLRUCache:
    """LRU cache whose entries also expire after a time-to-live.

    Expired entries are removed lazily: the get() that discovers an
    expired entry deletes it and reports a miss.
    """

    def __init__(self, capacity: int, default_ttl: float = 300.0):
        self.capacity = capacity
        self.default_ttl = default_ttl
        self._cache: OrderedDict[str, CacheEntry] = OrderedDict()
        self._lock = RLock()

    def get(self, key: str) -> Optional[Any]:
        """Return the live value for *key*, or None if missing or expired."""
        with self._lock:
            if key not in self._cache:
                return None
            entry = self._cache[key]
            # monotonic() is immune to wall-clock adjustments, so TTLs
            # can't be stretched or shortened by clock changes.
            if time.monotonic() > entry.expires_at:
                del self._cache[key]  # lazy expiry on read
                return None
            self._cache.move_to_end(key)
            return entry.value

    def put(self, key: str, value: Any, ttl: Optional[float] = None) -> None:
        """Store *key* with a per-entry ttl in seconds (default_ttl if None)."""
        with self._lock:
            # Fix: the original used `ttl or self.default_ttl`, which wrongly
            # treats an explicit ttl of 0.0 as "use the default". Test
            # against None so any explicit ttl — including 0 — is honored.
            effective_ttl = self.default_ttl if ttl is None else ttl
            expires_at = time.monotonic() + effective_ttl
            if key in self._cache:
                self._cache.move_to_end(key)
            self._cache[key] = CacheEntry(value=value, expires_at=expires_at)
            if len(self._cache) > self.capacity:
                self._cache.popitem(last=False)

Decorator Pattern#

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
import functools
import asyncio

def lru_cached(maxsize: int = 128, ttl: float = 300.0):
    """Decorator that caches async function results in a TTL-aware LRU.

    Fix: results are boxed in a 1-tuple before storage. TTLLRUCache.get()
    returns None on a miss, so in the original a legitimately-None result
    was indistinguishable from a miss and got recomputed on every call;
    the boxed form is always non-None when present.
    """
    cache = TTLLRUCache(capacity=maxsize, default_ttl=ttl)

    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            # Build cache key from arguments.
            # NOTE(review): relies on the repr of args/kwargs — assumes
            # arguments have stable, distinguishing reprs; confirm for
            # the functions this is applied to.
            key = f"{func.__name__}:{args}:{sorted(kwargs.items())}"
            boxed = cache.get(key)
            if boxed is not None:
                return boxed[0]
            result = await func(*args, **kwargs)
            cache.put(key, (result,))
            return result
        return wrapper
    return decorator

# Usage
@lru_cached(maxsize=1000, ttl=60.0)
async def get_user_profile(user_id: int) -> dict:
    """Fetch one user's row, cached for 60 s (up to 1000 distinct keys).

    NOTE(review): `db` is assumed to be an asyncpg-style pool/connection
    defined elsewhere in the application — not shown in this snippet.
    """
    return await db.fetchrow("SELECT * FROM users WHERE id = $1", user_id)

Go: Mutex-Protected LRU#

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
package cache

import (
    "container/list"
    "sync"
)

// entry is the payload stored in each list element; the key is kept
// alongside the value so eviction can delete the map entry in O(1).
type entry struct {
    key   string
    value any
}

// LRU is a fixed-capacity, mutex-protected LRU cache. The list keeps
// recency order (front = most recently used); the map gives O(1) lookup
// from key to list element.
type LRU struct {
    cap   int                      // maximum number of entries
    mu    sync.Mutex               // guards ll and items
    ll    *list.List               // recency order, front = MRU
    items map[string]*list.Element // key -> element in ll
}

// New returns an empty LRU cache that holds at most capacity entries.
func New(capacity int) *LRU {
    c := &LRU{cap: capacity}
    c.ll = list.New()
    c.items = make(map[string]*list.Element)
    return c
}

// Get returns the value for key and whether it was present, promoting
// the entry to most recently used on a hit.
func (c *LRU) Get(key string) (any, bool) {
    c.mu.Lock()
    defer c.mu.Unlock()
    el, ok := c.items[key]
    if !ok {
        return nil, false
    }
    c.ll.MoveToFront(el)
    return el.Value.(*entry).value, true
}

// Put inserts or updates key, evicting the least recently used entry
// when the cache exceeds its capacity.
func (c *LRU) Put(key string, value any) {
    c.mu.Lock()
    defer c.mu.Unlock()

    // Existing key: update in place and promote to MRU.
    if existing, hit := c.items[key]; hit {
        existing.Value.(*entry).value = value
        c.ll.MoveToFront(existing)
        return
    }

    // New key: push to the front, then evict from the back if over capacity.
    c.items[key] = c.ll.PushFront(&entry{key, value})
    if c.ll.Len() > c.cap {
        oldest := c.ll.Back()
        delete(c.items, oldest.Value.(*entry).key)
        c.ll.Remove(oldest)
    }
}

Cache Hit Rate Monitoring#

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
class InstrumentedLRUCache(LRUCache):
    """LRUCache subclass that tracks hit/miss counts for monitoring.

    Note: because LRUCache.get() returns None on a miss, a stored value
    of None is counted as a miss here as well.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.hits = 0    # number of gets that returned a value
        self.misses = 0  # number of gets that returned None

    def get(self, key):
        """Delegate to LRUCache.get, recording a hit or a miss."""
        # Fix: update the counters while holding the cache's re-entrant
        # lock. The original incremented them unlocked, and `+=` is not
        # atomic, so concurrent readers could lose increments.
        with self._lock:
            result = super().get(key)
            if result is not None:
                self.hits += 1
            else:
                self.misses += 1
            return result

    @property
    def hit_rate(self) -> float:
        """Fraction of gets that were hits; 0.0 before any get."""
        total = self.hits + self.misses
        return self.hits / total if total > 0 else 0.0

Target hit rates: 80%+ for application caches, 95%+ for CDNs. Below 50% suggests the cache capacity or TTL is too small for the workload’s access patterns.

Conclusion#

The doubly linked list + hash map combination gives O(1) get and put operations. Python’s OrderedDict provides this without manual linked list management. In production, add TTL eviction alongside capacity eviction and monitor hit rates. For distributed caching, Redis provides a ready-made LRU implementation via maxmemory-policy allkeys-lru.

Contents