Decorator Pattern: Extending Behavior Without Subclassing

The decorator pattern adds behavior to an object dynamically without modifying its class or using subclassing. It wraps the original object, forwarding calls while adding pre/post logic. This is how Python’s @functools.wraps, caching decorators, logging wrappers, and retry logic are built.

Introduction#

The decorator pattern adds behavior to an object dynamically without modifying its class or using subclassing. It wraps the original object, forwarding calls while adding pre/post logic. This is how Python’s @functools.wraps, caching decorators, logging wrappers, and retry logic are built.

Core Structure#

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
from abc import ABC, abstractmethod
from typing import Any

# Component interface
class DataSource(ABC):
    """Component interface: a keyed read/write store that decorators wrap."""

    @abstractmethod
    def read(self, key: str) -> Any:
        """Return the value stored under *key*."""
        ...

    @abstractmethod
    def write(self, key: str, value: Any) -> None:
        """Store *value* under *key*."""
        ...

# Concrete component
class DatabaseSource(DataSource):
    """Concrete component: reads/writes the `store` table via module-level db helpers."""

    def read(self, key: str) -> Any:
        # NOTE(review): key is interpolated directly into the SQL string — this
        # is a SQL-injection risk if key can come from untrusted input. Prefer a
        # parameterized query (placeholder + args) if db_query supports one.
        return db_query(f"SELECT value FROM store WHERE key = '{key}'")

    def write(self, key: str, value: Any) -> None:
        # NOTE(review): same string-built-SQL concern as read(); also, value is
        # formatted via str() by the f-string, so non-str values are stored as text.
        db_execute(f"INSERT INTO store (key, value) VALUES ('{key}', '{value}')")

# Base decorator: delegates to wrapped component
class DataSourceDecorator(DataSource):
    """Base decorator: wraps another DataSource and forwards both operations.

    Concrete decorators subclass this and override read/write, adding their
    pre/post logic around the delegated call.
    """

    def __init__(self, wrapped: DataSource):
        # Kept as `_wrapped` — concrete decorators access it directly.
        self._wrapped = wrapped

    def read(self, key: str) -> Any:
        # Pure pass-through by default.
        return self._wrapped.read(key)

    def write(self, key: str, value: Any) -> None:
        # Pure pass-through by default.
        self._wrapped.write(key, value)

Cache Decorator#

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
import time
from typing import Optional

class CachingDecorator(DataSourceDecorator):
    """Read-through TTL cache around a DataSource.

    Reads are served from an in-memory cache for up to *ttl* seconds;
    writes go straight to the wrapped source and refresh the cache entry.
    """

    def __init__(self, wrapped: DataSource, ttl: float = 300.0):
        super().__init__(wrapped)
        # key -> (value, timestamp at which the value was cached)
        self._cache: dict[str, tuple[Any, float]] = {}
        self._ttl = ttl

    def read(self, key: str) -> Any:
        # Fix: check for the cache *entry* instead of `value is not None` —
        # the old check meant a legitimately cached None was never served
        # and every read of such a key hit the wrapped source.
        entry = self._cache.get(key)
        if entry is not None:
            value, ts = entry
            if time.time() - ts < self._ttl:
                return value
            del self._cache[key]  # drop the stale entry so it can't linger

        value = self._wrapped.read(key)
        self._cache[key] = (value, time.time())
        return value

    def write(self, key: str, value: Any) -> None:
        self._wrapped.write(key, value)
        self._cache[key] = (value, time.time())  # keep cache coherent on write

Logging Decorator#

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
import logging
import time

class LoggingDecorator(DataSourceDecorator):
    """Logs call duration at DEBUG on success and the error at ERROR on failure."""

    def __init__(self, wrapped: DataSource, logger: Optional[logging.Logger] = None):
        super().__init__(wrapped)
        # Fall back to this module's logger when none is supplied.
        self._logger = logger or logging.getLogger(__name__)

    def read(self, key: str) -> Any:
        began = time.perf_counter()
        try:
            value = self._wrapped.read(key)
            self._logger.debug(
                "read key=%s duration=%.1fms",
                key,
                (time.perf_counter() - began) * 1000,
            )
            return value
        except Exception as exc:
            self._logger.error("read failed key=%s error=%s", key, exc)
            raise

    def write(self, key: str, value: Any) -> None:
        began = time.perf_counter()
        try:
            self._wrapped.write(key, value)
            self._logger.debug(
                "write key=%s duration=%.1fms",
                key,
                (time.perf_counter() - began) * 1000,
            )
        except Exception as exc:
            self._logger.error("write failed key=%s error=%s", key, exc)
            raise

Retry Decorator#

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
import time
from typing import Type

class RetryDecorator(DataSourceDecorator):
    """Retries failed reads/writes with exponential backoff.

    Makes up to ``max_retries + 1`` attempts; waits ``backoff_base * 2**attempt``
    seconds between attempts. Only exception types listed in *exceptions*
    trigger a retry — anything else propagates immediately.
    """

    def __init__(
        self,
        wrapped: DataSource,
        max_retries: int = 3,
        backoff_base: float = 0.5,
        exceptions: tuple[Type[Exception], ...] = (Exception,),
    ):
        super().__init__(wrapped)
        # Fix: a negative max_retries made _with_retry's loop run zero times
        # and end in `raise None` — a confusing TypeError at call time.
        if max_retries < 0:
            raise ValueError("max_retries must be >= 0")
        self._max_retries = max_retries
        self._backoff_base = backoff_base
        self._exceptions = exceptions

    def read(self, key: str) -> Any:
        return self._with_retry(lambda: self._wrapped.read(key))

    def write(self, key: str, value: Any) -> None:
        self._with_retry(lambda: self._wrapped.write(key, value))

    def _with_retry(self, fn):
        """Call zero-arg *fn*, retrying on configured exceptions; re-raise the last error."""
        last_error = None
        for attempt in range(self._max_retries + 1):
            try:
                return fn()
            except self._exceptions as e:
                last_error = e
                if attempt < self._max_retries:
                    # Exponential backoff: base, 2*base, 4*base, ...
                    # No sleep after the final failed attempt.
                    delay = self._backoff_base * (2 ** attempt)
                    time.sleep(delay)
        raise last_error

Composing Decorators#

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# Stack decorators from innermost to outermost: the outermost wrapper is the
# object callers use, and each layer delegates inward.
db = DatabaseSource()
logged_db = LoggingDecorator(db)
retry_db = RetryDecorator(logged_db, max_retries=3)
cached_db = CachingDecorator(retry_db, ttl=60)

# Request flow for a read:
# CachingDecorator → cache miss → RetryDecorator → LoggingDecorator → DatabaseSource
# CachingDecorator → cache hit → return immediately

value = cached_db.read("user:123")

# Order matters:
# Cache BEFORE retry: don't retry on cache hit (correct)
# Log AFTER retry: see final success/failure (correct)
# Or log AROUND retry: see each attempt (useful for debugging)

Function Decorators in Python#

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import functools
import time
import logging

# Python decorators follow the same pattern at the function level

def log_execution(func=None, *, level=logging.DEBUG, logger=None):
    """Log the duration of each call to the decorated function.

    Usable both bare (``@log_execution``) and with options
    (``@log_execution(level=...)``): when *func* is absent, a partially
    applied decorator is returned to receive the function later.
    """
    if func is None:
        # Parenthesized form: defer until the actual function arrives.
        return functools.partial(log_execution, level=level, logger=logger)

    _logger = logger or logging.getLogger(func.__module__)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        begun = time.perf_counter()
        try:
            value = func(*args, **kwargs)
            _logger.log(
                level,
                "%s completed in %.1fms",
                func.__qualname__,
                (time.perf_counter() - begun) * 1000,
            )
            return value
        except Exception as exc:
            _logger.error(
                "%s failed after %.1fms: %s",
                func.__qualname__,
                (time.perf_counter() - begun) * 1000,
                exc,
            )
            raise

    return wrapper

def retry(max_attempts=3, backoff=0.5, exceptions=(Exception,)):
    """Decorator factory: retry the wrapped function on failure.

    Args:
        max_attempts: total number of calls allowed (must be >= 1).
        backoff: base delay in seconds; delay before retry i is backoff * 2**i.
        exceptions: exception types that trigger a retry; others propagate.

    Raises:
        ValueError: if max_attempts < 1. (Previously that case fell through
            the loop and hit ``raise None`` — a confusing TypeError at call
            time; now it fails fast at decoration time.)
    """
    if max_attempts < 1:
        raise ValueError("max_attempts must be >= 1")

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            last_error = None
            for attempt in range(max_attempts):
                try:
                    return func(*args, **kwargs)
                except exceptions as e:
                    last_error = e
                    if attempt < max_attempts - 1:
                        # Exponential backoff; no sleep after the final failure.
                        time.sleep(backoff * (2 ** attempt))
            raise last_error
        return wrapper
    return decorator

def cache_result(ttl: float = 300.0):
    """Decorator factory: memoize results in-process for *ttl* seconds.

    The cache key is the positional args plus sorted keyword args, so every
    argument must be hashable. The wrapper exposes ``cache_clear()``.
    """
    def decorator(func):
        store: dict = {}

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            cache_key = (args, tuple(sorted(kwargs.items())))
            hit = store.get(cache_key)
            # Entries are (result, timestamp) tuples, so a present entry is
            # never None; serve it while still within the TTL window.
            if hit is not None and time.time() - hit[1] < ttl:
                return hit[0]
            fresh = func(*args, **kwargs)
            store[cache_key] = (fresh, time.time())
            return fresh

        wrapper.cache_clear = store.clear
        return wrapper
    return decorator

# Usage: stack decorators. They apply bottom-up, so at call time the order is
# log_execution → retry → cache_result → fetch_user_profile.
@log_execution(level=logging.INFO)
@retry(max_attempts=3, exceptions=(ConnectionError, TimeoutError))
@cache_result(ttl=60)
def fetch_user_profile(user_id: str) -> dict:
    """Fetch a user's profile from the API (api_client is defined elsewhere)."""
    return api_client.get(f"/users/{user_id}")

Decorator vs Inheritance vs Composition#

1
2
3
4
5
6
7
8
9
10
11
12
13
14
Inheritance: "is-a" — LoggingDatabase IS-A Database
  Problem: deep hierarchies, cannot combine CachingLoggingDatabase
           without creating a new class for each combination

Decorator: "wraps" — LoggingDecorator wraps any DataSource
  Benefit: any combination works — mix and stack at runtime
  Benefit: each decorator has a single responsibility
  Tradeoff: more objects, harder to debug deep stacks

When to use the decorator pattern:
  - You need many orthogonal behaviors (logging, caching, retry, auth)
  - You want to combine behaviors dynamically at runtime
  - You cannot (or don't want to) modify the original class
  - For function-level decoration, prefer Python's @-decorator syntax with functools.wraps

Conclusion#

The decorator pattern is one of the most widely-used patterns in Python — Python’s built-in decorator syntax (@) is syntactic sugar for the same concept. Object-level decorators are powerful when you have a stable interface and multiple independent behaviors to add: caching, logging, retry, validation, and circuit breaking can all be implemented as decorators and composed freely. The key is that each decorator wraps exactly one component and handles exactly one concern, keeping each piece simple and independently testable.

Contents