vllm.compilation.cuda_graph

logger module-attribute

logger = init_logger(__name__)

CUDAGraphEntry dataclass

Source code in vllm/compilation/cuda_graph.py
@dataclasses.dataclass
class CUDAGraphEntry:
    batch_descriptor: BatchDescriptor
    cudagraph: Optional[torch.cuda.CUDAGraph] = None
    output: Optional[Any] = None

    # for cudagraph debugging, track the input addresses
    # during capture, and check if they are the same during replay
    input_addresses: Optional[list[int]] = None

batch_descriptor instance-attribute

batch_descriptor: BatchDescriptor

cudagraph class-attribute instance-attribute

cudagraph: Optional[CUDAGraph] = None

input_addresses class-attribute instance-attribute

input_addresses: Optional[list[int]] = None

output class-attribute instance-attribute

output: Optional[Any] = None

__init__

__init__(
    batch_descriptor: BatchDescriptor,
    cudagraph: Optional[CUDAGraph] = None,
    output: Optional[Any] = None,
    input_addresses: Optional[list[int]] = None,
) -> None
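
CUDAGraphEntry is essentially a per-shape cache slot: the wrapper keys entries by BatchDescriptor and fills in cudagraph, output, and (in debug mode) input_addresses lazily on first capture. Below is a minimal sketch of that keying; the BatchDescriptor import path and its field names (num_tokens, uniform_decode) are assumptions that may differ across vLLM versions.

from vllm.compilation.cuda_graph import CUDAGraphEntry
from vllm.forward_context import BatchDescriptor  # assumed import path

# Entries are keyed by batch descriptor; graph/output are filled in on capture.
entries: dict[BatchDescriptor, CUDAGraphEntry] = {}

key = BatchDescriptor(num_tokens=8, uniform_decode=True)  # assumed field names
if key not in entries:
    entries[key] = CUDAGraphEntry(batch_descriptor=key)

entry = entries[key]
assert entry.cudagraph is None  # not captured yet; CUDAGraphWrapper.__call__ fills it in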

CUDAGraphOptions dataclass

Source code in vllm/compilation/cuda_graph.py
@dataclasses.dataclass
class CUDAGraphOptions:
    debug_log_enable: bool = True
    gc_disable: bool = False
    weak_ref_output: bool = True

debug_log_enable class-attribute instance-attribute

debug_log_enable: bool = True

gc_disable class-attribute instance-attribute

gc_disable: bool = False

weak_ref_output class-attribute instance-attribute

weak_ref_output: bool = True

__init__

__init__(
    debug_log_enable: bool = True,
    gc_disable: bool = False,
    weak_ref_output: bool = True,
) -> None
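
The three flags correspond to the capture-time behavior described in the comments of CUDAGraphWrapper.__call__ below: log the capture only once, run gc/empty_cache only around the first capture, and weak-ref only the output of the last piece. The following is a hedged sketch of per-piece options for piecewise mode that follows those comments; it is not the exact construction vLLM uses, and options_for_piece is a hypothetical helper.

from vllm.compilation.cuda_graph import CUDAGraphOptions

def options_for_piece(index: int, num_pieces: int) -> CUDAGraphOptions:
    # Illustrative only: mirrors the intent described in the source comments.
    is_first = index == 0
    is_last = index == num_pieces - 1
    return CUDAGraphOptions(
        debug_log_enable=is_first,  # log capture for the first subgraph only
        gc_disable=not is_first,    # skip gc.collect/empty_cache after the first piece
        weak_ref_output=is_last,    # only the last piece's output is safe to weak-ref
    )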

CUDAGraphWrapper

Wraps a runnable to add CUDA graph capture and replay ability, and provides attribute access to the underlying runnable via __getattr__.

The workflow of this wrapper in cudagraph dispatching is as follows:

1. At initialization, a runtime mode is assigned to the wrapper (FULL or PIECEWISE).
2. At runtime, the wrapper receives a runtime_mode and a batch_descriptor (key) from the forward context and blindly trusts them for cudagraph dispatching.
3. If runtime_mode is NONE or does not match the mode of the wrapper, just call the runnable directly.
4. Otherwise, i.e. when the runtime_mode matches the mode of the wrapper, the wrapper performs cudagraph capture (if the key does not exist, create a new entry and cache it) or replay (if the key exists in the cache).

Note: CUDAGraphWrapper does not store persistent buffers or copy runtime inputs into such buffers for replay; we assume that is handled outside the wrapper. This is because we make no assumptions about the dynamic shape (batch size) of the runtime inputs, as a trade-off for staying orthogonal to the compilation logic. Nevertheless, input addresses are recorded during capture and checked for consistency during replay when VLLM_LOGGING_LEVEL == "DEBUG".
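
A rough end-to-end usage sketch follows. The import paths, the set_forward_context keyword arguments, and the default-constructed VllmConfig are assumptions that may differ across vLLM versions; model and vllm_config are placeholders. The caller keeps the inputs in a persistent buffer so their addresses stay stable across replays, as required by the note above.

import torch
from vllm.compilation.cuda_graph import CUDAGraphWrapper
from vllm.config import CUDAGraphMode, VllmConfig          # assumed import path
from vllm.forward_context import BatchDescriptor, set_forward_context

vllm_config = VllmConfig()                      # placeholder config for the sketch
model = torch.nn.Linear(16, 16).cuda()          # stands in for the real runnable
wrapper = CUDAGraphWrapper(model, vllm_config, runtime_mode=CUDAGraphMode.FULL)

x = torch.zeros(8, 16, device="cuda")           # persistent input buffer (stable address)
model(x)                                        # warm up kernels before capture
desc = BatchDescriptor(num_tokens=8)            # assumed field name

# Assumed set_forward_context signature; it populates the forward context the
# wrapper reads (cudagraph_runtime_mode and batch_descriptor).
with set_forward_context(attn_metadata=None, vllm_config=vllm_config,
                         cudagraph_runtime_mode=CUDAGraphMode.FULL,
                         batch_descriptor=desc):
    out = wrapper(x)        # first call for this descriptor: captures the graph

x.copy_(torch.randn(8, 16, device="cuda"))      # new data, same buffer/address
with set_forward_context(attn_metadata=None, vllm_config=vllm_config,
                         cudagraph_runtime_mode=CUDAGraphMode.FULL,
                         batch_descriptor=desc):
    out = wrapper(x)        # same descriptor and addresses: replays the graph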

Source code in vllm/compilation/cuda_graph.py
class CUDAGraphWrapper:
    """Wraps a runnable to add CUDA graph capturing and replaying ability. And
    provide attribute access to the underlying `runnable` via `__getattr__`.

    The workflow of this wrapper in the cudagraph dispatching is as follows:
    1. At initialization, a runtime mode is assigned to the wrapper (FULL or
    PIECEWISE). 
    2. At runtime, the wrapper receives a runtime_mode and a 
    batch_descriptor(key) from the forward context and blindly trust them
    for cudagraph dispatching. 
    3. If runtime_mode is NONE or runtime_mode does not match the mode of the
    wrapper, just call the runnable directly.
    4. Otherwise, i.e., the runtime_mode matches the mode of the wrapper,
    the wrapper will perform cudagraph capture(if key does not exist, create
    a new entry and cache it) or replay (if key exists in the cache).

    Note: CUDAGraphWrapper does not store persistent buffers or copy any
    runtime inputs into that buffers for replay. We assume implementing them
    is done outside of the wrapper. That is because we do not make any 
    assumption on the dynamic shape (batch size) of the runtime inputs, as a
    trade-off for staying orthogonal to compilation logic. Nevertheless, 
    tracing and checking the input addresses to be consistent during replay is
    guaranteed when VLLM_LOGGING_LEVEL == "DEBUG".
    """

    def __init__(self,
                 runnable: Callable,
                 vllm_config: VllmConfig,
                 runtime_mode: CUDAGraphMode,
                 cudagraph_options: Optional[CUDAGraphOptions] = None):
        self.runnable = runnable
        self.vllm_config = vllm_config
        self.runtime_mode = runtime_mode
        self.compilation_config = vllm_config.compilation_config

        self.first_run_finished = False
        self.is_debugging_mode = envs.VLLM_LOGGING_LEVEL == "DEBUG"

        # assert runtime_mode is not NONE(no cudagraph), otherwise, we don't
        # need to initialize a CUDAGraphWrapper.
        assert self.runtime_mode != CUDAGraphMode.NONE
        # TODO: in the future, if we want to use multiple
        # streams, it might not be safe to share a global pool.
        # only investigate this when we use multiple streams
        self.graph_pool = current_platform.get_global_graph_pool()

        if cudagraph_options is None:
            cudagraph_options = CUDAGraphOptions()
        self.cudagraph_options = cudagraph_options
        # the entries for different batch descriptors that we need to capture
        # cudagraphs for.
        self.concrete_cudagraph_entries: dict[BatchDescriptor, CUDAGraphEntry]\
                                                                        = {}

    def __getattr__(self, key: str):
        # allow accessing the attributes of the runnable.
        if hasattr(self.runnable, key):
            return getattr(self.runnable, key)
        raise AttributeError(f"Attribute {key} does not exist in the "
                             f"runnable of cudagraph wrapper: {self.runnable}")

    def unwrap(self) -> Callable:
        # in case we need to access the original runnable.
        return self.runnable

    def __call__(self, *args, **kwargs):
        forward_context = get_forward_context()
        batch_descriptor = forward_context.batch_descriptor
        cudagraph_runtime_mode = forward_context.cudagraph_runtime_mode

        if cudagraph_runtime_mode == CUDAGraphMode.NONE or \
                            cudagraph_runtime_mode != self.runtime_mode:
            # CUDAGraphMode.NONE could mean the profile run, a warmup run, or
            # running without cudagraphs.
            # We do not trigger capture/replay if the runtime mode does not
            # match. This enables proper dispatching to the correct
            # CUDAGraphWrapper when nesting multiple instances with different
            # runtime modes.
            return self.runnable(*args, **kwargs)

        if batch_descriptor not in self.concrete_cudagraph_entries:
            # create a new entry for this batch descriptor
            self.concrete_cudagraph_entries[batch_descriptor] = \
                CUDAGraphEntry(batch_descriptor=batch_descriptor)

        entry = self.concrete_cudagraph_entries[batch_descriptor]

        if entry.cudagraph is None:
            if self.cudagraph_options.debug_log_enable:
                # Since we capture cudagraph for many different shapes and
                # capturing is fast, we don't need to log it for every
                # shape. E.g. we only log it for the first subgraph in
                # piecewise mode.
                logger.debug("Capturing a cudagraph on (%s,%s)",
                             self.runtime_mode.name, entry.batch_descriptor)
            # validate that cudagraph capturing is legal at this point.
            validate_cudagraph_capturing_enabled()

            input_addresses = [
                x.data_ptr() for x in args if isinstance(x, torch.Tensor)
            ]
            entry.input_addresses = input_addresses
            cudagraph = torch.cuda.CUDAGraph()

            with ExitStack() as stack:
                if self.cudagraph_options.gc_disable:
                    # during every model forward for piecewise cudagraph
                    # mode, we will capture many pieces of cudagraphs
                    # (roughly one per layer). running gc again and again
                    # across layers will make the cudagraph capture very slow.
                    # therefore, we only run gc for the first graph,
                    # and disable gc for the rest of the graphs.
                    stack.enter_context(patch("gc.collect", lambda: None))
                    stack.enter_context(
                        patch("torch.cuda.empty_cache", lambda: None))

                # mind-exploding: carefully manage the reference and memory.
                with torch.cuda.graph(cudagraph, pool=self.graph_pool):
                    # `output` is managed by pytorch's cudagraph pool
                    output = self.runnable(*args, **kwargs)
                    if self.cudagraph_options.weak_ref_output:
                        # by converting it to weak ref,
                        # the original `output` will immediately be released
                        # to save memory. It is only safe to do this for
                        # the last graph in piecewise cudagraph mode, because
                        # the output of the last graph will not be used by
                        # any other cuda graph.
                        output = weak_ref_tensors(output)

            # here we always use weak ref for the output
            # to save memory
            entry.output = weak_ref_tensors(output)
            entry.cudagraph = cudagraph

            compilation_counter.num_cudagraph_captured += 1

            # important: we need to return the output, rather than
            # the weak ref of the output, so that pytorch can correctly
            # manage the memory during cuda graph capture
            return output

        if self.is_debugging_mode:
            # check if the input addresses are the same
            new_input_addresses = [
                x.data_ptr() for x in args if isinstance(x, torch.Tensor)
            ]
            assert new_input_addresses == entry.input_addresses, (
                f"Input addresses for cudagraphs are different "
                f"during replay. Expected {entry.input_addresses}, "
                f"got {new_input_addresses}")

        entry.cudagraph.replay()
        return entry.output

compilation_config instance-attribute

compilation_config = compilation_config

concrete_cudagraph_entries instance-attribute

concrete_cudagraph_entries: dict[
    BatchDescriptor, CUDAGraphEntry
] = {}

cudagraph_options instance-attribute

cudagraph_options = cudagraph_options

first_run_finished instance-attribute

first_run_finished = False

graph_pool instance-attribute

graph_pool = get_global_graph_pool()

is_debugging_mode instance-attribute

is_debugging_mode = VLLM_LOGGING_LEVEL == 'DEBUG'

runnable instance-attribute

runnable = runnable

runtime_mode instance-attribute

runtime_mode = runtime_mode

vllm_config instance-attribute

vllm_config = vllm_config

__call__

__call__(*args, **kwargs)
Source code in vllm/compilation/cuda_graph.py
def __call__(self, *args, **kwargs):
    forward_context = get_forward_context()
    batch_descriptor = forward_context.batch_descriptor
    cudagraph_runtime_mode = forward_context.cudagraph_runtime_mode

    if cudagraph_runtime_mode == CUDAGraphMode.NONE or \
                        cudagraph_runtime_mode != self.runtime_mode:
        # CUDAGraphMode.NONE could mean the profile run, a warmup run, or
        # running without cudagraphs.
        # We do not trigger capture/replay if the runtime mode does not
        # match. This enables proper dispatching to the correct
        # CUDAGraphWrapper when nesting multiple instances with different
        # runtime modes.
        return self.runnable(*args, **kwargs)

    if batch_descriptor not in self.concrete_cudagraph_entries:
        # create a new entry for this batch descriptor
        self.concrete_cudagraph_entries[batch_descriptor] = \
            CUDAGraphEntry(batch_descriptor=batch_descriptor)

    entry = self.concrete_cudagraph_entries[batch_descriptor]

    if entry.cudagraph is None:
        if self.cudagraph_options.debug_log_enable:
            # Since we capture cudagraph for many different shapes and
            # capturing is fast, we don't need to log it for every
            # shape. E.g. we only log it for the first subgraph in
            # piecewise mode.
            logger.debug("Capturing a cudagraph on (%s,%s)",
                         self.runtime_mode.name, entry.batch_descriptor)
        # validate that cudagraph capturing is legal at this point.
        validate_cudagraph_capturing_enabled()

        input_addresses = [
            x.data_ptr() for x in args if isinstance(x, torch.Tensor)
        ]
        entry.input_addresses = input_addresses
        cudagraph = torch.cuda.CUDAGraph()

        with ExitStack() as stack:
            if self.cudagraph_options.gc_disable:
                # during every model forward for piecewise cudagraph
                # mode, we will capture many pieces of cudagraphs
                # (roughly one per layer). running gc again and again
                # across layers will make the cudagraph capture very slow.
                # therefore, we only run gc for the first graph,
                # and disable gc for the rest of the graphs.
                stack.enter_context(patch("gc.collect", lambda: None))
                stack.enter_context(
                    patch("torch.cuda.empty_cache", lambda: None))

            # mind-exploding: carefully manage the reference and memory.
            with torch.cuda.graph(cudagraph, pool=self.graph_pool):
                # `output` is managed by pytorch's cudagraph pool
                output = self.runnable(*args, **kwargs)
                if self.cudagraph_options.weak_ref_output:
                    # by converting it to weak ref,
                    # the original `output` will immediately be released
                    # to save memory. It is only safe to do this for
                    # the last graph in piecewise cudagraph mode, because
                    # the output of the last graph will not be used by
                    # any other cuda graph.
                    output = weak_ref_tensors(output)

        # here we always use weak ref for the output
        # to save memory
        entry.output = weak_ref_tensors(output)
        entry.cudagraph = cudagraph

        compilation_counter.num_cudagraph_captured += 1

        # important: we need to return the output, rather than
        # the weak ref of the output, so that pytorch can correctly
        # manage the memory during cuda graph capture
        return output

    if self.is_debugging_mode:
        # check if the input addresses are the same
        new_input_addresses = [
            x.data_ptr() for x in args if isinstance(x, torch.Tensor)
        ]
        assert new_input_addresses == entry.input_addresses, (
            f"Input addresses for cudagraphs are different "
            f"during replay. Expected {entry.input_addresses}, "
            f"got {new_input_addresses}")

    entry.cudagraph.replay()
    return entry.output

__getattr__

__getattr__(key: str)
Source code in vllm/compilation/cuda_graph.py
def __getattr__(self, key: str):
    # allow accessing the attributes of the runnable.
    if hasattr(self.runnable, key):
        return getattr(self.runnable, key)
    raise AttributeError(f"Attribute {key} does not exist in the "
                         f"runnable of cudagraph wrapper: {self.runnable}")

__init__

__init__(
    runnable: Callable,
    vllm_config: VllmConfig,
    runtime_mode: CUDAGraphMode,
    cudagraph_options: Optional[CUDAGraphOptions] = None,
)
Source code in vllm/compilation/cuda_graph.py
def __init__(self,
             runnable: Callable,
             vllm_config: VllmConfig,
             runtime_mode: CUDAGraphMode,
             cudagraph_options: Optional[CUDAGraphOptions] = None):
    self.runnable = runnable
    self.vllm_config = vllm_config
    self.runtime_mode = runtime_mode
    self.compilation_config = vllm_config.compilation_config

    self.first_run_finished = False
    self.is_debugging_mode = envs.VLLM_LOGGING_LEVEL == "DEBUG"

    # assert runtime_mode is not NONE(no cudagraph), otherwise, we don't
    # need to initialize a CUDAGraphWrapper.
    assert self.runtime_mode != CUDAGraphMode.NONE
    # TODO: in the future, if we want to use multiple
    # streams, it might not be safe to share a global pool.
    # only investigate this when we use multiple streams
    self.graph_pool = current_platform.get_global_graph_pool()

    if cudagraph_options is None:
        cudagraph_options = CUDAGraphOptions()
    self.cudagraph_options = cudagraph_options
    # the entries for different batch descriptors that we need to capture
    # cudagraphs for.
    self.concrete_cudagraph_entries: dict[BatchDescriptor, CUDAGraphEntry]\
                                                                    = {}

unwrap

unwrap() -> Callable
Source code in vllm/compilation/cuda_graph.py
def unwrap(self) -> Callable:
    # in case we need to access the original runnable.
    return self.runnable
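
Both hooks keep the wrapper transparent to callers: attribute reads fall through to the wrapped runnable via __getattr__, and unwrap() returns it directly. A tiny sketch, reusing the hypothetical wrapper and model from the usage example earlier in this page:

weights = wrapper.weight          # forwarded to the underlying runnable (model.weight)
assert wrapper.unwrap() is model  # unwrap() hands back the original runnable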