
vllm.v1.attention.backends.mla.cutlass_mla

g_sm100_workspace module-attribute

g_sm100_workspace = SM100Workspace(128 * 1024 * 1024)
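
A single SM100Workspace is created at import time with a 128 MiB (128 * 1024 * 1024 bytes) uint8 buffer on the default CUDA device and is shared by every CutlassMLAImpl instance; ensure_size only ever grows it. A minimal sketch of reaching the shared buffer (illustrative only, mirroring the source on this page):

# Illustrative: the module-level workspace is reused, never re-allocated per layer.
buf = g_sm100_workspace.get_buf()   # torch.uint8 tensor on "cuda"
print(buf.numel())                  # 134217728 (128 MiB) at import time,
                                    # larger later if ensure_size() has grown it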

logger module-attribute

logger = init_logger(__name__)

CutlassMLABackend

Bases: MLACommonBackend

Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
class CutlassMLABackend(MLACommonBackend):

    @staticmethod
    def get_name() -> str:
        return "CUTLASS_MLA"

    @staticmethod
    def get_impl_cls() -> type["CutlassMLAImpl"]:
        return CutlassMLAImpl

    @staticmethod
    def get_builder_cls() -> type["CutlassMLAMetadataBuilder"]:
        return CutlassMLAMetadataBuilder

get_builder_cls staticmethod

get_builder_cls() -> type[CutlassMLAMetadataBuilder]
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
@staticmethod
def get_builder_cls() -> type["CutlassMLAMetadataBuilder"]:
    return CutlassMLAMetadataBuilder

get_impl_cls staticmethod

get_impl_cls() -> type[CutlassMLAImpl]
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
@staticmethod
def get_impl_cls() -> type["CutlassMLAImpl"]:
    return CutlassMLAImpl

get_name staticmethod

get_name() -> str
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
@staticmethod
def get_name() -> str:
    return "CUTLASS_MLA"

CutlassMLAImpl

Bases: MLACommonImpl[MLACommonMetadata]

Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
class CutlassMLAImpl(MLACommonImpl[MLACommonMetadata]):

    def __init__(
            self,
            num_heads: int,
            head_size: int,
            scale: float,
            num_kv_heads: int,
            alibi_slopes: Optional[list[float]],
            sliding_window: Optional[int],
            kv_cache_dtype: str,
            logits_soft_cap: Optional[float],
            attn_type: str,
            kv_sharing_target_layer_name: Optional[str],
            # MLA Specific Arguments
            **mla_args) -> None:
        super().__init__(num_heads, head_size, scale, num_kv_heads,
                         alibi_slopes, sliding_window, kv_cache_dtype,
                         logits_soft_cap, attn_type,
                         kv_sharing_target_layer_name, **mla_args)

        unsupported_features = [alibi_slopes, sliding_window, logits_soft_cap]
        if any(unsupported_features):
            raise NotImplementedError(
                "CutlassMLAImpl does not support one of the following: "
                "alibi_slopes, sliding_window, logits_soft_cap")

        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "CutlassMLAImpl")

        if is_quantized_kv_cache(self.kv_cache_dtype):
            raise NotImplementedError(
                "CutlassMLA V1 with FP8 KV cache not yet supported")

        self._use_old_cutlass_mla = False
        force_old_cutlass = os.environ.get("FORCE_OLD_CUTLASS_MLA", None)
        if force_old_cutlass:
            logger.warning_once("Forcing old cutlass mla kernel")
            self._use_old_cutlass_mla = True

        # TODO: Currently, num_kv_splits is limited to 16 to avoid hanging
        #       issues. In case the code hangs, use:
        #       FORCE_NUM_KV_SPLITS=1
        force_num_kv_splits = os.environ.get("FORCE_NUM_KV_SPLITS", None)
        if force_num_kv_splits:
            logger.warning_once("Forcing num_kv_splits to %d",
                                int(force_num_kv_splits))
            self._num_kv_splits = int(force_num_kv_splits)
        else:
            self._num_kv_splits = -1  # => Auto-detect

        # Share workspace buffer across all executions
        self._workspace = g_sm100_workspace

    def _sm100_cutlass_mla_decode(
        self,
        q_nope: torch.Tensor,
        q_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        seq_lens: torch.Tensor,
        page_table: torch.Tensor,
        workspace: torch.Tensor,
        sm_scale: float,
        num_kv_splits: int,
    ) -> torch.Tensor:
        assert (q_nope.ndim == 3
                ), f"q_nope must be a 3D tensor, but got {q_nope.ndim}"
        assert (
            q_pe.ndim == 3), f"q_pe must be a 3D tensor, but got {q_pe.ndim}"
        assert (
            kv_c_and_k_pe_cache.ndim == 3
        ), "kv_c_and_k_pe_cache must be a 3D tensor, but got {}".format(
            kv_c_and_k_pe_cache.ndim)

        B_q, H, D_q_nope = q_nope.shape
        B_q_2, H_2, D_q_pe = q_pe.shape
        assert (B_q == B_q_2) and (H == H_2)

        _, PAGE_SIZE, D_ckv = kv_c_and_k_pe_cache.shape

        D_latent = 512
        D_rope = 64
        assert D_q_nope == D_latent
        assert D_q_pe == D_rope
        assert D_ckv == D_latent + D_rope

        MAX_HEADS = 128
        assert H <= MAX_HEADS, f"H must be <= {MAX_HEADS}, but got {H}"
        if H < MAX_HEADS:
            q_nope_padded = q_nope.new_empty((B_q, MAX_HEADS, D_q_nope))
            q_nope_padded[:, :H] = q_nope
            q_nope = q_nope_padded

            q_pe_padded = q_pe.new_empty((B_q, MAX_HEADS, D_q_pe))
            q_pe_padded[:, :H] = q_pe
            q_pe = q_pe_padded

        assert len(page_table.shape) == 2
        B_block_table, block_num = page_table.shape
        assert B_block_table == B_q
        assert (block_num
                > 0), f"block num must be greater than 0, got {block_num}"
        assert block_num % (128 / PAGE_SIZE) == 0

        # TODO(kaixih@nvidia): support fp8
        assert q_nope.dtype in (
            torch.float16,
            torch.bfloat16,
        ), f"q_nope.dtype needs to be fp16 or bf16 but got {q_nope.dtype}."
        assert q_nope.dtype == q_pe.dtype == kv_c_and_k_pe_cache.dtype
        assert (
            seq_lens.dtype == torch.int32
        ), f"seq_lens.dtype needs to be int32 but got {seq_lens.dtype}."
        assert (
            page_table.dtype == torch.int32
        ), f"page_table.dtype needs to be int32 but got {page_table.dtype}."

        out = q_nope.new_empty((B_q, MAX_HEADS, D_latent))

        ops.sm100_cutlass_mla_decode(
            out,
            q_nope,
            q_pe,
            kv_c_and_k_pe_cache,
            seq_lens,
            page_table,
            workspace,
            sm_scale,
            num_kv_splits,
        )
        return out[:, :H].contiguous()

    def _sm100_forward_decode(
        self,
        q_nope: torch.Tensor,
        q_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: MLACommonMetadata,
    ) -> torch.Tensor:
        assert kv_c_and_k_pe_cache.numel() > 0
        assert attn_metadata.decode is not None

        if self.kv_cache_dtype.startswith("fp8"):
            raise NotImplementedError("FP8 Cutlass MLA not yet supported")

        # Adjust workspace size (if necessary)
        self._workspace.ensure_size(attn_metadata, self._num_kv_splits)

        # Run MLA
        # Clone q_nope and q_pe to make sure strides computation is correct.
        # TODO: Check if we really need it
        q_nope = q_nope.clone()
        q_pe = q_pe.clone()

        o = self._sm100_cutlass_mla_decode(q_nope, q_pe, kv_c_and_k_pe_cache,
                                           attn_metadata.decode.seq_lens,
                                           attn_metadata.decode.block_table,
                                           self._workspace.get_buf(),
                                           self.scale, self._num_kv_splits)

        return self._v_up_proj(o)

    # TODO: Currently we leave it here only for backup in case something is
    #       wrong with the new SM100 CUTLASS MLA kernel
    def _old_forward_decode(
        self,
        q_nope: torch.Tensor,
        q_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: MLACommonMetadata,
    ) -> torch.Tensor:
        assert kv_c_and_k_pe_cache.numel() > 0
        assert attn_metadata.decode is not None

        if self.kv_cache_dtype.startswith("fp8"):
            raise NotImplementedError("FP8 Cutlass MLA not yet supported")

        B = q_nope.shape[0]

        o = torch.empty((B, self.num_heads, self.kv_lora_rank),
                        dtype=q_nope.dtype,
                        device=q_nope.device)

        # Run MLA
        # Clone q_nope and q_pe to make sure strides computation is correct.
        q_nope = q_nope.clone()
        q_pe = q_pe.clone()

        ops.cutlass_mla_decode(o, q_nope, q_pe, kv_c_and_k_pe_cache,
                               attn_metadata.decode.seq_lens,
                               attn_metadata.decode.block_table, self.scale)

        return self._v_up_proj(o)

    def _forward_decode(
        self,
        q_nope: torch.Tensor,
        q_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: MLACommonMetadata,
        layer: AttentionLayer,
    ) -> torch.Tensor:
        if self._use_old_cutlass_mla:
            # TODO: Remove the old cutlass MLA kernel after more extensive
            #       testing
            return self._old_forward_decode(q_nope, q_pe, kv_c_and_k_pe_cache,
                                            attn_metadata)

        return self._sm100_forward_decode(q_nope, q_pe, kv_c_and_k_pe_cache,
                                          attn_metadata)

_num_kv_splits instance-attribute

_num_kv_splits = int(force_num_kv_splits)

_use_old_cutlass_mla instance-attribute

_use_old_cutlass_mla = False

_workspace instance-attribute

_workspace = g_sm100_workspace

__init__

__init__(
    num_heads: int,
    head_size: int,
    scale: float,
    num_kv_heads: int,
    alibi_slopes: Optional[list[float]],
    sliding_window: Optional[int],
    kv_cache_dtype: str,
    logits_soft_cap: Optional[float],
    attn_type: str,
    kv_sharing_target_layer_name: Optional[str],
    **mla_args,
) -> None
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        logits_soft_cap: Optional[float],
        attn_type: str,
        kv_sharing_target_layer_name: Optional[str],
        # MLA Specific Arguments
        **mla_args) -> None:
    super().__init__(num_heads, head_size, scale, num_kv_heads,
                     alibi_slopes, sliding_window, kv_cache_dtype,
                     logits_soft_cap, attn_type,
                     kv_sharing_target_layer_name, **mla_args)

    unsupported_features = [alibi_slopes, sliding_window, logits_soft_cap]
    if any(unsupported_features):
        raise NotImplementedError(
            "CutlassMLAImpl does not support one of the following: "
            "alibi_slopes, sliding_window, logits_soft_cap")

    if attn_type != AttentionType.DECODER:
        raise NotImplementedError("Encoder self-attention and "
                                  "encoder/decoder cross-attention "
                                  "are not implemented for "
                                  "CutlassMLAImpl")

    if is_quantized_kv_cache(self.kv_cache_dtype):
        raise NotImplementedError(
            "CutlassMLA V1 with FP8 KV cache not yet supported")

    self._use_old_cutlass_mla = False
    force_old_cutlass = os.environ.get("FORCE_OLD_CUTLASS_MLA", None)
    if force_old_cutlass:
        logger.warning_once("Forcing old cutlass mla kernel")
        self._use_old_cutlass_mla = True

    # TODO: Currently, num_kv_splits is limited to 16 to avoid hanging
    #       issues. In case the code hangs, use:
    #       FORCE_NUM_KV_SPLITS=1
    force_num_kv_splits = os.environ.get("FORCE_NUM_KV_SPLITS", None)
    if force_num_kv_splits:
        logger.warning_once("Forcing num_kv_splits to %d",
                            int(force_num_kv_splits))
        self._num_kv_splits = int(force_num_kv_splits)
    else:
        self._num_kv_splits = -1  # => Auto-detect

    # Share workspace buffer across all executions
    self._workspace = g_sm100_workspace
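
Two environment variables read in __init__ change the decode path. A sketch of setting them before vLLM starts (the values shown are examples, not defaults):

import os

# Fall back to the older cutlass_mla_decode kernel (kept only as a backup).
os.environ["FORCE_OLD_CUTLASS_MLA"] = "1"
# Pin num_kv_splits instead of auto-detecting (-1); the TODO above suggests
# FORCE_NUM_KV_SPLITS=1 if the kernel hangs.
os.environ["FORCE_NUM_KV_SPLITS"] = "1"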

_forward_decode

_forward_decode(
    q_nope: Tensor,
    q_pe: Tensor,
    kv_c_and_k_pe_cache: Tensor,
    attn_metadata: MLACommonMetadata,
    layer: AttentionLayer,
) -> Tensor
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
def _forward_decode(
    self,
    q_nope: torch.Tensor,
    q_pe: torch.Tensor,
    kv_c_and_k_pe_cache: torch.Tensor,
    attn_metadata: MLACommonMetadata,
    layer: AttentionLayer,
) -> torch.Tensor:
    if self._use_old_cutlass_mla:
        # TODO: Remove the old cutlass MLA kernel after more extensive
        #       testing
        return self._old_forward_decode(q_nope, q_pe, kv_c_and_k_pe_cache,
                                        attn_metadata)

    return self._sm100_forward_decode(q_nope, q_pe, kv_c_and_k_pe_cache,
                                      attn_metadata)

_old_forward_decode

_old_forward_decode(
    q_nope: Tensor,
    q_pe: Tensor,
    kv_c_and_k_pe_cache: Tensor,
    attn_metadata: MLACommonMetadata,
) -> Tensor
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
def _old_forward_decode(
    self,
    q_nope: torch.Tensor,
    q_pe: torch.Tensor,
    kv_c_and_k_pe_cache: torch.Tensor,
    attn_metadata: MLACommonMetadata,
) -> torch.Tensor:
    assert kv_c_and_k_pe_cache.numel() > 0
    assert attn_metadata.decode is not None

    if self.kv_cache_dtype.startswith("fp8"):
        raise NotImplementedError("FP8 Cutlass MLA not yet supported")

    B = q_nope.shape[0]

    o = torch.empty((B, self.num_heads, self.kv_lora_rank),
                    dtype=q_nope.dtype,
                    device=q_nope.device)

    # Run MLA
    # Clone q_nope and q_pe to make sure strides computation is correct.
    q_nope = q_nope.clone()
    q_pe = q_pe.clone()

    ops.cutlass_mla_decode(o, q_nope, q_pe, kv_c_and_k_pe_cache,
                           attn_metadata.decode.seq_lens,
                           attn_metadata.decode.block_table, self.scale)

    return self._v_up_proj(o)

_sm100_cutlass_mla_decode

_sm100_cutlass_mla_decode(
    q_nope: Tensor,
    q_pe: Tensor,
    kv_c_and_k_pe_cache: Tensor,
    seq_lens: Tensor,
    page_table: Tensor,
    workspace: Tensor,
    sm_scale: float,
    num_kv_splits: int,
) -> Tensor
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
def _sm100_cutlass_mla_decode(
    self,
    q_nope: torch.Tensor,
    q_pe: torch.Tensor,
    kv_c_and_k_pe_cache: torch.Tensor,
    seq_lens: torch.Tensor,
    page_table: torch.Tensor,
    workspace: torch.Tensor,
    sm_scale: float,
    num_kv_splits: int,
) -> torch.Tensor:
    assert (q_nope.ndim == 3
            ), f"q_nope must be a 3D tensor, but got {q_nope.ndim}"
    assert (
        q_pe.ndim == 3), f"q_pe must be a 3D tensor, but got {q_pe.ndim}"
    assert (
        kv_c_and_k_pe_cache.ndim == 3
    ), "kv_c_and_k_pe_cache must be a 3D tensor, but got {}".format(
        kv_c_and_k_pe_cache.ndim)

    B_q, H, D_q_nope = q_nope.shape
    B_q_2, H_2, D_q_pe = q_pe.shape
    assert (B_q == B_q_2) and (H == H_2)

    _, PAGE_SIZE, D_ckv = kv_c_and_k_pe_cache.shape

    D_latent = 512
    D_rope = 64
    assert D_q_nope == D_latent
    assert D_q_pe == D_rope
    assert D_ckv == D_latent + D_rope

    MAX_HEADS = 128
    assert H <= MAX_HEADS, f"H must be <= {MAX_HEADS}, but got {H}"
    if H < MAX_HEADS:
        q_nope_padded = q_nope.new_empty((B_q, MAX_HEADS, D_q_nope))
        q_nope_padded[:, :H] = q_nope
        q_nope = q_nope_padded

        q_pe_padded = q_pe.new_empty((B_q, MAX_HEADS, D_q_pe))
        q_pe_padded[:, :H] = q_pe
        q_pe = q_pe_padded

    assert len(page_table.shape) == 2
    B_block_table, block_num = page_table.shape
    assert B_block_table == B_q
    assert (block_num
            > 0), f"block num must be greater than 0, got {block_num}"
    assert block_num % (128 / PAGE_SIZE) == 0

    # TODO(kaixih@nvidia): support fp8
    assert q_nope.dtype in (
        torch.float16,
        torch.bfloat16,
    ), f"q_nope.dtype needs to be fp16 or bf16 but got {q_nope.dtype}."
    assert q_nope.dtype == q_pe.dtype == kv_c_and_k_pe_cache.dtype
    assert (
        seq_lens.dtype == torch.int32
    ), f"seq_lens.dtype needs to be int32 but got {seq_lens.dtype}."
    assert (
        page_table.dtype == torch.int32
    ), f"page_table.dtype needs to be int32 but got {page_table.dtype}."

    out = q_nope.new_empty((B_q, MAX_HEADS, D_latent))

    ops.sm100_cutlass_mla_decode(
        out,
        q_nope,
        q_pe,
        kv_c_and_k_pe_cache,
        seq_lens,
        page_table,
        workspace,
        sm_scale,
        num_kv_splits,
    )
    return out[:, :H].contiguous()
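
The assertions above pin down the expected layout: q_nope is [B, H, 512] (D_latent), q_pe is [B, H, 64] (D_rope), the cache is [num_blocks, PAGE_SIZE, 576], queries are fp16/bf16, and seq_lens/page_table are int32; heads are padded up to MAX_HEADS = 128 for the kernel and the output is sliced back to H. A hypothetical shape sketch (sizes chosen so the block_num % (128 / PAGE_SIZE) check passes):

import torch

B, H, PAGE_SIZE, NUM_BLOCKS = 4, 16, 128, 8
q_nope = torch.zeros(B, H, 512, dtype=torch.bfloat16, device="cuda")   # D_latent
q_pe = torch.zeros(B, H, 64, dtype=torch.bfloat16, device="cuda")      # D_rope
kv_c_and_k_pe_cache = torch.zeros(NUM_BLOCKS, PAGE_SIZE, 512 + 64,
                                  dtype=torch.bfloat16, device="cuda")
seq_lens = torch.ones(B, dtype=torch.int32, device="cuda")
page_table = torch.zeros(B, NUM_BLOCKS, dtype=torch.int32, device="cuda")
# The kernel writes a [B, 128, 512] buffer; the method returns out[:, :H].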

_sm100_forward_decode

_sm100_forward_decode(
    q_nope: Tensor,
    q_pe: Tensor,
    kv_c_and_k_pe_cache: Tensor,
    attn_metadata: MLACommonMetadata,
) -> Tensor
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
def _sm100_forward_decode(
    self,
    q_nope: torch.Tensor,
    q_pe: torch.Tensor,
    kv_c_and_k_pe_cache: torch.Tensor,
    attn_metadata: MLACommonMetadata,
) -> torch.Tensor:
    assert kv_c_and_k_pe_cache.numel() > 0
    assert attn_metadata.decode is not None

    if self.kv_cache_dtype.startswith("fp8"):
        raise NotImplementedError("FP8 Cutlass MLA not yet supported")

    # Adjust workspace size (if necessary)
    self._workspace.ensure_size(attn_metadata, self._num_kv_splits)

    # Run MLA
    # Clone q_nope and q_pe to make sure strides computation is correct.
    # TODO: Check if we really need it
    q_nope = q_nope.clone()
    q_pe = q_pe.clone()

    o = self._sm100_cutlass_mla_decode(q_nope, q_pe, kv_c_and_k_pe_cache,
                                       attn_metadata.decode.seq_lens,
                                       attn_metadata.decode.block_table,
                                       self._workspace.get_buf(),
                                       self.scale, self._num_kv_splits)

    return self._v_up_proj(o)

CutlassMLAMetadataBuilder

Bases: MLACommonMetadataBuilder[MLACommonMetadata]

Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
class CutlassMLAMetadataBuilder(MLACommonMetadataBuilder[MLACommonMetadata]):
    # enable full CUDA Graph support for decode-only capture
    cudagraph_support: ClassVar[
        AttentionCGSupport] = AttentionCGSupport.UNIFORM_SINGLE_TOKEN_DECODE

cudagraph_support class-attribute

cudagraph_support: ClassVar[AttentionCGSupport] = (
    UNIFORM_SINGLE_TOKEN_DECODE
)

SM100Workspace

Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
class SM100Workspace:

    def __init__(self, initial_workspace_size):
        self._workspace_buf = torch.empty(initial_workspace_size,
                                          device="cuda",
                                          dtype=torch.uint8)

        self._block_size = 128  # Forced to 128

        # Pre-compute sm_count to avoid recomputing it. Use device 0 as a proxy
        # (assumes all devices are similar)
        properties = torch.cuda.get_device_properties(torch.device("cuda:0"))
        self._sm_count = properties.multi_processor_count

    def get_buf(self):
        return self._workspace_buf

    def ensure_size(self, attn_metadata: MLACommonMetadata,
                    num_kv_splits: int):
        batch_size = attn_metadata.num_reqs
        max_seq_len = attn_metadata.max_query_len

        workspace_size = ops.sm100_cutlass_mla_get_workspace_size(
            max_seq_len * self._block_size,
            batch_size,
            self._sm_count,
            num_kv_splits=num_kv_splits)

        if self._workspace_buf.shape[0] < workspace_size:
            self._workspace_buf.resize_(workspace_size)

_block_size instance-attribute

_block_size = 128

_sm_count instance-attribute

_sm_count = multi_processor_count

_workspace_buf instance-attribute

_workspace_buf = empty(
    initial_workspace_size, device="cuda", dtype=uint8
)

__init__

__init__(initial_workspace_size)
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
def __init__(self, initial_workspace_size):
    self._workspace_buf = torch.empty(initial_workspace_size,
                                      device="cuda",
                                      dtype=torch.uint8)

    self._block_size = 128  # Forced to 128

    # Pre-compute sm_count to avoid recomputing it. Use device 0 as a proxy
    # (assumes all devices are similar)
    properties = torch.cuda.get_device_properties(torch.device("cuda:0"))
    self._sm_count = properties.multi_processor_count

ensure_size

ensure_size(
    attn_metadata: MLACommonMetadata, num_kv_splits: int
)
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
def ensure_size(self, attn_metadata: MLACommonMetadata,
                num_kv_splits: int):
    batch_size = attn_metadata.num_reqs
    max_seq_len = attn_metadata.max_query_len

    workspace_size = ops.sm100_cutlass_mla_get_workspace_size(
        max_seq_len * self._block_size,
        batch_size,
        self._sm_count,
        num_kv_splits=num_kv_splits)

    if self._workspace_buf.shape[0] < workspace_size:
        self._workspace_buf.resize_(workspace_size)
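
ensure_size queries the kernel's required workspace size for the current batch (bounded by max_query_len * _block_size and the cached SM count) and only ever grows the buffer, never shrinks it. A generic sketch of the grow-only pattern (plain torch, not the vLLM custom op):

import torch

buf = torch.empty(128 * 1024 * 1024, device="cuda", dtype=torch.uint8)
needed = 200 * 1024 * 1024   # hypothetical value from the workspace-size query
if buf.shape[0] < needed:
    buf.resize_(needed)      # in-place growth; contents are scratch, so no copy matters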

get_buf

get_buf()
Source code in vllm/v1/attention/backends/mla/cutlass_mla.py
def get_buf(self):
    return self._workspace_buf