
vllm.v1.attention.backends.mamba2_attn

Mamba2AttentionBackend

Bases: AttentionBackend

Source code in vllm/v1/attention/backends/mamba2_attn.py
class Mamba2AttentionBackend(AttentionBackend):

    @staticmethod
    def get_builder_cls() -> type["Mamba2AttentionMetadataBuilder"]:
        return Mamba2AttentionMetadataBuilder

get_builder_cls staticmethod

get_builder_cls() -> type[Mamba2AttentionMetadataBuilder]
Source code in vllm/v1/attention/backends/mamba2_attn.py
@staticmethod
def get_builder_cls() -> type["Mamba2AttentionMetadataBuilder"]:
    return Mamba2AttentionMetadataBuilder
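
Example (a minimal sketch using only the documented API): the backend's only job here is to point at its metadata builder class; callers fetch the builder through this static method and instantiate it themselves.

from vllm.v1.attention.backends.mamba2_attn import (
    Mamba2AttentionBackend, Mamba2AttentionMetadataBuilder)

# The backend class is not instantiated; it only reports which builder to use.
builder_cls = Mamba2AttentionBackend.get_builder_cls()
assert builder_cls is Mamba2AttentionMetadataBuilder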

Mamba2AttentionMetadata dataclass

Source code in vllm/v1/attention/backends/mamba2_attn.py
@dataclass
class Mamba2AttentionMetadata:
    num_prefills: int
    num_prefill_tokens: int
    num_decodes: int
    num_decode_tokens: int
    query_start_loc: torch.Tensor
    seq_lens: torch.Tensor

    prep_initial_states: bool
    chunk_size: int

    # The following tensors only contain prefill requests and will be None if
    # the batch has no prefill request.
    has_initial_states_p: Optional[torch.Tensor]
    seq_idx_p: Optional[torch.Tensor]
    chunk_indices_p: Optional[torch.Tensor]
    chunk_offsets_p: Optional[torch.Tensor]

    state_indices_tensor: torch.Tensor  # shape: [batch,]

    # The following attributes are for triton implementation of causal_conv1d
    nums_dict: Optional[dict] = None
    cu_seqlen: Optional[int] = None
    batch_ptr: Optional[torch.Tensor] = None
    token_chunk_offset_ptr: Optional[torch.Tensor] = None

batch_ptr class-attribute instance-attribute

batch_ptr: Optional[Tensor] = None

chunk_indices_p instance-attribute

chunk_indices_p: Optional[Tensor]

chunk_offsets_p instance-attribute

chunk_offsets_p: Optional[Tensor]

chunk_size instance-attribute

chunk_size: int

cu_seqlen class-attribute instance-attribute

cu_seqlen: Optional[int] = None

has_initial_states_p instance-attribute

has_initial_states_p: Optional[Tensor]

num_decode_tokens instance-attribute

num_decode_tokens: int

num_decodes instance-attribute

num_decodes: int

num_prefill_tokens instance-attribute

num_prefill_tokens: int

num_prefills instance-attribute

num_prefills: int

nums_dict class-attribute instance-attribute

nums_dict: Optional[dict] = None

prep_initial_states instance-attribute

prep_initial_states: bool

query_start_loc instance-attribute

query_start_loc: Tensor

seq_idx_p instance-attribute

seq_idx_p: Optional[Tensor]

seq_lens instance-attribute

seq_lens: Tensor

state_indices_tensor instance-attribute

state_indices_tensor: Tensor

token_chunk_offset_ptr class-attribute instance-attribute

token_chunk_offset_ptr: Optional[Tensor] = None

__init__

__init__(
    num_prefills: int,
    num_prefill_tokens: int,
    num_decodes: int,
    num_decode_tokens: int,
    query_start_loc: Tensor,
    seq_lens: Tensor,
    prep_initial_states: bool,
    chunk_size: int,
    has_initial_states_p: Optional[Tensor],
    seq_idx_p: Optional[Tensor],
    chunk_indices_p: Optional[Tensor],
    chunk_offsets_p: Optional[Tensor],
    state_indices_tensor: Tensor,
    nums_dict: Optional[dict] = None,
    cu_seqlen: Optional[int] = None,
    batch_ptr: Optional[Tensor] = None,
    token_chunk_offset_ptr: Optional[Tensor] = None,
) -> None
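
Example (illustrative only; all tensor values and the chunk size are made up): constructing the metadata for a decode-only batch of three requests, where every prefill-only field is None as described in the dataclass comments.

import torch

from vllm.v1.attention.backends.mamba2_attn import Mamba2AttentionMetadata

meta = Mamba2AttentionMetadata(
    num_prefills=0,
    num_prefill_tokens=0,
    num_decodes=3,
    num_decode_tokens=3,
    query_start_loc=torch.tensor([0, 1, 2, 3], dtype=torch.int32),
    seq_lens=torch.tensor([17, 5, 42], dtype=torch.int32),
    prep_initial_states=False,
    chunk_size=256,  # arbitrary illustrative value
    has_initial_states_p=None,
    seq_idx_p=None,
    chunk_indices_p=None,
    chunk_offsets_p=None,
    state_indices_tensor=torch.tensor([0, 1, 2], dtype=torch.int32),
)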

Mamba2AttentionMetadataBuilder

Bases: BaseMambaAttentionMetadataBuilder[Mamba2AttentionMetadata]

Source code in vllm/v1/attention/backends/mamba2_attn.py
class Mamba2AttentionMetadataBuilder(
        BaseMambaAttentionMetadataBuilder[Mamba2AttentionMetadata]):

    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device):
        super().__init__(kv_cache_spec, layer_names, vllm_config, device)
        self.chunk_size = vllm_config.model_config.get_mamba_chunk_size()
        assert self.chunk_size is not None, (
            "chunk_size needs to be set in the model config for Mamba2 models")

    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False) -> Mamba2AttentionMetadata:
        num_reqs = common_attn_metadata.num_reqs
        query_start_loc = common_attn_metadata.query_start_loc
        seq_lens = common_attn_metadata.seq_lens

        seq_idx_p = None
        chunk_indices_p, chunk_offsets_p = None, None
        # Flags indicating which prefill requests have initial states
        # (i.e. previously computed tokens whose SSM state must be loaded)
        has_initial_states_p = None
        prep_initial_states = False

        state_indices_tensor = common_attn_metadata.block_table_tensor[:, 0]

        num_decodes, num_prefills, num_decode_tokens, num_prefill_tokens = (
            split_decodes_and_prefills(common_attn_metadata,
                                       decode_threshold=1))

        # Compute seq_idx, chunk_indices and chunk_offsets for prefill only
        if num_prefills > 0:
            #[batch,]
            has_initial_states_cpu = (
                common_attn_metadata.
                num_computed_tokens_cpu[num_reqs - num_prefills:num_reqs] > 0)
            prep_initial_states = torch.any(has_initial_states_cpu).item()
            has_initial_states_p = has_initial_states_cpu.to(
                query_start_loc.device)

            query_start_loc_p = common_attn_metadata.query_start_loc[
                -num_prefills - 1:] - num_decode_tokens

            seq_idx_p = torch.repeat_interleave(torch.arange(
                num_prefills,
                dtype=torch.int32,
                device=query_start_loc_p.device),
                                                query_start_loc_p.diff(),
                                                output_size=num_prefill_tokens)
            seq_idx_p.unsqueeze_(0)

            # We compute metadata for chunked prefill once at the top level
            # model forward and reuse them in mamba layers. If not needed,
            # they will be ignored inside mamba kernels.
            if prep_initial_states:
                chunk_indices_p, chunk_offsets_p = (
                    _query_start_loc_to_chunk_indices_offsets(
                        query_start_loc_p, self.chunk_size,
                        num_prefill_tokens))

        elif num_decodes <= self.decode_cudagraph_max_bs:
            # Pad state tensor for CUDA graph
            num_input_tokens = self.vllm_config.pad_for_cudagraph(num_decodes)
            self.state_indices_tensor[:num_decodes].copy_(state_indices_tensor,
                                                          non_blocking=True)
            state_indices_tensor = self.state_indices_tensor[:num_input_tokens]
            state_indices_tensor[num_decodes:] = PAD_SLOT_ID

        attn_metadata = Mamba2AttentionMetadata(
            num_prefills=num_prefills,
            num_prefill_tokens=num_prefill_tokens,
            num_decodes=num_decodes,
            num_decode_tokens=num_decode_tokens,
            query_start_loc=query_start_loc,
            seq_lens=seq_lens,
            prep_initial_states=prep_initial_states,
            chunk_size=self.chunk_size,
            has_initial_states_p=has_initial_states_p,
            seq_idx_p=seq_idx_p,
            chunk_indices_p=chunk_indices_p,
            chunk_offsets_p=chunk_offsets_p,
            state_indices_tensor=state_indices_tensor,
        )
        return attn_metadata

chunk_size instance-attribute

chunk_size = get_mamba_chunk_size()

__init__

__init__(
    kv_cache_spec: AttentionSpec,
    layer_names: list[str],
    vllm_config: VllmConfig,
    device: device,
)
Source code in vllm/v1/attention/backends/mamba2_attn.py
def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
             vllm_config: VllmConfig, device: torch.device):
    super().__init__(kv_cache_spec, layer_names, vllm_config, device)
    self.chunk_size = vllm_config.model_config.get_mamba_chunk_size()
    assert self.chunk_size is not None, (
        "chunk_size needs to be set in the model config for Mamba2 models")

build

build(
    common_prefix_len: int,
    common_attn_metadata: CommonAttentionMetadata,
    fast_build: bool = False,
) -> Mamba2AttentionMetadata
Source code in vllm/v1/attention/backends/mamba2_attn.py
def build(self,
          common_prefix_len: int,
          common_attn_metadata: CommonAttentionMetadata,
          fast_build: bool = False) -> Mamba2AttentionMetadata:
    num_reqs = common_attn_metadata.num_reqs
    query_start_loc = common_attn_metadata.query_start_loc
    seq_lens = common_attn_metadata.seq_lens

    seq_idx_p = None
    chunk_indices_p, chunk_offsets_p = None, None
    # Flags indicating which prefill requests have initial states
    # (i.e. previously computed tokens whose SSM state must be loaded)
    has_initial_states_p = None
    prep_initial_states = False

    state_indices_tensor = common_attn_metadata.block_table_tensor[:, 0]

    num_decodes, num_prefills, num_decode_tokens, num_prefill_tokens = (
        split_decodes_and_prefills(common_attn_metadata,
                                   decode_threshold=1))

    # Compute seq_idx, chunk_indices and chunk_offsets for prefill only
    if num_prefills > 0:
        #[batch,]
        has_initial_states_cpu = (
            common_attn_metadata.
            num_computed_tokens_cpu[num_reqs - num_prefills:num_reqs] > 0)
        prep_initial_states = torch.any(has_initial_states_cpu).item()
        has_initial_states_p = has_initial_states_cpu.to(
            query_start_loc.device)

        query_start_loc_p = common_attn_metadata.query_start_loc[
            -num_prefills - 1:] - num_decode_tokens

        seq_idx_p = torch.repeat_interleave(torch.arange(
            num_prefills,
            dtype=torch.int32,
            device=query_start_loc_p.device),
                                            query_start_loc_p.diff(),
                                            output_size=num_prefill_tokens)
        seq_idx_p.unsqueeze_(0)

        # We compute metadata for chunked prefill once at the top level
        # model forward and reuse them in mamba layers. If not needed,
        # they will be ignored inside mamba kernels.
        if prep_initial_states:
            chunk_indices_p, chunk_offsets_p = (
                _query_start_loc_to_chunk_indices_offsets(
                    query_start_loc_p, self.chunk_size,
                    num_prefill_tokens))

    elif num_decodes <= self.decode_cudagraph_max_bs:
        # Pad state tensor for CUDA graph
        num_input_tokens = self.vllm_config.pad_for_cudagraph(num_decodes)
        self.state_indices_tensor[:num_decodes].copy_(state_indices_tensor,
                                                      non_blocking=True)
        state_indices_tensor = self.state_indices_tensor[:num_input_tokens]
        state_indices_tensor[num_decodes:] = PAD_SLOT_ID

    attn_metadata = Mamba2AttentionMetadata(
        num_prefills=num_prefills,
        num_prefill_tokens=num_prefill_tokens,
        num_decodes=num_decodes,
        num_decode_tokens=num_decode_tokens,
        query_start_loc=query_start_loc,
        seq_lens=seq_lens,
        prep_initial_states=prep_initial_states,
        chunk_size=self.chunk_size,
        has_initial_states_p=has_initial_states_p,
        seq_idx_p=seq_idx_p,
        chunk_indices_p=chunk_indices_p,
        chunk_offsets_p=chunk_offsets_p,
        state_indices_tensor=state_indices_tensor,
    )
    return attn_metadata
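
The sketch below (illustrative values, plain torch only) traces how build() derives seq_idx_p for the prefill portion of a mixed batch: decode requests come first in the batch, the prefill slice of query_start_loc is rebased to zero, and each prefill token is then labelled with its prefill-local request index.

import torch

# Mixed batch: 2 decode requests (1 token each) followed by prefill requests
# of lengths 3 and 2, i.e. 7 tokens in total.
num_decode_tokens, num_prefills, num_prefill_tokens = 2, 2, 5
query_start_loc = torch.tensor([0, 1, 2, 5, 7], dtype=torch.int32)

# Keep only the prefill boundaries and rebase them to zero, as in build().
query_start_loc_p = query_start_loc[-num_prefills - 1:] - num_decode_tokens
# -> tensor([0, 3, 5], dtype=torch.int32)

# Label every prefill token with the index of the request it belongs to.
seq_idx_p = torch.repeat_interleave(
    torch.arange(num_prefills, dtype=torch.int32),
    query_start_loc_p.diff(),
    output_size=num_prefill_tokens).unsqueeze(0)
# -> tensor([[0, 0, 0, 1, 1]], dtype=torch.int32)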

_query_start_loc_to_chunk_indices_offsets

_query_start_loc_to_chunk_indices_offsets(
    query_start_loc: Tensor,
    chunk_size: int,
    total_seqlens: int,
)
Source code in vllm/v1/attention/backends/mamba2_attn.py
def _query_start_loc_to_chunk_indices_offsets(query_start_loc: torch.Tensor,
                                              chunk_size: int,
                                              total_seqlens: int):

    cu_seqlens = query_start_loc[1:]  # remove prepended 0

    # outputs will have length expansion of chunks that do not divide
    # chunk_size
    N = math.ceil(total_seqlens / chunk_size) + (cu_seqlens[:-1] % chunk_size
                                                 > 0).sum()
    chunk_indices = torch.arange(N,
                                 dtype=torch.int,
                                 device=query_start_loc.device)
    chunk_offsets = torch.zeros((N, ),
                                dtype=torch.int,
                                device=query_start_loc.device)

    p = 0  # num of insertions
    for s, e in zip(cu_seqlens[:-1], cu_seqlens[1:]):

        # if does not divide chunk_size, then there is one chunk insertion
        p += (s % chunk_size > 0)

        # get the dimensions
        # - the + 1 for _e is to shift the boundary by one chunk
        # - this shifting is not needed if chunk_size divides e
        _s, _e = s // chunk_size + p, e // chunk_size + p + (e % chunk_size
                                                             > 0)

        # adjust indices and offsets
        chunk_indices[_s:_e] -= p
        chunk_offsets[_s] = s % chunk_size

    return chunk_indices, chunk_offsets
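
A worked example of this helper (hand-checked against the loop above): two prefill sequences of lengths 5 and 7 with chunk_size=8. The second sequence starts inside the first chunk, so that chunk appears twice in the expanded index list and the second occurrence records the intra-chunk offset of 5.

import torch

from vllm.v1.attention.backends.mamba2_attn import (
    _query_start_loc_to_chunk_indices_offsets)

query_start_loc = torch.tensor([0, 5, 12], dtype=torch.int32)

chunk_indices, chunk_offsets = _query_start_loc_to_chunk_indices_offsets(
    query_start_loc, chunk_size=8, total_seqlens=12)

# chunk_indices -> tensor([0, 0, 1], dtype=torch.int32)
# chunk_offsets -> tensor([0, 5, 0], dtype=torch.int32)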