
vllm.model_executor.models.deepseek_eagle

DeepseekV2Model

Bases: Module

Source code in vllm/model_executor/models/deepseek_eagle.py
@support_torch_compile
class DeepseekV2Model(nn.Module):

    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
        start_layer_id: int = 0,
    ) -> None:
        super().__init__()
        self.config = vllm_config. \
            speculative_config.draft_model_config.hf_config
        model_config = vllm_config.model_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.vocab_size = self.config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            self.config.vocab_size,
            self.config.hidden_size,
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "embed_tokens"),
        )

        self.layers = nn.ModuleList([
            DeepseekV2DecoderLayer(
                self.config,
                prefix=maybe_prefix(prefix, f"layers.{i + start_layer_id}"),
                model_config=model_config,
                cache_config=cache_config,
                quant_config=quant_config,
            ) for i in range(self.config.num_hidden_layers)
        ])

        self.fc = nn.Linear(
            self.config.model.hidden_size * 2,
            self.config.model.hidden_size,
            bias=False,
        )

        self.enorm = RMSNorm(self.config.hidden_size,
                             eps=self.config.rms_norm_eps)
        self.hnorm = RMSNorm(self.config.hidden_size,
                             eps=self.config.rms_norm_eps)
        self.norm = RMSNorm(self.config.hidden_size,
                            eps=self.config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_embeds = self.embed_tokens(input_ids)

        inputs = torch.cat(
            [self.enorm(input_embeds),
             self.hnorm(hidden_states)], dim=-1)
        hidden_states = self.fc(inputs)
        residual = None
        for layer in self.layers:
            hidden_states, residual = layer(
                positions,
                hidden_states,
                residual,
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states, hidden_states

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
            ("fused_qkv_a_proj", "q_a_proj", 0),
            ("fused_qkv_a_proj", "kv_a_proj_with_mqa", 1),
        ]

        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        expert_params_mapping = FusedMoE.make_expert_params_mapping(
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.config.n_routed_experts)

        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue

            for param_name, weight_name, shard_id in stacked_params_mapping:
                # Skip non-stacked layers and experts (experts handled below).
                if weight_name not in name:
                    continue
                # We have mlp.experts[0].gate_proj in the checkpoint.
                # Since we handle the experts below in expert_params_mapping,
                # we need to skip here BEFORE we update the name, otherwise
                # name will be updated to mlp.experts[0].gate_up_proj, which
                # will then be updated below in expert_params_mapping
                # for mlp.experts[0].gate_gate_up_proj, which breaks load.
                if ("mlp.experts." in name) and name not in params_dict:
                    continue
                name_mapped = name.replace(weight_name, param_name)

                # QKV fusion is optional: if the fused parameter is not present,
                # fall back to normal weight loading; otherwise adopt the fused name.
                if ((param_name == "fused_qkv_a_proj")
                        and name_mapped not in params_dict):
                    continue
                else:
                    name = name_mapped

                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue

                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                for mapping in expert_params_mapping:
                    param_name, weight_name, expert_id, shard_id = mapping
                    if weight_name not in name:
                        continue
                    name = name.replace(weight_name, param_name)

                    param = params_dict[name]
                    weight_loader = param.weight_loader
                    weight_loader(
                        param,
                        loaded_weight,
                        name,
                        shard_id=shard_id,
                        expert_id=expert_id,
                    )
                    break
                else:
                    # If PP is disabled, the draft shares embed_tokens
                    # with the target model, so skip loading it here.
                    if get_pp_group().world_size == 1 and \
                            "embed_tokens." in name:
                        continue

                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue

                    # Remap the FP8 KV-cache scale name if needed.
                    name = maybe_remap_kv_scale_name(name, params_dict)
                    if name is None:
                        continue

                    param = params_dict[name]
                    weight_loader = getattr(param, "weight_loader",
                                            default_weight_loader)
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
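
The draft model's forward pass fuses the token embeddings with the hidden states handed over by the target model: both streams are RMS-normalized, concatenated along the feature dimension, and projected back to hidden_size by self.fc before entering the decoder layers. A minimal sketch of just that fusion step in plain PyTorch (hypothetical sizes, standard torch.nn.RMSNorm rather than vLLM's fused kernel):

import torch
import torch.nn as nn


class EagleFusionSketch(nn.Module):
    # Minimal sketch of the embedding / hidden-state fusion performed in
    # DeepseekV2Model.forward. Uses torch.nn.RMSNorm (PyTorch >= 2.4)
    # instead of vLLM's fused kernel; hidden_size is arbitrary here.
    def __init__(self, hidden_size: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.enorm = nn.RMSNorm(hidden_size, eps=eps)
        self.hnorm = nn.RMSNorm(hidden_size, eps=eps)
        self.fc = nn.Linear(hidden_size * 2, hidden_size, bias=False)

    def forward(self, input_embeds: torch.Tensor,
                target_hidden: torch.Tensor) -> torch.Tensor:
        # Normalize both streams, concatenate on the feature dim ([*, 2H]),
        # then project back to the model width ([*, H]).
        fused = torch.cat(
            [self.enorm(input_embeds), self.hnorm(target_hidden)], dim=-1)
        return self.fc(fused)


embeds = torch.randn(4, 1024)   # hypothetical draft token embeddings
hidden = torch.randn(4, 1024)   # hypothetical target-model hidden states
print(EagleFusionSketch(1024)(embeds, hidden).shape)  # torch.Size([4, 1024])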

config instance-attribute

config = hf_config

embed_tokens instance-attribute

embed_tokens = VocabParallelEmbedding(
    vocab_size,
    hidden_size,
    quant_config=quant_config,
    prefix=maybe_prefix(prefix, "embed_tokens"),
)

enorm instance-attribute

enorm = RMSNorm(hidden_size, eps=rms_norm_eps)

fc instance-attribute

fc = Linear(hidden_size * 2, hidden_size, bias=False)

hnorm instance-attribute

hnorm = RMSNorm(hidden_size, eps=rms_norm_eps)

layers instance-attribute

layers = ModuleList(
    [
        DeepseekV2DecoderLayer(
            config,
            prefix=maybe_prefix(
                prefix, f"layers.{i + start_layer_id}"
            ),
            model_config=model_config,
            cache_config=cache_config,
            quant_config=quant_config,
        )
        for i in range(num_hidden_layers)
    ]
)

norm instance-attribute

norm = RMSNorm(hidden_size, eps=rms_norm_eps)

vocab_size instance-attribute

vocab_size = vocab_size

__init__

__init__(
    *,
    vllm_config: VllmConfig,
    prefix: str = "",
    start_layer_id: int = 0,
) -> None
Source code in vllm/model_executor/models/deepseek_eagle.py
def __init__(
    self,
    *,
    vllm_config: VllmConfig,
    prefix: str = "",
    start_layer_id: int = 0,
) -> None:
    super().__init__()
    self.config = vllm_config. \
        speculative_config.draft_model_config.hf_config
    model_config = vllm_config.model_config
    cache_config = vllm_config.cache_config
    quant_config = vllm_config.quant_config
    self.vocab_size = self.config.vocab_size

    self.embed_tokens = VocabParallelEmbedding(
        self.config.vocab_size,
        self.config.hidden_size,
        quant_config=quant_config,
        prefix=maybe_prefix(prefix, "embed_tokens"),
    )

    self.layers = nn.ModuleList([
        DeepseekV2DecoderLayer(
            self.config,
            prefix=maybe_prefix(prefix, f"layers.{i + start_layer_id}"),
            model_config=model_config,
            cache_config=cache_config,
            quant_config=quant_config,
        ) for i in range(self.config.num_hidden_layers)
    ])

    self.fc = nn.Linear(
        self.config.model.hidden_size * 2,
        self.config.model.hidden_size,
        bias=False,
    )

    self.enorm = RMSNorm(self.config.hidden_size,
                         eps=self.config.rms_norm_eps)
    self.hnorm = RMSNorm(self.config.hidden_size,
                         eps=self.config.rms_norm_eps)
    self.norm = RMSNorm(self.config.hidden_size,
                        eps=self.config.rms_norm_eps)
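
start_layer_id offsets the draft layers' names so their prefixes continue the target model's layer numbering instead of colliding with it. A small illustration, assuming a target model with 61 decoder layers and a single-layer draft (both counts are hypothetical):

start_layer_id = 61      # assumed: number of decoder layers in the target model
num_hidden_layers = 1    # assumed: depth of the draft model
prefix = "model"

layer_prefixes = [
    f"{prefix}.layers.{i + start_layer_id}" for i in range(num_hidden_layers)
]
print(layer_prefixes)    # ['model.layers.61']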

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    hidden_states: Tensor,
) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/models/deepseek_eagle.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
    input_embeds = self.embed_tokens(input_ids)

    inputs = torch.cat(
        [self.enorm(input_embeds),
         self.hnorm(hidden_states)], dim=-1)
    hidden_states = self.fc(inputs)
    residual = None
    for layer in self.layers:
        hidden_states, residual = layer(
            positions,
            hidden_states,
            residual,
        )
    hidden_states, _ = self.norm(hidden_states, residual)
    return hidden_states, hidden_states
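
Inside the layer loop, each decoder layer returns a (hidden_states, residual) pair, and the final self.norm call folds the accumulated residual back in before normalizing. A simplified sketch of that contract, assuming the norm adds the residual to its input and returns the pre-norm sum alongside the normalized output (the real decoder layers also run attention and the MLP, which are omitted here):

import torch


def rms_norm(x: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Plain RMS normalization without learned scales, for illustration only.
    return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)


def norm_with_residual(x, residual=None, eps=1e-6):
    # Assumed contract: fold the residual into x, return the normalized
    # value plus the pre-norm sum so the next layer keeps accumulating it.
    if residual is not None:
        x = x + residual
    return rms_norm(x, eps), x


hidden = torch.randn(2, 8)
residual = None
for _ in range(2):  # stand-in for the decoder layers (attention/MLP omitted)
    hidden, residual = norm_with_residual(hidden, residual)
hidden, _ = norm_with_residual(hidden, residual)  # analogous to the final self.norm
print(hidden.shape)  # torch.Size([2, 8])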

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/deepseek_eagle.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    stacked_params_mapping = [
        # (param_name, shard_name, shard_id)
        ("gate_up_proj", "gate_proj", 0),
        ("gate_up_proj", "up_proj", 1),
        ("fused_qkv_a_proj", "q_a_proj", 0),
        ("fused_qkv_a_proj", "kv_a_proj_with_mqa", 1),
    ]

    # Params for weights, fp8 weight scales, fp8 activation scales
    # (param_name, weight_name, expert_id, shard_id)
    expert_params_mapping = FusedMoE.make_expert_params_mapping(
        ckpt_gate_proj_name="gate_proj",
        ckpt_down_proj_name="down_proj",
        ckpt_up_proj_name="up_proj",
        num_experts=self.config.n_routed_experts)

    params_dict = dict(self.named_parameters())
    loaded_params: set[str] = set()
    for name, loaded_weight in weights:
        if "rotary_emb.inv_freq" in name:
            continue

        for param_name, weight_name, shard_id in stacked_params_mapping:
            # Skip non-stacked layers and experts (experts handled below).
            if weight_name not in name:
                continue
            # We have mlp.experts[0].gate_proj in the checkpoint.
            # Since we handle the experts below in expert_params_mapping,
            # we need to skip here BEFORE we update the name, otherwise
            # name will be updated to mlp.experts[0].gate_up_proj, which
            # will then be updated below in expert_params_mapping
            # for mlp.experts[0].gate_gate_up_proj, which breaks load.
            if ("mlp.experts." in name) and name not in params_dict:
                continue
            name_mapped = name.replace(weight_name, param_name)

            # QKV fusion is optional: if the fused parameter is not present,
            # fall back to normal weight loading; otherwise adopt the fused name.
            if ((param_name == "fused_qkv_a_proj")
                    and name_mapped not in params_dict):
                continue
            else:
                name = name_mapped

            # Skip loading extra bias for GPTQ models.
            if name.endswith(".bias") and name not in params_dict:
                continue

            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            for mapping in expert_params_mapping:
                param_name, weight_name, expert_id, shard_id = mapping
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)

                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(
                    param,
                    loaded_weight,
                    name,
                    shard_id=shard_id,
                    expert_id=expert_id,
                )
                break
            else:
                # If PP is disabled, the draft shares embed_tokens
                # with the target model, so skip loading it here.
                if get_pp_group().world_size == 1 and \
                        "embed_tokens." in name:
                    continue

                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue

                # Remap the FP8 KV-cache scale name if needed.
                name = maybe_remap_kv_scale_name(name, params_dict)
                if name is None:
                    continue

                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
        loaded_params.add(name)
    return loaded_params
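
stacked_params_mapping drives a plain substring replacement from checkpoint weight names to the fused parameter names registered on the module. A toy walkthrough with a hypothetical checkpoint key (the layer index is arbitrary):

stacked_params_mapping = [
    # (param_name, shard_name, shard_id)
    ("gate_up_proj", "gate_proj", 0),
    ("gate_up_proj", "up_proj", 1),
    ("fused_qkv_a_proj", "q_a_proj", 0),
    ("fused_qkv_a_proj", "kv_a_proj_with_mqa", 1),
]

ckpt_name = "layers.61.mlp.gate_proj.weight"  # hypothetical checkpoint key
for param_name, weight_name, shard_id in stacked_params_mapping:
    if weight_name in ckpt_name:
        print(ckpt_name.replace(weight_name, param_name), "-> shard", shard_id)
        # layers.61.mlp.gate_up_proj.weight -> shard 0
        break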

EagleDeepseekV3ForCausalLM

Bases: DeepseekV3ForCausalLM

Source code in vllm/model_executor/models/deepseek_eagle.py
class EagleDeepseekV3ForCausalLM(DeepseekV3ForCausalLM):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        nn.Module.__init__(self)
        self.config = vllm_config. \
            speculative_config.draft_model_config.hf_config
        quant_config = vllm_config.quant_config
        target_layer_num = vllm_config.model_config.get_num_layers(
            vllm_config.parallel_config)
        self.model = DeepseekV2Model(vllm_config=vllm_config,
                                     prefix="model",
                                     start_layer_id=target_layer_num)

        self.lm_head = ParallelLMHead(self.config.vocab_size,
                                      self.config.hidden_size,
                                      quant_config=quant_config)

        logit_scale = getattr(self.config, "logit_scale", 1.0)
        self.logits_processor = LogitsProcessor(self.config.vocab_size,
                                                scale=logit_scale)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        if inputs_embeds is not None:
            raise NotImplementedError(
                f"{type(self).__name__} does not support multimodal inputs yet."
            )
        return self.model(input_ids, positions, hidden_states)

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        logits = self.logits_processor(self.lm_head, hidden_states,
                                       sampling_metadata)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=None,
        )

        model_weights = {}
        for name, loaded_weight in weights:
            if "lm_head" not in name:
                name = "model." + name
            model_weights[name] = loaded_weight
        loader.load_weights(model_weights.items())

config instance-attribute

config = hf_config

lm_head instance-attribute

lm_head = ParallelLMHead(
    vocab_size, hidden_size, quant_config=quant_config
)

logits_processor instance-attribute

logits_processor = LogitsProcessor(
    vocab_size, scale=logit_scale
)

model instance-attribute

model = DeepseekV2Model(
    vllm_config=vllm_config,
    prefix="model",
    start_layer_id=target_layer_num,
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/deepseek_eagle.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    nn.Module.__init__(self)
    self.config = vllm_config. \
        speculative_config.draft_model_config.hf_config
    quant_config = vllm_config.quant_config
    target_layer_num = vllm_config.model_config.get_num_layers(
        vllm_config.parallel_config)
    self.model = DeepseekV2Model(vllm_config=vllm_config,
                                 prefix="model",
                                 start_layer_id=target_layer_num)

    self.lm_head = ParallelLMHead(self.config.vocab_size,
                                  self.config.hidden_size,
                                  quant_config=quant_config)

    logit_scale = getattr(self.config, "logit_scale", 1.0)
    self.logits_processor = LogitsProcessor(self.config.vocab_size,
                                            scale=logit_scale)

compute_logits

compute_logits(
    hidden_states: Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[Tensor]
Source code in vllm/model_executor/models/deepseek_eagle.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[torch.Tensor]:
    logits = self.logits_processor(self.lm_head, hidden_states,
                                   sampling_metadata)
    return logits
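
Ignoring tensor parallelism, vocabulary padding, and any gathering controlled by sampling_metadata, the logits computation is roughly a projection onto the LM head weight followed by an optional scale. A rough sketch with hypothetical sizes:

import torch

hidden_states = torch.randn(4, 1024)       # [num_tokens, hidden_size], hypothetical
lm_head_weight = torch.randn(32000, 1024)  # [vocab_size, hidden_size], hypothetical
logit_scale = 1.0                          # would come from config.logit_scale

logits = hidden_states @ lm_head_weight.t() * logit_scale
print(logits.shape)                        # torch.Size([4, 32000])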

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    hidden_states: Tensor,
    inputs_embeds: Optional[Tensor] = None,
) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/models/deepseek_eagle.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    inputs_embeds: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    if inputs_embeds is not None:
        raise NotImplementedError(
            f"{type(self).__name__} does not support multimodal inputs yet."
        )
    return self.model(input_ids, positions, hidden_states)

load_weights

load_weights(weights: Iterable[tuple[str, Tensor]])
Source code in vllm/model_executor/models/deepseek_eagle.py
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
    loader = AutoWeightsLoader(
        self,
        skip_prefixes=None,
    )

    model_weights = {}
    for name, loaded_weight in weights:
        if "lm_head" not in name:
            name = "model." + name
        model_weights[name] = loaded_weight
    loader.load_weights(model_weights.items())
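
load_weights prepends "model." to every checkpoint key except the LM head so that the names line up with the self.model submodule before handing them to AutoWeightsLoader. A toy illustration with hypothetical checkpoint keys (weight values elided):

checkpoint = {
    "embed_tokens.weight": ...,              # hypothetical keys
    "layers.0.mlp.gate_proj.weight": ...,
    "lm_head.weight": ...,
}

remapped = {
    (name if "lm_head" in name else f"model.{name}"): weight
    for name, weight in checkpoint.items()
}
print(list(remapped))
# ['model.embed_tokens.weight', 'model.layers.0.mlp.gate_proj.weight', 'lm_head.weight']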