vllm.model_executor.models.qwen2_rm

Inference-only Qwen2-RM model compatible with HuggingFace weights.
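
A usage sketch (not part of this module): both reward classes are served through vLLM's pooling interface. The model name and the task argument are assumptions here and may differ across vLLM versions:

from vllm import LLM

# Hypothetical example: run a Qwen2 reward model through the pooling runner.
llm = LLM(model="Qwen/Qwen2.5-Math-RM-72B", task="reward")
(output,) = llm.encode("Question: 1 + 1 = ?\nAnswer: 2")
print(output.outputs.data)  # reward tensor produced by the pooler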

Qwen2ForProcessRewardModel

Bases: Qwen2RewardBaseModel

Source code in vllm/model_executor/models/qwen2_rm.py
@default_pooling_type("STEP")
class Qwen2ForProcessRewardModel(Qwen2RewardBaseModel):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        vllm_config.model_config.hf_config.num_labels = 2
        super().__init__(vllm_config=vllm_config, prefix=prefix)

        pooler_config = vllm_config.model_config.pooler_config
        assert pooler_config is not None

        self.pooler = DispatchPooler(
            {"encode": Pooler.for_encode(pooler_config)})

pooler instance-attribute

pooler = DispatchPooler(
    {"encode": Pooler.for_encode(pooler_config)}
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/qwen2_rm.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    vllm_config.model_config.hf_config.num_labels = 2
    super().__init__(vllm_config=vllm_config, prefix=prefix)

    pooler_config = vllm_config.model_config.pooler_config
    assert pooler_config is not None

    self.pooler = DispatchPooler(
        {"encode": Pooler.for_encode(pooler_config)})

Qwen2ForRewardModel

Bases: Qwen2RewardBaseModel

Source code in vllm/model_executor/models/qwen2_rm.py
@default_pooling_type("ALL")
class Qwen2ForRewardModel(Qwen2RewardBaseModel):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        vllm_config.model_config.hf_config.num_labels = 1
        super().__init__(vllm_config=vllm_config, prefix=prefix)

        pooler_config = vllm_config.model_config.pooler_config
        assert pooler_config is not None

        self.pooler = DispatchPooler(
            {"encode": Pooler.for_encode(pooler_config)})

pooler instance-attribute

pooler = DispatchPooler(
    {"encode": Pooler.for_encode(pooler_config)}
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/qwen2_rm.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    vllm_config.model_config.hf_config.num_labels = 1
    super().__init__(vllm_config=vllm_config, prefix=prefix)

    pooler_config = vllm_config.model_config.pooler_config
    assert pooler_config is not None

    self.pooler = DispatchPooler(
        {"encode": Pooler.for_encode(pooler_config)})

Qwen2RewardBaseModel

Bases: Module, SupportsLoRA, SupportsPP

Source code in vllm/model_executor/models/qwen2_rm.py
class Qwen2RewardBaseModel(nn.Module, SupportsLoRA, SupportsPP):

    is_pooling_model = True
    pooler: Pooler

    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        lora_config = vllm_config.lora_config

        self.config = config
        self.lora_config = lora_config

        self.quant_config = quant_config
        self.model = Qwen2Model(vllm_config=vllm_config,
                                prefix=maybe_prefix(prefix, "model"))

        self.score = nn.Sequential(
            ColumnParallelLinear(config.hidden_size,
                                 config.hidden_size,
                                 quant_config=quant_config,
                                 return_bias=False),
            nn.ReLU(),
            RowParallelLinear(config.hidden_size,
                              config.num_labels,
                              quant_config=quant_config,
                              return_bias=False),
        )
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors)

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.model.get_input_embeddings(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        hidden_states = self.model(input_ids, positions, intermediate_tensors,
                                   inputs_embeds)
        logits = self.score(hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        loader = AutoWeightsLoader(self,
                                   ignore_unexpected_prefixes=["lm_head."])
        return loader.load_weights(weights)

config instance-attribute

config = config

is_pooling_model class-attribute instance-attribute

is_pooling_model = True

lora_config instance-attribute

lora_config = lora_config

make_empty_intermediate_tensors instance-attribute

make_empty_intermediate_tensors = (
    model.make_empty_intermediate_tensors
)

model instance-attribute

model = Qwen2Model(
    vllm_config=vllm_config,
    prefix=maybe_prefix(prefix, "model"),
)

packed_modules_mapping class-attribute instance-attribute

packed_modules_mapping = {
    "qkv_proj": ["q_proj", "k_proj", "v_proj"],
    "gate_up_proj": ["gate_proj", "up_proj"],
}

pooler instance-attribute

pooler: Pooler

quant_config instance-attribute

quant_config = quant_config

score instance-attribute

score = Sequential(
    ColumnParallelLinear(
        hidden_size,
        hidden_size,
        quant_config=quant_config,
        return_bias=False,
    ),
    ReLU(),
    RowParallelLinear(
        hidden_size,
        num_labels,
        quant_config=quant_config,
        return_bias=False,
    ),
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/qwen2_rm.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config = vllm_config.model_config.hf_config
    quant_config = vllm_config.quant_config
    lora_config = vllm_config.lora_config

    self.config = config
    self.lora_config = lora_config

    self.quant_config = quant_config
    self.model = Qwen2Model(vllm_config=vllm_config,
                            prefix=maybe_prefix(prefix, "model"))

    self.score = nn.Sequential(
        ColumnParallelLinear(config.hidden_size,
                             config.hidden_size,
                             quant_config=quant_config,
                             return_bias=False),
        nn.ReLU(),
        RowParallelLinear(config.hidden_size,
                          config.num_labels,
                          quant_config=quant_config,
                          return_bias=False),
    )
    self.make_empty_intermediate_tensors = (
        self.model.make_empty_intermediate_tensors)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
) -> Union[Tensor, IntermediateTensors]
Source code in vllm/model_executor/models/qwen2_rm.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, IntermediateTensors]:
    hidden_states = self.model(input_ids, positions, intermediate_tensors,
                               inputs_embeds)
    logits = self.score(hidden_states)
    return logits
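
forward returns raw per-token logits; pooling is applied separately by self.pooler. A shape sketch of the score head using plain nn.Linear stand-ins for the tensor-parallel layers (sizes are assumptions):

import torch
import torch.nn as nn

hidden_size, num_labels, num_tokens = 3584, 1, 16  # assumed sizes
# Mirrors ColumnParallelLinear -> ReLU -> RowParallelLinear on a single device.
score = nn.Sequential(
    nn.Linear(hidden_size, hidden_size),
    nn.ReLU(),
    nn.Linear(hidden_size, num_labels),
)
hidden_states = torch.randn(num_tokens, hidden_size)
logits = score(hidden_states)  # shape: [num_tokens, num_labels]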

get_input_embeddings

get_input_embeddings(input_ids: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen2_rm.py
def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
    return self.model.get_input_embeddings(input_ids)

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/qwen2_rm.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    loader = AutoWeightsLoader(self,
                               ignore_unexpected_prefixes=["lm_head."])
    return loader.load_weights(weights)
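
The ignore_unexpected_prefixes=["lm_head."] argument lets the loader skip the LM head tensors that a causal-LM checkpoint may still carry, since the score head replaces them. A hedged call sketch (assumes model is an instantiated subclass; shapes are placeholders, real checkpoints carry full-size tensors):

import torch

weights = [
    ("model.embed_tokens.weight", torch.empty(1, 1)),  # placeholder shape
    ("lm_head.weight", torch.empty(1, 1)),             # skipped by the loader
]
loaded = model.load_weights(iter(weights))  # set of parameter names that were loaded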