vllm.model_executor.models.jina_vl

logger module-attribute

logger = init_logger(__name__)

JinaVLForSequenceClassification

Bases: Qwen2VLForConditionalGeneration, SupportsCrossEncoding, SupportsMultiModal, SupportsScoreTemplate

Source code in vllm/model_executor/models/jina_vl.py
@MULTIMODAL_REGISTRY.register_processor(JinaVLMultiModalProcessor,
                                        info=Qwen2VLProcessingInfo,
                                        dummy_inputs=Qwen2VLDummyInputsBuilder)
class JinaVLForSequenceClassification(Qwen2VLForConditionalGeneration,
                                      SupportsCrossEncoding,
                                      SupportsMultiModal,
                                      SupportsScoreTemplate):

    is_pooling_model = True
    weight_mapper = WeightsMapper(
        orig_to_new_prefix={
            "score.0.": "score.dense.",
            "score.2.": "score.out_proj.",
            # mapping for new names in checkpoints saved after transformers v4.52
            "model.language_model.": "language_model.model.",
            "visual.": "visual.",
            # mapping for original checkpoint
            "lm_head.": "language_model.lm_head.",
            "model.": "language_model.model.",
        })

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config,
                         prefix=maybe_prefix(prefix, "qwen2_vl"))
        config = vllm_config.model_config.hf_config
        pooler_config = vllm_config.model_config.pooler_config
        assert pooler_config is not None

        # bias subtracted from the raw score logit for sigmoid normalization
        self.LOGIT_BIAS = 2.65

        self.score = JinaVLScorer(config)
        self.pooler = DispatchPooler({
            "encode":
            Pooler.for_encode(pooler_config),
            "classify":
            Pooler.for_classify(pooler_config, classifier=None),
            "score":
            Pooler.for_classify(pooler_config, classifier=None),
        })

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
        if modality.startswith("image"):
            return "<|vision_start|><|image_pad|><|vision_end|>"

        raise ValueError("Only image modality is supported")

    @classmethod
    def get_score_template(cls, query: str, document: str) -> Optional[str]:
        return f"**Document**:\n{document}\n**Query**:\n{query}"

    @classmethod
    def post_process_tokens(cls, prompt: TokensPrompt) -> None:

        # append the score target token to the end of the prompt tokens
        prompt['prompt_token_ids'].append(100)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs: object,
    ) -> torch.Tensor:
        hidden_states = super().forward(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
            **kwargs,
        )

        logits = self.score(hidden_states) - self.LOGIT_BIAS
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights, mapper=self.weight_mapper)
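
A minimal usage sketch (not from the source): scoring a query/document pair through vLLM's cross-encoder API. The checkpoint name and the shape of the output object are assumptions; any checkpoint that resolves to JinaVLForSequenceClassification should behave the same way.

from vllm import LLM

# Assumed checkpoint name; substitute the Jina VL reranker you actually use.
llm = LLM(model="jinaai/jina-reranker-m0", task="score")
outputs = llm.score("slm markdown", ["A guide to converting HTML to Markdown ..."])
print(outputs[0].outputs.score)  # relevance score for the pair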

LOGIT_BIAS instance-attribute

LOGIT_BIAS = 2.65

is_pooling_model class-attribute instance-attribute

is_pooling_model = True

pooler instance-attribute

pooler = DispatchPooler(
    {
        "encode": for_encode(pooler_config),
        "classify": for_classify(
            pooler_config, classifier=None
        ),
        "score": for_classify(
            pooler_config, classifier=None
        ),
    }
)

score instance-attribute

score = JinaVLScorer(config)

weight_mapper class-attribute instance-attribute

weight_mapper = WeightsMapper(
    orig_to_new_prefix={
        "score.0.": "score.dense.",
        "score.2.": "score.out_proj.",
        "model.language_model.": "language_model.model.",
        "visual.": "visual.",
        "lm_head.": "language_model.lm_head.",
        "model.": "language_model.model.",
    }
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/jina_vl.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__(vllm_config=vllm_config,
                     prefix=maybe_prefix(prefix, "qwen2_vl"))
    config = vllm_config.model_config.hf_config
    pooler_config = vllm_config.model_config.pooler_config
    assert pooler_config is not None

    # bias subtracted from the raw score logit for sigmoid normalization
    self.LOGIT_BIAS = 2.65

    self.score = JinaVLScorer(config)
    self.pooler = DispatchPooler({
        "encode":
        Pooler.for_encode(pooler_config),
        "classify":
        Pooler.for_classify(pooler_config, classifier=None),
        "score":
        Pooler.for_classify(pooler_config, classifier=None),
    })

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
    **kwargs: object,
) -> Tensor
Source code in vllm/model_executor/models/jina_vl.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
    **kwargs: object,
) -> torch.Tensor:
    hidden_states = super().forward(
        input_ids=input_ids,
        positions=positions,
        intermediate_tensors=intermediate_tensors,
        inputs_embeds=inputs_embeds,
        **kwargs,
    )

    logits = self.score(hidden_states) - self.LOGIT_BIAS
    return logits
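
The returned logits already include the -LOGIT_BIAS shift, so a sigmoid on the caller's side yields a relevance probability. A minimal sketch of that downstream normalization (assumed; it is not part of this module):

import torch

logits = torch.tensor([0.45])        # hypothetical forward() output
probability = torch.sigmoid(logits)  # relevance score in [0, 1]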

get_placeholder_str classmethod

get_placeholder_str(modality: str, i: int) -> Optional[str]
Source code in vllm/model_executor/models/jina_vl.py
@classmethod
def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
    if modality.startswith("image"):
        return "<|vision_start|><|image_pad|><|vision_end|>"

    raise ValueError("Only image modality is supported")
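
Illustrative only: the placeholder marks where image features are spliced into the token stream of this Qwen2-VL-style model; the prompt text around it is made up.

from vllm.model_executor.models.jina_vl import JinaVLForSequenceClassification

placeholder = JinaVLForSequenceClassification.get_placeholder_str("image", 0)
# placeholder == "<|vision_start|><|image_pad|><|vision_end|>"
prompt = f"{placeholder}\nWhat does the page show?"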

get_score_template classmethod

get_score_template(
    query: str, document: str
) -> Optional[str]
Source code in vllm/model_executor/models/jina_vl.py
@classmethod
def get_score_template(cls, query: str, document: str) -> Optional[str]:
    return f"**Document**:\n{document}\n**Query**:\n{query}"
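
Illustrative only, with made-up query and document strings:

from vllm.model_executor.models.jina_vl import JinaVLForSequenceClassification

template = JinaVLForSequenceClassification.get_score_template(
    query="best pizza in town",
    document="A review of local pizzerias ...",
)
# "**Document**:\nA review of local pizzerias ...\n**Query**:\nbest pizza in town"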

load_weights

load_weights(weights: Iterable[tuple[str, Tensor]])
Source code in vllm/model_executor/models/jina_vl.py
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
    loader = AutoWeightsLoader(self)
    return loader.load_weights(weights, mapper=self.weight_mapper)
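
A simplified stand-in for how the weight_mapper prefix table rewrites checkpoint parameter names during loading (this is not the actual WeightsMapper implementation; it assumes the first matching prefix wins, so insertion order matters and "lm_head." must be tried before the catch-all "model."):

# Subset of the orig_to_new_prefix table from the class definition above.
orig_to_new_prefix = {
    "score.0.": "score.dense.",
    "score.2.": "score.out_proj.",
    "lm_head.": "language_model.lm_head.",
    "model.": "language_model.model.",
}

def remap(name: str) -> str:
    for old, new in orig_to_new_prefix.items():
        if name.startswith(old):
            return new + name[len(old):]
    return name

assert remap("score.0.weight") == "score.dense.weight"
assert remap("model.embed_tokens.weight") == "language_model.model.embed_tokens.weight"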

post_process_tokens classmethod

post_process_tokens(prompt: TokensPrompt) -> None
Source code in vllm/model_executor/models/jina_vl.py
@classmethod
def post_process_tokens(cls, prompt: TokensPrompt) -> None:

    # append the score target token to the end of the prompt tokens
    prompt['prompt_token_ids'].append(100)
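
Illustrative only: TokensPrompt is a dict-style prompt, so the method mutates it in place. The token ids below are made up; the appended id 100 is the fixed score target token from the source above.

from vllm.inputs import TokensPrompt
from vllm.model_executor.models.jina_vl import JinaVLForSequenceClassification

prompt = TokensPrompt(prompt_token_ids=[151644, 872, 198])
JinaVLForSequenceClassification.post_process_tokens(prompt)
assert prompt["prompt_token_ids"][-1] == 100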

JinaVLMultiModalProcessor

Bases: Qwen2VLMultiModalProcessor

Source code in vllm/model_executor/models/jina_vl.py
class JinaVLMultiModalProcessor(Qwen2VLMultiModalProcessor):

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:

        # NOTE: Reverse each modality's item list because the score template
        # for the JinaVLForRanking model places the query after the document,
        # while mm_data stores the items in the opposite order (query first,
        # then document).
        for value in mm_data.values():
            value.reverse()
        return super()._call_hf_processor(prompt, mm_data, mm_kwargs,
                                          tok_kwargs)

_call_hf_processor

_call_hf_processor(
    prompt: str,
    mm_data: Mapping[str, object],
    mm_kwargs: Mapping[str, object],
    tok_kwargs: Mapping[str, object],
) -> BatchFeature
Source code in vllm/model_executor/models/jina_vl.py
def _call_hf_processor(
    self,
    prompt: str,
    mm_data: Mapping[str, object],
    mm_kwargs: Mapping[str, object],
    tok_kwargs: Mapping[str, object],
) -> BatchFeature:

    # NOTE: Reverse each modality's item list because the score template
    # for the JinaVLForRanking model places the query after the document,
    # while mm_data stores the items in the opposite order (query first,
    # then document).
    for value in mm_data.values():
        value.reverse()
    return super()._call_hf_processor(prompt, mm_data, mm_kwargs,
                                      tok_kwargs)
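
The effect of the reversal, shown with hypothetical placeholder objects standing in for the decoded images:

query_image, document_image = object(), object()
mm_data = {"image": [query_image, document_image]}
for value in mm_data.values():
    value.reverse()
assert mm_data["image"] == [document_image, query_image]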

JinaVLScorer

Bases: Module

Source code in vllm/model_executor/models/jina_vl.py
class JinaVLScorer(nn.Module):

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense = ColumnParallelLinear(config.hidden_size,
                                          config.hidden_size,
                                          bias=True)
        self.out_proj = RowParallelLinear(config.hidden_size,
                                          config.num_labels,
                                          bias=True)

    def forward(self, x, **kwargs):
        x, _ = self.dense(x)
        x = torch.relu(x)
        x, _ = self.out_proj(x)
        return x

dense instance-attribute

dense = ColumnParallelLinear(
    hidden_size, hidden_size, bias=True
)

out_proj instance-attribute

out_proj = RowParallelLinear(
    hidden_size, num_labels, bias=True
)

__init__

__init__(config: PretrainedConfig)
Source code in vllm/model_executor/models/jina_vl.py
def __init__(self, config: PretrainedConfig):
    super().__init__()
    self.dense = ColumnParallelLinear(config.hidden_size,
                                      config.hidden_size,
                                      bias=True)
    self.out_proj = RowParallelLinear(config.hidden_size,
                                      config.num_labels,
                                      bias=True)

forward

forward(x, **kwargs)
Source code in vllm/model_executor/models/jina_vl.py
def forward(self, x, **kwargs):
    x, _ = self.dense(x)
    x = torch.relu(x)
    x, _ = self.out_proj(x)
    return x
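
A single-device sketch of the same head (assumption: no tensor parallelism), using plain nn.Linear in place of the parallel layers while keeping the score.dense -> score.out_proj checkpoint layout:

import torch
import torch.nn as nn

class ScorerSketch(nn.Module):
    def __init__(self, hidden_size: int, num_labels: int):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size, bias=True)
        self.out_proj = nn.Linear(hidden_size, num_labels, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # dense -> ReLU -> out_proj, mirroring JinaVLScorer.forward
        return self.out_proj(torch.relu(self.dense(x)))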