Skip to content

vllm.model_executor.layers.pooler

ClassifierFn module-attribute

ClassifierFn = Callable[[Tensor], Tensor]

PoolingFn module-attribute

PoolingMetadata module-attribute

_T module-attribute

_T = TypeVar('_T', Tensor, list[Tensor])

AllPool

Bases: PoolingMethod

Source code in vllm/model_executor/layers/pooler.py
class AllPool(PoolingMethod):
    """Returns the hidden states of every prompt token, one tensor per sequence."""

    def get_supported_tasks(self) -> Set[PoolingTask]:
        # Per-token output is only meaningful for the generic "encode" task.
        return {"encode"}

    def forward_all(
        self,
        hidden_states: torch.Tensor,
        pooling_cursor: PoolingCursor,
    ) -> Union[list[torch.Tensor], torch.Tensor]:

        # ALL pooling needs every prompt token's hidden state, so a sequence
        # whose prefill is still in progress cannot be pooled yet.
        assert not pooling_cursor.is_partial_prefill(), \
            "partial prefill not supported with ALL pooling"

        # Split the flat token batch into per-sequence chunks, then reorder
        # the chunks to match the cursor's sequence order.
        hidden_states_lst = list(
            hidden_states.split(
                pooling_cursor.num_scheduled_tokens_cpu.tolist()))
        return [hidden_states_lst[i] for i in pooling_cursor.index]

forward_all

forward_all(
    hidden_states: Tensor, pooling_cursor: PoolingCursor
) -> Union[list[Tensor], Tensor]
Source code in vllm/model_executor/layers/pooler.py
def forward_all(
    self,
    hidden_states: torch.Tensor,
    pooling_cursor: PoolingCursor,
) -> Union[list[torch.Tensor], torch.Tensor]:
    """Return one per-token hidden-state tensor for each sequence."""

    # ALL pooling needs every prompt token, hence partial prefill is rejected.
    assert not pooling_cursor.is_partial_prefill(), \
        "partial prefill not supported with ALL pooling"

    # Split the flat token batch per sequence, then reorder to cursor order.
    hidden_states_lst = list(
        hidden_states.split(
            pooling_cursor.num_scheduled_tokens_cpu.tolist()))
    return [hidden_states_lst[i] for i in pooling_cursor.index]

get_supported_tasks

get_supported_tasks() -> Set[PoolingTask]
Source code in vllm/model_executor/layers/pooler.py
def get_supported_tasks(self) -> Set[PoolingTask]:
    """ALL pooling yields per-token output, so only "encode" is supported."""
    return {"encode"}

BasePoolerActivation

Bases: Module, ABC

Source code in vllm/model_executor/layers/pooler.py
class BasePoolerActivation(nn.Module, ABC):
    """Abstract base for activations applied to pooled hidden states."""

    @abstractmethod
    def forward(self, pooled_data: _T) -> _T:
        # shape:
        # classify (& score) -> (batch_size, num_classes)
        # embed -> (batch_size, embedding_dim) or list(embedding_dim)
        #          (batch_size, dimensions) or list(dimensions) if using MRL
        raise NotImplementedError

forward abstractmethod

forward(pooled_data: _T) -> _T
Source code in vllm/model_executor/layers/pooler.py
@abstractmethod
def forward(self, pooled_data: _T) -> _T:
    """Apply the activation; accepts a batched tensor or a list of tensors."""
    # shape:
    # classify (& score) -> (batch_size, num_classes)
    # embed -> (batch_size, embedding_dim) or list(embedding_dim)
    #          (batch_size, dimensions) or list(dimensions) if using MRL
    raise NotImplementedError

CLSPool

Bases: PoolingMethod

Source code in vllm/model_executor/layers/pooler.py
class CLSPool(PoolingMethod):
    """Pools each sequence down to the hidden state of its first token."""

    def get_supported_tasks(self) -> Set[PoolingTask]:
        # One vector per sequence, usable by every pooling task.
        return {"encode", "embed", "classify", "score"}

    def forward_all(
        self,
        hidden_states: torch.Tensor,
        pooling_cursor: PoolingCursor,
    ) -> Union[list[torch.Tensor], torch.Tensor]:
        # The first (CLS) token's final hidden state is only meaningful
        # once prefill has fully completed.
        assert not pooling_cursor.is_partial_prefill(), \
            "partial prefill not supported with CLS pooling"

        return hidden_states[pooling_cursor.first_token_indices_gpu]

forward_all

forward_all(
    hidden_states: Tensor, pooling_cursor: PoolingCursor
) -> Union[list[Tensor], Tensor]
Source code in vllm/model_executor/layers/pooler.py
def forward_all(
    self,
    hidden_states: torch.Tensor,
    pooling_cursor: PoolingCursor,
) -> Union[list[torch.Tensor], torch.Tensor]:
    """Select the first-token hidden state of each sequence."""
    assert not pooling_cursor.is_partial_prefill(), \
        "partial prefill not supported with CLS pooling"

    return hidden_states[pooling_cursor.first_token_indices_gpu]

get_supported_tasks

get_supported_tasks() -> Set[PoolingTask]
Source code in vllm/model_executor/layers/pooler.py
def get_supported_tasks(self) -> Set[PoolingTask]:
    """CLS pooling produces one vector per sequence, usable by all tasks."""
    return {"encode", "embed", "classify", "score"}

ClassifierPooler

Bases: Pooler

A pooling layer for classification tasks.

This layer does the following: 1. Applies a pooling layer to the hidden states. 2. Applies a classification layer (if one is provided) to the pooled output. 3. Applies an activation function to the output.

Source code in vllm/model_executor/layers/pooler.py
class ClassifierPooler(Pooler):
    """A pooling layer for classification tasks.

    This layer does the following:
    1. Applies a pooling layer to the hidden states.
    2. Applies a classification layer (if one is provided).
    3. Applies an activation function to the output.
    """

    @staticmethod
    def act_fn_for_seq_cls(config: ModelConfig):
        # Resolve the sequence-classification activation from the HF config.
        return get_classification_activation_function(config.hf_config)

    @staticmethod
    def act_fn_for_cross_encoder(config: ModelConfig):
        # Resolve the cross-encoder scoring activation from the HF config.
        return get_cross_encoder_activation_function(config.hf_config)

    def __init__(
        self,
        pooling: PoolingFn,
        classifier: Optional[ClassifierFn],
        act_fn: Optional[PoolerActivation] = None,
    ) -> None:
        super().__init__()

        self.pooling = pooling
        self.classifier = classifier
        # Default activation: sigmoid/softmax classification.
        self.act_fn = act_fn or PoolerClassify()

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return {"classify", "score"}

    def forward(
        self,
        hidden_states: Union[torch.Tensor, list[torch.Tensor]],
        pooling_metadata: PoolingMetadata,
    ) -> PoolerOutput:
        pooled_data = self.pooling(hidden_states, pooling_metadata)

        if isinstance(pooled_data, list):
            pooled_data = torch.stack(pooled_data)
        # pooled_data shape: [batchsize, hidden_size]

        if self.classifier is not None:
            # apply classifier once on the full batch if possible
            if isinstance(pooled_data, torch.Tensor):
                pooled_data = self.classifier(pooled_data)
            # NOTE(review): pooled_data was stacked into a Tensor above, so
            # the two list branches below look unreachable — confirm whether
            # the stack above or these branches is the intended behavior.
            elif len({data.shape for data in pooled_data}) <= 1:
                pooled_data = self.classifier(torch.stack(pooled_data))
            else:
                pooled_data = [self.classifier(data) for data in pooled_data]

        # Per-request flags controlling whether the activation is applied.
        pooling_params = get_pooling_params(pooling_metadata)
        flags = [p.activation for p in pooling_params]

        if len(set(flags)) == 1:
            # Homogeneous batch: apply (or skip) the activation in one shot.
            scores = self.act_fn(pooled_data) if flags[0] else pooled_data
        else:
            scores = [
                self.act_fn(vecs) if f else vecs
                for vecs, f in zip(pooled_data, flags)
            ]

        return build_output(scores)

act_fn instance-attribute

act_fn = act_fn or PoolerClassify()

classifier instance-attribute

classifier = classifier

pooling instance-attribute

pooling = pooling

__init__

__init__(
    pooling: PoolingFn,
    classifier: Optional[ClassifierFn],
    act_fn: Optional[PoolerActivation] = None,
) -> None
Source code in vllm/model_executor/layers/pooler.py
def __init__(
    self,
    pooling: PoolingFn,
    classifier: Optional[ClassifierFn],
    act_fn: Optional[PoolerActivation] = None,
) -> None:
    """Store the pooling fn, optional classifier, and activation
    (defaults to ``PoolerClassify``)."""
    super().__init__()

    self.pooling = pooling
    self.classifier = classifier
    self.act_fn = act_fn or PoolerClassify()

act_fn_for_cross_encoder staticmethod

act_fn_for_cross_encoder(config: ModelConfig)
Source code in vllm/model_executor/layers/pooler.py
@staticmethod
def act_fn_for_cross_encoder(config: ModelConfig):
    # Resolve the cross-encoder scoring activation from the HF config.
    return get_cross_encoder_activation_function(config.hf_config)

act_fn_for_seq_cls staticmethod

act_fn_for_seq_cls(config: ModelConfig)
Source code in vllm/model_executor/layers/pooler.py
@staticmethod
def act_fn_for_seq_cls(config: ModelConfig):
    # Resolve the sequence-classification activation from the HF config.
    return get_classification_activation_function(config.hf_config)

forward

forward(
    hidden_states: Union[Tensor, list[Tensor]],
    pooling_metadata: PoolingMetadata,
) -> PoolerOutput
Source code in vllm/model_executor/layers/pooler.py
def forward(
    self,
    hidden_states: Union[torch.Tensor, list[torch.Tensor]],
    pooling_metadata: PoolingMetadata,
) -> PoolerOutput:
    """Pool, optionally classify, then apply the per-request activation."""
    pooled_data = self.pooling(hidden_states, pooling_metadata)

    if isinstance(pooled_data, list):
        pooled_data = torch.stack(pooled_data)
    # pooled_data shape: [batchsize, hidden_size]

    if self.classifier is not None:
        # apply classifier once on the full batch if possible
        if isinstance(pooled_data, torch.Tensor):
            pooled_data = self.classifier(pooled_data)
        # NOTE(review): pooled_data was stacked into a Tensor above, so the
        # two list branches below look unreachable — confirm upstream.
        elif len({data.shape for data in pooled_data}) <= 1:
            pooled_data = self.classifier(torch.stack(pooled_data))
        else:
            pooled_data = [self.classifier(data) for data in pooled_data]

    # Per-request flags controlling whether the activation is applied.
    pooling_params = get_pooling_params(pooling_metadata)
    flags = [p.activation for p in pooling_params]

    if len(set(flags)) == 1:
        # Homogeneous batch: apply (or skip) the activation in one shot.
        scores = self.act_fn(pooled_data) if flags[0] else pooled_data
    else:
        scores = [
            self.act_fn(vecs) if f else vecs
            for vecs, f in zip(pooled_data, flags)
        ]

    return build_output(scores)

get_supported_tasks

get_supported_tasks() -> Set[PoolingTask]
Source code in vllm/model_executor/layers/pooler.py
def get_supported_tasks(self) -> Set[PoolingTask]:
    """This pooler only produces classification/scoring outputs."""
    return {"classify", "score"}

DispatchPooler

Bases: Pooler

Dispatches calls to a sub-pooler based on the pooling task.

Source code in vllm/model_executor/layers/pooler.py
class DispatchPooler(Pooler):
    """Dispatches calls to a sub-pooler based on the pooling task."""

    def __init__(self, poolers_by_task: Mapping[PoolingTask, Pooler]) -> None:
        super().__init__()

        # Fail fast if a sub-pooler is registered under a task it can't serve.
        for task, pooler in poolers_by_task.items():
            if task not in pooler.get_supported_tasks():
                raise ValueError(
                    f"{pooler=} does not support {task=}. "
                    f"Supported tasks: {pooler.get_supported_tasks()}")

        self.poolers_by_task = poolers_by_task

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return set(self.poolers_by_task)

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        # Delegate to the sub-pooler responsible for this task.
        return self.poolers_by_task[task].get_pooling_updates(task)

    def forward(
        self,
        hidden_states: Union[torch.Tensor, list[torch.Tensor]],
        pooling_metadata: PoolingMetadata,
    ) -> PoolerOutput:
        poolers_by_task = self.poolers_by_task

        outputs = list[PoolingSequenceGroupOutput]()
        offset = 0
        # groupby only merges *adjacent* equal keys, so this assumes requests
        # sharing a task are contiguous in the batch — TODO confirm upstream.
        for task, group in groupby(get_tasks(pooling_metadata)):
            if not (pooler := poolers_by_task.get(task)):
                raise ValueError(
                    f"Unsupported task: {task} "
                    f"Supported tasks: {self.get_supported_tasks()}")

            # Run the sub-pooler on the metadata slice for this task group.
            num_items = len(list(group))
            group_output: PoolerOutput = pooler(
                hidden_states,
                pooling_metadata[offset:offset + num_items],
            )

            outputs.extend(group_output.outputs)
            offset += num_items

        return PoolerOutput(outputs)

poolers_by_task instance-attribute

poolers_by_task = poolers_by_task

__init__

__init__(
    poolers_by_task: Mapping[PoolingTask, Pooler],
) -> None
Source code in vllm/model_executor/layers/pooler.py
def __init__(self, poolers_by_task: Mapping[PoolingTask, Pooler]) -> None:
    """Validate that each sub-pooler supports its assigned task."""
    super().__init__()

    for task, pooler in poolers_by_task.items():
        if task not in pooler.get_supported_tasks():
            raise ValueError(
                f"{pooler=} does not support {task=}. "
                f"Supported tasks: {pooler.get_supported_tasks()}")

    self.poolers_by_task = poolers_by_task

forward

forward(
    hidden_states: Union[Tensor, list[Tensor]],
    pooling_metadata: PoolingMetadata,
) -> PoolerOutput
Source code in vllm/model_executor/layers/pooler.py
def forward(
    self,
    hidden_states: Union[torch.Tensor, list[torch.Tensor]],
    pooling_metadata: PoolingMetadata,
) -> PoolerOutput:
    """Route each contiguous run of same-task requests to its sub-pooler."""
    poolers_by_task = self.poolers_by_task

    outputs = list[PoolingSequenceGroupOutput]()
    offset = 0
    # groupby merges only adjacent equal keys; assumes same-task requests
    # are contiguous — TODO confirm with the scheduler.
    for task, group in groupby(get_tasks(pooling_metadata)):
        if not (pooler := poolers_by_task.get(task)):
            raise ValueError(
                f"Unsupported task: {task} "
                f"Supported tasks: {self.get_supported_tasks()}")

        num_items = len(list(group))
        group_output: PoolerOutput = pooler(
            hidden_states,
            pooling_metadata[offset:offset + num_items],
        )

        outputs.extend(group_output.outputs)
        offset += num_items

    return PoolerOutput(outputs)

get_pooling_updates

get_pooling_updates(
    task: PoolingTask,
) -> PoolingParamsUpdate
Source code in vllm/model_executor/layers/pooler.py
def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
    # Delegate to the sub-pooler responsible for this task.
    return self.poolers_by_task[task].get_pooling_updates(task)

get_supported_tasks

get_supported_tasks() -> Set[PoolingTask]
Source code in vllm/model_executor/layers/pooler.py
def get_supported_tasks(self) -> Set[PoolingTask]:
    """Supported tasks are exactly the registered dispatch keys."""
    return set(self.poolers_by_task)

EmbeddingPoolerHead

Bases: PoolerHead

Source code in vllm/model_executor/layers/pooler.py
class EmbeddingPoolerHead(PoolerHead):
    """Pooler head for embedding outputs: optional SentenceTransformers
    projector, optional matryoshka dimension truncation, optional L2
    normalization."""

    def __init__(self) -> None:
        super().__init__(activation=PoolerNormalize())

        # Load ST projector if available
        # (imported lazily, presumably to avoid an import cycle — confirm
        # before hoisting to module level)
        from vllm.config import get_current_vllm_config
        from vllm.model_executor.models.adapters import _load_st_projector

        vllm_config = get_current_vllm_config()
        self.projector = _load_st_projector(
            vllm_config.model_config) if vllm_config else None

    def forward(self, pooled_data: Union[list[torch.Tensor], torch.Tensor],
                pooling_metadata: PoolingMetadata):

        # Apply ST projector
        if self.projector is not None:
            projector = cast(nn.Module, self.projector)

            # Run the projector in float32, then cast back to input dtype.
            def _proj(x: torch.Tensor) -> torch.Tensor:
                orig_dtype = x.dtype
                y = projector(x.to(torch.float32))
                return y.to(orig_dtype)

            if isinstance(pooled_data, torch.Tensor):
                pooled_data = _proj(pooled_data)
            else:
                pooled_data = [_proj(t) for t in pooled_data]

        pooling_params = get_pooling_params(pooling_metadata)

        if isinstance(pooled_data, list):
            pooled_data = torch.stack(pooled_data)
        # pooled_data shape: [batchsize, embedding_dimension]

        # for matryoshka representation
        dimensions_list = [
            pooling_param.dimensions for pooling_param in pooling_params
        ]
        if any(d is not None for d in dimensions_list):
            # change the output dimension
            assert len(pooled_data) == len(dimensions_list)
            if len(set(dimensions_list)) == 1 and not isinstance(
                    pooled_data, list):
                # if all dimensions are the same
                d = dimensions_list[0]
                pooled_data = pooled_data[..., :d]
            else:
                # Truncate each embedding to its requested dimension;
                # None keeps the full embedding.
                pooled_data = [
                    vecs if d is None else vecs[..., :d]
                    for vecs, d in zip(pooled_data, dimensions_list)
                ]

        # for normalize
        flags = [p.normalize for p in pooling_params]
        if len(set(flags)) == 1:
            # Homogeneous batch: normalize all at once (or not at all).
            if flags[0]:
                pooled_data = self.activation(pooled_data)
        else:
            pooled_data = [
                self.activation(vecs) if f else vecs
                for vecs, f in zip(pooled_data, flags)
            ]

        return pooled_data

projector instance-attribute

projector = (
    _load_st_projector(model_config)
    if vllm_config
    else None
)

__init__

__init__() -> None
Source code in vllm/model_executor/layers/pooler.py
def __init__(self) -> None:
    """Set up L2 normalization and, if configured, an ST projector."""
    super().__init__(activation=PoolerNormalize())

    # Load ST projector if available
    # (lazy imports, presumably to avoid an import cycle — confirm)
    from vllm.config import get_current_vllm_config
    from vllm.model_executor.models.adapters import _load_st_projector

    vllm_config = get_current_vllm_config()
    self.projector = _load_st_projector(
        vllm_config.model_config) if vllm_config else None

forward

forward(
    pooled_data: Union[list[Tensor], Tensor],
    pooling_metadata: PoolingMetadata,
)
Source code in vllm/model_executor/layers/pooler.py
def forward(self, pooled_data: Union[list[torch.Tensor], torch.Tensor],
            pooling_metadata: PoolingMetadata):
    """Project (optional), truncate (matryoshka, optional), normalize."""

    # Apply ST projector
    if self.projector is not None:
        projector = cast(nn.Module, self.projector)

        # Run the projector in float32, then cast back to input dtype.
        def _proj(x: torch.Tensor) -> torch.Tensor:
            orig_dtype = x.dtype
            y = projector(x.to(torch.float32))
            return y.to(orig_dtype)

        if isinstance(pooled_data, torch.Tensor):
            pooled_data = _proj(pooled_data)
        else:
            pooled_data = [_proj(t) for t in pooled_data]

    pooling_params = get_pooling_params(pooling_metadata)

    if isinstance(pooled_data, list):
        pooled_data = torch.stack(pooled_data)
    # pooled_data shape: [batchsize, embedding_dimension]

    # for matryoshka representation
    dimensions_list = [
        pooling_param.dimensions for pooling_param in pooling_params
    ]
    if any(d is not None for d in dimensions_list):
        # change the output dimension
        assert len(pooled_data) == len(dimensions_list)
        if len(set(dimensions_list)) == 1 and not isinstance(
                pooled_data, list):
            # if all dimensions are the same
            d = dimensions_list[0]
            pooled_data = pooled_data[..., :d]
        else:
            # Truncate each embedding to its requested dimension;
            # None keeps the full embedding.
            pooled_data = [
                vecs if d is None else vecs[..., :d]
                for vecs, d in zip(pooled_data, dimensions_list)
            ]

    # for normalize
    flags = [p.normalize for p in pooling_params]
    if len(set(flags)) == 1:
        # Homogeneous batch: normalize all at once (or not at all).
        if flags[0]:
            pooled_data = self.activation(pooled_data)
    else:
        pooled_data = [
            self.activation(vecs) if f else vecs
            for vecs, f in zip(pooled_data, flags)
        ]

    return pooled_data

LambdaPoolerActivation

Bases: PoolerActivation

Source code in vllm/model_executor/layers/pooler.py
class LambdaPoolerActivation(PoolerActivation):
    """Wraps an arbitrary tensor-to-tensor callable as a pooler activation."""

    def __init__(self, fn: Callable[[torch.Tensor], torch.Tensor]):
        super().__init__()

        self.fn = fn

    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        return self.fn(pooled_data)

fn instance-attribute

fn = fn

__init__

__init__(fn: Callable[[Tensor], Tensor])
Source code in vllm/model_executor/layers/pooler.py
def __init__(self, fn: Callable[[torch.Tensor], torch.Tensor]):
    """Store the callable applied to each pooled chunk."""
    super().__init__()

    self.fn = fn

forward_chunk

forward_chunk(pooled_data: Tensor) -> Tensor
Source code in vllm/model_executor/layers/pooler.py
def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
    # Delegate to the wrapped callable.
    return self.fn(pooled_data)

LastPool

Bases: PoolingMethod

Source code in vllm/model_executor/layers/pooler.py
class LastPool(PoolingMethod):
    """Pools each sequence down to the hidden state of its last token."""

    def get_supported_tasks(self) -> Set[PoolingTask]:
        # One vector per sequence, usable by every pooling task.
        return {"encode", "embed", "classify", "score"}

    def forward_all(
        self,
        hidden_states: torch.Tensor,
        pooling_cursor: PoolingCursor,
    ) -> Union[list[torch.Tensor], torch.Tensor]:
        # Gather each sequence's final token hidden state in one indexing op.
        return hidden_states[pooling_cursor.last_token_indices_gpu]

forward_all

forward_all(
    hidden_states: Tensor, pooling_cursor: PoolingCursor
) -> Union[list[Tensor], Tensor]
Source code in vllm/model_executor/layers/pooler.py
def forward_all(
    self,
    hidden_states: torch.Tensor,
    pooling_cursor: PoolingCursor,
) -> Union[list[torch.Tensor], torch.Tensor]:
    """Select each sequence's last-token hidden state."""
    return hidden_states[pooling_cursor.last_token_indices_gpu]

get_supported_tasks

get_supported_tasks() -> Set[PoolingTask]
Source code in vllm/model_executor/layers/pooler.py
def get_supported_tasks(self) -> Set[PoolingTask]:
    """LAST pooling produces one vector per sequence, usable by all tasks."""
    return {"encode", "embed", "classify", "score"}

MeanPool

Bases: PoolingMethod

Source code in vllm/model_executor/layers/pooler.py
class MeanPool(PoolingMethod):
    """Pools each sequence by averaging its tokens' hidden states."""

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return {"encode", "embed", "classify", "score"}

    def forward_all(
        self,
        hidden_states: torch.Tensor,
        pooling_cursor: PoolingCursor,
    ) -> Union[list[torch.Tensor], torch.Tensor]:

        # The mean requires every prompt token's hidden state.
        assert not pooling_cursor.is_partial_prefill(), \
            "partial prefill not supported with MEAN pooling"

        prompt_lens = pooling_cursor.prompt_lens_cpu.to(hidden_states.device,
                                                        non_blocking=True)

        # Use float32 for torch.cumsum in MeanPool,
        # otherwise precision will be lost significantly.
        cumsum = torch.cumsum(hidden_states, dim=0, dtype=torch.float32)

        # Inclusive range sum over tokens [start, end]:
        # cumsum[end] - cumsum[start] + hidden_states[start].
        start_indices = pooling_cursor.first_token_indices_gpu
        end_indices = pooling_cursor.last_token_indices_gpu
        return (cumsum[end_indices] - cumsum[start_indices] +
                hidden_states[start_indices]) / prompt_lens.unsqueeze(1)

forward_all

forward_all(
    hidden_states: Tensor, pooling_cursor: PoolingCursor
) -> Union[list[Tensor], Tensor]
Source code in vllm/model_executor/layers/pooler.py
def forward_all(
    self,
    hidden_states: torch.Tensor,
    pooling_cursor: PoolingCursor,
) -> Union[list[torch.Tensor], torch.Tensor]:
    """Average each sequence's token hidden states via a prefix-sum."""

    assert not pooling_cursor.is_partial_prefill(), \
        "partial prefill not supported with MEAN pooling"

    prompt_lens = pooling_cursor.prompt_lens_cpu.to(hidden_states.device,
                                                    non_blocking=True)

    # Use float32 for torch.cumsum in MeanPool,
    # otherwise precision will be lost significantly.
    cumsum = torch.cumsum(hidden_states, dim=0, dtype=torch.float32)

    # Inclusive range sum: cumsum[end] - cumsum[start] + h[start].
    start_indices = pooling_cursor.first_token_indices_gpu
    end_indices = pooling_cursor.last_token_indices_gpu
    return (cumsum[end_indices] - cumsum[start_indices] +
            hidden_states[start_indices]) / prompt_lens.unsqueeze(1)

get_supported_tasks

get_supported_tasks() -> Set[PoolingTask]
Source code in vllm/model_executor/layers/pooler.py
def get_supported_tasks(self) -> Set[PoolingTask]:
    """MEAN pooling produces one vector per sequence, usable by all tasks."""
    return {"encode", "embed", "classify", "score"}

Pooler

Bases: Module, ABC

The interface required for all poolers used in pooling models in vLLM.

Source code in vllm/model_executor/layers/pooler.py
class Pooler(nn.Module, ABC):
    """The interface required for all poolers used in pooling models in vLLM."""

    @staticmethod
    def for_encode(pooler_config: PoolerConfig):
        # STEP pooling has its own dedicated pooler implementation.
        if pooler_config.pooling_type == "STEP":
            return StepPooler()

        # Otherwise "encode" always uses ALL pooling (per-token outputs),
        # regardless of the configured pooling type.
        resolved_config = ResolvedPoolingConfig(task="encode",
                                                pooling_type=PoolingType.ALL)

        return SimplePooler.from_config(resolved_config)

    @staticmethod
    def for_embed(pooler_config: PoolerConfig):
        # Build an embedding pooler from the resolved config.
        resolved_config = ResolvedPoolingConfig.from_config(
            task="embed",
            pooler_config=pooler_config,
        )

        return SimplePooler.from_config(resolved_config)

    @staticmethod
    def for_classify(
        pooler_config: PoolerConfig,
        classifier: Optional[ClassifierFn],
    ):
        # Build a classification pooler: resolved pooling method + classifier.
        resolved_config = ResolvedPoolingConfig.from_config(
            task="classify",
            pooler_config=pooler_config,
        )

        pooling = PoolingMethod.from_pooling_type(resolved_config.pooling_type)

        return ClassifierPooler(
            pooling=pooling,
            classifier=classifier,
        )

    @abstractmethod
    def get_supported_tasks(self) -> Set[PoolingTask]:
        """Determine which pooling tasks are supported."""
        raise NotImplementedError

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        """
        Construct the updated pooling parameters to use for a supported task.
        """
        return PoolingParamsUpdate()

    @abstractmethod
    def forward(
        self,
        hidden_states: Union[list[torch.Tensor], torch.Tensor],
        pooling_metadata: PoolingMetadata,
    ) -> PoolerOutput:
        raise NotImplementedError

for_classify staticmethod

for_classify(
    pooler_config: PoolerConfig,
    classifier: Optional[ClassifierFn],
)
Source code in vllm/model_executor/layers/pooler.py
@staticmethod
def for_classify(
    pooler_config: PoolerConfig,
    classifier: Optional[ClassifierFn],
):
    """Build a ClassifierPooler from the config and optional classifier."""
    resolved_config = ResolvedPoolingConfig.from_config(
        task="classify",
        pooler_config=pooler_config,
    )

    pooling = PoolingMethod.from_pooling_type(resolved_config.pooling_type)

    return ClassifierPooler(
        pooling=pooling,
        classifier=classifier,
    )

for_embed staticmethod

for_embed(pooler_config: PoolerConfig)
Source code in vllm/model_executor/layers/pooler.py
@staticmethod
def for_embed(pooler_config: PoolerConfig):
    """Build an embedding pooler from the resolved pooling config."""
    resolved_config = ResolvedPoolingConfig.from_config(
        task="embed",
        pooler_config=pooler_config,
    )

    return SimplePooler.from_config(resolved_config)

for_encode staticmethod

for_encode(pooler_config: PoolerConfig)
Source code in vllm/model_executor/layers/pooler.py
@staticmethod
def for_encode(pooler_config: PoolerConfig):
    """Build an encode pooler; STEP gets a dedicated pooler, else ALL."""
    if pooler_config.pooling_type == "STEP":
        return StepPooler()

    resolved_config = ResolvedPoolingConfig(task="encode",
                                            pooling_type=PoolingType.ALL)

    return SimplePooler.from_config(resolved_config)

forward abstractmethod

forward(
    hidden_states: Union[list[Tensor], Tensor],
    pooling_metadata: PoolingMetadata,
) -> PoolerOutput
Source code in vllm/model_executor/layers/pooler.py
@abstractmethod
def forward(
    self,
    hidden_states: Union[list[torch.Tensor], torch.Tensor],
    pooling_metadata: PoolingMetadata,
) -> PoolerOutput:
    """Pool the hidden states according to the metadata."""
    raise NotImplementedError

get_pooling_updates

get_pooling_updates(
    task: PoolingTask,
) -> PoolingParamsUpdate

Construct the updated pooling parameters to use for a supported task.

Source code in vllm/model_executor/layers/pooler.py
def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
    """
    Construct the updated pooling parameters to use for a supported task.
    """
    # Default: no parameter updates; subclasses may override.
    return PoolingParamsUpdate()

get_supported_tasks abstractmethod

get_supported_tasks() -> Set[PoolingTask]

Determine which pooling tasks are supported.

Source code in vllm/model_executor/layers/pooler.py
@abstractmethod
def get_supported_tasks(self) -> Set[PoolingTask]:
    """Determine which pooling tasks are supported."""
    raise NotImplementedError

PoolerActivation

Bases: BasePoolerActivation

Source code in vllm/model_executor/layers/pooler.py
class PoolerActivation(BasePoolerActivation):
    """Activation applied chunk-wise to a batched tensor or list of tensors."""

    @staticmethod
    def wraps(module: nn.Module):
        # Map well-known nn activations onto the dedicated pooler wrappers;
        # anything else is wrapped as an opaque callable.
        if isinstance(module, nn.Identity):
            return PoolerIdentity()
        if isinstance(module, (nn.Sigmoid, nn.Softmax)):
            return PoolerClassify()

        return LambdaPoolerActivation(module)

    @abstractmethod
    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError

    def forward(self, pooled_data: _T) -> _T:
        # Apply the chunk activation element-wise over lists.
        if isinstance(pooled_data, list):
            return [self.forward_chunk(data) for data in pooled_data]

        return self.forward_chunk(pooled_data)

forward

forward(pooled_data: _T) -> _T
Source code in vllm/model_executor/layers/pooler.py
def forward(self, pooled_data: _T) -> _T:
    """Apply forward_chunk to a tensor, or element-wise over a list."""
    if isinstance(pooled_data, list):
        return [self.forward_chunk(data) for data in pooled_data]

    return self.forward_chunk(pooled_data)

forward_chunk abstractmethod

forward_chunk(pooled_data: Tensor) -> Tensor
Source code in vllm/model_executor/layers/pooler.py
@abstractmethod
def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
    """Apply the activation to a single tensor chunk."""
    raise NotImplementedError

wraps staticmethod

wraps(module: Module)
Source code in vllm/model_executor/layers/pooler.py
@staticmethod
def wraps(module: nn.Module):
    """Adapt an nn activation module to the PoolerActivation interface."""
    if isinstance(module, nn.Identity):
        return PoolerIdentity()
    if isinstance(module, (nn.Sigmoid, nn.Softmax)):
        return PoolerClassify()

    return LambdaPoolerActivation(module)

PoolerClassify

Bases: PoolerActivation

Source code in vllm/model_executor/layers/pooler.py
class PoolerClassify(PoolerActivation):
    """Sigmoid for single-logit outputs, softmax for multi-class outputs."""

    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        num_labels = pooled_data.shape[-1]
        if num_labels < 2:
            # Single logit: treat as binary classification.
            # Compute in float32 for stability, then cast back.
            return F.sigmoid(pooled_data.float()).to(pooled_data.dtype)

        return F.softmax(pooled_data.float(), dim=-1).to(pooled_data.dtype)

forward_chunk

forward_chunk(pooled_data: Tensor) -> Tensor
Source code in vllm/model_executor/layers/pooler.py
def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
    """Sigmoid for a single logit, softmax otherwise (float32 internally)."""
    num_labels = pooled_data.shape[-1]
    if num_labels < 2:
        return F.sigmoid(pooled_data.float()).to(pooled_data.dtype)

    return F.softmax(pooled_data.float(), dim=-1).to(pooled_data.dtype)

PoolerHead

Bases: Module

Source code in vllm/model_executor/layers/pooler.py
class PoolerHead(nn.Module):
    """Applies an activation to pooled data; subclasses may add extra steps."""

    def __init__(self, activation: PoolerActivation) -> None:
        super().__init__()
        self.activation = activation

    def forward(self, pooled_data: Union[list[torch.Tensor], torch.Tensor],
                pooling_metadata: PoolingMetadata):

        # Base head ignores the metadata and just applies the activation.
        return self.activation(pooled_data)

activation instance-attribute

activation = activation

__init__

__init__(activation: PoolerActivation) -> None
Source code in vllm/model_executor/layers/pooler.py
def __init__(self, activation: PoolerActivation) -> None:
    """Store the activation applied in forward."""
    super().__init__()
    self.activation = activation

forward

forward(
    pooled_data: Union[list[Tensor], Tensor],
    pooling_metadata: PoolingMetadata,
)
Source code in vllm/model_executor/layers/pooler.py
def forward(self, pooled_data: Union[list[torch.Tensor], torch.Tensor],
            pooling_metadata: PoolingMetadata):
    """Apply the configured activation; metadata is unused in the base head."""

    return self.activation(pooled_data)

PoolerIdentity

Bases: PoolerActivation

Source code in vllm/model_executor/layers/pooler.py
class PoolerIdentity(PoolerActivation):
    """No-op activation: returns pooled data unchanged."""

    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        return pooled_data

forward_chunk

forward_chunk(pooled_data: Tensor) -> Tensor
Source code in vllm/model_executor/layers/pooler.py
def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
    # Identity: no transformation.
    return pooled_data

PoolerMultiLabelClassify

Bases: PoolerActivation

Source code in vllm/model_executor/layers/pooler.py
class PoolerMultiLabelClassify(PoolerActivation):
    """Element-wise sigmoid over all logits (multi-label classification)."""

    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        # Compute in float32 for stability, then cast back.
        return F.sigmoid(pooled_data.float()).to(pooled_data.dtype)

forward_chunk

forward_chunk(pooled_data: Tensor) -> Tensor
Source code in vllm/model_executor/layers/pooler.py
def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
    """Element-wise sigmoid in float32, cast back to the input dtype."""
    return F.sigmoid(pooled_data.float()).to(pooled_data.dtype)

PoolerNormalize

Bases: PoolerActivation

Source code in vllm/model_executor/layers/pooler.py
class PoolerNormalize(PoolerActivation):
    """L2-normalizes pooled embeddings along the last dimension."""

    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        # Normalize in float32 for accuracy, then cast back.
        x = F.normalize(pooled_data.float(), p=2, dim=-1)
        return x.to(pooled_data.dtype)

forward_chunk

forward_chunk(pooled_data: Tensor) -> Tensor
Source code in vllm/model_executor/layers/pooler.py
def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
    """L2-normalize along the last dim in float32, cast back to input dtype."""
    x = F.normalize(pooled_data.float(), p=2, dim=-1)
    return x.to(pooled_data.dtype)

PoolerScore

Bases: PoolerActivation

Source code in vllm/model_executor/layers/pooler.py
class PoolerScore(PoolerActivation):
    """Sigmoid for single-logit scores; multi-label logits pass through raw."""

    def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
        num_labels = pooled_data.shape[-1]
        if num_labels < 2:
            # Single logit: squash to a [0, 1] score (float32 internally).
            return F.sigmoid(pooled_data.float()).to(pooled_data.dtype)

        return pooled_data

forward_chunk

forward_chunk(pooled_data: Tensor) -> Tensor
Source code in vllm/model_executor/layers/pooler.py
def forward_chunk(self, pooled_data: torch.Tensor) -> torch.Tensor:
    """Sigmoid single-label scores; pass multi-label logits through.

    Uses torch.sigmoid instead of the deprecated F.sigmoid alias.
    """
    num_labels = pooled_data.shape[-1]
    if num_labels < 2:
        # Binary score: squash in float32, then restore the dtype.
        return torch.sigmoid(pooled_data.float()).to(pooled_data.dtype)

    return pooled_data

PoolingMethod

Bases: Module, ABC

Source code in vllm/model_executor/layers/pooler.py
class PoolingMethod(nn.Module, ABC):
    """Abstract base class for token-pooling strategies."""

    @staticmethod
    def from_pooling_type(pooling_type: PoolingType) -> "PoolingMethod":
        """Instantiate the concrete pooling method for `pooling_type`."""
        factories = {
            PoolingType.LAST: LastPool,
            PoolingType.ALL: AllPool,
            PoolingType.CLS: CLSPool,
            PoolingType.MEAN: MeanPool,
        }
        factory = factories.get(pooling_type)
        if factory is None:
            # Note: PoolingType.STEP is intentionally not handled here.
            raise NotImplementedError(f"Unsupported method: {pooling_type}")
        return factory()

    @abstractmethod
    def get_supported_tasks(self) -> Set[PoolingTask]:
        """Return the pooling tasks this method can serve."""
        raise NotImplementedError

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        """Default: no extra pooling-parameter requirements."""
        return PoolingParamsUpdate()

    @abstractmethod
    def forward_all(
        self,
        hidden_states: torch.Tensor,
        pooling_cursor: PoolingCursor,
    ) -> Union[list[torch.Tensor], torch.Tensor]:
        """Pool the flat token states per the cursor's request layout."""
        raise NotImplementedError

    def forward(
        self,
        hidden_states: torch.Tensor,
        pooling_metadata: PoolingMetadata,
    ) -> Union[list[torch.Tensor], torch.Tensor]:
        """Pool `hidden_states` as directed by the metadata's cursor."""
        return self.forward_all(hidden_states,
                                pooling_metadata.pooling_cursor)

forward

forward(
    hidden_states: Tensor, pooling_metadata: PoolingMetadata
) -> Union[list[Tensor], Tensor]
Source code in vllm/model_executor/layers/pooler.py
def forward(
    self,
    hidden_states: torch.Tensor,
    pooling_metadata: PoolingMetadata,
) -> Union[list[torch.Tensor], torch.Tensor]:
    """Pool `hidden_states` as directed by the metadata's cursor."""
    cursor = pooling_metadata.pooling_cursor
    return self.forward_all(hidden_states, cursor)

forward_all abstractmethod

forward_all(
    hidden_states: Tensor, pooling_cursor: PoolingCursor
) -> Union[list[Tensor], Tensor]
Source code in vllm/model_executor/layers/pooler.py
@abstractmethod
def forward_all(
    self,
    hidden_states: torch.Tensor,
    pooling_cursor: PoolingCursor,
) -> Union[list[torch.Tensor], torch.Tensor]:
    """Pool the flat token hidden states according to the per-request
    layout described by `pooling_cursor`; implemented by subclasses."""
    raise NotImplementedError

from_pooling_type staticmethod

from_pooling_type(
    pooling_type: PoolingType,
) -> PoolingMethod
Source code in vllm/model_executor/layers/pooler.py
@staticmethod
def from_pooling_type(pooling_type: PoolingType) -> "PoolingMethod":
    """Instantiate the concrete pooling method for `pooling_type`."""
    factories = {
        PoolingType.LAST: LastPool,
        PoolingType.ALL: AllPool,
        PoolingType.CLS: CLSPool,
        PoolingType.MEAN: MeanPool,
    }
    factory = factories.get(pooling_type)
    if factory is None:
        # Note: PoolingType.STEP is intentionally not handled here.
        raise NotImplementedError(f"Unsupported method: {pooling_type}")
    return factory()

get_pooling_updates

get_pooling_updates(
    task: PoolingTask,
) -> PoolingParamsUpdate
Source code in vllm/model_executor/layers/pooler.py
def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
    """Return parameter updates for `task`; default has no requirements."""
    return PoolingParamsUpdate()

get_supported_tasks abstractmethod

get_supported_tasks() -> Set[PoolingTask]
Source code in vllm/model_executor/layers/pooler.py
@abstractmethod
def get_supported_tasks(self) -> Set[PoolingTask]:
    """Return the set of pooling tasks this method supports."""
    raise NotImplementedError

PoolingParamsUpdate dataclass

Source code in vllm/model_executor/layers/pooler.py
@dataclass(frozen=True)
class PoolingParamsUpdate:
    # Immutable bundle of pooler-driven settings applied to PoolingParams.
    requires_token_ids: bool = False
    """Set this flag to enable `get_prompt_token_ids` for your pooler."""

    def apply(self, params: PoolingParams) -> None:
        """Copy this update's flags onto `params` in place."""
        params.requires_token_ids = self.requires_token_ids

requires_token_ids class-attribute instance-attribute

requires_token_ids: bool = False

Set this flag to enable get_prompt_token_ids for your pooler.

__init__

__init__(requires_token_ids: bool = False) -> None

apply

apply(params: PoolingParams) -> None
Source code in vllm/model_executor/layers/pooler.py
def apply(self, params: PoolingParams) -> None:
    """Copy this update's flags onto `params` in place."""
    params.requires_token_ids = self.requires_token_ids

PoolingType

Bases: IntEnum

Enumeration for different types of pooling methods.

Source code in vllm/model_executor/layers/pooler.py
class PoolingType(IntEnum):
    """Enumeration for different types of pooling methods."""
    LAST = 0  # last token's hidden state (LastPool)
    ALL = 1  # every token's hidden state (AllPool)
    CLS = 2  # first token's hidden state (CLSPool; presumably [CLS])
    STEP = 3  # step-wise pooling; not handled by from_pooling_type (see StepPooler)
    MEAN = 4  # mean over all tokens (MeanPool)

ALL class-attribute instance-attribute

ALL = 1

CLS class-attribute instance-attribute

CLS = 2

LAST class-attribute instance-attribute

LAST = 0

MEAN class-attribute instance-attribute

MEAN = 4

STEP class-attribute instance-attribute

STEP = 3

ResolvedPoolingConfig dataclass

Source code in vllm/model_executor/layers/pooler.py
@dataclass(frozen=True)
class ResolvedPoolingConfig:
    """Pairs a pooling task with the concrete pooling type to use."""
    pooling_type: PoolingType
    task: PoolingTask

    @classmethod
    def from_config(
        cls,
        task: PoolingTask,
        pooler_config: PoolerConfig,
    ) -> "ResolvedPoolingConfig":
        """Resolve the pooling type named in `pooler_config` for `task`."""
        assert pooler_config.pooling_type is not None
        resolved_type = PoolingType[pooler_config.pooling_type]
        return cls(task=task, pooling_type=resolved_type)

pooling_type instance-attribute

pooling_type: PoolingType

task instance-attribute

__init__

__init__(
    pooling_type: PoolingType, task: PoolingTask
) -> None

from_config classmethod

from_config(
    task: PoolingTask, pooler_config: PoolerConfig
) -> ResolvedPoolingConfig
Source code in vllm/model_executor/layers/pooler.py
@classmethod
def from_config(
    cls,
    task: PoolingTask,
    pooler_config: PoolerConfig,
) -> "ResolvedPoolingConfig":
    """Resolve the pooling type named in `pooler_config` for `task`."""
    assert pooler_config.pooling_type is not None
    resolved_type = PoolingType[pooler_config.pooling_type]
    return cls(task=task, pooling_type=resolved_type)

RewardPoolerHead

Bases: PoolerHead

Source code in vllm/model_executor/layers/pooler.py
class RewardPoolerHead(PoolerHead):
    """Pooler head for reward outputs with optional per-request softmax."""

    def __init__(self) -> None:
        super().__init__(activation=PoolerClassify())

    def forward(self, pooled_data: Union[list[torch.Tensor], torch.Tensor],
                pooling_metadata: PoolingMetadata):
        """Apply the softmax activation to the requests that ask for it."""
        pooling_params = get_pooling_params(pooling_metadata)

        # for softmax
        softmax_flags = [p.softmax for p in pooling_params]
        if len(set(softmax_flags)) == 1:
            # Uniform across the batch: one vectorized pass, or none.
            if softmax_flags[0]:
                pooled_data = self.activation(pooled_data)
        else:
            # Mixed batch: activate request-by-request.
            pooled_data = [
                self.activation(data) if flag else data
                for data, flag in zip(pooled_data, softmax_flags)
            ]

        return pooled_data

__init__

__init__() -> None
Source code in vllm/model_executor/layers/pooler.py
def __init__(self) -> None:
    # PoolerClassify supplies the softmax applied on demand in forward().
    super().__init__(activation=PoolerClassify())

forward

forward(
    pooled_data: Union[list[Tensor], Tensor],
    pooling_metadata: PoolingMetadata,
)
Source code in vllm/model_executor/layers/pooler.py
def forward(self, pooled_data: Union[list[torch.Tensor], torch.Tensor],
            pooling_metadata: PoolingMetadata):
    """Apply the softmax activation to the requests that ask for it."""
    pooling_params = get_pooling_params(pooling_metadata)

    # for softmax
    softmax_flags = [p.softmax for p in pooling_params]
    if len(set(softmax_flags)) == 1:
        # Uniform across the batch: one vectorized pass, or none.
        if softmax_flags[0]:
            pooled_data = self.activation(pooled_data)
    else:
        # Mixed batch: activate request-by-request.
        pooled_data = [
            self.activation(data) if flag else data
            for data, flag in zip(pooled_data, softmax_flags)
        ]

    return pooled_data

SimplePooler

Bases: Pooler

A layer that pools specific information from hidden states.

This layer does the following:

1. Extracts specific tokens or aggregates data based on the pooling method.
2. Normalizes the output if specified.
3. Returns structured results as PoolerOutput.

Source code in vllm/model_executor/layers/pooler.py
class SimplePooler(Pooler):
    """A layer that pools specific information from hidden states.

    This layer does the following:
    1. Extracts specific tokens or aggregates data based on pooling method.
    2. Normalizes output if specified.
    3. Returns structured results as `PoolerOutput`.
    """

    @classmethod
    def from_config(
        cls,
        pooler_config: ResolvedPoolingConfig,
    ) -> "SimplePooler":
        """Build a pooler whose method and head match `pooler_config`."""
        pooling = PoolingMethod.from_pooling_type(pooler_config.pooling_type)
        head_factories = {
            "embed": EmbeddingPoolerHead,
            "encode": RewardPoolerHead,
        }
        head_factory = head_factories.get(pooler_config.task)
        if head_factory is None:
            raise NotImplementedError(f"Unknown task: {pooler_config.task}")
        return cls(pooling, head_factory())

    def __init__(self, pooling: PoolingMethod, head: PoolerHead) -> None:
        """Compose a pooling method with a pooler head."""
        super().__init__()

        self.pooling = pooling
        self.head = head

    def get_supported_tasks(self) -> Set[PoolingTask]:
        """Delegate to the underlying pooling method."""
        return self.pooling.get_supported_tasks()

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        """Delegate to the underlying pooling method."""
        return self.pooling.get_pooling_updates(task)

    def forward(
        self,
        hidden_states: Union[torch.Tensor, list[torch.Tensor]],
        pooling_metadata: PoolingMetadata,
    ) -> PoolerOutput:
        """Pool the hidden states, apply the head, and package the result."""
        pooled = self.pooling(hidden_states, pooling_metadata)
        return build_output(self.head(pooled, pooling_metadata))

head instance-attribute

head = head

pooling instance-attribute

pooling = pooling

__init__

__init__(pooling: PoolingMethod, head: PoolerHead) -> None
Source code in vllm/model_executor/layers/pooler.py
def __init__(self, pooling: PoolingMethod, head: PoolerHead) -> None:
    """Compose a pooling method with a pooler head."""
    super().__init__()

    self.pooling = pooling
    self.head = head

forward

forward(
    hidden_states: Union[Tensor, list[Tensor]],
    pooling_metadata: PoolingMetadata,
) -> PoolerOutput
Source code in vllm/model_executor/layers/pooler.py
def forward(
    self,
    hidden_states: Union[torch.Tensor, list[torch.Tensor]],
    pooling_metadata: PoolingMetadata,
) -> PoolerOutput:
    """Pool the hidden states, apply the head, and package the result."""
    pooled = self.pooling(hidden_states, pooling_metadata)
    return build_output(self.head(pooled, pooling_metadata))

from_config classmethod

from_config(
    pooler_config: ResolvedPoolingConfig,
) -> SimplePooler
Source code in vllm/model_executor/layers/pooler.py
@classmethod
def from_config(
    cls,
    pooler_config: ResolvedPoolingConfig,
) -> "SimplePooler":
    """Build a pooler whose method and head match `pooler_config`."""
    pooling = PoolingMethod.from_pooling_type(pooler_config.pooling_type)
    head_factories = {
        "embed": EmbeddingPoolerHead,
        "encode": RewardPoolerHead,
    }
    head_factory = head_factories.get(pooler_config.task)
    if head_factory is None:
        raise NotImplementedError(f"Unknown task: {pooler_config.task}")
    return cls(pooling, head_factory())

get_pooling_updates

get_pooling_updates(
    task: PoolingTask,
) -> PoolingParamsUpdate
Source code in vllm/model_executor/layers/pooler.py
def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
    """Delegate to the underlying pooling method."""
    return self.pooling.get_pooling_updates(task)

get_supported_tasks

get_supported_tasks() -> Set[PoolingTask]
Source code in vllm/model_executor/layers/pooler.py
def get_supported_tasks(self) -> Set[PoolingTask]:
    """Delegate to the underlying pooling method."""
    return self.pooling.get_supported_tasks()

StepPooler

Bases: Pooler

Source code in vllm/model_executor/layers/pooler.py
class StepPooler(Pooler):
    # Pools per-step hidden states for reward models: keeps every token's
    # state (AllPool), then narrows each request's states by the selected
    # output columns and/or step-tag token positions before the head.

    def __init__(self, ) -> None:
        super().__init__()

        self.pooling = AllPool()
        self.head = RewardPoolerHead()

    def extract_states(
        self,
        hidden_states: Union[torch.Tensor, list[torch.Tensor]],
        pooling_metadata: PoolingMetadata,
    ) -> Union[list[torch.Tensor], torch.Tensor]:
        """Narrow each request's all-token states to its step positions.

        Column selection (`returned_token_ids`) is applied before the
        step-tag row filter; both are optional per request.
        """
        pooled_data_lst = self.pooling(hidden_states, pooling_metadata)
        prompt_token_ids = get_prompt_token_ids(pooling_metadata)

        pooled_data = list[torch.Tensor]()

        pooling_params = get_pooling_params(pooling_metadata)

        for data, token_id, pooling_param in zip(pooled_data_lst,
                                                 prompt_token_ids,
                                                 pooling_params):
            step_tag_id = pooling_param.step_tag_id
            returned_token_ids = pooling_param.returned_token_ids
            # NOTE(review): the column indexing below presumably selects
            # class/vocab logits in the last dim — confirm against callers.
            if returned_token_ids is not None and len(returned_token_ids) > 0:
                data = data[:, returned_token_ids]

            if step_tag_id is not None:
                # Keep only positions whose prompt token is the step tag.
                data = data[token_id == step_tag_id]
            pooled_data.append(data)

        return pooled_data

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return {"encode"}

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        # Prompt token IDs are needed to locate the step-tag positions.
        return PoolingParamsUpdate(requires_token_ids=True)

    def forward(
        self,
        hidden_states: Union[torch.Tensor, list[torch.Tensor]],
        pooling_metadata: PoolingMetadata,
    ) -> PoolerOutput:
        pooled_data = self.extract_states(hidden_states, pooling_metadata)
        pooled_data = self.head(pooled_data, pooling_metadata)
        return build_output(pooled_data)

head instance-attribute

pooling instance-attribute

pooling = AllPool()

__init__

__init__() -> None
Source code in vllm/model_executor/layers/pooler.py
def __init__(self, ) -> None:
    # Step pooling keeps every token's state, then filters in
    # extract_states before the reward head runs.
    super().__init__()

    self.pooling = AllPool()
    self.head = RewardPoolerHead()

extract_states

extract_states(
    hidden_states: Union[Tensor, list[Tensor]],
    pooling_metadata: PoolingMetadata,
) -> Union[list[Tensor], Tensor]
Source code in vllm/model_executor/layers/pooler.py
def extract_states(
    self,
    hidden_states: Union[torch.Tensor, list[torch.Tensor]],
    pooling_metadata: PoolingMetadata,
) -> Union[list[torch.Tensor], torch.Tensor]:
    """Narrow each request's all-token states to its step positions.

    Column selection (`returned_token_ids`) is applied before the
    step-tag row filter; both are optional per request.
    """
    pooled_data_lst = self.pooling(hidden_states, pooling_metadata)
    prompt_token_ids = get_prompt_token_ids(pooling_metadata)

    pooled_data = list[torch.Tensor]()

    pooling_params = get_pooling_params(pooling_metadata)

    for data, token_id, pooling_param in zip(pooled_data_lst,
                                             prompt_token_ids,
                                             pooling_params):
        step_tag_id = pooling_param.step_tag_id
        returned_token_ids = pooling_param.returned_token_ids
        # NOTE(review): the column indexing below presumably selects
        # class/vocab logits in the last dim — confirm against callers.
        if returned_token_ids is not None and len(returned_token_ids) > 0:
            data = data[:, returned_token_ids]

        if step_tag_id is not None:
            # Keep only positions whose prompt token is the step tag.
            data = data[token_id == step_tag_id]
        pooled_data.append(data)

    return pooled_data

forward

forward(
    hidden_states: Union[Tensor, list[Tensor]],
    pooling_metadata: PoolingMetadata,
) -> PoolerOutput
Source code in vllm/model_executor/layers/pooler.py
def forward(
    self,
    hidden_states: Union[torch.Tensor, list[torch.Tensor]],
    pooling_metadata: PoolingMetadata,
) -> PoolerOutput:
    """Extract per-step states, apply the head, and package the result."""
    stepwise = self.extract_states(hidden_states, pooling_metadata)
    return build_output(self.head(stepwise, pooling_metadata))

get_pooling_updates

get_pooling_updates(
    task: PoolingTask,
) -> PoolingParamsUpdate
Source code in vllm/model_executor/layers/pooler.py
def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
    # Prompt token IDs are needed to locate the step-tag positions.
    return PoolingParamsUpdate(requires_token_ids=True)

get_supported_tasks

get_supported_tasks() -> Set[PoolingTask]
Source code in vllm/model_executor/layers/pooler.py
def get_supported_tasks(self) -> Set[PoolingTask]:
    """Step pooling only serves the "encode" task."""
    return {"encode"}

build_output

build_output(
    all_data: Union[Tensor, list[Tensor]],
) -> PoolerOutput
Source code in vllm/model_executor/layers/pooler.py
def build_output(
        all_data: Union[torch.Tensor, list[torch.Tensor]]) -> PoolerOutput:
    """Move pooled data to the CPU and wrap it as a PoolerOutput."""
    # Pooling models D2H & synchronize occurs here
    if isinstance(all_data, list):
        moved = [item.to("cpu", non_blocking=True) for item in all_data]
    else:
        moved = all_data.to("cpu", non_blocking=True)
    # The non-blocking copies must complete before the tensors are read.
    current_stream().synchronize()

    outputs = [PoolingSequenceGroupOutput(item) for item in moved]
    return PoolerOutput(outputs=outputs)

get_classification_activation_function

get_classification_activation_function(
    config: PretrainedConfig,
)
Source code in vllm/model_executor/layers/pooler.py
def get_classification_activation_function(config: PretrainedConfig):
    """Pick the activation matching the HF sequence-classification loss.

    Implement alignment with transformers ForSequenceClassificationLoss
    https://github.com/huggingface/transformers/blob/57bb6db6ee4cfaccc45b8d474dfad5a17811ca60/src/transformers/loss/loss_utils.py#L92
    """
    dispatch = {
        "regression": PoolerIdentity,
        "single_label_classification": PoolerClassify,
        "multi_label_classification": PoolerMultiLabelClassify,
    }
    problem_type = getattr(config, "problem_type", "")
    # Unknown or unset problem types fall back to single-label classify.
    return dispatch.get(problem_type, PoolerClassify)()

get_cross_encoder_activation_function

get_cross_encoder_activation_function(
    config: PretrainedConfig,
)
Source code in vllm/model_executor/layers/pooler.py
def get_cross_encoder_activation_function(config: PretrainedConfig):
    """Resolve the score activation configured for a cross-encoder model."""
    function_name: Optional[str] = None
    if (hasattr(config, "sentence_transformers")
            and "activation_fn" in config.sentence_transformers):
        # sentence-transformers config takes precedence.
        function_name = config.sentence_transformers["activation_fn"]
    elif getattr(config, "sbert_ce_default_activation_function",
                 None) is not None:
        function_name = config.sbert_ce_default_activation_function

    if function_name is None:
        return PoolerScore()

    assert function_name.startswith("torch.nn.modules."), (
        "Loading of activation functions is restricted to "
        "torch.nn.modules for security reasons")
    activation = resolve_obj_by_qualname(function_name)()
    return PoolerActivation.wraps(activation)

get_pooling_params

get_pooling_params(
    pooling_metadata: PoolingMetadata,
) -> list[PoolingParams]
Source code in vllm/model_executor/layers/pooler.py
def get_pooling_params(
        pooling_metadata: PoolingMetadata) -> list[PoolingParams]:
    """Extract the per-request PoolingParams from either metadata version."""
    if isinstance(pooling_metadata, V0PoolingMetadata):
        # V0 stores (seq_group, params) pairs.
        return [params for _, params in pooling_metadata.seq_groups]
    return pooling_metadata.pooling_params

get_prompt_lens

get_prompt_lens(
    hidden_states: Union[Tensor, list[Tensor]],
    pooling_metadata: PoolingMetadata,
) -> Tensor
Source code in vllm/model_executor/layers/pooler.py
def get_prompt_lens(
    hidden_states: Union[torch.Tensor, list[torch.Tensor]],
    pooling_metadata: PoolingMetadata,
) -> torch.Tensor:
    """Return per-request prompt lengths for either metadata version."""
    if isinstance(pooling_metadata, V1PoolingMetadata):
        return pooling_metadata.prompt_lens

    # V0 path: materialize pooling tensors on the hidden-state device.
    tensors = PoolingTensors.from_pooling_metadata(
        pooling_metadata, hidden_states[0].device)
    return tensors.prompt_lens

get_prompt_token_ids

get_prompt_token_ids(
    pooling_metadata: PoolingMetadata,
) -> list[Tensor]
Source code in vllm/model_executor/layers/pooler.py
def get_prompt_token_ids(
        pooling_metadata: PoolingMetadata) -> list[torch.Tensor]:
    """Return each request's prompt token IDs, trimmed to its length."""
    if not isinstance(pooling_metadata, V1PoolingMetadata):
        # V0 path: token IDs live in the per-sequence data.
        return [
            torch.tensor(seq_data_i.prompt_token_ids)
            for seq_data_i in pooling_metadata.seq_data.values()
        ]

    assert pooling_metadata.prompt_token_ids is not None, (
        "Please set `requires_token_ids=True` in `get_pooling_updates`")
    return [
        pooling_metadata.prompt_token_ids[i, :num]
        for i, num in enumerate(pooling_metadata.prompt_lens)
    ]

get_tasks

get_tasks(
    pooling_metadata: PoolingMetadata,
) -> list[PoolingTask]
Source code in vllm/model_executor/layers/pooler.py
def get_tasks(pooling_metadata: PoolingMetadata) -> list[PoolingTask]:
    """Return the task of every request, asserting none is unset."""
    pooling_params = get_pooling_params(pooling_metadata)

    tasks: list[PoolingTask] = []
    for pooling_param in pooling_params:
        task = pooling_param.task
        if task is not None:
            tasks.append(task)
    # Every request must carry a task.
    assert len(pooling_params) == len(tasks)

    return tasks