
vllm.model_executor.models.modernbert
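
vLLM implementation of ModernBERT, an encoder-only model used for pooling tasks such as sequence classification and cross-encoder scoring. The module defines the embedding, attention, MLP, encoder-layer, pooler, and rotary-embedding components documented below.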

ModernBertAttention

Bases: Module

Source code in vllm/model_executor/models/modernbert.py
class ModernBertAttention(nn.Module):

    def __init__(self,
                 config: ModernBertConfig,
                 layer_id: Optional[int] = None):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.layer_id = layer_id
        self.deterministic_flash_attn = config.deterministic_flash_attn
        self.num_heads = config.num_attention_heads
        assert self.num_heads % tp_size == 0
        self.head_dim = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.head_dim * self.num_heads
        self.scaling = self.head_dim**-0.5
        self.Wqkv = QKVParallelLinear(
            config.hidden_size,
            self.head_dim,
            self.num_heads,
            bias=config.attention_bias,
        )

        sliding_window = None
        if layer_id % config.global_attn_every_n_layers != 0:
            sliding_window = config.local_attention // 2
            rope_theta = config.local_rope_theta if config.local_rope_theta \
                    is not None else config.global_rope_theta
        else:
            rope_theta = config.global_rope_theta

        self.rotary_emb = ModernBertRotaryEmbedding(config=config,
                                                    head_size=self.head_dim,
                                                    dim=self.head_dim,
                                                    base=rope_theta)
        self.attn = EncoderOnlyAttention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            prefix=f"{layer_id}.attn",
            per_layer_sliding_window=sliding_window)
        self.Wo = RowParallelLinear(config.hidden_size,
                                    config.hidden_size,
                                    bias=config.attention_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
    ) -> torch.Tensor:
        qkv, _ = self.Wqkv(hidden_states)
        q, k, v = qkv.split([self.all_head_size] * 3, dim=-1)
        q, k = self.rotary_emb(position_ids, q, k)
        attn_outputs = self.attn(q, k, v)
        hidden_states = attn_outputs
        hidden_states, _ = self.Wo(hidden_states)
        return hidden_states
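
ModernBERT alternates between global and sliding-window (local) attention: layers whose layer_id is not a multiple of config.global_attn_every_n_layers get a per-layer sliding window of config.local_attention // 2 and prefer config.local_rope_theta as the rotary base, while the remaining layers attend globally with config.global_rope_theta. A minimal sketch of that schedule, assuming the global_attn_every_n_layers=3 and local_attention=128 values used by the released ModernBERT configs (check your checkpoint's config for the real values):

# Illustrative only: reproduce the layer schedule computed in __init__ above.
global_attn_every_n_layers = 3   # assumed; read from ModernBertConfig in practice
local_attention = 128            # assumed; read from ModernBertConfig in practice

for layer_id in range(6):
    if layer_id % global_attn_every_n_layers != 0:
        print(f"layer {layer_id}: local, sliding window {local_attention // 2}")
    else:
        print(f"layer {layer_id}: global attention")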

Wo instance-attribute

Wo = RowParallelLinear(
    hidden_size, hidden_size, bias=attention_bias
)

Wqkv instance-attribute

Wqkv = QKVParallelLinear(
    hidden_size, head_dim, num_heads, bias=attention_bias
)

all_head_size instance-attribute

all_head_size = head_dim * num_heads

attn instance-attribute

attn = EncoderOnlyAttention(
    num_heads,
    head_dim,
    scaling,
    prefix=f"{layer_id}.attn",
    per_layer_sliding_window=sliding_window,
)

config instance-attribute

config = config

deterministic_flash_attn instance-attribute

deterministic_flash_attn = deterministic_flash_attn

head_dim instance-attribute

head_dim = hidden_size // num_attention_heads

hidden_size instance-attribute

hidden_size = hidden_size

layer_id instance-attribute

layer_id = layer_id

num_heads instance-attribute

num_heads = num_attention_heads

rotary_emb instance-attribute

rotary_emb = ModernBertRotaryEmbedding(
    config=config,
    head_size=head_dim,
    dim=head_dim,
    base=rope_theta,
)

scaling instance-attribute

scaling = head_dim ** -0.5

__init__

__init__(
    config: ModernBertConfig, layer_id: Optional[int] = None
)
Source code in vllm/model_executor/models/modernbert.py
def __init__(self,
             config: ModernBertConfig,
             layer_id: Optional[int] = None):
    super().__init__()
    self.config = config
    self.hidden_size = config.hidden_size
    tp_size = get_tensor_model_parallel_world_size()
    self.layer_id = layer_id
    self.deterministic_flash_attn = config.deterministic_flash_attn
    self.num_heads = config.num_attention_heads
    assert self.num_heads % tp_size == 0
    self.head_dim = config.hidden_size // config.num_attention_heads
    self.all_head_size = self.head_dim * self.num_heads
    self.scaling = self.head_dim**-0.5
    self.Wqkv = QKVParallelLinear(
        config.hidden_size,
        self.head_dim,
        self.num_heads,
        bias=config.attention_bias,
    )

    sliding_window = None
    if layer_id % config.global_attn_every_n_layers != 0:
        sliding_window = config.local_attention // 2
        rope_theta = config.local_rope_theta if config.local_rope_theta \
                is not None else config.global_rope_theta
    else:
        rope_theta = config.global_rope_theta

    self.rotary_emb = ModernBertRotaryEmbedding(config=config,
                                                head_size=self.head_dim,
                                                dim=self.head_dim,
                                                base=rope_theta)
    self.attn = EncoderOnlyAttention(
        self.num_heads,
        self.head_dim,
        self.scaling,
        prefix=f"{layer_id}.attn",
        per_layer_sliding_window=sliding_window)
    self.Wo = RowParallelLinear(config.hidden_size,
                                config.hidden_size,
                                bias=config.attention_bias)

forward

forward(
    hidden_states: Tensor, position_ids: Tensor
) -> Tensor
Source code in vllm/model_executor/models/modernbert.py
def forward(
    self,
    hidden_states: torch.Tensor,
    position_ids: torch.Tensor,
) -> torch.Tensor:
    qkv, _ = self.Wqkv(hidden_states)
    q, k, v = qkv.split([self.all_head_size] * 3, dim=-1)
    q, k = self.rotary_emb(position_ids, q, k)
    attn_outputs = self.attn(q, k, v)
    hidden_states = attn_outputs
    hidden_states, _ = self.Wo(hidden_states)
    return hidden_states

ModernBertEmbeddings

Bases: Module

Source code in vllm/model_executor/models/modernbert.py
class ModernBertEmbeddings(nn.Module):

    def __init__(self, config: ModernBertConfig):

        super().__init__()
        self.config = config
        self.tok_embeddings = VocabParallelEmbedding(config.vocab_size,
                                                     config.hidden_size)
        self.norm = nn.LayerNorm(config.hidden_size,
                                 eps=config.layer_norm_eps,
                                 bias=config.norm_bias)

    def forward(
        self,
        input_ids: torch.Tensor,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        if inputs_embeds is not None:
            return self.norm(inputs_embeds)
        else:
            inputs_embeds = self.tok_embeddings(input_ids)
            embeddings = self.norm(inputs_embeds)
            return embeddings
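
The embedding module is a vocabulary embedding followed by LayerNorm; ModernBERT has no absolute position embeddings, since positional information is injected through the rotary embeddings applied inside ModernBertAttention. When inputs_embeds is supplied, only the normalization is applied.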

config instance-attribute

config = config

norm instance-attribute

norm = LayerNorm(
    hidden_size, eps=layer_norm_eps, bias=norm_bias
)

tok_embeddings instance-attribute

tok_embeddings = VocabParallelEmbedding(
    vocab_size, hidden_size
)

__init__

__init__(config: ModernBertConfig)
Source code in vllm/model_executor/models/modernbert.py
def __init__(self, config: ModernBertConfig):

    super().__init__()
    self.config = config
    self.tok_embeddings = VocabParallelEmbedding(config.vocab_size,
                                                 config.hidden_size)
    self.norm = nn.LayerNorm(config.hidden_size,
                             eps=config.layer_norm_eps,
                             bias=config.norm_bias)

forward

forward(
    input_ids: Tensor,
    inputs_embeds: Optional[Tensor] = None,
) -> Tensor
Source code in vllm/model_executor/models/modernbert.py
def forward(
    self,
    input_ids: torch.Tensor,
    inputs_embeds: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    if inputs_embeds is not None:
        return self.norm(inputs_embeds)
    else:
        inputs_embeds = self.tok_embeddings(input_ids)
        embeddings = self.norm(inputs_embeds)
        return embeddings

ModernBertEncoderLayer

Bases: Module

Source code in vllm/model_executor/models/modernbert.py
class ModernBertEncoderLayer(nn.Module):

    def __init__(self, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.layers = nn.ModuleList([
            ModernBertLayer(config=config, layer_id=layer_id)
            for layer_id in range(config.num_hidden_layers)
        ])

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
    ) -> torch.Tensor:
        for i, layer in enumerate(self.layers):
            hidden_states = layer(hidden_states, position_ids)
        return hidden_states
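
Despite the singular name, ModernBertEncoderLayer holds the full stack of ModernBertLayer blocks (one per config.num_hidden_layers) and applies them sequentially in forward.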

layers instance-attribute

layers = ModuleList(
    [
        ModernBertLayer(config=config, layer_id=layer_id)
        for layer_id in range(num_hidden_layers)
    ]
)

__init__

__init__(vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/modernbert.py
def __init__(self, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config = vllm_config.model_config.hf_config
    self.layers = nn.ModuleList([
        ModernBertLayer(config=config, layer_id=layer_id)
        for layer_id in range(config.num_hidden_layers)
    ])

forward

forward(
    hidden_states: Tensor, position_ids: Tensor
) -> Tensor
Source code in vllm/model_executor/models/modernbert.py
def forward(
    self,
    hidden_states: torch.Tensor,
    position_ids: torch.Tensor,
) -> torch.Tensor:
    for i, layer in enumerate(self.layers):
        hidden_states = layer(hidden_states, position_ids)
    return hidden_states

ModernBertForSequenceClassification

Bases: Module, SupportsCrossEncoding

Source code in vllm/model_executor/models/modernbert.py
@default_pooling_type("CLS")
class ModernBertForSequenceClassification(nn.Module, SupportsCrossEncoding):

    is_pooling_model = True

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.config = config
        self.model = ModernBertModel(vllm_config=vllm_config,
                                     prefix=maybe_prefix(prefix, "modernbert"))
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.pooling = ModernBertPooler(config)

        pooler_config = vllm_config.model_config.pooler_config
        assert pooler_config is not None

        self.pooler = DispatchPooler({
            "encode":
            Pooler.for_encode(pooler_config),
            "classify":
            ClassifierPooler(
                pooling=self.pooling,
                classifier=self.classifier,
                act_fn=ClassifierPooler.act_fn_for_seq_cls(
                    vllm_config.model_config),
            ),
            "score":
            ClassifierPooler(
                pooling=self.pooling,
                classifier=self.classifier,
                act_fn=ClassifierPooler.act_fn_for_cross_encoder(
                    vllm_config.model_config),
            ),
        })

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):

        self_weights = []

        def weight_filter():
            for name, weight in weights:
                if name.startswith("model."):
                    yield name[len("model."):], weight
                else:
                    self_weights.append((name, weight))

        self.model.load_weights(weight_filter())

        params_dict = dict(self.named_parameters())

        for name, loaded_weight in self_weights:
            if name.startswith("classifier"):
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
            if name.startswith("head"):
                param = params_dict["pooling." + name[len("head") + 1:]]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)

    def forward(
        self,
        input_ids: Optional[torch.LongTensor],
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        return self.model(
            input_ids=input_ids,
            inputs_embeds=inputs_embeds,
            positions=positions,
        )
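
A hedged usage sketch for this class through vLLM's offline LLM API, assuming a ModernBERT-based cross-encoder checkpoint; the model name, task value, and output attributes below are illustrative and may differ across vLLM versions:

from vllm import LLM

# Assumed example checkpoint; any ModernBERT sequence-classification model applies.
llm = LLM(model="Alibaba-NLP/gte-reranker-modernbert-base", task="score")

outputs = llm.score(
    "What is the capital of France?",
    ["Paris is the capital of France.", "The Eiffel Tower is in Paris."],
)
for output in outputs:
    print(output.outputs.score)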

classifier instance-attribute

classifier = Linear(hidden_size, num_labels)

config instance-attribute

config = config

is_pooling_model class-attribute instance-attribute

is_pooling_model = True

model instance-attribute

model = ModernBertModel(
    vllm_config=vllm_config,
    prefix=maybe_prefix(prefix, "modernbert"),
)

pooler instance-attribute

pooler = DispatchPooler(
    {
        "encode": for_encode(pooler_config),
        "classify": ClassifierPooler(
            pooling=pooling,
            classifier=classifier,
            act_fn=act_fn_for_seq_cls(model_config),
        ),
        "score": ClassifierPooler(
            pooling=pooling,
            classifier=classifier,
            act_fn=act_fn_for_cross_encoder(model_config),
        ),
    }
)

pooling instance-attribute

pooling = ModernBertPooler(config)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/modernbert.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config = vllm_config.model_config.hf_config
    self.config = config
    self.model = ModernBertModel(vllm_config=vllm_config,
                                 prefix=maybe_prefix(prefix, "modernbert"))
    self.classifier = nn.Linear(config.hidden_size, config.num_labels)
    self.pooling = ModernBertPooler(config)

    pooler_config = vllm_config.model_config.pooler_config
    assert pooler_config is not None

    self.pooler = DispatchPooler({
        "encode":
        Pooler.for_encode(pooler_config),
        "classify":
        ClassifierPooler(
            pooling=self.pooling,
            classifier=self.classifier,
            act_fn=ClassifierPooler.act_fn_for_seq_cls(
                vllm_config.model_config),
        ),
        "score":
        ClassifierPooler(
            pooling=self.pooling,
            classifier=self.classifier,
            act_fn=ClassifierPooler.act_fn_for_cross_encoder(
                vllm_config.model_config),
        ),
    })

forward

forward(
    input_ids: Optional[LongTensor],
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
) -> Tensor
Source code in vllm/model_executor/models/modernbert.py
def forward(
    self,
    input_ids: Optional[torch.LongTensor],
    positions: torch.Tensor,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    return self.model(
        input_ids=input_ids,
        inputs_embeds=inputs_embeds,
        positions=positions,
    )

load_weights

load_weights(weights: Iterable[tuple[str, Tensor]])
Source code in vllm/model_executor/models/modernbert.py
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):

    self_weights = []

    def weight_filter():
        for name, weight in weights:
            if name.startswith("model."):
                yield name[len("model."):], weight
            else:
                self_weights.append((name, weight))

    self.model.load_weights(weight_filter())

    params_dict = dict(self.named_parameters())

    for name, loaded_weight in self_weights:
        if name.startswith("classifier"):
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
        if name.startswith("head"):
            param = params_dict["pooling." + name[len("head") + 1:]]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)

ModernBertLayer

Bases: Module

Source code in vllm/model_executor/models/modernbert.py
class ModernBertLayer(nn.Module):

    def __init__(self,
                 config: ModernBertConfig,
                 prefix: str = "",
                 layer_id: Optional[int] = None):
        super().__init__()
        self.config = config
        if layer_id == 0:
            self.attn_norm = nn.Identity()
        else:
            self.attn_norm = nn.LayerNorm(config.hidden_size,
                                          eps=config.norm_eps,
                                          bias=config.norm_bias)
        self.attn = ModernBertAttention(config=config, layer_id=layer_id)
        self.mlp_norm = nn.LayerNorm(config.hidden_size,
                                     eps=config.norm_eps,
                                     bias=config.norm_bias)
        self.mlp = ModernBertMLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: torch.Tensor,
    ) -> torch.Tensor:
        attn_outputs = self.attn(hidden_states=self.attn_norm(hidden_states),
                                 position_ids=position_ids)
        hidden_states = hidden_states + attn_outputs
        mlp_output = self.mlp(self.mlp_norm(hidden_states))
        hidden_states = hidden_states + mlp_output
        return hidden_states
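
Each layer is a pre-norm residual block: LayerNorm, attention, residual add, then LayerNorm, MLP, residual add. For layer_id == 0 the attention norm is nn.Identity(), mirroring the reference ModernBERT implementation, where the LayerNorm inside ModernBertEmbeddings already normalizes the input to the first layer.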

attn instance-attribute

attn = ModernBertAttention(config=config, layer_id=layer_id)

attn_norm instance-attribute

attn_norm = Identity()

config instance-attribute

config = config

mlp instance-attribute

mlp = ModernBertMLP(config)

mlp_norm instance-attribute

mlp_norm = LayerNorm(
    hidden_size, eps=norm_eps, bias=norm_bias
)

__init__

__init__(
    config: ModernBertConfig,
    prefix: str = "",
    layer_id: Optional[int] = None,
)
Source code in vllm/model_executor/models/modernbert.py
def __init__(self,
             config: ModernBertConfig,
             prefix: str = "",
             layer_id: Optional[int] = None):
    super().__init__()
    self.config = config
    if layer_id == 0:
        self.attn_norm = nn.Identity()
    else:
        self.attn_norm = nn.LayerNorm(config.hidden_size,
                                      eps=config.norm_eps,
                                      bias=config.norm_bias)
    self.attn = ModernBertAttention(config=config, layer_id=layer_id)
    self.mlp_norm = nn.LayerNorm(config.hidden_size,
                                 eps=config.norm_eps,
                                 bias=config.norm_bias)
    self.mlp = ModernBertMLP(config)

forward

forward(
    hidden_states: Tensor, position_ids: Tensor
) -> Tensor
Source code in vllm/model_executor/models/modernbert.py
def forward(
    self,
    hidden_states: torch.Tensor,
    position_ids: torch.Tensor,
) -> torch.Tensor:
    attn_outputs = self.attn(hidden_states=self.attn_norm(hidden_states),
                             position_ids=position_ids)
    hidden_states = hidden_states + attn_outputs
    mlp_output = self.mlp(self.mlp_norm(hidden_states))
    hidden_states = hidden_states + mlp_output
    return hidden_states

ModernBertMLP

Bases: Module

Source code in vllm/model_executor/models/modernbert.py
class ModernBertMLP(nn.Module):

    def __init__(self, config: ModernBertConfig):
        super().__init__()
        self.config = config
        self.Wi = nn.Linear(config.hidden_size,
                            int(config.intermediate_size) * 2,
                            bias=config.mlp_bias)
        self.act = nn.GELU()
        self.Wo = RowParallelLinear(config.intermediate_size,
                                    config.hidden_size,
                                    bias=config.mlp_bias)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input, gate = self.Wi(hidden_states).chunk(2, dim=-1)
        return self.Wo(self.act(input) * gate)[0]
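
The MLP is a gated GELU (GeGLU): Wi projects to twice the intermediate size, the result is split into an input half and a gate half, and the output is Wo(GELU(input) * gate). A minimal plain-PyTorch sketch of the same computation, without the tensor-parallel layers used above:

import torch
import torch.nn.functional as F

hidden_size, intermediate_size = 8, 16   # toy sizes for illustration
Wi = torch.nn.Linear(hidden_size, intermediate_size * 2)
Wo = torch.nn.Linear(intermediate_size, hidden_size)

x = torch.randn(4, hidden_size)
inp, gate = Wi(x).chunk(2, dim=-1)       # split the doubled projection
y = Wo(F.gelu(inp) * gate)               # GeGLU: GELU(input) * gate
print(y.shape)                           # torch.Size([4, 8])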

Wi instance-attribute

Wi = Linear(
    hidden_size, int(intermediate_size) * 2, bias=mlp_bias
)

Wo instance-attribute

Wo = RowParallelLinear(
    intermediate_size, hidden_size, bias=mlp_bias
)

act instance-attribute

act = GELU()

config instance-attribute

config = config

__init__

__init__(config: ModernBertConfig)
Source code in vllm/model_executor/models/modernbert.py
def __init__(self, config: ModernBertConfig):
    super().__init__()
    self.config = config
    self.Wi = nn.Linear(config.hidden_size,
                        int(config.intermediate_size) * 2,
                        bias=config.mlp_bias)
    self.act = nn.GELU()
    self.Wo = RowParallelLinear(config.intermediate_size,
                                config.hidden_size,
                                bias=config.mlp_bias)

forward

forward(hidden_states: Tensor) -> Tensor
Source code in vllm/model_executor/models/modernbert.py
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
    input, gate = self.Wi(hidden_states).chunk(2, dim=-1)
    return self.Wo(self.act(input) * gate)[0]

ModernBertModel

Bases: Module

Source code in vllm/model_executor/models/modernbert.py
@support_torch_compile
@default_pooling_type("CLS")
class ModernBertModel(nn.Module):
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={"layers.": "encoder_layer.layers."})

    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
    ):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.config = config
        self.embeddings = ModernBertEmbeddings(config)
        self.encoder_layer = ModernBertEncoderLayer(vllm_config)
        self.final_norm = nn.LayerNorm(config.hidden_size,
                                       eps=config.norm_eps,
                                       bias=config.norm_bias)

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        weights = self.hf_to_vllm_mapper.apply(weights)
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if name.endswith(".bias") and name not in params_dict:
                continue
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
        else:
            hidden_states = self.embeddings(input_ids=input_ids,
                                            inputs_embeds=inputs_embeds)

        outputs = self.encoder_layer(
            hidden_states=hidden_states,
            position_ids=positions,
        )
        norm_outputs = self.final_norm(outputs)
        return norm_outputs
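
Checkpoint weight names follow the Hugging Face layout (layers.N....), while this module nests the stack under encoder_layer, so hf_to_vllm_mapper rewrites the prefix before load_weights looks each parameter up. A conceptual sketch of that rename (not the actual WeightsMapper API):

# Conceptual illustration of the prefix remapping performed by hf_to_vllm_mapper.
hf_name = "layers.0.attn.Wqkv.weight"
vllm_name = hf_name.replace("layers.", "encoder_layer.layers.", 1)
print(vllm_name)  # encoder_layer.layers.0.attn.Wqkv.weight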

config instance-attribute

config = config

embeddings instance-attribute

embeddings = ModernBertEmbeddings(config)

encoder_layer instance-attribute

encoder_layer = ModernBertEncoderLayer(vllm_config)

final_norm instance-attribute

final_norm = LayerNorm(
    hidden_size, eps=norm_eps, bias=norm_bias
)

hf_to_vllm_mapper class-attribute instance-attribute

hf_to_vllm_mapper = WeightsMapper(
    orig_to_new_prefix={"layers.": "encoder_layer.layers."}
)

__init__

__init__(vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/modernbert.py
def __init__(
    self,
    vllm_config: VllmConfig,
    prefix: str = "",
):
    super().__init__()
    config = vllm_config.model_config.hf_config
    self.config = config
    self.embeddings = ModernBertEmbeddings(config)
    self.encoder_layer = ModernBertEncoderLayer(vllm_config)
    self.final_norm = nn.LayerNorm(config.hidden_size,
                                   eps=config.norm_eps,
                                   bias=config.norm_bias)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
) -> Tensor
Source code in vllm/model_executor/models/modernbert.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    if inputs_embeds is not None:
        hidden_states = inputs_embeds
    else:
        hidden_states = self.embeddings(input_ids=input_ids,
                                        inputs_embeds=inputs_embeds)

    outputs = self.encoder_layer(
        hidden_states=hidden_states,
        position_ids=positions,
    )
    norm_outputs = self.final_norm(outputs)
    return norm_outputs

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/modernbert.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    weights = self.hf_to_vllm_mapper.apply(weights)
    params_dict = dict(self.named_parameters())
    loaded_params: set[str] = set()
    for name, loaded_weight in weights:
        if name.endswith(".bias") and name not in params_dict:
            continue
        param = params_dict[name]
        weight_loader = getattr(param, "weight_loader",
                                default_weight_loader)
        weight_loader(param, loaded_weight)
        loaded_params.add(name)
    return loaded_params

ModernBertPooler

Bases: Pooler

Source code in vllm/model_executor/models/modernbert.py
class ModernBertPooler(Pooler):

    def __init__(self, config: ModernBertConfig):
        super().__init__()

        pooling_type = PoolingType[config.classifier_pooling.upper()]
        self.pooling = PoolingMethod.from_pooling_type(pooling_type)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size,
                               config.classifier_bias)
        self.act = nn.GELU()
        self.norm = nn.LayerNorm(config.hidden_size,
                                 eps=config.norm_eps,
                                 bias=config.norm_bias)

    def get_supported_tasks(self) -> Set[PoolingTask]:
        return self.pooling.get_supported_tasks()

    def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
        return self.pooling.get_pooling_updates(task)

    def _head(self, pooled_output: torch.Tensor):
        pooled_output = pooled_output.to(self.dense.weight.dtype)
        return self.norm(self.act(self.dense(pooled_output)))

    def forward(
        self,
        hidden_states: Union[torch.Tensor, list[torch.Tensor]],
        pooling_metadata: PoolingMetadata,
    ) -> Union[torch.Tensor, list[torch.Tensor]]:
        pooled_output = self.pooling(hidden_states, pooling_metadata)

        if isinstance(pooled_output, list):
            pooled_output = [self._head(output) for output in pooled_output]
        else:
            pooled_output = self._head(pooled_output)

        return pooled_output
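
The pooler first applies the pooling method selected by config.classifier_pooling (e.g. CLS or MEAN), then a dense, GELU, LayerNorm head. In Hugging Face checkpoints this head lives under the head. prefix; ModernBertForSequenceClassification.load_weights remaps those weights into this module's parameters.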

act instance-attribute

act = GELU()

dense instance-attribute

dense = Linear(hidden_size, hidden_size, classifier_bias)

norm instance-attribute

norm = LayerNorm(hidden_size, eps=norm_eps, bias=norm_bias)

pooling instance-attribute

pooling = from_pooling_type(pooling_type)

__init__

__init__(config: ModernBertConfig)
Source code in vllm/model_executor/models/modernbert.py
def __init__(self, config: ModernBertConfig):
    super().__init__()

    pooling_type = PoolingType[config.classifier_pooling.upper()]
    self.pooling = PoolingMethod.from_pooling_type(pooling_type)
    self.dense = nn.Linear(config.hidden_size, config.hidden_size,
                           config.classifier_bias)
    self.act = nn.GELU()
    self.norm = nn.LayerNorm(config.hidden_size,
                             eps=config.norm_eps,
                             bias=config.norm_bias)

_head

_head(pooled_output: Tensor)
Source code in vllm/model_executor/models/modernbert.py
def _head(self, pooled_output: torch.Tensor):
    pooled_output = pooled_output.to(self.dense.weight.dtype)
    return self.norm(self.act(self.dense(pooled_output)))

forward

forward(
    hidden_states: Union[Tensor, list[Tensor]],
    pooling_metadata: PoolingMetadata,
) -> Union[Tensor, list[Tensor]]
Source code in vllm/model_executor/models/modernbert.py
def forward(
    self,
    hidden_states: Union[torch.Tensor, list[torch.Tensor]],
    pooling_metadata: PoolingMetadata,
) -> Union[torch.Tensor, list[torch.Tensor]]:
    pooled_output = self.pooling(hidden_states, pooling_metadata)

    if isinstance(pooled_output, list):
        pooled_output = [self._head(output) for output in pooled_output]
    else:
        pooled_output = self._head(pooled_output)

    return pooled_output

get_pooling_updates

get_pooling_updates(
    task: PoolingTask,
) -> PoolingParamsUpdate
Source code in vllm/model_executor/models/modernbert.py
def get_pooling_updates(self, task: PoolingTask) -> PoolingParamsUpdate:
    return self.pooling.get_pooling_updates(task)

get_supported_tasks

get_supported_tasks() -> Set[PoolingTask]
Source code in vllm/model_executor/models/modernbert.py
def get_supported_tasks(self) -> Set[PoolingTask]:
    return self.pooling.get_supported_tasks()

ModernBertRotaryEmbedding

Bases: RotaryEmbedding

Source code in vllm/model_executor/models/modernbert.py
class ModernBertRotaryEmbedding(RotaryEmbedding):

    def __init__(self, config: ModernBertConfig, head_size: int, dim: int,
                 base: float):
        super().__init__(
            head_size=head_size,
            rotary_dim=dim,
            max_position_embeddings=config.max_position_embeddings,
            base=base,
            is_neox_style=True,
            dtype=torch.float16)
        self.config = config
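
This is a thin wrapper over vLLM's RotaryEmbedding that fixes NeoX-style rotation and a float16 cos/sin cache; the base is chosen per layer by ModernBertAttention (local_rope_theta for sliding-window layers when set, global_rope_theta otherwise).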

config instance-attribute

config = config

__init__

__init__(
    config: ModernBertConfig,
    head_size: int,
    dim: int,
    base: float,
)
Source code in vllm/model_executor/models/modernbert.py
def __init__(self, config: ModernBertConfig, head_size: int, dim: int,
             base: float):
    super().__init__(
        head_size=head_size,
        rotary_dim=dim,
        max_position_embeddings=config.max_position_embeddings,
        base=base,
        is_neox_style=True,
        dtype=torch.float16)
    self.config = config