vllm.model_executor.models.voxtral

ISO639_1_SUPPORTED_LANGS module-attribute

ISO639_1_SUPPORTED_LANGS = {
    "ar": "Arabic",
    "nl": "Dutch",
    "en": "English",
    "fr": "French",
    "de": "German",
    "hi": "Hindi",
    "it": "Italian",
    "pt": "Portuguese",
    "es": "Spanish",
}

logger module-attribute

logger = init_logger(__name__)

AudioLanguageAdapter

Bases: Module

Source code in vllm/model_executor/models/voxtral.py
class AudioLanguageAdapter(nn.Module):

    def __init__(self, hidden_size: int, dim: int) -> None:
        super().__init__()
        self.w_in = nn.Linear(hidden_size, dim, bias=False)
        self.gelu = nn.GELU()
        self.w_out = nn.Linear(dim, dim, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.w_out(self.gelu(self.w_in(x)))

gelu instance-attribute

gelu = GELU()

w_in instance-attribute

w_in = Linear(hidden_size, dim, bias=False)

w_out instance-attribute

w_out = Linear(dim, dim, bias=False)

__init__

__init__(hidden_size: int, dim: int) -> None
Source code in vllm/model_executor/models/voxtral.py
def __init__(self, hidden_size: int, dim: int) -> None:
    super().__init__()
    self.w_in = nn.Linear(hidden_size, dim, bias=False)
    self.gelu = nn.GELU()
    self.w_out = nn.Linear(dim, dim, bias=False)

forward

forward(x: Tensor) -> Tensor
Source code in vllm/model_executor/models/voxtral.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    return self.w_out(self.gelu(self.w_in(x)))
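The adapter is a plain two-layer MLP, so it can be exercised in isolation. A minimal shape check, assuming vLLM is installed and using illustrative sizes (the real hidden_size and dim come from the audio and text configs):

import torch

from vllm.model_executor.models.voxtral import AudioLanguageAdapter

adapter = AudioLanguageAdapter(hidden_size=5120, dim=3072)  # assumed sizes
x = torch.randn(10, 5120)   # 10 downsampled frames of encoder features
y = adapter(x)              # w_out(gelu(w_in(x)))
print(y.shape)              # torch.Size([10, 3072])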

VoxtralDummyInputsBuilder

Bases: BaseDummyInputsBuilder[VoxtralProcessingInfo]

Source code in vllm/model_executor/models/voxtral.py
class VoxtralDummyInputsBuilder(BaseDummyInputsBuilder[VoxtralProcessingInfo]):

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        return ""

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> MultiModalDataDict:
        num_audios = mm_counts.get("audio", 0)

        target_length = self.info.get_max_audio_array_len()

        return {
            "audio":
            self._get_dummy_audios(length=target_length, num_audios=num_audios)
        }

    def get_dummy_processor_inputs(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> ProcessorInputs:
        tokenizer = self.info.get_tokenizer()

        dummy_text = self.get_dummy_text(mm_counts)
        dummy_mm_data = self.get_dummy_mm_data(seq_len, mm_counts)
        dummy_audios = dummy_mm_data.get("audio", [])

        audio_chunks: list[AudioChunk] = []
        format = "wav"
        for audio in dummy_audios:
            audio_item = Audio(
                audio_array=audio,
                sampling_rate=self.info.get_hf_processor().sampling_rate,
                format=format,
            )
            chunk = AudioChunk(input_audio=RawAudio.from_audio(audio_item))
            audio_chunks.append(chunk)

        request = ChatCompletionRequest(messages=[
            UserMessage(content=[TextChunk(text=dummy_text), *audio_chunks]),
        ])
        res = tokenizer.mistral.encode_chat_completion(request)
        dummy_tokens = res.tokens
        # whixtral tokenizer adds padding to the audio
        # so we need to update the audio arrays
        dummy_mm_data["audio"] = [a.audio_array for a in res.audios]

        return ProcessorInputs(prompt=dummy_tokens, mm_data=dummy_mm_data)

get_dummy_mm_data

get_dummy_mm_data(
    seq_len: int, mm_counts: Mapping[str, int]
) -> MultiModalDataDict
Source code in vllm/model_executor/models/voxtral.py
def get_dummy_mm_data(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> MultiModalDataDict:
    num_audios = mm_counts.get("audio", 0)

    target_length = self.info.get_max_audio_array_len()

    return {
        "audio":
        self._get_dummy_audios(length=target_length, num_audios=num_audios)
    }

get_dummy_processor_inputs

get_dummy_processor_inputs(
    seq_len: int, mm_counts: Mapping[str, int]
) -> ProcessorInputs
Source code in vllm/model_executor/models/voxtral.py
def get_dummy_processor_inputs(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> ProcessorInputs:
    tokenizer = self.info.get_tokenizer()

    dummy_text = self.get_dummy_text(mm_counts)
    dummy_mm_data = self.get_dummy_mm_data(seq_len, mm_counts)
    dummy_audios = dummy_mm_data.get("audio", [])

    audio_chunks: list[AudioChunk] = []
    format = "wav"
    for audio in dummy_audios:
        audio_item = Audio(
            audio_array=audio,
            sampling_rate=self.info.get_hf_processor().sampling_rate,
            format=format,
        )
        chunk = AudioChunk(input_audio=RawAudio.from_audio(audio_item))
        audio_chunks.append(chunk)

    request = ChatCompletionRequest(messages=[
        UserMessage(content=[TextChunk(text=dummy_text), *audio_chunks]),
    ])
    res = tokenizer.mistral.encode_chat_completion(request)
    dummy_tokens = res.tokens
    # whixtral tokenizer adds padding to the audio
    # so we need to update the audio arrays
    dummy_mm_data["audio"] = [a.audio_array for a in res.audios]

    return ProcessorInputs(prompt=dummy_tokens, mm_data=dummy_mm_data)

get_dummy_text

get_dummy_text(mm_counts: Mapping[str, int]) -> str
Source code in vllm/model_executor/models/voxtral.py
def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
    return ""

VoxtralEncoderModel

Bases: Module

Source code in vllm/model_executor/models/voxtral.py
class VoxtralEncoderModel(nn.Module):
    packed_modules_mapping = {"qkv_proj": ["q_proj", "k_proj", "v_proj"]}

    # fmt: off
    mistral_remapping = [
        (r"whisper_encoder\.conv_layers\.0\.(weight|bias)", r"whisper_encoder.conv1.\1"), # noqa: E501
        (r"whisper_encoder\.conv_layers\.1\.(weight|bias)", r"whisper_encoder.conv2.\1"), # noqa: E501
        (r"whisper_encoder\.transformer\.layers\.(\d+)\.attention\.w([qkv])\.(weight|bias)", r"whisper_encoder.layers.\1.self_attn.\2_proj.\3"), # noqa: E501
        (r"whisper_encoder\.transformer\.layers\.(\d+)\.attention\.wo\.(weight|bias)", r"whisper_encoder.layers.\1.self_attn.out_proj.\2"), # noqa: E501
        (r"whisper_encoder\.transformer\.layers\.(\d+)\.attention_norm\.(weight|bias)", r"whisper_encoder.layers.\1.self_attn_layer_norm.\2"), # noqa: E501
        (r"whisper_encoder\.transformer\.layers\.(\d+)\.feed_forward\.w1\.(weight|bias)", r"whisper_encoder.layers.\1.mlp.fc1.\2"), # noqa: E501
        (r"whisper_encoder\.transformer\.layers\.(\d+)\.feed_forward\.w2\.(weight|bias)", r"whisper_encoder.layers.\1.mlp.fc2.\2"), # noqa: E501
        (r"whisper_encoder\.transformer\.layers\.(\d+)\.ffn_norm\.(weight|bias)", r"whisper_encoder.layers.\1.final_layer_norm.\2"), # noqa: E501
        (r"whisper_encoder\.transformer\.norm\.(weight|bias)", r"whisper_encoder.layer_norm.\1"), # noqa: E501
    ]
    # fmt: on

    def __init__(
        self,
        vllm_config: VllmConfig,
        *,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.config = cast(WhisperConfig, vllm_config.model_config.hf_config)
        self.dtype: torch.dtype = vllm_config.model_config.dtype
        self.whisper_encoder = WhisperEncoder(vllm_config=vllm_config,
                                              prefix=maybe_prefix(
                                                  prefix, "whisper_encoder"),
                                              is_standalone_encoder=True,
                                              init_in_fp32=True)
        mel_filters = mel_filter_bank(
            num_frequency_bins=1 + self.config.window_size // 2,
            num_mel_bins=self.config.num_mel_bins,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=self.config.sampling_rate,
        )
        self.mel_filters = torch.tensor(mel_filters, dtype=torch.float32)

    def compute_whisper_melspec(
        self,
        audio_waveforms: torch.Tensor,
    ) -> torch.Tensor:
        input_dtype = audio_waveforms.dtype
        window = torch.hann_window(self.config.window_size).to(
            audio_waveforms.device)
        stft = torch.stft(
            audio_waveforms,
            self.config.window_size,
            self.config.hop_length,
            window=window,
            return_complex=True,
        )
        magnitudes = stft[..., :-1].abs()**2
        mel_spec = self.mel_filters.T @ magnitudes
        log_spec = torch.clamp(mel_spec, min=1e-10).log10()
        log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec.to(input_dtype)

    @property
    def downsample_factor(self) -> int:
        return self.whisper_encoder.conv1.stride[
            0] * self.whisper_encoder.conv2.stride[0]

    @property
    def chunk_size(self) -> int:
        return self.config.max_source_positions * self.downsample_factor

    def prepare_inputs_for_conv(
        self,
        audio_waveforms: list[torch.Tensor],
    ) -> tuple[torch.Tensor, list[int]]:
        assert isinstance(audio_waveforms, list)
        # list[num_mel_bins, seq_len]
        input_features = [
            self.compute_whisper_melspec(audio).to(self.dtype)
            for audio in audio_waveforms
        ]

        chunked_features: list[torch.Tensor] = []
        chunks_per_example: list[int] = []
        for feature in input_features:
            chunks = feature.split(self.chunk_size, dim=-1)
            chunked_features += chunks
            chunks_per_example.append(len(chunks))

        # [total_num_chunks, num_mel_bins, chunk_size]
        return torch.stack(chunked_features), chunks_per_example

    def forward(
        self, input_features: Union[torch.Tensor, list[torch.Tensor]]
    ) -> list[torch.Tensor]:
        if not isinstance(input_features, list):
            input_features = [input_features]

        # Split long inputs into chunks
        input_embeds, chunks_per_example = (
            self.prepare_inputs_for_conv(input_features))

        # [total_num_chunks, ceil(chunk_size / downsample_factor), hidden_size]
        out = self.whisper_encoder([input_embeds])

        # Re-concatenate the chunks
        chunk_idx = 0
        results = []
        for n_chunks in chunks_per_example:
            result = out[chunk_idx:chunk_idx + n_chunks].flatten(0, 1)
            results.append(result)
            chunk_idx += n_chunks

        return results

    def load_weight(self, weight: tuple[str, torch.Tensor]) -> str:
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
        ]
        params_dict = dict(self.named_parameters())

        name, loaded_weight = weight
        for pattern, repl in self.mistral_remapping:
            if re.fullmatch(pattern, name):
                name = re.sub(pattern, repl, name)

        for (param_name, weight_name, shard_id) in stacked_params_mapping:
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)

            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)

        return name

chunk_size property

chunk_size: int

config instance-attribute

config = cast(WhisperConfig, hf_config)

downsample_factor property

downsample_factor: int

dtype instance-attribute

dtype: dtype = dtype

mel_filters instance-attribute

mel_filters = tensor(mel_filters, dtype=float32)

mistral_remapping class-attribute instance-attribute

mistral_remapping = [
    (
        "whisper_encoder\\.conv_layers\\.0\\.(weight|bias)",
        "whisper_encoder.conv1.\\1",
    ),
    (
        "whisper_encoder\\.conv_layers\\.1\\.(weight|bias)",
        "whisper_encoder.conv2.\\1",
    ),
    (
        "whisper_encoder\\.transformer\\.layers\\.(\\d+)\\.attention\\.w([qkv])\\.(weight|bias)",
        "whisper_encoder.layers.\\1.self_attn.\\2_proj.\\3",
    ),
    (
        "whisper_encoder\\.transformer\\.layers\\.(\\d+)\\.attention\\.wo\\.(weight|bias)",
        "whisper_encoder.layers.\\1.self_attn.out_proj.\\2",
    ),
    (
        "whisper_encoder\\.transformer\\.layers\\.(\\d+)\\.attention_norm\\.(weight|bias)",
        "whisper_encoder.layers.\\1.self_attn_layer_norm.\\2",
    ),
    (
        "whisper_encoder\\.transformer\\.layers\\.(\\d+)\\.feed_forward\\.w1\\.(weight|bias)",
        "whisper_encoder.layers.\\1.mlp.fc1.\\2",
    ),
    (
        "whisper_encoder\\.transformer\\.layers\\.(\\d+)\\.feed_forward\\.w2\\.(weight|bias)",
        "whisper_encoder.layers.\\1.mlp.fc2.\\2",
    ),
    (
        "whisper_encoder\\.transformer\\.layers\\.(\\d+)\\.ffn_norm\\.(weight|bias)",
        "whisper_encoder.layers.\\1.final_layer_norm.\\2",
    ),
    (
        "whisper_encoder\\.transformer\\.norm\\.(weight|bias)",
        "whisper_encoder.layer_norm.\\1",
    ),
]

packed_modules_mapping class-attribute instance-attribute

packed_modules_mapping = {
    "qkv_proj": ["q_proj", "k_proj", "v_proj"]
}

whisper_encoder instance-attribute

whisper_encoder = WhisperEncoder(
    vllm_config=vllm_config,
    prefix=maybe_prefix(prefix, "whisper_encoder"),
    is_standalone_encoder=True,
    init_in_fp32=True,
)

__init__

__init__(
    vllm_config: VllmConfig, *, prefix: str = ""
) -> None
Source code in vllm/model_executor/models/voxtral.py
def __init__(
    self,
    vllm_config: VllmConfig,
    *,
    prefix: str = "",
) -> None:
    super().__init__()
    self.config = cast(WhisperConfig, vllm_config.model_config.hf_config)
    self.dtype: torch.dtype = vllm_config.model_config.dtype
    self.whisper_encoder = WhisperEncoder(vllm_config=vllm_config,
                                          prefix=maybe_prefix(
                                              prefix, "whisper_encoder"),
                                          is_standalone_encoder=True,
                                          init_in_fp32=True)
    mel_filters = mel_filter_bank(
        num_frequency_bins=1 + self.config.window_size // 2,
        num_mel_bins=self.config.num_mel_bins,
        min_frequency=0.0,
        max_frequency=8000.0,
        sampling_rate=self.config.sampling_rate,
    )
    self.mel_filters = torch.tensor(mel_filters, dtype=torch.float32)

compute_whisper_melspec

compute_whisper_melspec(audio_waveforms: Tensor) -> Tensor
Source code in vllm/model_executor/models/voxtral.py
def compute_whisper_melspec(
    self,
    audio_waveforms: torch.Tensor,
) -> torch.Tensor:
    input_dtype = audio_waveforms.dtype
    window = torch.hann_window(self.config.window_size).to(
        audio_waveforms.device)
    stft = torch.stft(
        audio_waveforms,
        self.config.window_size,
        self.config.hop_length,
        window=window,
        return_complex=True,
    )
    magnitudes = stft[..., :-1].abs()**2
    mel_spec = self.mel_filters.T @ magnitudes
    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    return log_spec.to(input_dtype)
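For reference, the same log-mel pipeline can be reproduced standalone. The sketch below uses assumed Whisper-style parameters (window_size=400, hop_length=160, 128 mel bins) and a random filter bank, so it only illustrates the shapes and the normalization steps:

import torch

window_size, hop_length, num_mel_bins = 400, 160, 128     # assumed values
waveform = torch.randn(16_000 * 2)                        # 2 s at 16 kHz
mel_filters = torch.rand(1 + window_size // 2, num_mel_bins)  # random stand-in

window = torch.hann_window(window_size)
stft = torch.stft(waveform, window_size, hop_length,
                  window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2                    # drop the last frame
mel_spec = mel_filters.T @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
print(log_spec.shape)   # [num_mel_bins, num_frames] -> torch.Size([128, 200])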

forward

forward(
    input_features: Union[Tensor, list[Tensor]],
) -> list[Tensor]
Source code in vllm/model_executor/models/voxtral.py
def forward(
    self, input_features: Union[torch.Tensor, list[torch.Tensor]]
) -> list[torch.Tensor]:
    if not isinstance(input_features, list):
        input_features = [input_features]

    # Split long inputs into chunks
    input_embeds, chunks_per_example = (
        self.prepare_inputs_for_conv(input_features))

    # [total_num_chunks, ceil(chunk_size / downsample_factor), hidden_size]
    out = self.whisper_encoder([input_embeds])

    # Re-concatenate the chunks
    chunk_idx = 0
    results = []
    for n_chunks in chunks_per_example:
        result = out[chunk_idx:chunk_idx + n_chunks].flatten(0, 1)
        results.append(result)
        chunk_idx += n_chunks

    return results
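The regrouping loop at the end of forward can be illustrated with dummy shapes (4 total chunks, 1500 frames per chunk, and a hidden size of 1280 are assumed here):

import torch

out = torch.randn(4, 1500, 1280)   # assumed encoder output
chunks_per_example = [1, 3]

chunk_idx, results = 0, []
for n_chunks in chunks_per_example:
    results.append(out[chunk_idx:chunk_idx + n_chunks].flatten(0, 1))
    chunk_idx += n_chunks

print([tuple(r.shape) for r in results])  # [(1500, 1280), (4500, 1280)]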

load_weight

load_weight(weight: tuple[str, Tensor]) -> str
Source code in vllm/model_executor/models/voxtral.py
def load_weight(self, weight: tuple[str, torch.Tensor]) -> str:
    stacked_params_mapping = [
        # (param_name, shard_name, shard_id)
        ("qkv_proj", "q_proj", "q"),
        ("qkv_proj", "k_proj", "k"),
        ("qkv_proj", "v_proj", "v"),
    ]
    params_dict = dict(self.named_parameters())

    name, loaded_weight = weight
    for pattern, repl in self.mistral_remapping:
        if re.fullmatch(pattern, name):
            name = re.sub(pattern, repl, name)

    for (param_name, weight_name, shard_id) in stacked_params_mapping:
        if weight_name not in name:
            continue
        name = name.replace(weight_name, param_name)

        param = params_dict[name]
        weight_loader = param.weight_loader
        weight_loader(param, loaded_weight, shard_id)
        break
    else:
        param = params_dict[name]
        weight_loader = getattr(param, "weight_loader",
                                default_weight_loader)
        weight_loader(param, loaded_weight)

    return name
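The mistral_remapping rules can be checked directly. A quick sketch, assuming vLLM is installed; only the regex remapping step is shown, the q/k/v fusing into qkv_proj happens afterwards inside load_weight:

import re

from vllm.model_executor.models.voxtral import VoxtralEncoderModel

name = "whisper_encoder.transformer.layers.7.attention.wq.weight"
for pattern, repl in VoxtralEncoderModel.mistral_remapping:
    if re.fullmatch(pattern, name):
        name = re.sub(pattern, repl, name)
print(name)  # whisper_encoder.layers.7.self_attn.q_proj.weight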

prepare_inputs_for_conv

prepare_inputs_for_conv(
    audio_waveforms: list[Tensor],
) -> tuple[Tensor, list[int]]
Source code in vllm/model_executor/models/voxtral.py
def prepare_inputs_for_conv(
    self,
    audio_waveforms: list[torch.Tensor],
) -> tuple[torch.Tensor, list[int]]:
    assert isinstance(audio_waveforms, list)
    # list[num_mel_bins, seq_len]
    input_features = [
        self.compute_whisper_melspec(audio).to(self.dtype)
        for audio in audio_waveforms
    ]

    chunked_features: list[torch.Tensor] = []
    chunks_per_example: list[int] = []
    for feature in input_features:
        chunks = feature.split(self.chunk_size, dim=-1)
        chunked_features += chunks
        chunks_per_example.append(len(chunks))

    # [total_num_chunks, num_mel_bins, chunk_size]
    return torch.stack(chunked_features), chunks_per_example
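A small sketch of the chunking step, assuming chunk_size is 3000 mel frames (30 s of Whisper frames). Note that torch.stack requires every chunk to have the full length, which is why the tokenizer pads the audio beforehand:

import torch

chunk_size = 3000                                   # assumed value
features = [torch.randn(128, 3000), torch.randn(128, 9000)]

chunked, chunks_per_example = [], []
for feature in features:
    chunks = feature.split(chunk_size, dim=-1)
    chunked += chunks
    chunks_per_example.append(len(chunks))

batch = torch.stack(chunked)
print(batch.shape, chunks_per_example)  # torch.Size([4, 128, 3000]) [1, 3]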

VoxtralForConditionalGeneration

Bases: Module, SupportsMultiModal, SupportsPP, SupportsTranscription

Source code in vllm/model_executor/models/voxtral.py
@MULTIMODAL_REGISTRY.register_processor(VoxtralMultiModalProcessor,
                                        info=VoxtralProcessingInfo,
                                        dummy_inputs=VoxtralDummyInputsBuilder)
class VoxtralForConditionalGeneration(nn.Module, SupportsMultiModal,
                                      SupportsPP, SupportsTranscription):
    supported_languages = ISO639_1_SUPPORTED_LANGS

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.tokenizer = cached_tokenizer_from_config(vllm_config.model_config)

        config = vllm_config.model_config.hf_config
        self.config = config
        self.downsample_factor = self.config.audio_config.downsample_factor

        self.language_model = init_vllm_registered_model(
            vllm_config=vllm_config,
            hf_config=config.text_config,
            prefix=maybe_prefix(prefix, "language_model"),
        )
        self.whisper_encoder = VoxtralEncoderModel(
            vllm_config.with_hf_config(config.audio_config),
            prefix=maybe_prefix(prefix, "whisper_encoder"),
        )
        self.audio_language_adapter = AudioLanguageAdapter(
            hidden_size=config.audio_config.d_model * self.downsample_factor,
            dim=config.text_config.hidden_size,
        )

    def get_language_model(self) -> torch.nn.Module:
        return self.language_model

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs: object,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        if intermediate_tensors is not None:
            inputs_embeds = None

        # NOTE: In v1, inputs_embeds is always generated at model runner, this
        # condition is for v0 compatibility.
        elif inputs_embeds is None:
            audio_embeddings = self.get_multimodal_embeddings(**kwargs)
            inputs_embeds = self.get_input_embeddings(input_ids,
                                                      audio_embeddings)
            input_ids = None

        hidden_states = self.language_model.model(input_ids,
                                                  positions,
                                                  intermediate_tensors,
                                                  inputs_embeds=inputs_embeds)

        return hidden_states

    def get_multimodal_embeddings(
        self, **kwargs
    ) -> Union[list[torch.Tensor], torch.Tensor, tuple[torch.Tensor, ...],
               None]:
        audio_inputs = self._parse_and_validate_audio_arrays(**kwargs)
        if audio_inputs is None:
            return None

        audio_embeddings = self.whisper_encoder(audio_inputs)

        for i, audio_embedding in enumerate(audio_embeddings):
            seq_len, dim = audio_embedding.shape
            # Pad such that seq_len is divisible by downsample_factor
            target_seq_len = self.downsample_factor * math.ceil(
                seq_len / self.downsample_factor)
            audio_embedding = torch.nn.functional.pad(
                audio_embedding,
                (0, 0, 0, target_seq_len - seq_len),
            )
            audio_embeddings[i] = audio_embedding.reshape(
                target_seq_len // self.downsample_factor,
                dim * self.downsample_factor)

        # Concat, project and resplit
        audio_embeddings_packed = torch.cat(audio_embeddings, dim=0)
        audio_embeddings_packed = self.audio_language_adapter(
            audio_embeddings_packed)
        audio_embeddings = torch.split(audio_embeddings_packed,
                                       [a.shape[0] for a in audio_embeddings],
                                       dim=0)

        return audio_embeddings

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
    ) -> torch.Tensor:
        audio_encoder = self.tokenizer.instruct.audio_encoder
        audio_tok_id = audio_encoder.audio_token

        inputs_embeds = self.language_model.get_input_embeddings(input_ids)
        if multimodal_embeddings is not None:
            inputs_embeds = merge_multimodal_embeddings(
                input_ids, inputs_embeds, multimodal_embeddings, audio_tok_id)
        return inputs_embeds

    def _parse_and_validate_audio_arrays(
            self, **kwargs: object) -> Union[list[torch.Tensor], None]:
        audio_arrays = kwargs.pop("audio_arrays", None)
        if audio_arrays is None:
            return None

        if not isinstance(audio_arrays, (torch.Tensor, list)):
            raise ValueError("Incorrect type of audio_arrays. "
                             f"Got type: {type(audio_arrays)}")

        audio_arrays = flatten_bn(audio_arrays)
        if isinstance(audio_arrays, torch.Tensor):
            audio_arrays = list(audio_arrays.unbind(0))
        return audio_arrays

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        return self.language_model.compute_logits(hidden_states,
                                                  sampling_metadata)

    @classmethod
    def get_speech_to_text_config(cls, model_config: ModelConfig,
                                  task_type: str) -> SpeechToTextConfig:
        tokenizer = cached_tokenizer_from_config(model_config)
        audio_config = tokenizer.instruct.audio_encoder.audio_config
        max_audio_clip_s = audio_config.chunk_length_s
        sample_rate = audio_config.sampling_rate
        return SpeechToTextConfig(
            max_audio_clip_s=max_audio_clip_s,
            sample_rate=sample_rate,
            # mistral_common and whisper encoder take care of chunking
            min_energy_split_window_size=None,
        )

    @classmethod
    # for speech-to-text transcription
    def get_generation_prompt(cls, audio: np.ndarray,
                              model_config: ModelConfig,
                              stt_config: SpeechToTextConfig,
                              language: Optional[str], task_type: str,
                              request_prompt: str) -> PromptType:
        tokenizer = cached_tokenizer_from_config(model_config)
        audio = Audio(audio, int(stt_config.sample_rate),
                      format="wav")  # lossless
        req = TranscriptionRequest(model=model_config.model,
                                   audio=RawAudio.from_audio(audio),
                                   language=language)

        tokenized = tokenizer.instruct.encode_transcription(req)
        audio = (tokenized.audios[0].audio_array, stt_config.sample_rate)
        prompts_dict = {"multi_modal_data": {"audio": audio}}
        prompts_dict["prompt_token_ids"] = tokenized.tokens
        return cast(PromptType, prompts_dict)

    @classmethod
    def get_num_audio_tokens(cls, audio_duration_s: float,
                             stt_config: SpeechToTextConfig,
                             model_config: ModelConfig) -> Optional[int]:
        """
        Map from audio duration to number of audio tokens produced by the ASR 
        model, without running a forward pass.
        This is used for estimating the amount of processing for this audio.
        """
        tokenizer = cached_tokenizer_from_config(model_config)
        adapter = VoxtralProcessorAdapter(tokenizer)
        return adapter.get_num_audio_tokens(
            int(audio_duration_s * stt_config.sample_rate))

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        # fmt: off
        remapping_rules = [
            (r"mm_whisper_embeddings\.(.*)", r"\1"),
            (r"audio_language_projection\.(.*)", r"audio_language_adapter.\1"),
            (r"audio_language_adapter\.0\.weight", r"audio_language_adapter.w_in.weight"),  # noqa: E501
            (r"audio_language_adapter\.2\.weight", r"audio_language_adapter.w_out.weight"),  # noqa: E501
        ]
        # fmt: on

        audio_params = dict(
            nn.ModuleDict({
                "audio_language_adapter":
                self.audio_language_adapter,
            }).named_parameters())

        loaded_weights = set()

        def llm_weights_generator():
            nonlocal loaded_weights
            for name, w in weights:
                is_encoder = (
                    name.startswith("mm_whisper_embeddings") and
                    not name.startswith("mm_whisper_embeddings.tok_embeddings")
                    and not name.startswith(
                        "mm_whisper_embeddings.audio_language_projection"))

                for pattern, repl in remapping_rules:
                    if re.fullmatch(pattern, name):
                        name = re.sub(pattern, repl, name)

                if is_encoder:
                    name = self.whisper_encoder.load_weight((name, w))
                    loaded_weights.add(f"whisper_encoder.{name}")
                    continue

                if name in audio_params:
                    param = audio_params[name]
                    with torch.no_grad():
                        default_weight_loader(param, w)
                    loaded_weights.add(name)
                else:
                    yield (name, w)

        for name in self.language_model.load_weights(llm_weights_generator()):
            loaded_weights.add(f"language_model.{name}")

        # potentially manually add position embeddings
        sin_key = "whisper_encoder.whisper_encoder.embed_positions.weight"
        if sin_key not in loaded_weights:
            # make sure we don't hit an error here
            loaded_weights.add(sin_key)

        return loaded_weights

audio_language_adapter instance-attribute

audio_language_adapter = AudioLanguageAdapter(
    hidden_size=d_model * downsample_factor, dim=hidden_size
)

config instance-attribute

config = config

downsample_factor instance-attribute

downsample_factor = downsample_factor

language_model instance-attribute

language_model = init_vllm_registered_model(
    vllm_config=vllm_config,
    hf_config=text_config,
    prefix=maybe_prefix(prefix, "language_model"),
)

supported_languages class-attribute instance-attribute

supported_languages = ISO639_1_SUPPORTED_LANGS

tokenizer instance-attribute

tokenizer = cached_tokenizer_from_config(model_config)

whisper_encoder instance-attribute

whisper_encoder = VoxtralEncoderModel(
    with_hf_config(audio_config),
    prefix=maybe_prefix(prefix, "whisper_encoder"),
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/voxtral.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    self.tokenizer = cached_tokenizer_from_config(vllm_config.model_config)

    config = vllm_config.model_config.hf_config
    self.config = config
    self.downsample_factor = self.config.audio_config.downsample_factor

    self.language_model = init_vllm_registered_model(
        vllm_config=vllm_config,
        hf_config=config.text_config,
        prefix=maybe_prefix(prefix, "language_model"),
    )
    self.whisper_encoder = VoxtralEncoderModel(
        vllm_config.with_hf_config(config.audio_config),
        prefix=maybe_prefix(prefix, "whisper_encoder"),
    )
    self.audio_language_adapter = AudioLanguageAdapter(
        hidden_size=config.audio_config.d_model * self.downsample_factor,
        dim=config.text_config.hidden_size,
    )

_parse_and_validate_audio_arrays

_parse_and_validate_audio_arrays(
    **kwargs: object,
) -> Union[list[Tensor], None]
Source code in vllm/model_executor/models/voxtral.py
def _parse_and_validate_audio_arrays(
        self, **kwargs: object) -> Union[list[torch.Tensor], None]:
    audio_arrays = kwargs.pop("audio_arrays", None)
    if audio_arrays is None:
        return None

    if not isinstance(audio_arrays, (torch.Tensor, list)):
        raise ValueError("Incorrect type of audio_arrays. "
                         f"Got type: {type(audio_arrays)}")

    audio_arrays = flatten_bn(audio_arrays)
    if isinstance(audio_arrays, torch.Tensor):
        audio_arrays = list(audio_arrays.unbind(0))
    return audio_arrays

compute_logits

compute_logits(
    hidden_states: Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[Tensor]
Source code in vllm/model_executor/models/voxtral.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[torch.Tensor]:
    return self.language_model.compute_logits(hidden_states,
                                              sampling_metadata)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
    **kwargs: object,
) -> Union[Tensor, IntermediateTensors]
Source code in vllm/model_executor/models/voxtral.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
    **kwargs: object,
) -> Union[torch.Tensor, IntermediateTensors]:
    if intermediate_tensors is not None:
        inputs_embeds = None

    # NOTE: In v1, inputs_embeds is always generated at model runner, this
    # condition is for v0 compatibility.
    elif inputs_embeds is None:
        audio_embeddings = self.get_multimodal_embeddings(**kwargs)
        inputs_embeds = self.get_input_embeddings(input_ids,
                                                  audio_embeddings)
        input_ids = None

    hidden_states = self.language_model.model(input_ids,
                                              positions,
                                              intermediate_tensors,
                                              inputs_embeds=inputs_embeds)

    return hidden_states

get_generation_prompt classmethod

get_generation_prompt(
    audio: ndarray,
    model_config: ModelConfig,
    stt_config: SpeechToTextConfig,
    language: Optional[str],
    task_type: str,
    request_prompt: str,
) -> PromptType
Source code in vllm/model_executor/models/voxtral.py
@classmethod
# for speech-to-text transcription
def get_generation_prompt(cls, audio: np.ndarray,
                          model_config: ModelConfig,
                          stt_config: SpeechToTextConfig,
                          language: Optional[str], task_type: str,
                          request_prompt: str) -> PromptType:
    tokenizer = cached_tokenizer_from_config(model_config)
    audio = Audio(audio, int(stt_config.sample_rate),
                  format="wav")  # lossless
    req = TranscriptionRequest(model=model_config.model,
                               audio=RawAudio.from_audio(audio),
                               language=language)

    tokenized = tokenizer.instruct.encode_transcription(req)
    audio = (tokenized.audios[0].audio_array, stt_config.sample_rate)
    prompts_dict = {"multi_modal_data": {"audio": audio}}
    prompts_dict["prompt_token_ids"] = tokenized.tokens
    return cast(PromptType, prompts_dict)

get_input_embeddings

get_input_embeddings(
    input_ids: Tensor,
    multimodal_embeddings: Optional[
        MultiModalEmbeddings
    ] = None,
) -> Tensor
Source code in vllm/model_executor/models/voxtral.py
def get_input_embeddings(
    self,
    input_ids: torch.Tensor,
    multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
) -> torch.Tensor:
    audio_encoder = self.tokenizer.instruct.audio_encoder
    audio_tok_id = audio_encoder.audio_token

    inputs_embeds = self.language_model.get_input_embeddings(input_ids)
    if multimodal_embeddings is not None:
        inputs_embeds = merge_multimodal_embeddings(
            input_ids, inputs_embeds, multimodal_embeddings, audio_tok_id)
    return inputs_embeds

get_language_model

get_language_model() -> Module
Source code in vllm/model_executor/models/voxtral.py
def get_language_model(self) -> torch.nn.Module:
    return self.language_model

get_multimodal_embeddings

get_multimodal_embeddings(
    **kwargs,
) -> Union[list[Tensor], Tensor, tuple[Tensor, ...], None]
Source code in vllm/model_executor/models/voxtral.py
def get_multimodal_embeddings(
    self, **kwargs
) -> Union[list[torch.Tensor], torch.Tensor, tuple[torch.Tensor, ...],
           None]:
    audio_inputs = self._parse_and_validate_audio_arrays(**kwargs)
    if audio_inputs is None:
        return None

    audio_embeddings = self.whisper_encoder(audio_inputs)

    for i, audio_embedding in enumerate(audio_embeddings):
        seq_len, dim = audio_embedding.shape
        # Pad such that seq_len is divisible by downsample_factor
        target_seq_len = self.downsample_factor * math.ceil(
            seq_len / self.downsample_factor)
        audio_embedding = torch.nn.functional.pad(
            audio_embedding,
            (0, 0, 0, target_seq_len - seq_len),
        )
        audio_embeddings[i] = audio_embedding.reshape(
            target_seq_len // self.downsample_factor,
            dim * self.downsample_factor)

    # Concat, project and resplit
    audio_embeddings_packed = torch.cat(audio_embeddings, dim=0)
    audio_embeddings_packed = self.audio_language_adapter(
        audio_embeddings_packed)
    audio_embeddings = torch.split(audio_embeddings_packed,
                                   [a.shape[0] for a in audio_embeddings],
                                   dim=0)

    return audio_embeddings
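The pad-and-reshape step groups every downsample_factor encoder frames into one wider embedding before the adapter projection. A standalone sketch, with an assumed downsample_factor of 4 and an arbitrary encoder output:

import math

import torch

downsample_factor = 4                  # assumed value
emb = torch.randn(1498, 1280)          # arbitrary [seq_len, dim] encoder output

seq_len, dim = emb.shape
target_seq_len = downsample_factor * math.ceil(seq_len / downsample_factor)
emb = torch.nn.functional.pad(emb, (0, 0, 0, target_seq_len - seq_len))
emb = emb.reshape(target_seq_len // downsample_factor, dim * downsample_factor)
print(emb.shape)                       # torch.Size([375, 5120])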

get_num_audio_tokens classmethod

get_num_audio_tokens(
    audio_duration_s: float,
    stt_config: SpeechToTextConfig,
    model_config: ModelConfig,
) -> Optional[int]

Map from audio duration to number of audio tokens produced by the ASR model, without running a forward pass. This is used for estimating the amount of processing for this audio.

Source code in vllm/model_executor/models/voxtral.py
@classmethod
def get_num_audio_tokens(cls, audio_duration_s: float,
                         stt_config: SpeechToTextConfig,
                         model_config: ModelConfig) -> Optional[int]:
    """
    Map from audio duration to number of audio tokens produced by the ASR 
    model, without running a forward pass.
    This is used for estimating the amount of processing for this audio.
    """
    tokenizer = cached_tokenizer_from_config(model_config)
    adapter = VoxtralProcessorAdapter(tokenizer)
    return adapter.get_num_audio_tokens(
        int(audio_duration_s * stt_config.sample_rate))
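A back-of-the-envelope version of this estimate, using assumed values (16 kHz sampling rate, 12.5 tokens per second) rather than the tokenizer's real audio config:

from math import ceil

sample_rate, frame_rate = 16_000, 12.5   # assumed values
audio_duration_s = 30.0

# 30 s at 16 kHz is already a whole number of chunks, so no extra padding here
padded_samples = int(audio_duration_s * sample_rate)
num_tokens = ceil(padded_samples / (sample_rate // frame_rate))
print(num_tokens)  # 375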

get_speech_to_text_config classmethod

get_speech_to_text_config(
    model_config: ModelConfig, task_type: str
) -> SpeechToTextConfig
Source code in vllm/model_executor/models/voxtral.py
@classmethod
def get_speech_to_text_config(cls, model_config: ModelConfig,
                              task_type: str) -> SpeechToTextConfig:
    tokenizer = cached_tokenizer_from_config(model_config)
    audio_config = tokenizer.instruct.audio_encoder.audio_config
    max_audio_clip_s = audio_config.chunk_length_s
    sample_rate = audio_config.sampling_rate
    return SpeechToTextConfig(
        max_audio_clip_s=max_audio_clip_s,
        sample_rate=sample_rate,
        # mistral_common and whisper encoder take care of chunking
        min_energy_split_window_size=None,
    )

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/voxtral.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    # fmt: off
    remapping_rules = [
        (r"mm_whisper_embeddings\.(.*)", r"\1"),
        (r"audio_language_projection\.(.*)", r"audio_language_adapter.\1"),
        (r"audio_language_adapter\.0\.weight", r"audio_language_adapter.w_in.weight"),  # noqa: E501
        (r"audio_language_adapter\.2\.weight", r"audio_language_adapter.w_out.weight"),  # noqa: E501
    ]
    # fmt: on

    audio_params = dict(
        nn.ModuleDict({
            "audio_language_adapter":
            self.audio_language_adapter,
        }).named_parameters())

    loaded_weights = set()

    def llm_weights_generator():
        nonlocal loaded_weights
        for name, w in weights:
            is_encoder = (
                name.startswith("mm_whisper_embeddings") and
                not name.startswith("mm_whisper_embeddings.tok_embeddings")
                and not name.startswith(
                    "mm_whisper_embeddings.audio_language_projection"))

            for pattern, repl in remapping_rules:
                if re.fullmatch(pattern, name):
                    name = re.sub(pattern, repl, name)

            if is_encoder:
                name = self.whisper_encoder.load_weight((name, w))
                loaded_weights.add(f"whisper_encoder.{name}")
                continue

            if name in audio_params:
                param = audio_params[name]
                with torch.no_grad():
                    default_weight_loader(param, w)
                loaded_weights.add(name)
            else:
                yield (name, w)

    for name in self.language_model.load_weights(llm_weights_generator()):
        loaded_weights.add(f"language_model.{name}")

    # potentially manually add position embeddings
    sin_key = "whisper_encoder.whisper_encoder.embed_positions.weight"
    if sin_key not in loaded_weights:
        # make sure we don't hit an error here
        loaded_weights.add(sin_key)

    return loaded_weights

VoxtralMultiModalProcessor

Bases: BaseMultiModalProcessor[VoxtralProcessingInfo]

Source code in vllm/model_executor/models/voxtral.py
class VoxtralMultiModalProcessor(BaseMultiModalProcessor[VoxtralProcessingInfo]
                                 ):

    def _get_mm_fields_config(
        self,
        hf_inputs: Mapping[str, NestedTensors],
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return dict(audio_arrays=MultiModalFieldConfig.batched("audio"))

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)

        audio_id = processor.audio_token_id

        def get_replacement(item_idx: int):
            audios = mm_items.get_items("audio", AudioProcessorItems)
            audio_len = audios.get_audio_length(item_idx)

            nb_audio_tokens = processor.get_num_audio_tokens(audio_len)

            return [audio_id] * nb_audio_tokens

        return [
            PromptReplacement(
                modality="audio",
                target="",  # Never match the prompt (see below note)
                replacement=get_replacement,
            ),
        ]

    def _cached_apply_hf_processor(
        self,
        prompt: Union[str, list[int]],
        mm_data_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
    ) -> tuple[list[int], MultiModalProcessingInfo, bool]:
        prompt_ids, mm_info, _ = super()._cached_apply_hf_processor(
            prompt=prompt,
            mm_data_items=mm_data_items,
            hf_processor_mm_kwargs=hf_processor_mm_kwargs,
            tokenization_kwargs=tokenization_kwargs,
        )

        # NOTE: The tokens are already inserted by the chat template
        return prompt_ids, mm_info, True

    def _get_data_parser(self) -> MultiModalDataParser:
        sampling_rate = self.info.get_hf_processor().sampling_rate
        return MultiModalDataParser(target_sr=sampling_rate)

_cached_apply_hf_processor

_cached_apply_hf_processor(
    prompt: Union[str, list[int]],
    mm_data_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    tokenization_kwargs: Mapping[str, object],
) -> tuple[list[int], MultiModalProcessingInfo, bool]
Source code in vllm/model_executor/models/voxtral.py
def _cached_apply_hf_processor(
    self,
    prompt: Union[str, list[int]],
    mm_data_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    tokenization_kwargs: Mapping[str, object],
) -> tuple[list[int], MultiModalProcessingInfo, bool]:
    prompt_ids, mm_info, _ = super()._cached_apply_hf_processor(
        prompt=prompt,
        mm_data_items=mm_data_items,
        hf_processor_mm_kwargs=hf_processor_mm_kwargs,
        tokenization_kwargs=tokenization_kwargs,
    )

    # NOTE: The tokens are already inserted by the chat template
    return prompt_ids, mm_info, True

_get_data_parser

_get_data_parser() -> MultiModalDataParser
Source code in vllm/model_executor/models/voxtral.py
def _get_data_parser(self) -> MultiModalDataParser:
    sampling_rate = self.info.get_hf_processor().sampling_rate
    return MultiModalDataParser(target_sr=sampling_rate)

_get_mm_fields_config

_get_mm_fields_config(
    hf_inputs: Mapping[str, NestedTensors],
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]
Source code in vllm/model_executor/models/voxtral.py
def _get_mm_fields_config(
    self,
    hf_inputs: Mapping[str, NestedTensors],
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]:
    return dict(audio_arrays=MultiModalFieldConfig.batched("audio"))

_get_prompt_updates

_get_prompt_updates(
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    out_mm_kwargs: MultiModalKwargsItems,
) -> Sequence[PromptUpdate]
Source code in vllm/model_executor/models/voxtral.py
def _get_prompt_updates(
    self,
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    out_mm_kwargs: MultiModalKwargsItems,
) -> Sequence[PromptUpdate]:
    processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)

    audio_id = processor.audio_token_id

    def get_replacement(item_idx: int):
        audios = mm_items.get_items("audio", AudioProcessorItems)
        audio_len = audios.get_audio_length(item_idx)

        nb_audio_tokens = processor.get_num_audio_tokens(audio_len)

        return [audio_id] * nb_audio_tokens

    return [
        PromptReplacement(
            modality="audio",
            target="",  # Never match the prompt (see below note)
            replacement=get_replacement,
        ),
    ]

VoxtralProcessingInfo

Bases: BaseProcessingInfo

Source code in vllm/model_executor/models/voxtral.py
class VoxtralProcessingInfo(BaseProcessingInfo):

    def get_tokenizer(self) -> MistralTokenizer:
        tokenizer = cached_tokenizer_from_config(self.ctx.model_config)
        if not isinstance(tokenizer, MistralTokenizer):
            raise ValueError("This model requires `--tokenizer-mode mistral`")

        return tokenizer

    def get_hf_processor(self) -> VoxtralProcessorAdapter:
        return VoxtralProcessorAdapter(self.get_tokenizer())

    def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
        return {"audio": 5}  # Performance tends to degrade after 5

    def get_mm_max_tokens_per_item(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> Mapping[str, int]:
        return {"audio": self.get_max_audio_tokens()}

    def get_max_audio_tokens(self) -> int:
        return self.ctx.model_config.max_model_len

    def get_max_audio_array_len(self) -> int:
        processor = self.get_hf_processor()
        return self.get_max_audio_tokens() * int(
            processor.sampling_rate // processor.frame_rate)

get_hf_processor

get_hf_processor() -> VoxtralProcessorAdapter
Source code in vllm/model_executor/models/voxtral.py
def get_hf_processor(self) -> VoxtralProcessorAdapter:
    return VoxtralProcessorAdapter(self.get_tokenizer())

get_max_audio_array_len

get_max_audio_array_len() -> int
Source code in vllm/model_executor/models/voxtral.py
def get_max_audio_array_len(self) -> int:
    processor = self.get_hf_processor()
    return self.get_max_audio_tokens() * int(
        processor.sampling_rate // processor.frame_rate)

get_max_audio_tokens

get_max_audio_tokens() -> int
Source code in vllm/model_executor/models/voxtral.py
def get_max_audio_tokens(self) -> int:
    return self.ctx.model_config.max_model_len

get_mm_max_tokens_per_item

get_mm_max_tokens_per_item(
    seq_len: int, mm_counts: Mapping[str, int]
) -> Mapping[str, int]
Source code in vllm/model_executor/models/voxtral.py
def get_mm_max_tokens_per_item(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> Mapping[str, int]:
    return {"audio": self.get_max_audio_tokens()}

get_supported_mm_limits

get_supported_mm_limits() -> Mapping[str, Optional[int]]
Source code in vllm/model_executor/models/voxtral.py
def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
    return {"audio": 5}  # Performance tends to degrade after 5

get_tokenizer

get_tokenizer() -> MistralTokenizer
Source code in vllm/model_executor/models/voxtral.py
def get_tokenizer(self) -> MistralTokenizer:
    tokenizer = cached_tokenizer_from_config(self.ctx.model_config)
    if not isinstance(tokenizer, MistralTokenizer):
        raise ValueError("This model requires `--tokenizer-mode mistral`")

    return tokenizer

VoxtralProcessorAdapter

Provide a HF-compatible interface for mistral_common.tokens.tokenizers.multimodal.AudioEncoder.

Source code in vllm/model_executor/models/voxtral.py
class VoxtralProcessorAdapter:
    """
    Provide a HF-compatible interface for
    :class:`mistral_common.tokens.tokenizers.multimodal.AudioEncoder`.
    """

    def __init__(self, tokenizer: MistralTokenizer) -> None:
        super().__init__()
        self.tokenizer = tokenizer

    @cached_property
    def _audio_processor(self) -> AudioEncoder:
        audio_encoder = self.tokenizer.instruct.audio_encoder
        assert isinstance(audio_encoder, AudioEncoder)
        return audio_encoder

    @cached_property
    def audio_token_id(self) -> int:
        return self._audio_processor.special_ids.audio

    @cached_property
    def begin_audio_token_id(self) -> int:
        return self._audio_processor.special_ids.begin_audio

    # @cached_property
    # def begin_transcript_token_id(self) -> int:
    #     return self._audio_processor.special_ids.begin_transcript

    # @cached_property
    # def end_transcript_token_id(self) -> int:
    #     return self._audio_processor.special_ids.end_transcript

    @cached_property
    def sampling_rate(self) -> int:
        return self._audio_processor.audio_config.sampling_rate

    @cached_property
    def frame_rate(self) -> float:
        return self._audio_processor.audio_config.frame_rate

    def get_num_audio_tokens(
        self,
        audio_length: int,
    ) -> int:
        pad_audio_length = self._audio_processor.next_multiple_of_chunk_frames(
            audio_length, self.sampling_rate)
        return ceil(pad_audio_length / (self.sampling_rate // self.frame_rate))

    def __call__(
        self,
        text: Optional[Union[TextInput, list[TextInput]]] = None,
        audios: Optional[Union[np.ndarray, list[np.ndarray]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> Mapping[str, NestedTensors]:
        if text is None:
            text = []
        if not isinstance(text, list):
            text = [text]
        if audios is None:
            audios = []
        if not isinstance(audios, list):
            audios = [audios]

        if not audios:
            input_ids = self.tokenizer(text).input_ids
            return {"input_ids": torch.tensor(input_ids)}

        # Allow dummy text, which is used for profiling as well as token inputs
        if any(len(t) > 0 for t in text):
            raise ValueError(
                "You've passed text inputs instead of token inputs. "
                "Make sure to process your input via `mistral_common`'s "
                "tokenizer or pass a chat completion request. "
                "For more info, see: "
                "https://github.com/vllm-project/vllm/issues/8411.")

        audios_tokens = list[torch.Tensor]()
        audios_processed = list[torch.Tensor]()
        for audio in audios:
            assert isinstance(audio, np.ndarray)
            assert audio.ndim == 1

            # pad if necessary
            audio = self._audio_processor.pad(audio, self.sampling_rate)

            audio_tokens = [
                self.begin_audio_token_id
            ] + [self.audio_token_id] * self.get_num_audio_tokens(len(audio))

            audios_tokens.append(torch.tensor(audio_tokens))
            audios_processed.append(torch.tensor(audio))

        return {
            "input_ids": torch.cat(audios_tokens)[None].expand(len(text), -1),
            "audio_arrays": audios_processed,
        }

_audio_processor cached property

_audio_processor: AudioEncoder

audio_token_id cached property

audio_token_id: int

begin_audio_token_id cached property

begin_audio_token_id: int

frame_rate cached property

frame_rate: float

sampling_rate cached property

sampling_rate: int

tokenizer instance-attribute

tokenizer = tokenizer

__call__

__call__(
    text: Optional[
        Union[TextInput, list[TextInput]]
    ] = None,
    audios: Optional[Union[ndarray, list[ndarray]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    **kwargs,
) -> Mapping[str, NestedTensors]
Source code in vllm/model_executor/models/voxtral.py
def __call__(
    self,
    text: Optional[Union[TextInput, list[TextInput]]] = None,
    audios: Optional[Union[np.ndarray, list[np.ndarray]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    **kwargs,
) -> Mapping[str, NestedTensors]:
    if text is None:
        text = []
    if not isinstance(text, list):
        text = [text]
    if audios is None:
        audios = []
    if not isinstance(audios, list):
        audios = [audios]

    if not audios:
        input_ids = self.tokenizer(text).input_ids
        return {"input_ids": torch.tensor(input_ids)}

    # Allow dummy text, which is used for profiling as well as token inputs
    if any(len(t) > 0 for t in text):
        raise ValueError(
            "You've passed text inputs instead of token inputs. "
            "Make sure to process your input via `mistral_common`'s "
            "tokenizer or pass a chat completion request. "
            "For more info, see: "
            "https://github.com/vllm-project/vllm/issues/8411.")

    audios_tokens = list[torch.Tensor]()
    audios_processed = list[torch.Tensor]()
    for audio in audios:
        assert isinstance(audio, np.ndarray)
        assert audio.ndim == 1

        # pad if necessary
        audio = self._audio_processor.pad(audio, self.sampling_rate)

        audio_tokens = [
            self.begin_audio_token_id
        ] + [self.audio_token_id] * self.get_num_audio_tokens(len(audio))

        audios_tokens.append(torch.tensor(audio_tokens))
        audios_processed.append(torch.tensor(audio))

    return {
        "input_ids": torch.cat(audios_tokens)[None].expand(len(text), -1),
        "audio_arrays": audios_processed,
    }

__init__

__init__(tokenizer: MistralTokenizer) -> None
Source code in vllm/model_executor/models/voxtral.py
def __init__(self, tokenizer: MistralTokenizer) -> None:
    super().__init__()
    self.tokenizer = tokenizer

get_num_audio_tokens

get_num_audio_tokens(audio_length: int) -> int
Source code in vllm/model_executor/models/voxtral.py
def get_num_audio_tokens(
    self,
    audio_length: int,
) -> int:
    pad_audio_length = self._audio_processor.next_multiple_of_chunk_frames(
        audio_length, self.sampling_rate)
    return ceil(pad_audio_length / (self.sampling_rate // self.frame_rate))