Bases: DeepSeekR1ReasoningParser
Reasoning parser for Mistral models.
Mistral models use [THINK]...[/THINK] tokens to delimit reasoning text. This parser extracts the reasoning content from the model output.
Source code in vllm/reasoning/mistral_reasoning_parser.py
```python
@ReasoningParserManager.register_module("mistral")
class MistralReasoningParser(DeepSeekR1ReasoningParser):
    """
    Reasoning parser for Mistral models.

    The Mistral models uses [THINK]...[/THINK] tokens to denote reasoning
    text. This parser extracts the reasoning content from the model output.
    """

    def __init__(self, tokenizer: MistralTokenizer):
        if not isinstance(tokenizer, MistralTokenizer):
            raise ValueError(
                "The tokenizer must be an instance of MistralTokenizer.")

        ReasoningParser.__init__(self, tokenizer)

        if not self.model_tokenizer:
            raise ValueError(
                "The model tokenizer must be passed to the ReasoningParser "
                "constructor during construction.")

        from mistral_common.tokens.tokenizers.base import SpecialTokens

        self.start_token = SpecialTokens.begin_think
        self.end_token = SpecialTokens.end_think

        self.start_token_id = tokenizer.tokenizer.get_control_token(
            self.start_token)
        self.end_token_id = tokenizer.tokenizer.get_control_token(
            self.end_token)

        if self.start_token_id is None or self.end_token_id is None:
            raise RuntimeError(
                "Mistral reasoning parser could not locate think start/end "
                "tokens in the tokenizer!")
```
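For orientation, the sketch below shows the kind of split this parser produces on a decoded completion: everything between [THINK] and [/THINK] becomes the reasoning content, and the remainder is the final answer. The helper is hypothetical and purely illustrative; the real parser inherits its extraction logic from DeepSeekR1ReasoningParser and operates on the tokenizer's control tokens.

```python
# Hypothetical helper, not part of vLLM: illustrates the [THINK]...[/THINK]
# split described above on plain decoded text.
def split_reasoning(text: str,
                    start: str = "[THINK]",
                    end: str = "[/THINK]") -> tuple[str | None, str]:
    if start in text and end in text:
        before, _, rest = text.partition(start)
        reasoning, _, answer = rest.partition(end)
        return reasoning.strip(), (before + answer).strip()
    return None, text  # no think block: everything is final content


reasoning, content = split_reasoning(
    "[THINK]2 + 2 is 4, answer directly.[/THINK]The answer is 4.")
print(reasoning)  # 2 + 2 is 4, answer directly.
print(content)    # The answer is 4.
```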
end_token instance-attribute
end_token = end_think
end_token_id instance-attribute
end_token_id = get_control_token(end_token)
start_token instance-attribute
start_token = begin_think
start_token_id instance-attribute
start_token_id = get_control_token(start_token)
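Because the constructor resolves the control-token IDs up front, reasoning boundaries can be found directly in generated token IDs without decoding. A minimal sketch of that idea, using made-up ID values rather than Mistral's real control-token IDs:

```python
# Illustrative only: 10 and 11 are made-up stand-ins for start_token_id
# and end_token_id; the real values come from get_control_token(...).
START_ID, END_ID = 10, 11


def split_token_ids(token_ids: list[int]) -> tuple[list[int], list[int]]:
    """Return (reasoning_ids, content_ids) for one finished sequence."""
    if START_ID in token_ids and END_ID in token_ids:
        begin = token_ids.index(START_ID) + 1
        end = token_ids.index(END_ID)
        return token_ids[begin:end], token_ids[end + 1:]
    return [], token_ids


print(split_token_ids([10, 7, 8, 9, 11, 42, 43]))  # ([7, 8, 9], [42, 43])
```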
__init__
Source code in vllm/reasoning/mistral_reasoning_parser.py
```python
def __init__(self, tokenizer: MistralTokenizer):
    if not isinstance(tokenizer, MistralTokenizer):
        raise ValueError(
            "The tokenizer must be an instance of MistralTokenizer.")

    ReasoningParser.__init__(self, tokenizer)

    if not self.model_tokenizer:
        raise ValueError(
            "The model tokenizer must be passed to the ReasoningParser "
            "constructor during construction.")

    from mistral_common.tokens.tokenizers.base import SpecialTokens

    self.start_token = SpecialTokens.begin_think
    self.end_token = SpecialTokens.end_think

    self.start_token_id = tokenizer.tokenizer.get_control_token(
        self.start_token)
    self.end_token_id = tokenizer.tokenizer.get_control_token(
        self.end_token)

    if self.start_token_id is None or self.end_token_id is None:
        raise RuntimeError(
            "Mistral reasoning parser could not locate think start/end "
            "tokens in the tokenizer!")
```
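A hedged construction sketch follows: the MistralTokenizer import path, its from_pretrained classmethod, and the example model id are assumptions not confirmed by the source above, while the parser import follows the file path shown in the listing.

```python
# Sketch only: the tokenizer import path, from_pretrained, and the model id
# are assumptions; adjust them to your environment.
from vllm.reasoning.mistral_reasoning_parser import MistralReasoningParser
from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer

tokenizer = MistralTokenizer.from_pretrained("mistralai/Magistral-Small-2506")

parser = MistralReasoningParser(tokenizer)
print(parser.start_token_id, parser.end_token_id)  # resolved control-token IDs

# Passing a non-Mistral tokenizer raises ValueError, per the constructor above.
```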