vllm.entrypoints.openai.serving_chat

logger module-attribute

logger = init_logger(__name__)

OpenAIServingChat

Bases: OpenAIServing
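
OpenAIServingChat implements the OpenAI-compatible Chat Completions endpoint on top of OpenAIServing, including streaming responses, tool calling, and reasoning content. Below is a minimal client-side sketch of calling the endpoint this class serves; it is illustrative only and assumes a vLLM server already running at http://localhost:8000 with a placeholder model name.

    from openai import OpenAI

    # base_url, api_key, and the model name are assumptions for illustration;
    # substitute the values of your own vLLM deployment.
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

    stream = client.chat.completions.create(
        model="my-model",  # placeholder model name
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True,  # served by chat_completion_stream_generator below
    )
    for chunk in stream:
        # Each SSE chunk corresponds to one ChatCompletionStreamResponse
        # emitted by chat_completion_stream_generator.
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")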

Source code in vllm/entrypoints/openai/serving_chat.py
class OpenAIServingChat(OpenAIServing):

    def __init__(
        self,
        engine_client: EngineClient,
        model_config: ModelConfig,
        models: OpenAIServingModels,
        response_role: str,
        *,
        request_logger: Optional[RequestLogger],
        chat_template: Optional[str],
        chat_template_content_format: ChatTemplateContentFormatOption,
        return_tokens_as_token_ids: bool = False,
        reasoning_parser: str = "",
        enable_auto_tools: bool = False,
        exclude_tools_when_tool_choice_none: bool = False,
        tool_parser: Optional[str] = None,
        enable_prompt_tokens_details: bool = False,
        enable_force_include_usage: bool = False,
        enable_log_outputs: bool = False,
    ) -> None:
        super().__init__(engine_client=engine_client,
                         model_config=model_config,
                         models=models,
                         request_logger=request_logger,
                         return_tokens_as_token_ids=return_tokens_as_token_ids,
                         enable_force_include_usage=enable_force_include_usage)

        self.response_role = response_role
        self.chat_template = chat_template
        self.chat_template_content_format: Final = chat_template_content_format
        self.enable_log_outputs = enable_log_outputs

        # set up tool use
        self.enable_auto_tools: bool = enable_auto_tools
        if self.enable_auto_tools:
            logger.info(
                "\"auto\" tool choice has been enabled please note that while"
                " the parallel_tool_calls client option is preset for "
                "compatibility reasons, it will be ignored.")

        self.reasoning_parser: Optional[Callable[[AnyTokenizer],
                                                 ReasoningParser]] = None
        if reasoning_parser:
            try:
                self.reasoning_parser = (
                    ReasoningParserManager.get_reasoning_parser(
                        reasoning_parser))
                assert self.reasoning_parser is not None
            except Exception as e:
                raise TypeError(
                    f"{reasoning_parser=} has not been registered") from e
        self.tool_parser: Optional[Callable[[AnyTokenizer], ToolParser]] = None
        if self.enable_auto_tools:
            try:
                if (tool_parser == "pythonic" and
                        model_config.model.startswith("meta-llama/Llama-3.2")):
                    logger.warning(
                        "Llama3.2 models may struggle to emit valid pythonic"
                        " tool calls")
                self.tool_parser = ToolParserManager.get_tool_parser(
                    tool_parser)
            except Exception as e:
                raise TypeError("Error: --enable-auto-tool-choice requires "
                                f"tool_parser:'{tool_parser}' which has not "
                                "been registered") from e
        self.exclude_tools_when_tool_choice_none = (
            exclude_tools_when_tool_choice_none)

        self.enable_prompt_tokens_details = enable_prompt_tokens_details
        self.enable_force_include_usage = enable_force_include_usage
        self.default_sampling_params = (
            self.model_config.get_diff_sampling_param())
        if self.default_sampling_params:
            source = self.model_config.generation_config
            source = "model" if source == "auto" else source
            logger.info("Using default chat sampling params from %s: %s",
                        source, self.default_sampling_params)
        if self.model_config.hf_config.model_type == 'kimi_k2':
            self.tool_call_id_type = 'kimi_k2'
        else:
            self.tool_call_id_type = 'random'

        self.use_harmony = model_config.hf_config.model_type == "gpt_oss"
        if self.use_harmony:
            if "stop_token_ids" not in self.default_sampling_params:
                self.default_sampling_params["stop_token_ids"] = []
            self.default_sampling_params["stop_token_ids"].extend(
                get_stop_tokens_for_assistant_actions())

        # NOTE(woosuk): While OpenAI's chat completion API supports browsing
        # for some models, currently vLLM doesn't support it. Please use the
        # Responses API instead.
        self.supports_browsing = False
        self.browser_tool = None
        # NOTE(woosuk): Chat completion API does not support code interpreter.
        # Please use the Responses API instead.
        self.supports_code_interpreter = False
        self.python_tool = None

    async def create_chat_completion(
        self,
        request: ChatCompletionRequest,
        raw_request: Optional[Request] = None,
    ) -> Union[AsyncGenerator[str, None], ChatCompletionResponse,
               ErrorResponse]:
        """
        Chat Completion API similar to OpenAI's API.

        See https://platform.openai.com/docs/api-reference/chat/create
        for the API specification. This API mimics the OpenAI
        Chat Completion API.
        """
        error_check_ret = await self._check_model(request)
        if error_check_ret is not None:
            logger.error("Error with model %s", error_check_ret)
            return error_check_ret

        # If the engine is dead, raise the engine's DEAD_ERROR.
        # This is required for the streaming case, where we return a
        # success status before we actually start generating text :).
        if self.engine_client.errored:
            raise self.engine_client.dead_error

        try:
            lora_request = self._maybe_get_adapters(
                request, supports_default_mm_loras=True)

            model_name = self._get_model_name(request.model, lora_request)

            tokenizer = await self.engine_client.get_tokenizer(lora_request)

            tool_parser = self.tool_parser

            if isinstance(tokenizer, MistralTokenizer):
                # because of issues with pydantic we need to potentially
                # re-serialize the tool_calls field of the request
                # for more info: see comment in `maybe_serialize_tool_calls`
                maybe_serialize_tool_calls(request)
                truncate_tool_call_ids(request)
                validate_request_params(request)

            if (request.tool_choice == "auto" and
                    not (self.enable_auto_tools and tool_parser is not None)
                    and not isinstance(tokenizer, MistralTokenizer)
                    and not self.use_harmony):
                # for hf tokenizers, "auto" tools requires
                # --enable-auto-tool-choice and --tool-call-parser
                return self.create_error_response(
                    "\"auto\" tool choice requires "
                    "--enable-auto-tool-choice and --tool-call-parser to be set"
                )

            if (request.tools is None
                    or (request.tool_choice == "none"
                        and self.exclude_tools_when_tool_choice_none)):
                tool_dicts = None
            else:
                tool_dicts = [tool.model_dump() for tool in request.tools]

            if not self.use_harmony:
                # Common case.
                (
                    conversation,
                    request_prompts,
                    engine_prompts,
                ) = await self._preprocess_chat(
                    request,
                    tokenizer,
                    request.messages,
                    chat_template=request.chat_template or self.chat_template,
                    chat_template_content_format=self.
                    chat_template_content_format,
                    add_generation_prompt=request.add_generation_prompt,
                    continue_final_message=request.continue_final_message,
                    tool_dicts=tool_dicts,
                    documents=request.documents,
                    chat_template_kwargs=request.chat_template_kwargs,
                    tool_parser=tool_parser,
                    truncate_prompt_tokens=request.truncate_prompt_tokens,
                    add_special_tokens=request.add_special_tokens,
                )
            else:
                # For GPT-OSS.
                (
                    conversation,
                    request_prompts,
                    engine_prompts,
                ) = self._make_request_with_harmony(request)
        except (ValueError, TypeError, RuntimeError,
                jinja2.TemplateError) as e:
            logger.exception("Error in preprocessing prompt inputs")
            return self.create_error_response(f"{e} {e.__cause__}")

        request_id = "chatcmpl-" \
                     f"{self._base_request_id(raw_request, request.request_id)}"

        request_metadata = RequestResponseMetadata(request_id=request_id)
        if raw_request:
            raw_request.state.request_metadata = request_metadata

        # Schedule the request and get the result generator.
        generators: list[AsyncGenerator[RequestOutput, None]] = []
        try:
            for i, engine_prompt in enumerate(engine_prompts):
                sampling_params: Union[SamplingParams, BeamSearchParams]

                if self.default_sampling_params is None:
                    self.default_sampling_params = {}

                max_tokens = get_max_tokens(
                    max_model_len=self.max_model_len,
                    request=request,
                    input_length=len(engine_prompt["prompt_token_ids"]),
                    default_sampling_params=self.default_sampling_params)

                if request.use_beam_search:
                    sampling_params = request.to_beam_search_params(
                        max_tokens, self.default_sampling_params)
                else:
                    sampling_params = request.to_sampling_params(
                        max_tokens, self.model_config.logits_processor_pattern,
                        self.default_sampling_params)

                self._log_inputs(request_id,
                                 request_prompts[i],
                                 params=sampling_params,
                                 lora_request=lora_request)

                trace_headers = (None if raw_request is None else await
                                 self._get_trace_headers(raw_request.headers))

                if isinstance(sampling_params, BeamSearchParams):
                    generator = self.engine_client.beam_search(
                        prompt=engine_prompt,
                        request_id=request_id,
                        params=sampling_params,
                        lora_request=lora_request,
                    )
                else:
                    generator = self.engine_client.generate(
                        engine_prompt,
                        sampling_params,
                        request_id,
                        lora_request=lora_request,
                        trace_headers=trace_headers,
                        priority=request.priority,
                    )

                generators.append(generator)
        except ValueError as e:
            # TODO: Use a vllm-specific Validation Error
            return self.create_error_response(str(e))

        assert len(generators) == 1
        result_generator, = generators

        # Streaming response
        if request.stream:
            return self.chat_completion_stream_generator(
                request,
                result_generator,
                request_id,
                model_name,
                conversation,
                tokenizer,
                request_metadata,
                enable_force_include_usage=self.enable_force_include_usage)

        try:
            return await self.chat_completion_full_generator(
                request, result_generator, request_id, model_name,
                conversation, tokenizer, request_metadata)
        except ValueError as e:
            # TODO: Use a vllm-specific Validation Error
            return self.create_error_response(str(e))

    def get_chat_request_role(self, request: ChatCompletionRequest) -> str:
        if request.add_generation_prompt:
            return self.response_role
        return request.messages[-1]["role"]

    @staticmethod
    def _bracket_level(s: str, opening='{', closing='}') -> int:
        """
        Calculate the current level of nested brackets in a given string.
        """
        level = 0
        for char in s:
            if char == opening:
                level += 1
            elif char == closing:
                level -= 1
        return level

    @staticmethod
    def _filter_delta_text(delta_text: str,
                           previous_text: str) -> tuple[str, bool]:
        # remove last '},' of the tool definition stemming from the
        # "name"/"parameters" outer object or closing ']' of the tool list
        # count occurrences of opening and closing curly braces and
        # once level 0 is reached stop outputting text
        # if 0 is reached while parsing the delta_text we know the current
        # tool will finish in this current iteration
        bracket_level = OpenAIServingChat._bracket_level(previous_text)
        updated_delta, passed_zero = "", False
        for c in delta_text:
            if c == '{':
                bracket_level += 1
                passed_zero = bracket_level == 0
            elif c == '}':
                bracket_level -= 1
                passed_zero = bracket_level == 0

            if bracket_level != 0:
                updated_delta += c
            else:
                # if a comma is reached at level 0 we can stop
                if c == ',':
                    break
        return updated_delta, passed_zero
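        # Illustrative walk-through with hypothetical inputs (not taken from
        # the source): given
        #   previous_text = '[{"name": "get_weather", "parameters": {"city": "Paris"'
        #   delta_text    = '}}]'
        # _bracket_level(previous_text) is 2, so the first '}' in the delta
        # still leaves us inside the tool object and is kept, the second '}'
        # brings the level to 0 (passed_zero=True), and the trailing ']' is
        # dropped. The call returns ('}', True): only the closing brace of
        # "parameters" is streamed, and the caller learns that the current
        # tool call finishes in this iteration.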

    def extract_tool_call_required_streaming(
        self,
        previous_text: str,
        current_text: Optional[str],
        delta_text: str,
        function_name_returned: bool,
        tool_call_idx: Optional[int] = None
    ) -> tuple[Optional[DeltaMessage], bool]:
        if current_text is None or current_text == "":
            # if the current text is empty, we cannot parse it
            return None, function_name_returned
        try:
            obj = partial_json_parser.loads(current_text)
        except partial_json_parser.core.exceptions.MalformedJSON:
            logger.debug('not enough tokens to parse into JSON yet')
            obj = None

        # check if the current text is a valid array
        # containing a partial tool calling object
        # if not repeat
        if obj is None or not isinstance(obj, list) or not len(obj) > 0:
            function_name_returned = False
            delta_message = None
        else:
            _, finishes_previous_tool = OpenAIServingChat._filter_delta_text(
                delta_text, previous_text)
            # take the last tool call from the generated list
            current_tool_call = obj[-1]

            # once parameters have been generated the name is complete as well
            if not finishes_previous_tool and ("name" not in current_tool_call
                                               or "parameters"
                                               not in current_tool_call):
                function_name_returned = False
                delta_message = None
            else:
                if not function_name_returned:
                    # get partly generated arguments from the latest tool call
                    param_match = re.search(r'.*"parameters":\s*(.*)',
                                            current_text)
                    arguments = param_match.group(1) if param_match else ""
                    arguments, _ = OpenAIServingChat._filter_delta_text(
                        arguments, previous_text)

                    # if this iteration finishes a previous tool call but a
                    # new incomplete tool is already generated, take the
                    # previous from the list
                    if (finishes_previous_tool
                            and "parameters" not in current_tool_call):
                        current_tool_call = obj[-2]

                    function_name_returned = True
                    tool_call_id = make_tool_call_id(
                        id_type=self.tool_call_id_type,
                        func_name=current_tool_call["name"],
                        idx=tool_call_idx)
                    delta_message = DeltaMessage(tool_calls=[
                        DeltaToolCall(id=tool_call_id,
                                      function=DeltaFunctionCall(
                                          name=current_tool_call["name"],
                                          arguments=arguments),
                                      index=len(obj) - 1,
                                      type="function")
                    ])

                else:
                    delta_text, _ = OpenAIServingChat._filter_delta_text(
                        delta_text, previous_text)

                    if delta_text != "":
                        delta_message = DeltaMessage(tool_calls=[
                            DeltaToolCall(
                                function=DeltaFunctionCall(
                                    # OpenAI API returns None
                                    # instead of name every time
                                    name=None,
                                    arguments=delta_text),
                                index=len(obj) - 1)
                        ])
                    else:
                        delta_message = None

        return delta_message, function_name_returned

    async def chat_completion_stream_generator(
        self,
        request: ChatCompletionRequest,
        result_generator: AsyncIterator[RequestOutput],
        request_id: str,
        model_name: str,
        conversation: list[ConversationMessage],
        tokenizer: AnyTokenizer,
        request_metadata: RequestResponseMetadata,
        enable_force_include_usage: bool,
    ) -> AsyncGenerator[str, None]:
        created_time = int(time.time())
        chunk_object_type: Final = "chat.completion.chunk"
        first_iteration = True

        # Send response for each token for each request.n (index)
        num_choices = 1 if request.n is None else request.n
        previous_num_tokens = [0] * num_choices
        finish_reason_sent = [False] * num_choices
        num_prompt_tokens = 0
        num_cached_tokens = None
        if self.use_harmony:
            harmony_parsers = [
                get_streamable_parser_for_assistant()
                for _ in range(num_choices)
            ]

        if isinstance(request.tool_choice, ChatCompletionNamedToolChoiceParam):
            tool_choice_function_name = request.tool_choice.function.name
        else:
            tool_choice_function_name = None

        # Determine whether tools are in use with "auto" tool choice
        tool_choice_auto = (
            not tool_choice_function_name
            and self._should_stream_with_auto_tool_parsing(request))

        all_previous_token_ids: Optional[list[list[int]]]
        function_name_returned = [False] * num_choices
        if self.tool_call_id_type == 'kimi_k2':
            history_tool_call_cnt = get_history_tool_calls_cnt(conversation)
        else:
            history_tool_call_cnt = 0

        # Always track previous_texts for comprehensive output logging
        previous_texts = [""] * num_choices

        # Only one of these will be used, thus previous_texts and
        # all_previous_token_ids will not be used twice in the same iteration.
        if tool_choice_auto or self.reasoning_parser:
            # These are only required in "auto" tool choice case
            all_previous_token_ids = [[]] * num_choices
            # For reasoning parser and tool call all enabled
            added_content_delta_arr = [False] * num_choices
            reasoning_end_arr = [False] * num_choices
        elif request.tool_choice == "required":
            all_previous_token_ids = None
        else:
            all_previous_token_ids = None

        try:
            if self.reasoning_parser:
                reasoning_parser = self.reasoning_parser(tokenizer)
        except RuntimeError as e:
            logger.exception("Error in reasoning parser creation.")
            data = self.create_streaming_error_response(str(e))
            yield f"data: {data}\n\n"
            yield "data: [DONE]\n\n"
            return
        # Prepare the tool parser if it's needed
        try:
            if tool_choice_auto and self.tool_parser:
                tool_parsers: list[Optional[ToolParser]] = [
                    self.tool_parser(tokenizer)
                ] * num_choices
            else:
                tool_parsers = [None] * num_choices
        except Exception as e:
            logger.exception("Error in tool parser creation.")
            data = self.create_streaming_error_response(str(e))
            yield f"data: {data}\n\n"
            yield "data: [DONE]\n\n"
            return

        stream_options = request.stream_options
        if stream_options:
            include_usage = stream_options.include_usage \
                            or enable_force_include_usage
            include_continuous_usage = include_usage and \
                                       stream_options.continuous_usage_stats
        else:
            include_usage, include_continuous_usage = False, False

        try:
            async for res in result_generator:
                if res.prompt_token_ids is not None:
                    num_prompt_tokens = len(res.prompt_token_ids)
                    if res.encoder_prompt_token_ids is not None:
                        num_prompt_tokens += len(res.encoder_prompt_token_ids)

                # We need to do it here, because if there are exceptions in
                # the result_generator, it needs to be sent as the FIRST
                # response (by the try...catch).
                if first_iteration:
                    num_cached_tokens = res.num_cached_tokens
                    # Send first response for each request.n (index) with
                    # the role
                    role = self.get_chat_request_role(request)

                    # NOTE num_choices defaults to 1 so this usually executes
                    # once per request
                    for i in range(num_choices):
                        choice_data = ChatCompletionResponseStreamChoice(
                            index=i,
                            delta=DeltaMessage(
                                role=role,
                                content="",
                            ),
                            logprobs=None,
                            finish_reason=None)

                        # return prompt_token_ids at the first chunk ever
                        chunk = ChatCompletionStreamResponse(
                            id=request_id,
                            object=chunk_object_type,
                            created=created_time,
                            choices=[choice_data],
                            model=model_name,
                            prompt_token_ids=(res.prompt_token_ids
                                              if request.return_token_ids else
                                              None))

                        # if continuous usage stats are requested, add it
                        if include_continuous_usage:
                            chunk.usage = UsageInfo(
                                prompt_tokens=num_prompt_tokens,
                                completion_tokens=0,
                                total_tokens=num_prompt_tokens)

                        data = chunk.model_dump_json(exclude_unset=True)
                        yield f"data: {data}\n\n"

                    # Send response to echo the input portion of the
                    # last message
                    if request.echo:
                        last_msg_content: Union[str, list[dict[str, str]]] = ""
                        if conversation and "content" in conversation[
                                -1] and conversation[-1].get("role") == role:
                            last_msg_content = conversation[-1]["content"] or ""

                        if last_msg_content:
                            for i in range(num_choices):
                                choice_data = (
                                    ChatCompletionResponseStreamChoice(
                                        index=i,
                                        delta=DeltaMessage(
                                            content=last_msg_content),
                                        logprobs=None,
                                        finish_reason=None))
                                chunk = ChatCompletionStreamResponse(
                                    id=request_id,
                                    object=chunk_object_type,
                                    created=created_time,
                                    choices=[choice_data],
                                    model=model_name)
                                if include_continuous_usage:
                                    chunk.usage = UsageInfo(
                                        prompt_tokens=num_prompt_tokens,
                                        completion_tokens=0,
                                        total_tokens=num_prompt_tokens)

                                data = chunk.model_dump_json(
                                    exclude_unset=True)
                                yield f"data: {data}\n\n"
                    first_iteration = False

                for output in res.outputs:
                    i = output.index
                    tool_parser = tool_parsers[i]

                    if finish_reason_sent[i]:
                        continue

                    if request.logprobs and request.top_logprobs is not None:
                        assert output.logprobs is not None, (
                            "Did not output logprobs")
                        logprobs = self._create_chat_logprobs(
                            token_ids=output.token_ids,
                            top_logprobs=output.logprobs,
                            tokenizer=tokenizer,
                            num_output_top_logprobs=request.top_logprobs,
                            return_as_token_id=request.
                            return_tokens_as_token_ids,
                        )
                    else:
                        logprobs = None

                    if self.use_harmony:
                        harmony_parser = harmony_parsers[i]
                        for token_id in output.token_ids:
                            harmony_parser.process(token_id)
                        is_reasoning = \
                            harmony_parser.current_channel == "analysis"
                        if not request.include_reasoning and is_reasoning:
                            # Skip the reasoning content.
                            continue
                        delta_text = harmony_parser.last_content_delta or ""
                    else:
                        delta_text = output.text

                    if not delta_text and not output.token_ids and \
                        not previous_num_tokens[i]:
                        # Chunked prefill case, don't return empty chunks
                        continue

                    delta_message: Optional[DeltaMessage]

                    # just update previous_texts and previous_token_ids
                    if ((tool_choice_auto or self.reasoning_parser)
                            and not self.use_harmony):
                        assert previous_texts is not None
                        assert all_previous_token_ids is not None
                        previous_text = previous_texts[i]
                        previous_token_ids = all_previous_token_ids[i]
                        current_text = previous_text + delta_text
                        # avoid the None + list error.
                        if previous_token_ids:
                            current_token_ids = previous_token_ids + as_list(
                                output.token_ids)
                        else:
                            current_token_ids = as_list(output.token_ids)

                    if self.use_harmony:
                        if is_reasoning:
                            delta_message = DeltaMessage(
                                reasoning_content=delta_text)
                        else:
                            delta_message = DeltaMessage(content=delta_text)
                    # handle streaming deltas for tools with named tool_choice
                    elif tool_choice_function_name:
                        if (self.reasoning_parser and not reasoning_end_arr[i]
                                and not reasoning_parser.is_reasoning_end(
                                    previous_token_ids)):
                            assert reasoning_parser is not None
                            delta_message = (
                                reasoning_parser.
                                extract_reasoning_content_streaming(
                                    previous_text,
                                    current_text,
                                    delta_text,
                                    previous_token_ids,
                                    current_token_ids,
                                    output.token_ids,
                                ))
                            # When encountering think end id in delta_token_ids
                            # or think end id in prompt_token_ids
                            # i.e {"enable_thinking": False},
                            # set reasoning status to end.
                            # Only keep 'content', remove 'reasoning_content'.
                            if reasoning_parser.is_reasoning_end(
                                    as_list(output.token_ids)) or (
                                        res.prompt_token_ids
                                        and reasoning_parser.is_reasoning_end(
                                            res.prompt_token_ids)):
                                reasoning_end_arr[i] = True
                                if delta_message and delta_message.content:
                                    # This needs to be added to next `delta_text`
                                    current_text = delta_message.content
                                    delta_message.content = None
                                else:
                                    current_text = ""
                        else:
                            # Just to add remaining `content`
                            if self.reasoning_parser:
                                delta_text = previous_text + delta_text
                                current_text = ""

                            if function_name_returned[i]:
                                delta_tool_call = DeltaToolCall(
                                    function=DeltaFunctionCall(
                                        arguments=delta_text),
                                    index=i)
                            else:
                                delta_tool_call = DeltaToolCall(
                                    id=make_tool_call_id(),
                                    type="function",
                                    function=DeltaFunctionCall(
                                        name=tool_choice_function_name,
                                        arguments=delta_text),
                                    index=i)
                                function_name_returned[i] = True

                            delta_message = DeltaMessage(tool_calls=[
                                delta_tool_call,
                            ])

                    elif request.tool_choice == "required":
                        assert previous_texts is not None
                        previous_text = previous_texts[i]
                        current_text = previous_text + delta_text
                        fn_name_returned = function_name_returned[i]

                        if self.reasoning_parser:
                            _, content = \
                                reasoning_parser.extract_reasoning_content(
                                    current_text,
                                    request
                                )
                        else:
                            content = current_text
                        delta_message, function_name_returned[i] = (
                            self.extract_tool_call_required_streaming(
                                previous_text=previous_text,
                                current_text=content,
                                delta_text=delta_text,
                                function_name_returned=fn_name_returned,
                                tool_call_idx=history_tool_call_cnt))
                        if (delta_message and delta_message.tool_calls and
                                delta_message.tool_calls[0].id is not None):
                            history_tool_call_cnt += 1

                        # update the previous values for the next iteration
                        previous_texts[i] = current_text

                    # handle streaming deltas for tools with "auto" tool choice
                    # and reasoning parser
                    elif tool_choice_auto and self.reasoning_parser:
                        assert tool_parser is not None
                        assert reasoning_parser is not None
                        assert added_content_delta_arr is not None
                        assert reasoning_end_arr is not None
                        output_token_ids = as_list(output.token_ids)
                        if not reasoning_end_arr[i]:
                            delta_message = (
                                reasoning_parser.
                                extract_reasoning_content_streaming(
                                    previous_text,
                                    current_text,
                                    delta_text,
                                    previous_token_ids,
                                    current_token_ids,
                                    output_token_ids,
                                ))
                            # When encountering think end id in prompt_token_ids
                            # i.e {"enable_thinking": False},
                            # set reasoning status to end.
                            # Remove the text and token ids related
                            # to 'reasoning_content'.
                            if res.prompt_token_ids and \
                                reasoning_parser.is_reasoning_end(
                                    res.prompt_token_ids):
                                reasoning_end_arr[i] = True
                                current_token_ids = output_token_ids
                                if delta_message and delta_message.content:
                                    current_text = delta_message.content
                                    delta_message.content = None
                                else:
                                    current_text = ""
                            # When encountering think end id in delta_token_ids,
                            # set reasoning status to end.
                            # Remove the text and token ids related
                            # to 'reasoning_content'.
                            if reasoning_parser.is_reasoning_end(
                                    output_token_ids):
                                reasoning_end_arr[i] = True
                                current_token_ids =  \
                                    reasoning_parser.extract_content_ids(
                                        output_token_ids)
                                if delta_message and delta_message.content:
                                    current_text = delta_message.content
                                    delta_message.content = None
                                else:
                                    current_text = ""

                        # handle tool calls only after reasoning is done,
                        else:
                            delta_token_ids = output_token_ids
                            # First time to tool call,
                            # add the remaining text and token ids
                            # to delta from previous
                            if not added_content_delta_arr[i]:
                                added_content_delta_arr[i] = True
                                previous_text = ""
                                previous_token_ids = []
                                delta_text = current_text
                                delta_token_ids = current_token_ids

                            delta_message = (
                                tool_parser.extract_tool_calls_streaming(
                                    previous_text=previous_text,
                                    current_text=current_text,
                                    delta_text=delta_text,
                                    previous_token_ids=previous_token_ids,
                                    current_token_ids=current_token_ids,
                                    delta_token_ids=delta_token_ids,
                                    request=request))
                    # when only tool calls
                    elif tool_choice_auto:
                        assert tool_parser is not None
                        delta_message = (
                            tool_parser.extract_tool_calls_streaming(
                                previous_text=previous_text,
                                current_text=current_text,
                                delta_text=delta_text,
                                previous_token_ids=previous_token_ids,
                                current_token_ids=current_token_ids,
                                delta_token_ids=output.token_ids,
                                request=request))

                    # when only reasoning
                    elif self.reasoning_parser:
                        delta_message = (reasoning_parser.
                                         extract_reasoning_content_streaming(
                                             previous_text,
                                             current_text,
                                             delta_text,
                                             previous_token_ids,
                                             current_token_ids,
                                             output.token_ids,
                                         ))
                    # handle streaming just a content delta
                    else:
                        delta_message = DeltaMessage(content=delta_text)

                    # update the previous values for the next iteration
                    if tool_choice_auto or self.reasoning_parser:
                        assert previous_texts is not None
                        assert all_previous_token_ids is not None
                        previous_texts[i] = current_text
                        all_previous_token_ids[i] = current_token_ids
                    else:
                        # Update for comprehensive logging even in simple case
                        assert previous_texts is not None
                        previous_texts[i] += delta_text

                    # set the previous values for the next iteration
                    previous_num_tokens[i] += len(output.token_ids)

                    # if the message delta is None (e.g. because it was a
                    # "control token" for tool calls, or the parser otherwise
                    # wasn't ready to send a token), then get the next token
                    # without streaming a chunk
                    if delta_message is None:
                        continue

                    # Log streaming delta if output logging is enabled
                    if self.enable_log_outputs and self.request_logger:
                        delta_content = ""
                        if delta_message.content:
                            delta_content = delta_message.content
                        elif delta_message.tool_calls:
                            delta_content = "".join(
                                tc.function.arguments
                                for tc in delta_message.tool_calls
                                if tc.function and tc.function.arguments)

                        if delta_content:
                            self.request_logger.log_outputs(
                                request_id=request_id,
                                outputs=delta_content,
                                output_token_ids=as_list(output.token_ids),
                                finish_reason=output.finish_reason,
                                is_streaming=True,
                                delta=True,
                            )

                    if output.finish_reason is None:
                        # Send token-by-token response for each request.n
                        choice_data = ChatCompletionResponseStreamChoice(
                            index=i,
                            delta=delta_message,
                            logprobs=logprobs,
                            finish_reason=None,
                            token_ids=(as_list(output.token_ids)
                                       if request.return_token_ids else None))

                    # if the model is finished generating
                    else:
                        # check to make sure we haven't "forgotten" to stream
                        #   any tokens that were generated but previously
                        #   matched by partial json parsing
                        # only happens if we are NOT using guided decoding
                        auto_tools_called = False
                        if tool_parser:
                            auto_tools_called = len(
                                tool_parser.prev_tool_call_arr) > 0
                            index = len(tool_parser.prev_tool_call_arr
                                        ) - 1 if auto_tools_called else 0
                        else:
                            index = 0

                        if self._should_check_for_unstreamed_tool_arg_tokens(
                                delta_message, output) and tool_parser:
                            latest_delta_len = 0
                            if ((isinstance(
                                    delta_message.tool_calls[0].function,
                                    DeltaFunctionCall)) and isinstance(
                                        delta_message.tool_calls[0].function.
                                        arguments, str)):
                                latest_delta_len = len(
                                    delta_message.tool_calls[0].function.
                                    arguments)

                            # get the expected call based on partial JSON
                            # parsing which "autocompletes" the JSON
                            expected_call = json.dumps(
                                tool_parser.prev_tool_call_arr[index].get(
                                    "arguments", {}),
                                ensure_ascii=False)

                            # get what we've streamed so far for arguments
                            # for the current tool
                            actual_call = tool_parser.streamed_args_for_tool[
                                index]
                            if (latest_delta_len > 0):
                                actual_call = actual_call[:-latest_delta_len]

                            # check to see if there's anything left to stream
                            remaining_call = expected_call.replace(
                                actual_call, "", 1)
                            # set that as a delta message
                            delta_message = DeltaMessage(tool_calls=[
                                DeltaToolCall(index=index,
                                              function=DeltaFunctionCall(
                                                  arguments=remaining_call).
                                              model_dump(exclude_none=True))
                            ])

                        # Send the finish response for each request.n only once
                        choice_data = ChatCompletionResponseStreamChoice(
                            index=i,
                            delta=delta_message,
                            logprobs=logprobs,
                            finish_reason=output.finish_reason
                            if not auto_tools_called else "tool_calls",
                            stop_reason=output.stop_reason,
                            token_ids=(as_list(output.token_ids)
                                       if request.return_token_ids else None))

                        finish_reason_sent[i] = True

                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name)

                    # handle usage stats if requested & if continuous
                    if include_continuous_usage:
                        completion_tokens = previous_num_tokens[i]
                        chunk.usage = UsageInfo(
                            prompt_tokens=num_prompt_tokens,
                            completion_tokens=completion_tokens,
                            total_tokens=num_prompt_tokens + completion_tokens,
                        )

                    data = chunk.model_dump_json(exclude_unset=True)
                    yield f"data: {data}\n\n"

            # once the final token is handled, if stream_options.include_usage
            # is sent, send the usage
            if include_usage:
                completion_tokens = sum(previous_num_tokens)
                final_usage = UsageInfo(prompt_tokens=num_prompt_tokens,
                                        completion_tokens=completion_tokens,
                                        total_tokens=num_prompt_tokens +
                                        completion_tokens)
                if self.enable_prompt_tokens_details and num_cached_tokens:
                    final_usage.prompt_tokens_details = PromptTokenUsageInfo(
                        cached_tokens=num_cached_tokens)

                final_usage_chunk = ChatCompletionStreamResponse(
                    id=request_id,
                    object=chunk_object_type,
                    created=created_time,
                    choices=[],
                    model=model_name,
                    usage=final_usage)
                final_usage_data = (final_usage_chunk.model_dump_json(
                    exclude_unset=True, exclude_none=True))
                yield f"data: {final_usage_data}\n\n"

            # report to FastAPI middleware aggregate usage across all choices
            num_completion_tokens = sum(previous_num_tokens)
            request_metadata.final_usage_info = UsageInfo(
                prompt_tokens=num_prompt_tokens,
                completion_tokens=num_completion_tokens,
                total_tokens=num_prompt_tokens + num_completion_tokens,
            )

            # Log complete streaming response if output logging is enabled
            if self.enable_log_outputs and self.request_logger:
                # Log the complete response for each choice
                for i in range(num_choices):
                    full_text = (
                        previous_texts[i]
                        if previous_texts and i < len(previous_texts) else
                        f"<streaming_complete: {previous_num_tokens[i]} tokens>"
                    )
                    self.request_logger.log_outputs(
                        request_id=request_id,
                        outputs=full_text,
                        output_token_ids=
                        None,  # Consider also logging all token IDs
                        finish_reason="streaming_complete",
                        is_streaming=True,
                        delta=False,
                    )

        except Exception as e:
            # TODO: Use a vllm-specific Validation Error
            logger.exception("Error in chat completion stream generator.")
            data = self.create_streaming_error_response(str(e))
            yield f"data: {data}\n\n"
        # Send the final done message after all request.n choices are finished
        yield "data: [DONE]\n\n"

    async def chat_completion_full_generator(
        self,
        request: ChatCompletionRequest,
        result_generator: AsyncIterator[RequestOutput],
        request_id: str,
        model_name: str,
        conversation: list[ConversationMessage],
        tokenizer: AnyTokenizer,
        request_metadata: RequestResponseMetadata,
    ) -> Union[ErrorResponse, ChatCompletionResponse]:

        created_time = int(time.time())
        final_res: Optional[RequestOutput] = None

        try:
            async for res in result_generator:
                final_res = res
        except asyncio.CancelledError:
            return self.create_error_response("Client disconnected")
        except ValueError as e:
            # TODO: Use a vllm-specific Validation Error
            return self.create_error_response(str(e))

        assert final_res is not None

        choices: list[ChatCompletionResponseChoice] = []
        if self.tool_call_id_type == 'kimi_k2':
            history_tool_call_cnt = get_history_tool_calls_cnt(conversation)
        else:
            history_tool_call_cnt = 0

        role = self.get_chat_request_role(request)
        for output in final_res.outputs:
            token_ids = output.token_ids
            out_logprobs = output.logprobs

            if request.logprobs and request.top_logprobs is not None:
                assert out_logprobs is not None, "Did not output logprobs"
                logprobs = self._create_chat_logprobs(
                    token_ids=token_ids,
                    top_logprobs=out_logprobs,
                    num_output_top_logprobs=request.top_logprobs,
                    tokenizer=tokenizer,
                    return_as_token_id=request.return_tokens_as_token_ids,
                )
            else:
                logprobs = None

            if self.use_harmony:
                reasoning_content, final_content, is_tool_call = (
                    parse_chat_output(token_ids))
                if not request.include_reasoning:
                    reasoning_content = None

                if is_tool_call:
                    # TODO(woosuk): Implement tool call for gpt-oss.
                    # For now, only Responses API supports tool call for
                    # gpt-oss.
                    raise NotImplementedError(
                        "Tool call in Chat Completion API is not supported "
                        "for gpt-oss yet. Please use Responses API instead.")
                else:
                    # Normal message
                    message = ChatMessage(
                        role=role,
                        reasoning_content=reasoning_content,
                        content=final_content,
                    )

                choice_data = ChatCompletionResponseChoice(
                    index=output.index,
                    message=message,
                    logprobs=logprobs,
                    finish_reason="tool_calls" if is_tool_call else
                    output.finish_reason if output.finish_reason else "stop",
                    stop_reason=output.stop_reason,
                )
                choices.append(choice_data)
                continue

            if self.reasoning_parser:
                try:
                    reasoning_parser = self.reasoning_parser(tokenizer)
                except RuntimeError as e:
                    logger.exception("Error in reasoning parser creation.")
                    return self.create_error_response(str(e))
                # If the reasoning parser is enabled,
                # tool calls are extracted exclusively from the content.
                reasoning_content, content = (
                    reasoning_parser.extract_reasoning_content(
                        output.text, request=request))
                if not request.include_reasoning:
                    reasoning_content = None
            else:
                reasoning_content = None
                content = output.text

            auto_tools_called = False
            # if auto tools are not enabled, and a named tool choice using
            #   outlines is not being used
            if (not self.enable_auto_tools or not self.tool_parser) and \
                (not isinstance(request.tool_choice,
                                ChatCompletionNamedToolChoiceParam
                                ) and request.tool_choice != "required"):
                message = ChatMessage(role=role,
                                      reasoning_content=reasoning_content,
                                      content=content)

            # if the request uses tools and specified a tool choice
            elif request.tool_choice and type(
                    request.tool_choice) is ChatCompletionNamedToolChoiceParam:

                tool_call_class = MistralToolCall if isinstance(
                    tokenizer, MistralTokenizer) else ToolCall
                message = ChatMessage(
                    role=role,
                    reasoning_content=reasoning_content,
                    content="",
                    tool_calls=[
                        tool_call_class(function=FunctionCall(
                            name=request.tool_choice.function.name,
                            arguments=content,
                        ))
                    ],
                )

            elif request.tool_choice and request.tool_choice == "required":
                tool_call_class = MistralToolCall if isinstance(
                    tokenizer, MistralTokenizer) else ToolCall

                # the fields of FunctionDefinition are a superset of the
                # tool call outputs and can be used for parsing
                assert content is not None
                tool_calls = TypeAdapter(
                    list[FunctionDefinition]).validate_json(content)
                tool_call_ids = []
                for tool_call in tool_calls:
                    tool_call_ids.append(
                        make_tool_call_id(id_type=self.tool_call_id_type,
                                          func_name=tool_call.name,
                                          idx=history_tool_call_cnt))
                    history_tool_call_cnt += 1
                message = ChatMessage(
                    role=role,
                    content="",
                    tool_calls=[
                        tool_call_class(id=tool_call_ids[i],
                                        function=FunctionCall(
                                            name=tool_call.name,
                                            arguments=json.dumps(
                                                tool_call.parameters,
                                                ensure_ascii=False)))
                        for i, tool_call in enumerate(tool_calls)
                    ],
                    reasoning_content=reasoning_content)

            # if the request doesn't use tool choice
            # OR specifies to not use a tool
            elif not request.tool_choice or request.tool_choice == "none":

                message = ChatMessage(role=role,
                                      reasoning_content=reasoning_content,
                                      content=content)

            # handle when there are tools and tool choice is auto
            elif request.tools and (
                    request.tool_choice == "auto"
                    or request.tool_choice is None) and self.enable_auto_tools \
                    and self.tool_parser:

                try:
                    tool_parser = self.tool_parser(tokenizer)
                except RuntimeError as e:
                    logger.exception("Error in tool parser creation.")
                    return self.create_error_response(str(e))

                tool_call_info = tool_parser.extract_tool_calls(
                    content if content is not None else "", request=request)
                # In the OpenAI API the finish_reason is "tool_calls"
                # if the tool choice is auto and the model produced a tool
                # call. The same is not true for named function calls.
                auto_tools_called = tool_call_info.tools_called
                if tool_call_info.tools_called:
                    message = ChatMessage(role=role,
                                          reasoning_content=reasoning_content,
                                          content=tool_call_info.content,
                                          tool_calls=tool_call_info.tool_calls)

                else:
                    # FOR NOW make it a chat message; we will have to detect
                    # the type to make it later.
                    ret_content = content

                    # Prefer the content returned by the tool parser,
                    # since the parser may have modified it.
                    if (tool_call_info.content
                            and len(tool_call_info.content) > 0):
                        ret_content = tool_call_info.content
                    message = ChatMessage(role=role,
                                          reasoning_content=reasoning_content,
                                          content=ret_content)

            # undetermined case that is still important to handle
            else:
                logger.error(
                    "Error in chat_completion_full_generator - cannot determine"
                    " if tools should be extracted. Returning a standard chat "
                    "completion.")
                message = ChatMessage(role=role,
                                      reasoning_content=reasoning_content,
                                      content=content)

            choice_data = ChatCompletionResponseChoice(
                index=output.index,
                message=message,
                logprobs=logprobs,
                finish_reason="tool_calls" if auto_tools_called else
                output.finish_reason if output.finish_reason else "stop",
                stop_reason=output.stop_reason,
                token_ids=(as_list(output.token_ids)
                           if request.return_token_ids else None),
            )

            choices.append(choice_data)

        if request.echo:
            last_msg_content: Union[str, list[dict[str, str]]] = ""
            if (conversation and "content" in conversation[-1]
                    and conversation[-1].get("role") == role):
                last_msg_content = conversation[-1]["content"] or ""
            if isinstance(last_msg_content, list):
                last_msg_content = "\n".join(msg['text']
                                             for msg in last_msg_content)

            for choice in choices:
                full_message = last_msg_content + (choice.message.content
                                                   or "")
                choice.message.content = full_message

        assert final_res.prompt_token_ids is not None
        num_prompt_tokens = len(final_res.prompt_token_ids)
        if final_res.encoder_prompt_token_ids is not None:
            num_prompt_tokens += len(final_res.encoder_prompt_token_ids)
        num_generated_tokens = sum(
            len(output.token_ids) for output in final_res.outputs)
        usage = UsageInfo(prompt_tokens=num_prompt_tokens,
                          completion_tokens=num_generated_tokens,
                          total_tokens=num_prompt_tokens +
                          num_generated_tokens)
        if self.enable_prompt_tokens_details and final_res.num_cached_tokens:
            usage.prompt_tokens_details = PromptTokenUsageInfo(
                cached_tokens=final_res.num_cached_tokens)

        request_metadata.final_usage_info = usage

        response = ChatCompletionResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=choices,
            usage=usage,
            prompt_logprobs=clamp_prompt_logprobs(final_res.prompt_logprobs),
            prompt_token_ids=(final_res.prompt_token_ids
                              if request.return_token_ids else None),
            kv_transfer_params=final_res.kv_transfer_params,
        )

        # Log complete response if output logging is enabled
        if self.enable_log_outputs and self.request_logger:
            for choice in choices:
                output_text = ""
                if choice.message.content:
                    output_text = choice.message.content
                elif choice.message.tool_calls:
                    # For tool calls, log the function name and arguments
                    tool_call_descriptions = []
                    for tc in choice.message.tool_calls:
                        if hasattr(tc.function, "name") and hasattr(
                                tc.function, "arguments"):
                            tool_call_descriptions.append(
                                f"{tc.function.name}({tc.function.arguments})")
                    tool_calls_str = ", ".join(tool_call_descriptions)
                    output_text = f"[tool_calls: {tool_calls_str}]"

                if output_text:
                    # Get the corresponding output token IDs
                    output_token_ids = None
                    if choice.index < len(final_res.outputs):
                        output_token_ids = final_res.outputs[
                            choice.index].token_ids

                    self.request_logger.log_outputs(
                        request_id=request_id,
                        outputs=output_text,
                        output_token_ids=output_token_ids,
                        finish_reason=choice.finish_reason,
                        is_streaming=False,
                        delta=False,
                    )

        return response

    def _get_top_logprobs(
            self, logprobs: dict[int, Logprob], top_logprobs: Optional[int],
            tokenizer: AnyTokenizer,
            should_return_as_token_id: bool) -> list[ChatCompletionLogProb]:
        return [
            ChatCompletionLogProb(
                token=(token := self._get_decoded_token(
                    p[1],
                    p[0],
                    tokenizer,
                    return_as_token_id=should_return_as_token_id,
                )),
                logprob=max(p[1].logprob, -9999.0),
                bytes=list(token.encode("utf-8", errors="replace")),
            ) for i, p in enumerate(logprobs.items())
            if top_logprobs and i < top_logprobs
        ]

    def _create_chat_logprobs(
        self,
        token_ids: GenericSequence[int],
        top_logprobs: GenericSequence[Optional[dict[int, Logprob]]],
        tokenizer: AnyTokenizer,
        num_output_top_logprobs: Optional[int] = None,
        return_as_token_id: Optional[bool] = None,
    ) -> ChatCompletionLogProbs:
        """Create OpenAI-style logprobs."""
        logprobs_content: list[ChatCompletionLogProbsContent] = []

        should_return_as_token_id = return_as_token_id if \
            return_as_token_id is not None else self.return_tokens_as_token_ids
        for i, token_id in enumerate(token_ids):
            step_top_logprobs = top_logprobs[i]
            if step_top_logprobs is None or step_top_logprobs.get(
                    token_id) is None:
                token = tokenizer.decode(token_id)
                if should_return_as_token_id:
                    token = f"token_id:{token_id}"

                logprobs_content.append(
                    ChatCompletionLogProbsContent(
                        token=token,
                        bytes=list(token.encode("utf-8", errors="replace")),
                    ))
            else:
                step_token = step_top_logprobs[token_id]
                step_decoded = step_token.decoded_token

                logprobs_content.append(
                    ChatCompletionLogProbsContent(
                        token=self._get_decoded_token(
                            step_token,
                            token_id,
                            tokenizer,
                            should_return_as_token_id,
                        ),
                        logprob=max(step_token.logprob, -9999.0),
                        bytes=None if step_decoded is None else list(
                            step_decoded.encode("utf-8", errors="replace")),
                        top_logprobs=self._get_top_logprobs(
                            step_top_logprobs, num_output_top_logprobs,
                            tokenizer, should_return_as_token_id),
                    ))

        return ChatCompletionLogProbs(content=logprobs_content)

    def _should_stream_with_auto_tool_parsing(self,
                                              request: ChatCompletionRequest):
        """
        Utility function to check if streamed tokens should go through the tool
        call parser that was configured.

        We only want to do this IF user-provided tools are set, a tool parser
        is configured, "auto" tool choice is enabled, and the request's tool
        choice field indicates that "auto" tool choice should be used.
        """
        return (request.tools and self.tool_parser and self.enable_auto_tools
                and request.tool_choice in ['auto', None])

    def _should_check_for_unstreamed_tool_arg_tokens(
        self,
        delta_message: Optional[DeltaMessage],
        output: CompletionOutput,
    ) -> bool:
        """
        Check whether we should look for unstreamed tool-argument tokens.
        This is only applicable when auto tool parsing is enabled and the
        delta is a tool call with arguments.
        """

        # yapf: disable
        return bool(
            # if there is a delta message that includes tool calls which
            # include a function that has arguments
            output.finish_reason is not None
            and self.enable_auto_tools and self.tool_parser and delta_message
            and delta_message.tool_calls and delta_message.tool_calls[0]
            and delta_message.tool_calls[0].function
            and delta_message.tool_calls[0].function.arguments is not None
        )

    def _make_request_with_harmony(
        self,
        request: ChatCompletionRequest,
    ):
        messages: list[OpenAIMessage] = []

        # Add system message.
        # NOTE: In Chat Completion API, browsing is enabled by default
        # if the model supports it. TODO: Support browsing.
        assert not self.supports_browsing
        assert not self.supports_code_interpreter
        sys_msg = get_system_message(
            reasoning_effort=request.reasoning_effort,
            browser_description=None,
            python_description=None)
        messages.append(sys_msg)

        # Add developer message.
        dev_msg = get_developer_message()
        messages.append(dev_msg)

        # Add user message.
        for chat_msg in request.messages:
            messages.append(parse_chat_input(chat_msg))

        # Render prompt token ids.
        prompt_token_ids = render_for_completion(messages)
        engine_prompt = EngineTokensPrompt(prompt_token_ids=prompt_token_ids)

        # Add cache_salt if provided in the request
        if request.cache_salt is not None:
            engine_prompt["cache_salt"] = request.cache_salt

        return messages, [prompt_token_ids], [engine_prompt]
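
The generators shown in the source above back vLLM's OpenAI-compatible /v1/chat/completions route. As a client-side orientation sketch (not part of serving_chat.py; it assumes a server is already running at http://localhost:8000 and uses a hypothetical served model name), both code paths can be exercised with the official openai client:

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

# Non-streaming request: served by chat_completion_full_generator.
resp = client.chat.completions.create(
    model="my-model",  # hypothetical served model name
    messages=[{"role": "user", "content": "Say hello."}],
)
print(resp.choices[0].message.content)

# Streaming request: served by chat_completion_stream_generator as SSE chunks.
stream = client.chat.completions.create(
    model="my-model",
    messages=[{"role": "user", "content": "Say hello."}],
    stream=True,
    stream_options={"include_usage": True},
)
for chunk in stream:
    # The final usage-only chunk has an empty choices list.
    if chunk.choices:
        print(chunk.choices[0].delta.content or "", end="")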

browser_tool instance-attribute

browser_tool = None

chat_template instance-attribute

chat_template = chat_template

chat_template_content_format instance-attribute

chat_template_content_format: Final = (
    chat_template_content_format
)

default_sampling_params instance-attribute

default_sampling_params = get_diff_sampling_param()

enable_auto_tools instance-attribute

enable_auto_tools: bool = enable_auto_tools

enable_force_include_usage instance-attribute

enable_force_include_usage = enable_force_include_usage

enable_log_outputs instance-attribute

enable_log_outputs = enable_log_outputs

enable_prompt_tokens_details instance-attribute

enable_prompt_tokens_details = enable_prompt_tokens_details

exclude_tools_when_tool_choice_none instance-attribute

exclude_tools_when_tool_choice_none = (
    exclude_tools_when_tool_choice_none
)

python_tool instance-attribute

python_tool = None

reasoning_parser instance-attribute

reasoning_parser: Optional[
    Callable[[AnyTokenizer], ReasoningParser]
] = get_reasoning_parser(reasoning_parser)

response_role instance-attribute

response_role = response_role

supports_browsing instance-attribute

supports_browsing = False

supports_code_interpreter instance-attribute

supports_code_interpreter = False

tool_call_id_type instance-attribute

tool_call_id_type = 'kimi_k2'

tool_parser instance-attribute

tool_parser: Optional[
    Callable[[AnyTokenizer], ToolParser]
] = get_tool_parser(tool_parser)

use_harmony instance-attribute

use_harmony = model_type == 'gpt_oss'

__init__

__init__(
    engine_client: EngineClient,
    model_config: ModelConfig,
    models: OpenAIServingModels,
    response_role: str,
    *,
    request_logger: Optional[RequestLogger],
    chat_template: Optional[str],
    chat_template_content_format: ChatTemplateContentFormatOption,
    return_tokens_as_token_ids: bool = False,
    reasoning_parser: str = "",
    enable_auto_tools: bool = False,
    exclude_tools_when_tool_choice_none: bool = False,
    tool_parser: Optional[str] = None,
    enable_prompt_tokens_details: bool = False,
    enable_force_include_usage: bool = False,
    enable_log_outputs: bool = False,
) -> None
Source code in vllm/entrypoints/openai/serving_chat.py
def __init__(
    self,
    engine_client: EngineClient,
    model_config: ModelConfig,
    models: OpenAIServingModels,
    response_role: str,
    *,
    request_logger: Optional[RequestLogger],
    chat_template: Optional[str],
    chat_template_content_format: ChatTemplateContentFormatOption,
    return_tokens_as_token_ids: bool = False,
    reasoning_parser: str = "",
    enable_auto_tools: bool = False,
    exclude_tools_when_tool_choice_none: bool = False,
    tool_parser: Optional[str] = None,
    enable_prompt_tokens_details: bool = False,
    enable_force_include_usage: bool = False,
    enable_log_outputs: bool = False,
) -> None:
    super().__init__(engine_client=engine_client,
                     model_config=model_config,
                     models=models,
                     request_logger=request_logger,
                     return_tokens_as_token_ids=return_tokens_as_token_ids,
                     enable_force_include_usage=enable_force_include_usage)

    self.response_role = response_role
    self.chat_template = chat_template
    self.chat_template_content_format: Final = chat_template_content_format
    self.enable_log_outputs = enable_log_outputs

    # set up tool use
    self.enable_auto_tools: bool = enable_auto_tools
    if self.enable_auto_tools:
        logger.info(
            "\"auto\" tool choice has been enabled please note that while"
            " the parallel_tool_calls client option is preset for "
            "compatibility reasons, it will be ignored.")

    self.reasoning_parser: Optional[Callable[[AnyTokenizer],
                                             ReasoningParser]] = None
    if reasoning_parser:
        try:
            self.reasoning_parser = (
                ReasoningParserManager.get_reasoning_parser(
                    reasoning_parser))
            assert self.reasoning_parser is not None
        except Exception as e:
            raise TypeError(
                f"{reasoning_parser=} has not been registered") from e
    self.tool_parser: Optional[Callable[[AnyTokenizer], ToolParser]] = None
    if self.enable_auto_tools:
        try:
            if (tool_parser == "pythonic" and
                    model_config.model.startswith("meta-llama/Llama-3.2")):
                logger.warning(
                    "Llama3.2 models may struggle to emit valid pythonic"
                    " tool calls")
            self.tool_parser = ToolParserManager.get_tool_parser(
                tool_parser)
        except Exception as e:
            raise TypeError("Error: --enable-auto-tool-choice requires "
                            f"tool_parser:'{tool_parser}' which has not "
                            "been registered") from e
    self.exclude_tools_when_tool_choice_none = (
        exclude_tools_when_tool_choice_none)

    self.enable_prompt_tokens_details = enable_prompt_tokens_details
    self.enable_force_include_usage = enable_force_include_usage
    self.default_sampling_params = (
        self.model_config.get_diff_sampling_param())
    if self.default_sampling_params:
        source = self.model_config.generation_config
        source = "model" if source == "auto" else source
        logger.info("Using default chat sampling params from %s: %s",
                    source, self.default_sampling_params)
    if self.model_config.hf_config.model_type == 'kimi_k2':
        self.tool_call_id_type = 'kimi_k2'
    else:
        self.tool_call_id_type = 'random'

    self.use_harmony = model_config.hf_config.model_type == "gpt_oss"
    if self.use_harmony:
        if "stop_token_ids" not in self.default_sampling_params:
            self.default_sampling_params["stop_token_ids"] = []
        self.default_sampling_params["stop_token_ids"].extend(
            get_stop_tokens_for_assistant_actions())

    # NOTE(woosuk): While OpenAI's chat completion API supports browsing
    # for some models, currently vLLM doesn't support it. Please use the
    # Responses API instead.
    self.supports_browsing = False
    self.browser_tool = None
    # NOTE(woosuk): Chat completion API does not support code interpreter.
    # Please use the Responses API instead.
    self.supports_code_interpreter = False
    self.python_tool = None

_bracket_level staticmethod

_bracket_level(s: str, opening='{', closing='}') -> int

Calculate the current level of nested brackets in a given string.

Source code in vllm/entrypoints/openai/serving_chat.py
@staticmethod
def _bracket_level(s: str, opening='{', closing='}') -> int:
    """
    Calculate the current level of nested brackets in a given string.
    """
    level = 0
    for char in s:
        if char == opening:
            level += 1
        elif char == closing:
            level -= 1
    return level
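
Since _bracket_level is a staticmethod, it can be tried in isolation. A minimal sketch (assumes vLLM is installed; the JSON fragment is a hypothetical partial tool-call string):

from vllm.entrypoints.openai.serving_chat import OpenAIServingChat

fragment = '[{"name": "get_weather", "parameters": {"city": "Par'
# Two '{' have been opened and none closed yet, so the nesting level is 2.
print(OpenAIServingChat._bracket_level(fragment))  # 2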

_create_chat_logprobs

_create_chat_logprobs(
    token_ids: Sequence[int],
    top_logprobs: Sequence[Optional[dict[int, Logprob]]],
    tokenizer: AnyTokenizer,
    num_output_top_logprobs: Optional[int] = None,
    return_as_token_id: Optional[bool] = None,
) -> ChatCompletionLogProbs

Create OpenAI-style logprobs.

Source code in vllm/entrypoints/openai/serving_chat.py
def _create_chat_logprobs(
    self,
    token_ids: GenericSequence[int],
    top_logprobs: GenericSequence[Optional[dict[int, Logprob]]],
    tokenizer: AnyTokenizer,
    num_output_top_logprobs: Optional[int] = None,
    return_as_token_id: Optional[bool] = None,
) -> ChatCompletionLogProbs:
    """Create OpenAI-style logprobs."""
    logprobs_content: list[ChatCompletionLogProbsContent] = []

    should_return_as_token_id = return_as_token_id if \
        return_as_token_id is not None else self.return_tokens_as_token_ids
    for i, token_id in enumerate(token_ids):
        step_top_logprobs = top_logprobs[i]
        if step_top_logprobs is None or step_top_logprobs.get(
                token_id) is None:
            token = tokenizer.decode(token_id)
            if should_return_as_token_id:
                token = f"token_id:{token_id}"

            logprobs_content.append(
                ChatCompletionLogProbsContent(
                    token=token,
                    bytes=list(token.encode("utf-8", errors="replace")),
                ))
        else:
            step_token = step_top_logprobs[token_id]
            step_decoded = step_token.decoded_token

            logprobs_content.append(
                ChatCompletionLogProbsContent(
                    token=self._get_decoded_token(
                        step_token,
                        token_id,
                        tokenizer,
                        should_return_as_token_id,
                    ),
                    logprob=max(step_token.logprob, -9999.0),
                    bytes=None if step_decoded is None else list(
                        step_decoded.encode("utf-8", errors="replace")),
                    top_logprobs=self._get_top_logprobs(
                        step_top_logprobs, num_output_top_logprobs,
                        tokenizer, should_return_as_token_id),
                ))

    return ChatCompletionLogProbs(content=logprobs_content)
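
For orientation, the object assembled here mirrors OpenAI's chat logprobs schema. A purely illustrative sketch of the serialized shape (token strings, logprob values, and byte lists are made up):

chat_logprobs = {
    "content": [{
        "token": "Hello",
        "logprob": -0.12,
        "bytes": [72, 101, 108, 108, 111],  # UTF-8 bytes of "Hello"
        "top_logprobs": [
            {"token": "Hello", "logprob": -0.12, "bytes": [72, 101, 108, 108, 111]},
            {"token": "Hi", "logprob": -2.50, "bytes": [72, 105]},
        ],
    }]
}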

_filter_delta_text staticmethod

_filter_delta_text(
    delta_text: str, previous_text: str
) -> tuple[str, bool]
Source code in vllm/entrypoints/openai/serving_chat.py
@staticmethod
def _filter_delta_text(delta_text: str,
                       previous_text: str) -> tuple[str, bool]:
    # remove last '},' of the tool definition stemming from the
    # "name"/"parameters" outer object or closing ']' of the tool list
    # count occurrences of opening and closing curly braces and
    # once level 0 is reached stop outputting text
    # if 0 is reached while parsing the delta_text we know the current
    # tool will finish in this current iteration
    bracket_level = OpenAIServingChat._bracket_level(previous_text)
    updated_delta, passed_zero = "", False
    for c in delta_text:
        if c == '{':
            bracket_level += 1
            passed_zero = bracket_level == 0
        elif c == '}':
            bracket_level -= 1
            passed_zero = bracket_level == 0

        if bracket_level != 0:
            updated_delta += c
        else:
            # if a comma is reached at level 0 we can stop
            if c == ',':
                break
    return updated_delta, passed_zero
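
A minimal sketch of the trimming behavior (assumes vLLM is installed; both strings are hypothetical fragments of a "required" tool-choice JSON stream):

from vllm.entrypoints.openai.serving_chat import OpenAIServingChat

previous = '[{"name": "get_weather", "parameters": {"city": "Paris"'
delta = '}}, {"name"'

filtered, passed_zero = OpenAIServingChat._filter_delta_text(delta, previous)
# filtered == '}': only the closing brace of "parameters" is kept; the
# object's trailing '},' separator is dropped.
# passed_zero == True: the current tool call finished within this delta.
print(filtered, passed_zero)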

_get_top_logprobs

_get_top_logprobs(
    logprobs: dict[int, Logprob],
    top_logprobs: Optional[int],
    tokenizer: AnyTokenizer,
    should_return_as_token_id: bool,
) -> list[ChatCompletionLogProb]
Source code in vllm/entrypoints/openai/serving_chat.py
def _get_top_logprobs(
        self, logprobs: dict[int, Logprob], top_logprobs: Optional[int],
        tokenizer: AnyTokenizer,
        should_return_as_token_id: bool) -> list[ChatCompletionLogProb]:
    return [
        ChatCompletionLogProb(
            token=(token := self._get_decoded_token(
                p[1],
                p[0],
                tokenizer,
                return_as_token_id=should_return_as_token_id,
            )),
            logprob=max(p[1].logprob, -9999.0),
            bytes=list(token.encode("utf-8", errors="replace")),
        ) for i, p in enumerate(logprobs.items())
        if top_logprobs and i < top_logprobs
    ]

_make_request_with_harmony

_make_request_with_harmony(request: ChatCompletionRequest)
Source code in vllm/entrypoints/openai/serving_chat.py
def _make_request_with_harmony(
    self,
    request: ChatCompletionRequest,
):
    messages: list[OpenAIMessage] = []

    # Add system message.
    # NOTE: In Chat Completion API, browsing is enabled by default
    # if the model supports it. TODO: Support browsing.
    assert not self.supports_browsing
    assert not self.supports_code_interpreter
    sys_msg = get_system_message(
        reasoning_effort=request.reasoning_effort,
        browser_description=None,
        python_description=None)
    messages.append(sys_msg)

    # Add developer message.
    dev_msg = get_developer_message()
    messages.append(dev_msg)

    # Add user message.
    for chat_msg in request.messages:
        messages.append(parse_chat_input(chat_msg))

    # Render prompt token ids.
    prompt_token_ids = render_for_completion(messages)
    engine_prompt = EngineTokensPrompt(prompt_token_ids=prompt_token_ids)

    # Add cache_salt if provided in the request
    if request.cache_salt is not None:
        engine_prompt["cache_salt"] = request.cache_salt

    return messages, [prompt_token_ids], [engine_prompt]

_should_check_for_unstreamed_tool_arg_tokens

_should_check_for_unstreamed_tool_arg_tokens(
    delta_message: Optional[DeltaMessage],
    output: CompletionOutput,
) -> bool

Check whether we should look for unstreamed tool-argument tokens. This is only applicable when auto tool parsing is enabled and the delta is a tool call with arguments.

Source code in vllm/entrypoints/openai/serving_chat.py
def _should_check_for_unstreamed_tool_arg_tokens(
    self,
    delta_message: Optional[DeltaMessage],
    output: CompletionOutput,
) -> bool:
    """
    Check whether we should look for unstreamed tool-argument tokens.
    This is only applicable when auto tool parsing is enabled and the
    delta is a tool call with arguments.
    """

    # yapf: disable
    return bool(
        # if there is a delta message that includes tool calls which
        # include a function that has arguments
        output.finish_reason is not None
        and self.enable_auto_tools and self.tool_parser and delta_message
        and delta_message.tool_calls and delta_message.tool_calls[0]
        and delta_message.tool_calls[0].function
        and delta_message.tool_calls[0].function.arguments is not None
    )
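
A hedged paraphrase of the predicate with hypothetical stand-in values; this is not the method itself, just its boolean expression written out for a finished output whose last delta carried tool-call arguments:

enable_auto_tools = True                     # self.enable_auto_tools
tool_parser_configured = True                # self.tool_parser is set
finish_reason = "tool_calls"                 # output.finish_reason
last_delta_arguments = '{"city": "Paris"}'   # delta_message.tool_calls[0].function.arguments

should_check = bool(
    finish_reason is not None
    and enable_auto_tools and tool_parser_configured
    and last_delta_arguments is not None
)
print(should_check)  # True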

_should_stream_with_auto_tool_parsing

_should_stream_with_auto_tool_parsing(
    request: ChatCompletionRequest,
)

Utility function to check if streamed tokens should go through the tool call parser that was configured.

We only want to do this IF user-provided tools are set, a tool parser is configured, "auto" tool choice is enabled, and the request's tool choice field indicates that "auto" tool choice should be used.

Source code in vllm/entrypoints/openai/serving_chat.py
def _should_stream_with_auto_tool_parsing(self,
                                          request: ChatCompletionRequest):
    """
    Utility function to check if streamed tokens should go through the tool
    call parser that was configured.

    We only want to do this IF user-provided tools are set, a tool parser
    is configured, "auto" tool choice is enabled, and the request's tool
    choice field indicates that "auto" tool choice should be used.
    """
    return (request.tools and self.tool_parser and self.enable_auto_tools
            and request.tool_choice in ['auto', None])
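
The same condition written out with hypothetical stand-in values (the CLI counterparts of the server-side flags are --enable-auto-tool-choice and --tool-call-parser); this is an illustrative paraphrase, not the method itself:

tools = [{"type": "function", "function": {"name": "get_weather"}}]  # request.tools
tool_parser_configured = True   # a tool parser was registered
enable_auto_tools = True        # auto tool choice was enabled

tool_choice = "auto"            # request.tool_choice; None also matches
print(bool(tools and tool_parser_configured and enable_auto_tools
           and tool_choice in ("auto", None)))  # True

tool_choice = "none"
print(bool(tools and tool_parser_configured and enable_auto_tools
           and tool_choice in ("auto", None)))  # False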

chat_completion_full_generator async

chat_completion_full_generator(
    request: ChatCompletionRequest,
    result_generator: AsyncIterator[RequestOutput],
    request_id: str,
    model_name: str,
    conversation: list[ConversationMessage],
    tokenizer: AnyTokenizer,
    request_metadata: RequestResponseMetadata,
) -> Union[ErrorResponse, ChatCompletionResponse]
Source code in vllm/entrypoints/openai/serving_chat.py
async def chat_completion_full_generator(
    self,
    request: ChatCompletionRequest,
    result_generator: AsyncIterator[RequestOutput],
    request_id: str,
    model_name: str,
    conversation: list[ConversationMessage],
    tokenizer: AnyTokenizer,
    request_metadata: RequestResponseMetadata,
) -> Union[ErrorResponse, ChatCompletionResponse]:

    created_time = int(time.time())
    final_res: Optional[RequestOutput] = None

    try:
        async for res in result_generator:
            final_res = res
    except asyncio.CancelledError:
        return self.create_error_response("Client disconnected")
    except ValueError as e:
        # TODO: Use a vllm-specific Validation Error
        return self.create_error_response(str(e))

    assert final_res is not None

    choices: list[ChatCompletionResponseChoice] = []
    if self.tool_call_id_type == 'kimi_k2':
        history_tool_call_cnt = get_history_tool_calls_cnt(conversation)
    else:
        history_tool_call_cnt = 0

    role = self.get_chat_request_role(request)
    for output in final_res.outputs:
        token_ids = output.token_ids
        out_logprobs = output.logprobs

        if request.logprobs and request.top_logprobs is not None:
            assert out_logprobs is not None, "Did not output logprobs"
            logprobs = self._create_chat_logprobs(
                token_ids=token_ids,
                top_logprobs=out_logprobs,
                num_output_top_logprobs=request.top_logprobs,
                tokenizer=tokenizer,
                return_as_token_id=request.return_tokens_as_token_ids,
            )
        else:
            logprobs = None

        if self.use_harmony:
            reasoning_content, final_content, is_tool_call = (
                parse_chat_output(token_ids))
            if not request.include_reasoning:
                reasoning_content = None

            if is_tool_call:
                # TODO(woosuk): Implement tool call for gpt-oss.
                # For now, only Responses API supports tool call for
                # gpt-oss.
                raise NotImplementedError(
                    "Tool call in Chat Completion API is not supported "
                    "for gpt-oss yet. Please use Responses API instead.")
            else:
                # Normal message
                message = ChatMessage(
                    role=role,
                    reasoning_content=reasoning_content,
                    content=final_content,
                )

            choice_data = ChatCompletionResponseChoice(
                index=output.index,
                message=message,
                logprobs=logprobs,
                finish_reason="tool_calls" if is_tool_call else
                output.finish_reason if output.finish_reason else "stop",
                stop_reason=output.stop_reason,
            )
            choices.append(choice_data)
            continue

        if self.reasoning_parser:
            try:
                reasoning_parser = self.reasoning_parser(tokenizer)
            except RuntimeError as e:
                logger.exception("Error in reasoning parser creation.")
                return self.create_error_response(str(e))
            # If the reasoning parser is enabled,
            # tool calls are extracted exclusively from the content.
            reasoning_content, content = (
                reasoning_parser.extract_reasoning_content(
                    output.text, request=request))
            if not request.include_reasoning:
                reasoning_content = None
        else:
            reasoning_content = None
            content = output.text

        auto_tools_called = False
        # if auto tools are not enabled, and a named tool choice using
        #   outlines is not being used
        if (not self.enable_auto_tools or not self.tool_parser) and \
            (not isinstance(request.tool_choice,
                            ChatCompletionNamedToolChoiceParam
                            ) and request.tool_choice != "required"):
            message = ChatMessage(role=role,
                                  reasoning_content=reasoning_content,
                                  content=content)

        # if the request uses tools and specified a tool choice
        elif request.tool_choice and type(
                request.tool_choice) is ChatCompletionNamedToolChoiceParam:

            tool_call_class = MistralToolCall if isinstance(
                tokenizer, MistralTokenizer) else ToolCall
            message = ChatMessage(
                role=role,
                reasoning_content=reasoning_content,
                content="",
                tool_calls=[
                    tool_call_class(function=FunctionCall(
                        name=request.tool_choice.function.name,
                        arguments=content,
                    ))
                ],
            )

        elif request.tool_choice and request.tool_choice == "required":
            tool_call_class = MistralToolCall if isinstance(
                tokenizer, MistralTokenizer) else ToolCall

            # the fields of FunctionDefinition are a superset of the
            # tool call outputs and can be used for parsing
            assert content is not None
            tool_calls = TypeAdapter(
                list[FunctionDefinition]).validate_json(content)
            tool_call_ids = []
            for tool_call in tool_calls:
                tool_call_ids.append(
                    make_tool_call_id(id_type=self.tool_call_id_type,
                                      func_name=tool_call.name,
                                      idx=history_tool_call_cnt))
                history_tool_call_cnt += 1
            message = ChatMessage(
                role=role,
                content="",
                tool_calls=[
                    tool_call_class(id=tool_call_ids[i],
                                    function=FunctionCall(
                                        name=tool_call.name,
                                        arguments=json.dumps(
                                            tool_call.parameters,
                                            ensure_ascii=False)))
                    for i, tool_call in enumerate(tool_calls)
                ],
                reasoning_content=reasoning_content)

        # if the request doesn't use tool choice
        # OR specifies to not use a tool
        elif not request.tool_choice or request.tool_choice == "none":

            message = ChatMessage(role=role,
                                  reasoning_content=reasoning_content,
                                  content=content)

        # handle when there are tools and tool choice is auto
        elif request.tools and (
                request.tool_choice == "auto"
                or request.tool_choice is None) and self.enable_auto_tools \
                and self.tool_parser:

            try:
                tool_parser = self.tool_parser(tokenizer)
            except RuntimeError as e:
                logger.exception("Error in tool parser creation.")
                return self.create_error_response(str(e))

            tool_call_info = tool_parser.extract_tool_calls(
                content if content is not None else "", request=request)
            # In the OpenAI API the finish_reason is "tool_calls"
            # if the tool choice is auto and the model produced a tool
            # call. The same is not true for named function calls.
            auto_tools_called = tool_call_info.tools_called
            if tool_call_info.tools_called:
                message = ChatMessage(role=role,
                                      reasoning_content=reasoning_content,
                                      content=tool_call_info.content,
                                      tool_calls=tool_call_info.tool_calls)

            else:
                # FOR NOW make it a chat message; we will have to detect
                # the type to make it later.
                ret_content = content

                # Prefer the content returned by the tool parser,
                # since the parser may have modified it.
                if (tool_call_info.content
                        and len(tool_call_info.content) > 0):
                    ret_content = tool_call_info.content
                message = ChatMessage(role=role,
                                      reasoning_content=reasoning_content,
                                      content=ret_content)

        # undetermined case that is still important to handle
        else:
            logger.error(
                "Error in chat_completion_full_generator - cannot determine"
                " if tools should be extracted. Returning a standard chat "
                "completion.")
            message = ChatMessage(role=role,
                                  reasoning_content=reasoning_content,
                                  content=content)

        choice_data = ChatCompletionResponseChoice(
            index=output.index,
            message=message,
            logprobs=logprobs,
            finish_reason="tool_calls" if auto_tools_called else
            output.finish_reason if output.finish_reason else "stop",
            stop_reason=output.stop_reason,
            token_ids=(as_list(output.token_ids)
                       if request.return_token_ids else None),
        )

        choices.append(choice_data)

    if request.echo:
        last_msg_content: Union[str, list[dict[str, str]]] = ""
        if (conversation and "content" in conversation[-1]
                and conversation[-1].get("role") == role):
            last_msg_content = conversation[-1]["content"] or ""
        if isinstance(last_msg_content, list):
            last_msg_content = "\n".join(msg['text']
                                         for msg in last_msg_content)

        for choice in choices:
            full_message = last_msg_content + (choice.message.content
                                               or "")
            choice.message.content = full_message

    assert final_res.prompt_token_ids is not None
    num_prompt_tokens = len(final_res.prompt_token_ids)
    if final_res.encoder_prompt_token_ids is not None:
        num_prompt_tokens += len(final_res.encoder_prompt_token_ids)
    num_generated_tokens = sum(
        len(output.token_ids) for output in final_res.outputs)
    usage = UsageInfo(prompt_tokens=num_prompt_tokens,
                      completion_tokens=num_generated_tokens,
                      total_tokens=num_prompt_tokens +
                      num_generated_tokens)
    if self.enable_prompt_tokens_details and final_res.num_cached_tokens:
        usage.prompt_tokens_details = PromptTokenUsageInfo(
            cached_tokens=final_res.num_cached_tokens)

    request_metadata.final_usage_info = usage

    response = ChatCompletionResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        choices=choices,
        usage=usage,
        prompt_logprobs=clamp_prompt_logprobs(final_res.prompt_logprobs),
        prompt_token_ids=(final_res.prompt_token_ids
                          if request.return_token_ids else None),
        kv_transfer_params=final_res.kv_transfer_params,
    )

    # Log complete response if output logging is enabled
    if self.enable_log_outputs and self.request_logger:
        for choice in choices:
            output_text = ""
            if choice.message.content:
                output_text = choice.message.content
            elif choice.message.tool_calls:
                # For tool calls, log the function name and arguments
                tool_call_descriptions = []
                for tc in choice.message.tool_calls:
                    if hasattr(tc.function, "name") and hasattr(
                            tc.function, "arguments"):
                        tool_call_descriptions.append(
                            f"{tc.function.name}({tc.function.arguments})")
                tool_calls_str = ", ".join(tool_call_descriptions)
                output_text = f"[tool_calls: {tool_calls_str}]"

            if output_text:
                # Get the corresponding output token IDs
                output_token_ids = None
                if choice.index < len(final_res.outputs):
                    output_token_ids = final_res.outputs[
                        choice.index].token_ids

                self.request_logger.log_outputs(
                    request_id=request_id,
                    outputs=output_text,
                    output_token_ids=output_token_ids,
                    finish_reason=choice.finish_reason,
                    is_streaming=False,
                    delta=False,
                )

    return response
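
The tool_choice == "required" branch above expects the model to emit a JSON list of FunctionDefinition-shaped objects. A minimal sketch of that parsing step in isolation (assumes vLLM is installed; the JSON string is hypothetical model output):

import json
from pydantic import TypeAdapter
from vllm.entrypoints.openai.protocol import FunctionDefinition

content = '[{"name": "get_weather", "parameters": {"city": "Paris"}}]'
tool_calls = TypeAdapter(list[FunctionDefinition]).validate_json(content)
for tc in tool_calls:
    # Arguments are re-serialized the same way as in the branch above.
    print(tc.name, json.dumps(tc.parameters, ensure_ascii=False))
# get_weather {"city": "Paris"}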

chat_completion_stream_generator async

chat_completion_stream_generator(
    request: ChatCompletionRequest,
    result_generator: AsyncIterator[RequestOutput],
    request_id: str,
    model_name: str,
    conversation: list[ConversationMessage],
    tokenizer: AnyTokenizer,
    request_metadata: RequestResponseMetadata,
    enable_force_include_usage: bool,
) -> AsyncGenerator[str, None]
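
Every chunk yielded by this generator is a server-sent event of the form data: {json} followed by a blank line, terminated by data: [DONE]. A client-side sketch that consumes that raw wire format (hypothetical URL and model name; the requests package is assumed):

import json
import requests

payload = {
    "model": "my-model",
    "messages": [{"role": "user", "content": "Say hello."}],
    "stream": True,
}
with requests.post("http://localhost:8000/v1/chat/completions",
                   json=payload, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        data = line[len("data: "):]
        if data == "[DONE]":
            break
        chunk = json.loads(data)
        for choice in chunk.get("choices", []):
            print(choice["delta"].get("content") or "", end="")
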
Source code in vllm/entrypoints/openai/serving_chat.py
async def chat_completion_stream_generator(
    self,
    request: ChatCompletionRequest,
    result_generator: AsyncIterator[RequestOutput],
    request_id: str,
    model_name: str,
    conversation: list[ConversationMessage],
    tokenizer: AnyTokenizer,
    request_metadata: RequestResponseMetadata,
    enable_force_include_usage: bool,
) -> AsyncGenerator[str, None]:
    created_time = int(time.time())
    chunk_object_type: Final = "chat.completion.chunk"
    first_iteration = True

    # Send response for each token for each request.n (index)
    num_choices = 1 if request.n is None else request.n
    previous_num_tokens = [0] * num_choices
    finish_reason_sent = [False] * num_choices
    num_prompt_tokens = 0
    num_cached_tokens = None
    if self.use_harmony:
        harmony_parsers = [
            get_streamable_parser_for_assistant()
            for _ in range(num_choices)
        ]

    if isinstance(request.tool_choice, ChatCompletionNamedToolChoiceParam):
        tool_choice_function_name = request.tool_choice.function.name
    else:
        tool_choice_function_name = None

    # Determine whether tools are in use with "auto" tool choice
    tool_choice_auto = (
        not tool_choice_function_name
        and self._should_stream_with_auto_tool_parsing(request))

    all_previous_token_ids: Optional[list[list[int]]]
    function_name_returned = [False] * num_choices
    if self.tool_call_id_type == 'kimi_k2':
        history_tool_call_cnt = get_history_tool_calls_cnt(conversation)
    else:
        history_tool_call_cnt = 0

    # Always track previous_texts for comprehensive output logging
    previous_texts = [""] * num_choices

    # Only one of these will be used, thus previous_texts and
    # all_previous_token_ids will not be used twice in the same iteration.
    if tool_choice_auto or self.reasoning_parser:
        # These are only required in "auto" tool choice case
        all_previous_token_ids = [[]] * num_choices
        # For the case where both reasoning parsing and tool calls are enabled
        added_content_delta_arr = [False] * num_choices
        reasoning_end_arr = [False] * num_choices
    elif request.tool_choice == "required":
        all_previous_token_ids = None
    else:
        all_previous_token_ids = None

    try:
        if self.reasoning_parser:
            reasoning_parser = self.reasoning_parser(tokenizer)
    except RuntimeError as e:
        logger.exception("Error in reasoning parser creation.")
        data = self.create_streaming_error_response(str(e))
        yield f"data: {data}\n\n"
        yield "data: [DONE]\n\n"
        return
    # Prepare the tool parser if it's needed
    try:
        if tool_choice_auto and self.tool_parser:
            tool_parsers: list[Optional[ToolParser]] = [
                self.tool_parser(tokenizer)
            ] * num_choices
        else:
            tool_parsers = [None] * num_choices
    except Exception as e:
        logger.exception("Error in tool parser creation.")
        data = self.create_streaming_error_response(str(e))
        yield f"data: {data}\n\n"
        yield "data: [DONE]\n\n"
        return

    stream_options = request.stream_options
    if stream_options:
        include_usage = stream_options.include_usage \
                        or enable_force_include_usage
        include_continuous_usage = include_usage and \
                                   stream_options.continuous_usage_stats
    else:
        include_usage, include_continuous_usage = False, False

    try:
        async for res in result_generator:
            if res.prompt_token_ids is not None:
                num_prompt_tokens = len(res.prompt_token_ids)
                if res.encoder_prompt_token_ids is not None:
                    num_prompt_tokens += len(res.encoder_prompt_token_ids)

            # We need to do it here, because if there are exceptions in
            # the result_generator, it needs to be sent as the FIRST
            # response (by the try...except).
            if first_iteration:
                num_cached_tokens = res.num_cached_tokens
                # Send first response for each request.n (index) with
                # the role
                role = self.get_chat_request_role(request)

                # NOTE num_choices defaults to 1 so this usually executes
                # once per request
                for i in range(num_choices):
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=DeltaMessage(
                            role=role,
                            content="",
                        ),
                        logprobs=None,
                        finish_reason=None)

                    # return prompt_token_ids only in the very first chunk
                    chunk = ChatCompletionStreamResponse(
                        id=request_id,
                        object=chunk_object_type,
                        created=created_time,
                        choices=[choice_data],
                        model=model_name,
                        prompt_token_ids=(res.prompt_token_ids
                                          if request.return_token_ids else
                                          None))

                    # if continuous usage stats are requested, add it
                    if include_continuous_usage:
                        chunk.usage = UsageInfo(
                            prompt_tokens=num_prompt_tokens,
                            completion_tokens=0,
                            total_tokens=num_prompt_tokens)

                    data = chunk.model_dump_json(exclude_unset=True)
                    yield f"data: {data}\n\n"

                # Send response to echo the input portion of the
                # last message
                if request.echo:
                    last_msg_content: Union[str, list[dict[str, str]]] = ""
                    if conversation and "content" in conversation[
                            -1] and conversation[-1].get("role") == role:
                        last_msg_content = conversation[-1]["content"] or ""

                    if last_msg_content:
                        for i in range(num_choices):
                            choice_data = (
                                ChatCompletionResponseStreamChoice(
                                    index=i,
                                    delta=DeltaMessage(
                                        content=last_msg_content),
                                    logprobs=None,
                                    finish_reason=None))
                            chunk = ChatCompletionStreamResponse(
                                id=request_id,
                                object=chunk_object_type,
                                created=created_time,
                                choices=[choice_data],
                                model=model_name)
                            if include_continuous_usage:
                                chunk.usage = UsageInfo(
                                    prompt_tokens=num_prompt_tokens,
                                    completion_tokens=0,
                                    total_tokens=num_prompt_tokens)

                            data = chunk.model_dump_json(
                                exclude_unset=True)
                            yield f"data: {data}\n\n"
                first_iteration = False

            for output in res.outputs:
                i = output.index
                tool_parser = tool_parsers[i]

                if finish_reason_sent[i]:
                    continue

                if request.logprobs and request.top_logprobs is not None:
                    assert output.logprobs is not None, (
                        "Did not output logprobs")
                    logprobs = self._create_chat_logprobs(
                        token_ids=output.token_ids,
                        top_logprobs=output.logprobs,
                        tokenizer=tokenizer,
                        num_output_top_logprobs=request.top_logprobs,
                        return_as_token_id=request.
                        return_tokens_as_token_ids,
                    )
                else:
                    logprobs = None

                if self.use_harmony:
                    harmony_parser = harmony_parsers[i]
                    for token_id in output.token_ids:
                        harmony_parser.process(token_id)
                    is_reasoning = \
                        harmony_parser.current_channel == "analysis"
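                    # (Harmony's "analysis" channel carries reasoning tokens;
                    #  user-visible content arrives on other channels, e.g.
                    #  "final".)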
                    if not request.include_reasoning and is_reasoning:
                        # Skip the reasoning content.
                        continue
                    delta_text = harmony_parser.last_content_delta or ""
                else:
                    delta_text = output.text

                if not delta_text and not output.token_ids and \
                    not previous_num_tokens[i]:
                    # Chunked prefill case, don't return empty chunks
                    continue

                delta_message: Optional[DeltaMessage]

                # just update previous_texts and previous_token_ids
                if ((tool_choice_auto or self.reasoning_parser)
                        and not self.use_harmony):
                    assert previous_texts is not None
                    assert all_previous_token_ids is not None
                    previous_text = previous_texts[i]
                    previous_token_ids = all_previous_token_ids[i]
                    current_text = previous_text + delta_text
                    # avoid the None + list error.
                    if previous_token_ids:
                        current_token_ids = previous_token_ids + as_list(
                            output.token_ids)
                    else:
                        current_token_ids = as_list(output.token_ids)

                if self.use_harmony:
                    if is_reasoning:
                        delta_message = DeltaMessage(
                            reasoning_content=delta_text)
                    else:
                        delta_message = DeltaMessage(content=delta_text)
                # handle streaming deltas for tools with named tool_choice
                elif tool_choice_function_name:
                    if (self.reasoning_parser and not reasoning_end_arr[i]
                            and not reasoning_parser.is_reasoning_end(
                                previous_token_ids)):
                        assert reasoning_parser is not None
                        delta_message = (
                            reasoning_parser.
                            extract_reasoning_content_streaming(
                                previous_text,
                                current_text,
                                delta_text,
                                previous_token_ids,
                                current_token_ids,
                                output.token_ids,
                            ))
                        # When encountering the think-end id in
                        # delta_token_ids, or in prompt_token_ids
                        # (i.e. {"enable_thinking": False}),
                        # set the reasoning status to end.
                        # Only keep 'content', remove 'reasoning_content'.
                        if reasoning_parser.is_reasoning_end(
                                as_list(output.token_ids)) or (
                                    res.prompt_token_ids
                                    and reasoning_parser.is_reasoning_end(
                                        res.prompt_token_ids)):
                            reasoning_end_arr[i] = True
                            if delta_message and delta_message.content:
                                # This needs to be added to the next
                                # `delta_text`
                                current_text = delta_message.content
                                delta_message.content = None
                            else:
                                current_text = ""
                    else:
                        # Just to add remaining `content`
                        if self.reasoning_parser:
                            delta_text = previous_text + delta_text
                            current_text = ""

                        if function_name_returned[i]:
                            delta_tool_call = DeltaToolCall(
                                function=DeltaFunctionCall(
                                    arguments=delta_text),
                                index=i)
                        else:
                            delta_tool_call = DeltaToolCall(
                                id=make_tool_call_id(),
                                type="function",
                                function=DeltaFunctionCall(
                                    name=tool_choice_function_name,
                                    arguments=delta_text),
                                index=i)
                            function_name_returned[i] = True

                        delta_message = DeltaMessage(tool_calls=[
                            delta_tool_call,
                        ])

                elif request.tool_choice == "required":
                    assert previous_texts is not None
                    previous_text = previous_texts[i]
                    current_text = previous_text + delta_text
                    fn_name_returned = function_name_returned[i]

                    if self.reasoning_parser:
                        _, content = \
                            reasoning_parser.extract_reasoning_content(
                                current_text,
                                request
                            )
                    else:
                        content = current_text
                    delta_message, function_name_returned[i] = (
                        self.extract_tool_call_required_streaming(
                            previous_text=previous_text,
                            current_text=content,
                            delta_text=delta_text,
                            function_name_returned=fn_name_returned,
                            tool_call_idx=history_tool_call_cnt))
                    if (delta_message and delta_message.tool_calls and
                            delta_message.tool_calls[0].id is not None):
                        history_tool_call_cnt += 1

                    # update the previous values for the next iteration
                    previous_texts[i] = current_text

                # handle streaming deltas for tools with "auto" tool choice
                # and reasoning parser
                elif tool_choice_auto and self.reasoning_parser:
                    assert tool_parser is not None
                    assert reasoning_parser is not None
                    assert added_content_delta_arr is not None
                    assert reasoning_end_arr is not None
                    output_token_ids = as_list(output.token_ids)
                    if not reasoning_end_arr[i]:
                        delta_message = (
                            reasoning_parser.
                            extract_reasoning_content_streaming(
                                previous_text,
                                current_text,
                                delta_text,
                                previous_token_ids,
                                current_token_ids,
                                output_token_ids,
                            ))
                        # When encountering the think-end id in
                        # prompt_token_ids (i.e. {"enable_thinking": False}),
                        # set the reasoning status to end.
                        # Remove the text and token ids related
                        # to 'reasoning_content'.
                        if res.prompt_token_ids and \
                            reasoning_parser.is_reasoning_end(
                                res.prompt_token_ids):
                            reasoning_end_arr[i] = True
                            current_token_ids = output_token_ids
                            if delta_message and delta_message.content:
                                current_text = delta_message.content
                                delta_message.content = None
                            else:
                                current_text = ""
                        # When encountering the think-end id in
                        # delta_token_ids, set the reasoning status to end.
                        # Remove the text and token ids related
                        # to 'reasoning_content'.
                        if reasoning_parser.is_reasoning_end(
                                output_token_ids):
                            reasoning_end_arr[i] = True
                            current_token_ids =  \
                                reasoning_parser.extract_content_ids(
                                    output_token_ids)
                            if delta_message and delta_message.content:
                                current_text = delta_message.content
                                delta_message.content = None
                            else:
                                current_text = ""

                    # handle tool calls only after reasoning is done
                    else:
                        delta_token_ids = output_token_ids
                        # First time to tool call,
                        # add the remaining text and token ids
                        # to delta from previous
                        if not added_content_delta_arr[i]:
                            added_content_delta_arr[i] = True
                            previous_text = ""
                            previous_token_ids = []
                            delta_text = current_text
                            delta_token_ids = current_token_ids

                        delta_message = (
                            tool_parser.extract_tool_calls_streaming(
                                previous_text=previous_text,
                                current_text=current_text,
                                delta_text=delta_text,
                                previous_token_ids=previous_token_ids,
                                current_token_ids=current_token_ids,
                                delta_token_ids=delta_token_ids,
                                request=request))
                # when only tool calls
                elif tool_choice_auto:
                    assert tool_parser is not None
                    delta_message = (
                        tool_parser.extract_tool_calls_streaming(
                            previous_text=previous_text,
                            current_text=current_text,
                            delta_text=delta_text,
                            previous_token_ids=previous_token_ids,
                            current_token_ids=current_token_ids,
                            delta_token_ids=output.token_ids,
                            request=request))

                # when only reasoning
                elif self.reasoning_parser:
                    delta_message = (reasoning_parser.
                                     extract_reasoning_content_streaming(
                                         previous_text,
                                         current_text,
                                         delta_text,
                                         previous_token_ids,
                                         current_token_ids,
                                         output.token_ids,
                                     ))
                # handle streaming just a content delta
                else:
                    delta_message = DeltaMessage(content=delta_text)

                # update the previous values for the next iteration
                if tool_choice_auto or self.reasoning_parser:
                    assert previous_texts is not None
                    assert all_previous_token_ids is not None
                    previous_texts[i] = current_text
                    all_previous_token_ids[i] = current_token_ids
                else:
                    # Update for comprehensive logging even in simple case
                    assert previous_texts is not None
                    previous_texts[i] += delta_text

                # set the previous values for the next iteration
                previous_num_tokens[i] += len(output.token_ids)

                # If the message delta is None (e.g. because it was a
                # "control token" for tool calls, or the parser otherwise
                # wasn't ready to send a token), get the next token
                # without streaming a chunk.
                if delta_message is None:
                    continue

                # Log streaming delta if output logging is enabled
                if self.enable_log_outputs and self.request_logger:
                    delta_content = ""
                    if delta_message.content:
                        delta_content = delta_message.content
                    elif delta_message.tool_calls:
                        delta_content = "".join(
                            tc.function.arguments
                            for tc in delta_message.tool_calls
                            if tc.function and tc.function.arguments)

                    if delta_content:
                        self.request_logger.log_outputs(
                            request_id=request_id,
                            outputs=delta_content,
                            output_token_ids=as_list(output.token_ids),
                            finish_reason=output.finish_reason,
                            is_streaming=True,
                            delta=True,
                        )

                if output.finish_reason is None:
                    # Send token-by-token response for each request.n
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=delta_message,
                        logprobs=logprobs,
                        finish_reason=None,
                        token_ids=(as_list(output.token_ids)
                                   if request.return_token_ids else None))

                # if the model is finished generating
                else:
                    # check to make sure we haven't "forgotten" to stream
                    #   any tokens that were generated but previously
                    #   matched by partial json parsing
                    # only happens if we are NOT using guided decoding
                    auto_tools_called = False
                    if tool_parser:
                        auto_tools_called = len(
                            tool_parser.prev_tool_call_arr) > 0
                        index = len(tool_parser.prev_tool_call_arr
                                    ) - 1 if auto_tools_called else 0
                    else:
                        index = 0

                    if self._should_check_for_unstreamed_tool_arg_tokens(
                            delta_message, output) and tool_parser:
                        latest_delta_len = 0
                        if ((isinstance(
                                delta_message.tool_calls[0].function,
                                DeltaFunctionCall)) and isinstance(
                                    delta_message.tool_calls[0].function.
                                    arguments, str)):
                            latest_delta_len = len(
                                delta_message.tool_calls[0].function.
                                arguments)

                        # get the expected call based on partial JSON
                        # parsing which "autocompletes" the JSON
                        expected_call = json.dumps(
                            tool_parser.prev_tool_call_arr[index].get(
                                "arguments", {}),
                            ensure_ascii=False)

                        # get what we've streamed so far for arguments
                        # for the current tool
                        actual_call = tool_parser.streamed_args_for_tool[
                            index]
                        if (latest_delta_len > 0):
                            actual_call = actual_call[:-latest_delta_len]

                        # check to see if there's anything left to stream
                        remaining_call = expected_call.replace(
                            actual_call, "", 1)
                        # set that as a delta message
                        delta_message = DeltaMessage(tool_calls=[
                            DeltaToolCall(index=index,
                                          function=DeltaFunctionCall(
                                              arguments=remaining_call).
                                          model_dump(exclude_none=True))
                        ])

                    # Send the finish response for each request.n only once
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i,
                        delta=delta_message,
                        logprobs=logprobs,
                        finish_reason=output.finish_reason
                        if not auto_tools_called else "tool_calls",
                        stop_reason=output.stop_reason,
                        token_ids=(as_list(output.token_ids)
                                   if request.return_token_ids else None))

                    finish_reason_sent[i] = True

                chunk = ChatCompletionStreamResponse(
                    id=request_id,
                    object=chunk_object_type,
                    created=created_time,
                    choices=[choice_data],
                    model=model_name)

                # handle usage stats if requested & if continuous
                if include_continuous_usage:
                    completion_tokens = previous_num_tokens[i]
                    chunk.usage = UsageInfo(
                        prompt_tokens=num_prompt_tokens,
                        completion_tokens=completion_tokens,
                        total_tokens=num_prompt_tokens + completion_tokens,
                    )

                data = chunk.model_dump_json(exclude_unset=True)
                yield f"data: {data}\n\n"

        # once the final token is handled, if stream_options.include_usage
        # is sent, send the usage
        if include_usage:
            completion_tokens = sum(previous_num_tokens)
            final_usage = UsageInfo(prompt_tokens=num_prompt_tokens,
                                    completion_tokens=completion_tokens,
                                    total_tokens=num_prompt_tokens +
                                    completion_tokens)
            if self.enable_prompt_tokens_details and num_cached_tokens:
                final_usage.prompt_tokens_details = PromptTokenUsageInfo(
                    cached_tokens=num_cached_tokens)

            final_usage_chunk = ChatCompletionStreamResponse(
                id=request_id,
                object=chunk_object_type,
                created=created_time,
                choices=[],
                model=model_name,
                usage=final_usage)
            final_usage_data = (final_usage_chunk.model_dump_json(
                exclude_unset=True, exclude_none=True))
            yield f"data: {final_usage_data}\n\n"

        # Report aggregate usage across all choices to the FastAPI middleware
        num_completion_tokens = sum(previous_num_tokens)
        request_metadata.final_usage_info = UsageInfo(
            prompt_tokens=num_prompt_tokens,
            completion_tokens=num_completion_tokens,
            total_tokens=num_prompt_tokens + num_completion_tokens,
        )

        # Log complete streaming response if output logging is enabled
        if self.enable_log_outputs and self.request_logger:
            # Log the complete response for each choice
            for i in range(num_choices):
                full_text = (
                    previous_texts[i]
                    if previous_texts and i < len(previous_texts) else
                    f"<streaming_complete: {previous_num_tokens[i]} tokens>"
                )
                self.request_logger.log_outputs(
                    request_id=request_id,
                    outputs=full_text,
                    output_token_ids=
                    None,  # Consider also logging all token IDs
                    finish_reason="streaming_complete",
                    is_streaming=True,
                    delta=False,
                )

    except Exception as e:
        # TODO: Use a vllm-specific Validation Error
        logger.exception("Error in chat completion stream generator.")
        data = self.create_streaming_error_response(str(e))
        yield f"data: {data}\n\n"
    # Send the final [DONE] message after all request.n choices are finished
    yield "data: [DONE]\n\n"

create_chat_completion async

create_chat_completion(
    request: ChatCompletionRequest,
    raw_request: Optional[Request] = None,
) -> Union[
    AsyncGenerator[str, None],
    ChatCompletionResponse,
    ErrorResponse,
]

Chat Completion API similar to OpenAI's API.

See https://platform.openai.com/docs/api-reference/chat/create for the API specification. This API mimics the OpenAI Chat Completion API.
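
A minimal non-streaming call against this endpoint, assuming a vLLM server listening on http://localhost:8000/v1 and a placeholder model name (both are assumptions, not values from this module):

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="my-served-model",  # placeholder model name
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is vLLM?"},
    ],
)
print(response.choices[0].message.content)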

Source code in vllm/entrypoints/openai/serving_chat.py
async def create_chat_completion(
    self,
    request: ChatCompletionRequest,
    raw_request: Optional[Request] = None,
) -> Union[AsyncGenerator[str, None], ChatCompletionResponse,
           ErrorResponse]:
    """
    Chat Completion API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/chat/create
    for the API specification. This API mimics the OpenAI
    Chat Completion API.
    """
    error_check_ret = await self._check_model(request)
    if error_check_ret is not None:
        logger.error("Error with model %s", error_check_ret)
        return error_check_ret

    # If the engine is dead, raise the engine's DEAD_ERROR.
    # This is required for the streaming case, where we return a
    # success status before we actually start generating text :).
    if self.engine_client.errored:
        raise self.engine_client.dead_error

    try:
        lora_request = self._maybe_get_adapters(
            request, supports_default_mm_loras=True)

        model_name = self._get_model_name(request.model, lora_request)

        tokenizer = await self.engine_client.get_tokenizer(lora_request)

        tool_parser = self.tool_parser

        if isinstance(tokenizer, MistralTokenizer):
            # because of issues with pydantic we need to potentially
            # re-serialize the tool_calls field of the request
            # for more info: see comment in `maybe_serialize_tool_calls`
            maybe_serialize_tool_calls(request)
            truncate_tool_call_ids(request)
            validate_request_params(request)

        if (request.tool_choice == "auto" and
                not (self.enable_auto_tools and tool_parser is not None)
                and not isinstance(tokenizer, MistralTokenizer)
                and not self.use_harmony):
            # for hf tokenizers, "auto" tools requires
            # --enable-auto-tool-choice and --tool-call-parser
            return self.create_error_response(
                "\"auto\" tool choice requires "
                "--enable-auto-tool-choice and --tool-call-parser to be set"
            )

        if (request.tools is None
                or (request.tool_choice == "none"
                    and self.exclude_tools_when_tool_choice_none)):
            tool_dicts = None
        else:
            tool_dicts = [tool.model_dump() for tool in request.tools]

        if not self.use_harmony:
            # Common case.
            (
                conversation,
                request_prompts,
                engine_prompts,
            ) = await self._preprocess_chat(
                request,
                tokenizer,
                request.messages,
                chat_template=request.chat_template or self.chat_template,
                chat_template_content_format=self.
                chat_template_content_format,
                add_generation_prompt=request.add_generation_prompt,
                continue_final_message=request.continue_final_message,
                tool_dicts=tool_dicts,
                documents=request.documents,
                chat_template_kwargs=request.chat_template_kwargs,
                tool_parser=tool_parser,
                truncate_prompt_tokens=request.truncate_prompt_tokens,
                add_special_tokens=request.add_special_tokens,
            )
        else:
            # For GPT-OSS.
            (
                conversation,
                request_prompts,
                engine_prompts,
            ) = self._make_request_with_harmony(request)
    except (ValueError, TypeError, RuntimeError,
            jinja2.TemplateError) as e:
        logger.exception("Error in preprocessing prompt inputs")
        return self.create_error_response(f"{e} {e.__cause__}")

    request_id = "chatcmpl-" \
                 f"{self._base_request_id(raw_request, request.request_id)}"

    request_metadata = RequestResponseMetadata(request_id=request_id)
    if raw_request:
        raw_request.state.request_metadata = request_metadata

    # Schedule the request and get the result generator.
    generators: list[AsyncGenerator[RequestOutput, None]] = []
    try:
        for i, engine_prompt in enumerate(engine_prompts):
            sampling_params: Union[SamplingParams, BeamSearchParams]

            if self.default_sampling_params is None:
                self.default_sampling_params = {}

            max_tokens = get_max_tokens(
                max_model_len=self.max_model_len,
                request=request,
                input_length=len(engine_prompt["prompt_token_ids"]),
                default_sampling_params=self.default_sampling_params)

            if request.use_beam_search:
                sampling_params = request.to_beam_search_params(
                    max_tokens, self.default_sampling_params)
            else:
                sampling_params = request.to_sampling_params(
                    max_tokens, self.model_config.logits_processor_pattern,
                    self.default_sampling_params)

            self._log_inputs(request_id,
                             request_prompts[i],
                             params=sampling_params,
                             lora_request=lora_request)

            trace_headers = (None if raw_request is None else await
                             self._get_trace_headers(raw_request.headers))

            if isinstance(sampling_params, BeamSearchParams):
                generator = self.engine_client.beam_search(
                    prompt=engine_prompt,
                    request_id=request_id,
                    params=sampling_params,
                    lora_request=lora_request,
                )
            else:
                generator = self.engine_client.generate(
                    engine_prompt,
                    sampling_params,
                    request_id,
                    lora_request=lora_request,
                    trace_headers=trace_headers,
                    priority=request.priority,
                )

            generators.append(generator)
    except ValueError as e:
        # TODO: Use a vllm-specific Validation Error
        return self.create_error_response(str(e))

    assert len(generators) == 1
    result_generator, = generators

    # Streaming response
    if request.stream:
        return self.chat_completion_stream_generator(
            request,
            result_generator,
            request_id,
            model_name,
            conversation,
            tokenizer,
            request_metadata,
            enable_force_include_usage=self.enable_force_include_usage)

    try:
        return await self.chat_completion_full_generator(
            request, result_generator, request_id, model_name,
            conversation, tokenizer, request_metadata)
    except ValueError as e:
        # TODO: Use a vllm-specific Validation Error
        return self.create_error_response(str(e))

extract_tool_call_required_streaming

extract_tool_call_required_streaming(
    previous_text: str,
    current_text: Optional[str],
    delta_text: str,
    function_name_returned: bool,
    tool_call_idx: Optional[int] = None,
) -> tuple[Optional[DeltaMessage], bool]
Source code in vllm/entrypoints/openai/serving_chat.py
def extract_tool_call_required_streaming(
    self,
    previous_text: str,
    current_text: Optional[str],
    delta_text: str,
    function_name_returned: bool,
    tool_call_idx: Optional[int] = None
) -> tuple[Optional[DeltaMessage], bool]:
    if current_text is None or current_text == "":
        # if the current text is empty, we cannot parse it
        return None, function_name_returned
    try:
        obj = partial_json_parser.loads(current_text)
    except partial_json_parser.core.exceptions.MalformedJSON:
        logger.debug('not enough tokens to parse into JSON yet')
        obj = None

    # check whether the current text is a valid array
    # containing a partial tool-calling object;
    # if not, wait for the next delta
    if obj is None or not isinstance(obj, list) or not len(obj) > 0:
        function_name_returned = False
        delta_message = None
    else:
        _, finishes_previous_tool = OpenAIServingChat._filter_delta_text(
            delta_text, previous_text)
        # take the last tool call from the generated list
        current_tool_call = obj[-1]

        # once parameters have been generated the name is complete as well
        if not finishes_previous_tool and ("name" not in current_tool_call
                                           or "parameters"
                                           not in current_tool_call):
            function_name_returned = False
            delta_message = None
        else:
            if not function_name_returned:
                # get partly generated arguments from the latest tool call
                param_match = re.search(r'.*"parameters":\s*(.*)',
                                        current_text)
                arguments = param_match.group(1) if param_match else ""
                arguments, _ = OpenAIServingChat._filter_delta_text(
                    arguments, previous_text)

                # if this iteration finishes a previous tool call but a
                # new incomplete tool is already generated, take the
                # previous from the list
                if (finishes_previous_tool
                        and "parameters" not in current_tool_call):
                    current_tool_call = obj[-2]

                function_name_returned = True
                tool_call_id = make_tool_call_id(
                    id_type=self.tool_call_id_type,
                    func_name=current_tool_call["name"],
                    idx=tool_call_idx)
                delta_message = DeltaMessage(tool_calls=[
                    DeltaToolCall(id=tool_call_id,
                                  function=DeltaFunctionCall(
                                      name=current_tool_call["name"],
                                      arguments=arguments),
                                  index=len(obj) - 1,
                                  type="function")
                ])

            else:
                delta_text, _ = OpenAIServingChat._filter_delta_text(
                    delta_text, previous_text)

                if delta_text != "":
                    delta_message = DeltaMessage(tool_calls=[
                        DeltaToolCall(
                            function=DeltaFunctionCall(
                                # The OpenAI API returns None
                                # instead of repeating the name every time
                                name=None,
                                arguments=delta_text),
                            index=len(obj) - 1)
                    ])
                else:
                    delta_message = None

    return delta_message, function_name_returned
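
For intuition: with tool_choice="required", the accumulated current_text is expected to be a JSON array of {"name": ..., "parameters": ...} objects, and partial_json_parser re-parses the growing prefix on every delta. A small illustrative sketch; the prefixes and the behaviour described in the comments are assumptions about typical inputs, not outputs captured from this module:

import partial_json_parser

# A prefix with only the function name: no "parameters" key yet, so the
# method above returns (None, False) and waits for more tokens.
print(partial_json_parser.loads('[{"name": "get_weather"'))

# Once "parameters" starts to appear, the first emitted delta carries the
# tool-call id, the name, and the argument text generated so far; later
# deltas carry only the additional argument text (with name=None).
print(partial_json_parser.loads(
    '[{"name": "get_weather", "parameters": {"city": "Par'))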

get_chat_request_role

get_chat_request_role(
    request: ChatCompletionRequest,
) -> str
Source code in vllm/entrypoints/openai/serving_chat.py
def get_chat_request_role(self, request: ChatCompletionRequest) -> str:
    if request.add_generation_prompt:
        return self.response_role
    return request.messages[-1]["role"]