
vllm.v1.engine.utils

STARTUP_POLL_PERIOD_MS module-attribute

STARTUP_POLL_PERIOD_MS = 10000

logger module-attribute

logger = init_logger(__name__)

CoreEngine

One per data parallel rank, used to track state during handshaking.

Source code in vllm/v1/engine/utils.py
class CoreEngine:
    """One per data parallel rank, used to track state during handshaking."""

    def __init__(self, index: int = 0, local: bool = True):
        self.local = local
        self.identity = index.to_bytes(2, "little")

        self.state = CoreEngineState.NEW
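
The two-byte little-endian identity doubles as the engine's ZMQ routing identity; wait_for_engine_startup (documented below) recovers the data parallel rank from it with int.from_bytes. A minimal round-trip sketch:

engine = CoreEngine(index=3, local=False)
assert engine.identity == (3).to_bytes(2, "little")
assert int.from_bytes(engine.identity, "little") == 3
assert engine.state == CoreEngineState.NEW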

identity instance-attribute

identity = to_bytes(2, 'little')

local instance-attribute

local = local

state instance-attribute

state = NEW

__init__

__init__(index: int = 0, local: bool = True)
Source code in vllm/v1/engine/utils.py
def __init__(self, index: int = 0, local: bool = True):
    self.local = local
    self.identity = index.to_bytes(2, "little")

    self.state = CoreEngineState.NEW

CoreEngineActorManager

Utility class to handle creation, readiness, and shutdown of core engine Ray actors used by the AsyncLLM and LLMEngine.

Different from CoreEngineProcManager, this class manages core engines for both local and remote nodes.

Source code in vllm/v1/engine/utils.py
class CoreEngineActorManager:
    """
    Utility class to handle creation, readiness, and shutdown
    of core engine Ray actors used by the AsyncLLM and LLMEngine.

    Different from CoreEngineProcManager, this class manages
    core engines for both local and remote nodes.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        addresses: EngineZmqAddresses,
        executor_class: type[Executor],
        log_stats: bool,
        placement_groups: Optional[list["PlacementGroup"]] = None,
        local_dp_ranks: Optional[list[int]] = None,
    ):
        import copy

        import ray
        from ray.runtime_env import RuntimeEnv
        from ray.util.scheduling_strategies import (
            PlacementGroupSchedulingStrategy)

        from vllm.v1.engine.core import DPEngineCoreActor

        self.local_engine_actors: list[ray.ActorHandle] = []
        self.remote_engine_actors: list[ray.ActorHandle] = []

        env_vars_list = get_env_vars_to_copy(destination="DPEngineCoreActor")
        self.env_vars_dict = {
            name: os.environ[name]
            for name in env_vars_list if name in os.environ
        }
        runtime_env = RuntimeEnv(env_vars=self.env_vars_dict)

        self.addresses = addresses
        self.executor_class = executor_class
        self.log_stats = log_stats
        dp_size = vllm_config.parallel_config.data_parallel_size
        local_engine_count = \
            vllm_config.parallel_config.data_parallel_size_local
        world_size = vllm_config.parallel_config.world_size

        if ray.is_initialized():
            logger.info(
                "Ray is already initialized. Skipping Ray initialization.")
        else:
            ray.init()

        if placement_groups is not None:
            assert local_dp_ranks is not None, (
                "local_dp_ranks must be provided if "
                "placement_groups is provided")
            assert len(placement_groups) == len(local_dp_ranks), (
                "placement_groups and local_dp_ranks must "
                "have the same length")
            logger.info("Using provided placement groups")
            # TODO(rui): validate passed-in placement groups
            self.created_placement_groups = []
        else:
            placement_groups, local_dp_ranks = \
                CoreEngineActorManager.create_dp_placement_groups(vllm_config)
            self.created_placement_groups = placement_groups
        assert len(placement_groups) == dp_size, (
            "Number of placement groups must match data parallel size")

        self.placement_group_is_local = []
        refs = []
        for index, local_index, pg in zip(range(dp_size), local_dp_ranks,
                                          placement_groups):
            dp_vllm_config = copy.deepcopy(vllm_config)
            dp_vllm_config.parallel_config.placement_group = pg
            local_client = index < local_engine_count
            actor = ray.remote(DPEngineCoreActor).options(
                scheduling_strategy=PlacementGroupSchedulingStrategy(
                    placement_group=pg,
                    placement_group_bundle_index=world_size,
                ),
                runtime_env=runtime_env).remote(vllm_config=dp_vllm_config,
                                                executor_class=executor_class,
                                                log_stats=log_stats,
                                                local_client=local_client,
                                                addresses=addresses,
                                                dp_rank=index,
                                                local_dp_rank=local_index)
            if local_client:
                self.local_engine_actors.append(actor)
            else:
                self.remote_engine_actors.append(actor)
            self.placement_group_is_local.append(local_client)
            refs.append(actor.wait_for_init.remote())

        ray.get(refs)
        self.run_refs = []
        for actor in self.local_engine_actors + self.remote_engine_actors:
            self.run_refs.append(actor.run.remote())

    @staticmethod
    def create_dp_placement_groups(
            vllm_config: VllmConfig
    ) -> tuple[list["PlacementGroup"], list[int]]:
        """
        Create placement groups for data parallel.
        """

        import ray
        from ray._private.state import available_resources_per_node
        from ray.util.state import list_nodes

        logger.info("Creating placement groups for data parallel")
        dp_master_ip = \
            vllm_config.parallel_config.data_parallel_master_ip
        num_pg_to_create = vllm_config.parallel_config.data_parallel_size
        local_engine_count = \
            vllm_config.parallel_config.data_parallel_size_local

        nodes = sorted(list_nodes(filters=[("state", "=", "ALIVE")]),
                       key=lambda node: node.node_ip != dp_master_ip)
        assert nodes[0].node_ip == dp_master_ip, (
            "The head node is missing or dead")
        assert len(nodes) == 1 or nodes[1].node_ip != dp_master_ip, (
            "There can only be one head node")

        available_resources = available_resources_per_node()
        world_size = vllm_config.parallel_config.world_size
        placement_groups: list[PlacementGroup] = []
        local_dp_ranks: list[int] = []

        for node in nodes:
            node_ip = node.node_ip
            node_resources = available_resources[node.node_id]
            if "GPU" not in node_resources:
                continue
            # For now, each DP rank can only be assigned to one node
            # TODO(rui): support allocating a single DP rank
            # to multiple nodes
            available_engine_count = int(node_resources["GPU"]) // world_size
            if node_ip == dp_master_ip:
                assert available_engine_count >= local_engine_count, (
                    "Not enough resources to allocate DP ranks "
                    f"on DP master node {node_ip}")
                for i in range(local_engine_count):
                    bundles = [{
                        "GPU": 1.0,
                        "node:" + dp_master_ip: 0.001
                    }] * world_size + [{
                        "CPU": 1.0
                    }]
                    pg = ray.util.placement_group(
                        name=f"dp_rank_{len(placement_groups)}",
                        strategy="STRICT_PACK",
                        bundles=bundles,
                    )
                    placement_groups.append(pg)
                    local_dp_ranks.append(i)
            else:
                for i in range(available_engine_count):
                    if len(placement_groups) == num_pg_to_create:
                        break
                    bundles = [{"GPU": 1.0}] * world_size + [{"CPU": 1.0}]
                    pg = ray.util.placement_group(
                        name=f"dp_rank_{len(placement_groups)}",
                        strategy="STRICT_PACK",
                        bundles=bundles,
                    )
                    placement_groups.append(pg)
                    local_dp_ranks.append(i)
        if len(placement_groups) < num_pg_to_create:
            raise ValueError(
                f"Not enough resources to allocate {num_pg_to_create} "
                "placement groups, only created "
                f"{len(placement_groups)} placement groups. "
                "Available resources: "
                f"{available_resources}")
        return placement_groups, local_dp_ranks

    @staticmethod
    def add_dp_placement_groups(
        old_vllm_config: VllmConfig, new_data_parallel_size: int
    ) -> tuple[list["PlacementGroup"], list[int]]:
        """
        Add placement groups for new data parallel size.
        """
        import ray
        from ray._private.state import (available_resources_per_node,
                                        total_resources_per_node)
        from ray.util.state import list_nodes

        old_dp_size = old_vllm_config.parallel_config.data_parallel_size
        num_pg_to_create = new_data_parallel_size - old_dp_size

        if num_pg_to_create <= 0:
            return [], []

        dp_master_ip = old_vllm_config.parallel_config.data_parallel_master_ip
        world_size = old_vllm_config.parallel_config.world_size

        nodes = list_nodes()
        nodes = sorted(nodes, key=lambda node: node.node_ip != dp_master_ip)
        assert nodes[0].node_ip == dp_master_ip, (
            "The first node must be the head node")
        assert len(nodes) == 1 or nodes[1].node_ip != dp_master_ip, (
            "There can only be one head node")

        available_resources = available_resources_per_node()
        total_resources = total_resources_per_node()

        placement_groups = []
        local_dp_ranks = []
        num_pg_created = 0

        for node in nodes:
            if num_pg_created >= num_pg_to_create:
                break

            node_ip = node.node_ip
            node_id = node.node_id
            available_gpus = int(available_resources[node_id]["GPU"])

            # Get total GPUs on this node from the node's resources
            # Ray stores node resources with node ID as key
            total_gpus = int(total_resources[node_id]["GPU"])

            # Calculate used GPUs and used engines on this node
            used_gpus = max(0, total_gpus - available_gpus)
            used_engines_on_node = used_gpus // world_size

            # Calculate how many new engines this node can accommodate
            available_engine_count = available_gpus // world_size

            # Create placement groups for new engines on this node
            for i in range(available_engine_count):
                if num_pg_created >= num_pg_to_create:
                    break

                rank = old_dp_size + num_pg_created

                # Create bundles with node constraint for master node
                if node_ip == dp_master_ip:
                    bundles = [{
                        "GPU": 1.0,
                        "node:" + dp_master_ip: 0.001
                    }] * world_size + [{
                        "CPU": 1.0
                    }]
                else:
                    bundles = [{"GPU": 1.0}] * world_size + [{"CPU": 1.0}]

                pg = ray.util.placement_group(
                    name=f"dp_rank_{rank}",
                    strategy="STRICT_PACK",
                    bundles=bundles,
                )
                placement_groups.append(pg)

                # Local rank starts from the number of engines already used
                # on this node
                local_rank = used_engines_on_node + i
                local_dp_ranks.append(local_rank)
                num_pg_created += 1

        return placement_groups, local_dp_ranks

    def scale_up_elastic_ep(self, cur_vllm_config: VllmConfig,
                            new_data_parallel_size: int) -> None:
        import copy

        import ray
        from ray.runtime_env import RuntimeEnv
        from ray.util.scheduling_strategies import (
            PlacementGroupSchedulingStrategy)

        from vllm.v1.engine.core import DPEngineCoreActor

        cur_data_parallel_size = len(self.local_engine_actors) + \
            len(self.remote_engine_actors)

        assert new_data_parallel_size > cur_data_parallel_size, (
            f"New data parallel size {new_data_parallel_size} must be greater "
            f"than current data parallel size {cur_data_parallel_size} "
            "for scale up")

        placement_groups, local_dp_ranks = \
            self.add_dp_placement_groups(
                cur_vllm_config, new_data_parallel_size)

        world_size = cur_vllm_config.parallel_config.world_size
        dp_master_ip = cur_vllm_config.parallel_config.data_parallel_master_ip
        new_local_engines = 0

        runtime_env = RuntimeEnv(env_vars=self.env_vars_dict
                                 | {"VLLM_ELASTIC_EP_SCALE_UP_LAUNCH": "1"})
        for i, (pg,
                local_rank) in enumerate(zip(placement_groups,
                                             local_dp_ranks)):
            rank = cur_data_parallel_size + i
            dp_vllm_config = copy.deepcopy(cur_vllm_config)
            dp_vllm_config.parallel_config.data_parallel_size = \
                new_data_parallel_size
            dp_vllm_config.parallel_config.placement_group = pg

            # Check if this placement group is on the head node
            local_client = any(
                bundle.get("node:" + dp_master_ip, 0) > 0
                for bundle in pg.bundle_specs)

            if local_client:
                new_local_engines += 1
                # Update data_parallel_size_local
                dp_vllm_config.parallel_config.data_parallel_size_local = (
                    cur_vllm_config.parallel_config.data_parallel_size_local +
                    new_local_engines)

            actor = ray.remote(DPEngineCoreActor).options(
                scheduling_strategy=PlacementGroupSchedulingStrategy(
                    placement_group=pg,
                    placement_group_bundle_index=world_size,
                ),
                runtime_env=runtime_env).remote(
                    vllm_config=dp_vllm_config,
                    executor_class=self.executor_class,
                    log_stats=self.log_stats,
                    local_client=local_client,
                    addresses=self.addresses,
                    dp_rank=rank,
                    local_dp_rank=local_rank)

            if local_client:
                self.local_engine_actors.append(actor)
            else:
                self.remote_engine_actors.append(actor)
            self.created_placement_groups.append(pg)
            self.placement_group_is_local.append(local_client)

        ray.get([
            actor.wait_for_init.remote()
            for actor in (self.local_engine_actors[-new_local_engines:]
                          if new_local_engines > 0 else []) +
            self.remote_engine_actors[-(len(placement_groups) -
                                        new_local_engines):]
        ])

        actors = (self.local_engine_actors[-new_local_engines:]
                  if new_local_engines > 0 else []) + \
            self.remote_engine_actors[-(len(placement_groups) -
                                        new_local_engines):]

        for actor in actors:
            self.run_refs.append(actor.run.remote())

        cur_vllm_config.parallel_config.data_parallel_size = \
            new_data_parallel_size
        # Update old_vllm_config with new data_parallel_size_local if any new
        # local engines were added
        if new_local_engines > 0:
            cur_vllm_config.parallel_config.data_parallel_size_local += \
                new_local_engines

    def scale_down_elastic_ep(self, cur_data_parallel_size: int,
                              new_data_parallel_size: int) -> None:
        import ray
        assert cur_data_parallel_size > new_data_parallel_size, (
            f"cur_data_parallel_size {cur_data_parallel_size} must be greater "
            f"than new_data_parallel_size {new_data_parallel_size} "
            "for scale down")
        for _ in range(cur_data_parallel_size - new_data_parallel_size):
            pg = self.created_placement_groups.pop()
            is_local = self.placement_group_is_local.pop()
            if is_local:
                self.local_engine_actors.pop()
            else:
                self.remote_engine_actors.pop()
            ray.util.remove_placement_group(pg)

    def get_run_refs(self):
        return self.run_refs

    def close(self):
        import ray
        for actor in self.local_engine_actors + self.remote_engine_actors:
            ray.kill(actor)
        for pg in self.created_placement_groups:
            ray.util.remove_placement_group(pg)

addresses instance-attribute

addresses = addresses

created_placement_groups instance-attribute

created_placement_groups = []

env_vars_dict instance-attribute

env_vars_dict = {
    name: (environ[name])
    for name in env_vars_list
    if name in environ
}

executor_class instance-attribute

executor_class = executor_class

local_engine_actors instance-attribute

local_engine_actors: list[ActorHandle] = []

log_stats instance-attribute

log_stats = log_stats

placement_group_is_local instance-attribute

placement_group_is_local = []

remote_engine_actors instance-attribute

remote_engine_actors: list[ActorHandle] = []

run_refs instance-attribute

run_refs = []

__init__

__init__(
    vllm_config: VllmConfig,
    addresses: EngineZmqAddresses,
    executor_class: type[Executor],
    log_stats: bool,
    placement_groups: Optional[list[PlacementGroup]] = None,
    local_dp_ranks: Optional[list[int]] = None,
)
Source code in vllm/v1/engine/utils.py
def __init__(
    self,
    vllm_config: VllmConfig,
    addresses: EngineZmqAddresses,
    executor_class: type[Executor],
    log_stats: bool,
    placement_groups: Optional[list["PlacementGroup"]] = None,
    local_dp_ranks: Optional[list[int]] = None,
):
    import copy

    import ray
    from ray.runtime_env import RuntimeEnv
    from ray.util.scheduling_strategies import (
        PlacementGroupSchedulingStrategy)

    from vllm.v1.engine.core import DPEngineCoreActor

    self.local_engine_actors: list[ray.ActorHandle] = []
    self.remote_engine_actors: list[ray.ActorHandle] = []

    env_vars_list = get_env_vars_to_copy(destination="DPEngineCoreActor")
    self.env_vars_dict = {
        name: os.environ[name]
        for name in env_vars_list if name in os.environ
    }
    runtime_env = RuntimeEnv(env_vars=self.env_vars_dict)

    self.addresses = addresses
    self.executor_class = executor_class
    self.log_stats = log_stats
    dp_size = vllm_config.parallel_config.data_parallel_size
    local_engine_count = \
        vllm_config.parallel_config.data_parallel_size_local
    world_size = vllm_config.parallel_config.world_size

    if ray.is_initialized():
        logger.info(
            "Ray is already initialized. Skipping Ray initialization.")
    else:
        ray.init()

    if placement_groups is not None:
        assert local_dp_ranks is not None, (
            "local_dp_ranks must be provided if "
            "placement_groups is provided")
        assert len(placement_groups) == len(local_dp_ranks), (
            "placement_groups and local_dp_ranks must "
            "have the same length")
        logger.info("Using provided placement groups")
        # TODO(rui): validate passed-in placement groups
        self.created_placement_groups = []
    else:
        placement_groups, local_dp_ranks = \
            CoreEngineActorManager.create_dp_placement_groups(vllm_config)
        self.created_placement_groups = placement_groups
    assert len(placement_groups) == dp_size, (
        "Number of placement groups must match data parallel size")

    self.placement_group_is_local = []
    refs = []
    for index, local_index, pg in zip(range(dp_size), local_dp_ranks,
                                      placement_groups):
        dp_vllm_config = copy.deepcopy(vllm_config)
        dp_vllm_config.parallel_config.placement_group = pg
        local_client = index < local_engine_count
        actor = ray.remote(DPEngineCoreActor).options(
            scheduling_strategy=PlacementGroupSchedulingStrategy(
                placement_group=pg,
                placement_group_bundle_index=world_size,
            ),
            runtime_env=runtime_env).remote(vllm_config=dp_vllm_config,
                                            executor_class=executor_class,
                                            log_stats=log_stats,
                                            local_client=local_client,
                                            addresses=addresses,
                                            dp_rank=index,
                                            local_dp_rank=local_index)
        if local_client:
            self.local_engine_actors.append(actor)
        else:
            self.remote_engine_actors.append(actor)
        self.placement_group_is_local.append(local_client)
        refs.append(actor.wait_for_init.remote())

    ray.get(refs)
    self.run_refs = []
    for actor in self.local_engine_actors + self.remote_engine_actors:
        self.run_refs.append(actor.run.remote())
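
launch_core_engines (documented below) constructs this manager when parallel_config.data_parallel_backend == "ray". Constructing it directly follows the same shape; in this hedged sketch, vllm_config, addresses and executor_class are assumed to already exist:

manager = CoreEngineActorManager(
    vllm_config=vllm_config,        # assumed: a populated VllmConfig
    addresses=addresses,            # assumed: EngineZmqAddresses from the front-end
    executor_class=executor_class,  # assumed: the Executor subclass in use
    log_stats=False,
)
run_refs = manager.get_run_refs()   # ObjectRefs for each actor's run() loop
# ... serve requests over the ZMQ addresses ...
manager.close()                     # kill the actors and remove placement groups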

add_dp_placement_groups staticmethod

add_dp_placement_groups(
    old_vllm_config: VllmConfig, new_data_parallel_size: int
) -> tuple[list[PlacementGroup], list[int]]

Add placement groups for new data parallel size.

Source code in vllm/v1/engine/utils.py
@staticmethod
def add_dp_placement_groups(
    old_vllm_config: VllmConfig, new_data_parallel_size: int
) -> tuple[list["PlacementGroup"], list[int]]:
    """
    Add placement groups for new data parallel size.
    """
    import ray
    from ray._private.state import (available_resources_per_node,
                                    total_resources_per_node)
    from ray.util.state import list_nodes

    old_dp_size = old_vllm_config.parallel_config.data_parallel_size
    num_pg_to_create = new_data_parallel_size - old_dp_size

    if num_pg_to_create <= 0:
        return [], []

    dp_master_ip = old_vllm_config.parallel_config.data_parallel_master_ip
    world_size = old_vllm_config.parallel_config.world_size

    nodes = list_nodes()
    nodes = sorted(nodes, key=lambda node: node.node_ip != dp_master_ip)
    assert nodes[0].node_ip == dp_master_ip, (
        "The first node must be the head node")
    assert len(nodes) == 1 or nodes[1].node_ip != dp_master_ip, (
        "There can only be one head node")

    available_resources = available_resources_per_node()
    total_resources = total_resources_per_node()

    placement_groups = []
    local_dp_ranks = []
    num_pg_created = 0

    for node in nodes:
        if num_pg_created >= num_pg_to_create:
            break

        node_ip = node.node_ip
        node_id = node.node_id
        available_gpus = int(available_resources[node_id]["GPU"])

        # Get total GPUs on this node from the node's resources
        # Ray stores node resources with node ID as key
        total_gpus = int(total_resources[node_id]["GPU"])

        # Calculate used GPUs and used engines on this node
        used_gpus = max(0, total_gpus - available_gpus)
        used_engines_on_node = used_gpus // world_size

        # Calculate how many new engines this node can accommodate
        available_engine_count = available_gpus // world_size

        # Create placement groups for new engines on this node
        for i in range(available_engine_count):
            if num_pg_created >= num_pg_to_create:
                break

            rank = old_dp_size + num_pg_created

            # Create bundles with node constraint for master node
            if node_ip == dp_master_ip:
                bundles = [{
                    "GPU": 1.0,
                    "node:" + dp_master_ip: 0.001
                }] * world_size + [{
                    "CPU": 1.0
                }]
            else:
                bundles = [{"GPU": 1.0}] * world_size + [{"CPU": 1.0}]

            pg = ray.util.placement_group(
                name=f"dp_rank_{rank}",
                strategy="STRICT_PACK",
                bundles=bundles,
            )
            placement_groups.append(pg)

            # Local rank starts from the number of engines already used
            # on this node
            local_rank = used_engines_on_node + i
            local_dp_ranks.append(local_rank)
            num_pg_created += 1

    return placement_groups, local_dp_ranks
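
To illustrate the local-rank arithmetic above, a small worked example with hypothetical numbers:

world_size = 2                      # hypothetical GPUs per engine (TP * PP)
total_gpus, available_gpus = 8, 4   # hypothetical node occupancy
used_engines_on_node = max(0, total_gpus - available_gpus) // world_size   # -> 2
available_engine_count = available_gpus // world_size                      # -> 2
# New engines placed on this node get local ranks 2 and 3, while their
# global DP ranks continue from old_dp_size.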

close

close()
Source code in vllm/v1/engine/utils.py
def close(self):
    import ray
    for actor in self.local_engine_actors + self.remote_engine_actors:
        ray.kill(actor)
    for pg in self.created_placement_groups:
        ray.util.remove_placement_group(pg)

create_dp_placement_groups staticmethod

create_dp_placement_groups(
    vllm_config: VllmConfig,
) -> tuple[list[PlacementGroup], list[int]]

Create placement groups for data parallel.

Source code in vllm/v1/engine/utils.py
@staticmethod
def create_dp_placement_groups(
        vllm_config: VllmConfig
) -> tuple[list["PlacementGroup"], list[int]]:
    """
    Create placement groups for data parallel.
    """

    import ray
    from ray._private.state import available_resources_per_node
    from ray.util.state import list_nodes

    logger.info("Creating placement groups for data parallel")
    dp_master_ip = \
        vllm_config.parallel_config.data_parallel_master_ip
    num_pg_to_create = vllm_config.parallel_config.data_parallel_size
    local_engine_count = \
        vllm_config.parallel_config.data_parallel_size_local

    nodes = sorted(list_nodes(filters=[("state", "=", "ALIVE")]),
                   key=lambda node: node.node_ip != dp_master_ip)
    assert nodes[0].node_ip == dp_master_ip, (
        "The head node is missing or dead")
    assert len(nodes) == 1 or nodes[1].node_ip != dp_master_ip, (
        "There can only be one head node")

    available_resources = available_resources_per_node()
    world_size = vllm_config.parallel_config.world_size
    placement_groups: list[PlacementGroup] = []
    local_dp_ranks: list[int] = []

    for node in nodes:
        node_ip = node.node_ip
        node_resources = available_resources[node.node_id]
        if "GPU" not in node_resources:
            continue
        # For now, each DP rank can only be assigned to one node
        # TODO(rui): support allocating a single DP rank
        # to multiple nodes
        available_engine_count = int(node_resources["GPU"]) // world_size
        if node_ip == dp_master_ip:
            assert available_engine_count >= local_engine_count, (
                "Not enough resources to allocate DP ranks "
                f"on DP master node {node_ip}")
            for i in range(local_engine_count):
                bundles = [{
                    "GPU": 1.0,
                    "node:" + dp_master_ip: 0.001
                }] * world_size + [{
                    "CPU": 1.0
                }]
                pg = ray.util.placement_group(
                    name=f"dp_rank_{len(placement_groups)}",
                    strategy="STRICT_PACK",
                    bundles=bundles,
                )
                placement_groups.append(pg)
                local_dp_ranks.append(i)
        else:
            for i in range(available_engine_count):
                if len(placement_groups) == num_pg_to_create:
                    break
                bundles = [{"GPU": 1.0}] * world_size + [{"CPU": 1.0}]
                pg = ray.util.placement_group(
                    name=f"dp_rank_{len(placement_groups)}",
                    strategy="STRICT_PACK",
                    bundles=bundles,
                )
                placement_groups.append(pg)
                local_dp_ranks.append(i)
    if len(placement_groups) < num_pg_to_create:
        raise ValueError(
            f"Not enough resources to allocate {num_pg_to_create} "
            "placement groups, only created "
            f"{len(placement_groups)} placement groups. "
            "Available resources: "
            f"{available_resources}")
    return placement_groups, local_dp_ranks
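
Each placement group reserves world_size single-GPU bundles for the engine's workers plus one CPU-only bundle for the engine core actor itself; __init__ schedules the DPEngineCoreActor onto bundle index world_size. A sketch of the bundle layout for a hypothetical world_size of 2 on the DP master node:

world_size = 2               # hypothetical GPUs per engine
dp_master_ip = "10.0.0.1"    # hypothetical head-node IP
bundles = [{"GPU": 1.0, "node:" + dp_master_ip: 0.001}] * world_size + [{"CPU": 1.0}]
# -> two GPU bundles pinned to the head node plus one CPU-only bundle at
#    index 2 (== world_size), which is where __init__ places the actor via
#    placement_group_bundle_index=world_size.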

get_run_refs

get_run_refs()
Source code in vllm/v1/engine/utils.py
def get_run_refs(self):
    return self.run_refs

scale_down_elastic_ep

scale_down_elastic_ep(
    cur_data_parallel_size: int, new_data_parallel_size: int
) -> None
Source code in vllm/v1/engine/utils.py
def scale_down_elastic_ep(self, cur_data_parallel_size: int,
                          new_data_parallel_size: int) -> None:
    import ray
    assert cur_data_parallel_size > new_data_parallel_size, (
        f"cur_data_parallel_size {cur_data_parallel_size} must be greater "
        f"than new_data_parallel_size {new_data_parallel_size} "
        "for scale down")
    for _ in range(cur_data_parallel_size - new_data_parallel_size):
        pg = self.created_placement_groups.pop()
        is_local = self.placement_group_is_local.pop()
        if is_local:
            self.local_engine_actors.pop()
        else:
            self.remote_engine_actors.pop()
        ray.util.remove_placement_group(pg)
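
A hedged usage sketch, assuming manager is an existing CoreEngineActorManager currently running four engines:

manager.scale_down_elastic_ep(cur_data_parallel_size=4,
                              new_data_parallel_size=2)
# Pops the two most recently created engine actors and removes their
# placement groups, returning the reserved GPUs to the Ray cluster.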

scale_up_elastic_ep

scale_up_elastic_ep(
    cur_vllm_config: VllmConfig, new_data_parallel_size: int
) -> None
Source code in vllm/v1/engine/utils.py
def scale_up_elastic_ep(self, cur_vllm_config: VllmConfig,
                        new_data_parallel_size: int) -> None:
    import copy

    import ray
    from ray.runtime_env import RuntimeEnv
    from ray.util.scheduling_strategies import (
        PlacementGroupSchedulingStrategy)

    from vllm.v1.engine.core import DPEngineCoreActor

    cur_data_parallel_size = len(self.local_engine_actors) + \
        len(self.remote_engine_actors)

    assert new_data_parallel_size > cur_data_parallel_size, (
        f"New data parallel size {new_data_parallel_size} must be greater "
        f"than current data parallel size {cur_data_parallel_size} "
        "for scale up")

    placement_groups, local_dp_ranks = \
        self.add_dp_placement_groups(
            cur_vllm_config, new_data_parallel_size)

    world_size = cur_vllm_config.parallel_config.world_size
    dp_master_ip = cur_vllm_config.parallel_config.data_parallel_master_ip
    new_local_engines = 0

    runtime_env = RuntimeEnv(env_vars=self.env_vars_dict
                             | {"VLLM_ELASTIC_EP_SCALE_UP_LAUNCH": "1"})
    for i, (pg,
            local_rank) in enumerate(zip(placement_groups,
                                         local_dp_ranks)):
        rank = cur_data_parallel_size + i
        dp_vllm_config = copy.deepcopy(cur_vllm_config)
        dp_vllm_config.parallel_config.data_parallel_size = \
            new_data_parallel_size
        dp_vllm_config.parallel_config.placement_group = pg

        # Check if this placement group is on the head node
        local_client = any(
            bundle.get("node:" + dp_master_ip, 0) > 0
            for bundle in pg.bundle_specs)

        if local_client:
            new_local_engines += 1
            # Update data_parallel_size_local
            dp_vllm_config.parallel_config.data_parallel_size_local = (
                cur_vllm_config.parallel_config.data_parallel_size_local +
                new_local_engines)

        actor = ray.remote(DPEngineCoreActor).options(
            scheduling_strategy=PlacementGroupSchedulingStrategy(
                placement_group=pg,
                placement_group_bundle_index=world_size,
            ),
            runtime_env=runtime_env).remote(
                vllm_config=dp_vllm_config,
                executor_class=self.executor_class,
                log_stats=self.log_stats,
                local_client=local_client,
                addresses=self.addresses,
                dp_rank=rank,
                local_dp_rank=local_rank)

        if local_client:
            self.local_engine_actors.append(actor)
        else:
            self.remote_engine_actors.append(actor)
        self.created_placement_groups.append(pg)
        self.placement_group_is_local.append(local_client)

    ray.get([
        actor.wait_for_init.remote()
        for actor in (self.local_engine_actors[-new_local_engines:]
                      if new_local_engines > 0 else []) +
        self.remote_engine_actors[-(len(placement_groups) -
                                    new_local_engines):]
    ])

    actors = (self.local_engine_actors[-new_local_engines:]
              if new_local_engines > 0 else []) + \
        self.remote_engine_actors[-(len(placement_groups) -
                                    new_local_engines):]

    for actor in actors:
        self.run_refs.append(actor.run.remote())

    cur_vllm_config.parallel_config.data_parallel_size = \
        new_data_parallel_size
    # Update old_vllm_config with new data_parallel_size_local if any new
    # local engines were added
    if new_local_engines > 0:
        cur_vllm_config.parallel_config.data_parallel_size_local += \
            new_local_engines
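
A hedged usage sketch, assuming manager is an existing CoreEngineActorManager and the cluster has enough free GPUs for the additional ranks:

manager.scale_up_elastic_ep(cur_vllm_config, new_data_parallel_size=4)
# New DPEngineCoreActor actors start with VLLM_ELASTIC_EP_SCALE_UP_LAUNCH=1
# in their runtime env, and cur_vllm_config.parallel_config.data_parallel_size
# is updated to 4 (data_parallel_size_local is also bumped if any of the new
# engines landed on the head node).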

CoreEngineProcManager

Utility class to handle creation, readiness, and shutdown of background processes used by the AsyncLLM and LLMEngine.

Source code in vllm/v1/engine/utils.py
class CoreEngineProcManager:
    """
    Utility class to handle creation, readiness, and shutdown
    of background processes used by the AsyncLLM and LLMEngine.
    """

    def __init__(
        self,
        target_fn: Callable,
        local_engine_count: int,
        start_index: int,
        local_start_index: int,
        vllm_config: VllmConfig,
        local_client: bool,
        handshake_address: str,
        executor_class: type[Executor],
        log_stats: bool,
        client_handshake_address: Optional[str] = None,
    ):
        context = get_mp_context()
        common_kwargs = {
            "vllm_config": vllm_config,
            "local_client": local_client,
            "handshake_address": handshake_address,
            "executor_class": executor_class,
            "log_stats": log_stats,
        }

        if client_handshake_address:
            common_kwargs[
                "client_handshake_address"] = client_handshake_address

        self.processes: list[BaseProcess] = []
        local_dp_ranks = []
        for index in range(local_engine_count):
            local_index = local_start_index + index
            global_index = start_index + index

            # Start EngineCore in background process.
            local_dp_ranks.append(local_index)
            self.processes.append(
                context.Process(target=target_fn,
                                name=f"EngineCore_{global_index}",
                                kwargs=common_kwargs | {
                                    "dp_rank": global_index,
                                    "local_dp_rank": local_index,
                                }))

        self._finalizer = weakref.finalize(self, shutdown, self.processes)

        data_parallel = vllm_config.parallel_config.data_parallel_size > 1
        try:
            for proc, local_dp_rank in zip(self.processes, local_dp_ranks):
                with set_device_control_env_var(
                        vllm_config, local_dp_rank) if (
                            data_parallel) else contextlib.nullcontext():
                    proc.start()
        finally:
            # Kill other procs if not all are running.
            if self.finished_procs():
                self.close()

    def close(self):
        """Shutdown all procs."""
        self._finalizer()

    def join_first(self):
        """Wait for any process to exit."""
        connection.wait(proc.sentinel for proc in self.processes)

    def sentinels(self) -> list:
        return [proc.sentinel for proc in self.processes]

    def finished_procs(self) -> dict[str, int]:
        """Returns dict of proc name -> exit code for any finished procs."""
        return {
            proc.name: proc.exitcode
            for proc in self.processes if proc.exitcode is not None
        }
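
A minimal monitoring sketch, assuming proc_manager is an existing CoreEngineProcManager: join_first blocks until any engine process exits, after which finished_procs reports exit codes.

proc_manager.join_first()                # blocks until any engine proc exits
failed = proc_manager.finished_procs()   # hypothetical: {"EngineCore_0": -9}
if failed:
    proc_manager.close()                 # shut down the remaining procs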

_finalizer instance-attribute

_finalizer = finalize(self, shutdown, processes)

processes instance-attribute

processes: list[BaseProcess] = []

__init__

__init__(
    target_fn: Callable,
    local_engine_count: int,
    start_index: int,
    local_start_index: int,
    vllm_config: VllmConfig,
    local_client: bool,
    handshake_address: str,
    executor_class: type[Executor],
    log_stats: bool,
    client_handshake_address: Optional[str] = None,
)
Source code in vllm/v1/engine/utils.py
def __init__(
    self,
    target_fn: Callable,
    local_engine_count: int,
    start_index: int,
    local_start_index: int,
    vllm_config: VllmConfig,
    local_client: bool,
    handshake_address: str,
    executor_class: type[Executor],
    log_stats: bool,
    client_handshake_address: Optional[str] = None,
):
    context = get_mp_context()
    common_kwargs = {
        "vllm_config": vllm_config,
        "local_client": local_client,
        "handshake_address": handshake_address,
        "executor_class": executor_class,
        "log_stats": log_stats,
    }

    if client_handshake_address:
        common_kwargs[
            "client_handshake_address"] = client_handshake_address

    self.processes: list[BaseProcess] = []
    local_dp_ranks = []
    for index in range(local_engine_count):
        local_index = local_start_index + index
        global_index = start_index + index

        # Start EngineCore in background process.
        local_dp_ranks.append(local_index)
        self.processes.append(
            context.Process(target=target_fn,
                            name=f"EngineCore_{global_index}",
                            kwargs=common_kwargs | {
                                "dp_rank": global_index,
                                "local_dp_rank": local_index,
                            }))

    self._finalizer = weakref.finalize(self, shutdown, self.processes)

    data_parallel = vllm_config.parallel_config.data_parallel_size > 1
    try:
        for proc, local_dp_rank in zip(self.processes, local_dp_ranks):
            with set_device_control_env_var(
                    vllm_config, local_dp_rank) if (
                        data_parallel) else contextlib.nullcontext():
                proc.start()
    finally:
        # Kill other procs if not all are running.
        if self.finished_procs():
            self.close()

close

close()

Shutdown all procs.

Source code in vllm/v1/engine/utils.py
def close(self):
    """Shutdown all procs."""
    self._finalizer()

finished_procs

finished_procs() -> dict[str, int]

Returns dict of proc name -> exit code for any finished procs.

Source code in vllm/v1/engine/utils.py
def finished_procs(self) -> dict[str, int]:
    """Returns dict of proc name -> exit code for any finished procs."""
    return {
        proc.name: proc.exitcode
        for proc in self.processes if proc.exitcode is not None
    }

join_first

join_first()

Wait for any process to exit.

Source code in vllm/v1/engine/utils.py
def join_first(self):
    """Wait for any process to exit."""
    connection.wait(proc.sentinel for proc in self.processes)

sentinels

sentinels() -> list
Source code in vllm/v1/engine/utils.py
def sentinels(self) -> list:
    return [proc.sentinel for proc in self.processes]

CoreEngineState

Bases: Enum

Source code in vllm/v1/engine/utils.py
class CoreEngineState(Enum):
    NEW = auto()
    CONNECTED = auto()
    READY = auto()

CONNECTED class-attribute instance-attribute

CONNECTED = auto()

NEW class-attribute instance-attribute

NEW = auto()

READY class-attribute instance-attribute

READY = auto()

EngineHandshakeMetadata dataclass

Metadata sent to each engine process during startup handshake, including addresses of the front-end ZMQ queues that they should connect to.

Source code in vllm/v1/engine/utils.py
@dataclass
class EngineHandshakeMetadata:
    """Metadata sent to each engine process during startup handshake,
    including addresses of the front-end ZMQ queues that they should
    connect to.
    """
    addresses: EngineZmqAddresses
    parallel_config: dict[str, Union[int, str, list[int]]]

addresses instance-attribute

addresses: EngineZmqAddresses

parallel_config instance-attribute

parallel_config: dict[str, Union[int, str, list[int]]]

__init__

__init__(
    addresses: EngineZmqAddresses,
    parallel_config: dict[str, Union[int, str, list[int]]],
) -> None

EngineZmqAddresses dataclass

Source code in vllm/v1/engine/utils.py
@dataclass
class EngineZmqAddresses:
    # ZMQ input socket addresses for each front-end client (requests)
    inputs: list[str]
    # ZMQ output socket addresses for each front-end client (responses)
    outputs: list[str]
    # ZMQ input socket address of DP coordinator if applicable
    coordinator_input: Optional[str] = None
    # ZMQ output socket address of DP coordinator if applicable
    coordinator_output: Optional[str] = None
    # ZMQ socket for front-end to connect to DP coordinator.
    # Not used by engine, just relayed to front-end in handshake response.
    # Only required for external DP LB case.
    frontend_stats_publish_address: Optional[str] = None

coordinator_input class-attribute instance-attribute

coordinator_input: Optional[str] = None

coordinator_output class-attribute instance-attribute

coordinator_output: Optional[str] = None

frontend_stats_publish_address class-attribute instance-attribute

frontend_stats_publish_address: Optional[str] = None

inputs instance-attribute

inputs: list[str]

outputs instance-attribute

outputs: list[str]

__init__

__init__(
    inputs: list[str],
    outputs: list[str],
    coordinator_input: Optional[str] = None,
    coordinator_output: Optional[str] = None,
    frontend_stats_publish_address: Optional[str] = None,
) -> None
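
A minimal construction sketch; the socket addresses shown are hypothetical placeholders (launch_core_engines builds the real ones with get_engine_client_zmq_addr):

addresses = EngineZmqAddresses(
    inputs=["ipc:///tmp/engine_in_0"],    # hypothetical request socket, one per API server
    outputs=["ipc:///tmp/engine_out_0"],  # hypothetical response socket, one per API server
)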

launch_core_engines

launch_core_engines(
    vllm_config: VllmConfig,
    executor_class: type[Executor],
    log_stats: bool,
    num_api_servers: int = 1,
) -> Iterator[
    tuple[
        Optional[
            Union[
                CoreEngineProcManager,
                CoreEngineActorManager,
            ]
        ],
        Optional[DPCoordinator],
        EngineZmqAddresses,
    ]
]

Launch engine and DP coordinator processes as needed.

Source code in vllm/v1/engine/utils.py
@contextlib.contextmanager
def launch_core_engines(
    vllm_config: VllmConfig,
    executor_class: type[Executor],
    log_stats: bool,
    num_api_servers: int = 1,
) -> Iterator[tuple[
        Optional[Union[CoreEngineProcManager, CoreEngineActorManager]],
        Optional[DPCoordinator],
        EngineZmqAddresses,
]]:
    """Launch engine and DP coordinator processes as needed."""

    parallel_config = vllm_config.parallel_config
    dp_size = parallel_config.data_parallel_size
    local_engine_count = parallel_config.data_parallel_size_local
    local_start_index = parallel_config.data_parallel_rank_local
    dp_rank = parallel_config.data_parallel_rank
    host = parallel_config.data_parallel_master_ip
    local_engines_only = (parallel_config.data_parallel_hybrid_lb
                          or parallel_config.data_parallel_external_lb)

    # In offline mode there is an LLM instance per DP rank and
    # one core engine per LLM, see
    # examples/offline_inference/data_parallel.py.
    offline_mode = local_start_index is not None

    # client_local_only = True for cases where this front-end
    # sends requests only to colocated engines.
    client_local_only = (offline_mode or local_engines_only
                         or (local_engine_count == dp_size))

    # Set up input and output addresses.
    addresses = EngineZmqAddresses(
        inputs=[
            get_engine_client_zmq_addr(client_local_only, host)
            for _ in range(num_api_servers)
        ],
        outputs=[
            get_engine_client_zmq_addr(client_local_only, host)
            for _ in range(num_api_servers)
        ],
    )

    # Run the DP Coordinator process with rank 0 when in
    # online DP mode.
    run_coordinator = dp_size > 1 and not offline_mode and dp_rank == 0

    if run_coordinator:
        coordinator = DPCoordinator(parallel_config)

        addresses.coordinator_input, addresses.coordinator_output = (
            coordinator.get_engine_socket_addresses())
        addresses.frontend_stats_publish_address = (
            coordinator.get_stats_publish_address())

        logger.info("Started DP Coordinator process (PID: %d)",
                    coordinator.proc.pid)
    else:
        coordinator = None

    if parallel_config.data_parallel_backend == "ray":
        logger.info("Starting ray-based data parallel backend")

        engine_actor_manager = CoreEngineActorManager(
            vllm_config=vllm_config,
            addresses=addresses,
            executor_class=executor_class,
            log_stats=log_stats,
        )

        yield engine_actor_manager, coordinator, addresses
        return

    if offline_mode:
        assert local_engine_count == 1
        engines_to_handshake = [CoreEngine(index=dp_rank, local=True)]
    elif dp_rank == 0:
        # Rank 0 holds Coordinator, so it handshakes with all Cores
        # in both external dplb and internal dplb mode.
        # Note this also covers the case where we have zero local engines
        # and rank 0 is headless.
        engines_to_handshake = [
            CoreEngine(index=i, local=(i < local_engine_count))
            for i in range(dp_size)
        ]
    else:
        # Rank > 0 handshakes with just the local cores it is managing.
        assert local_engines_only, (
            "Attempting to launch core_engines from dp_rank > 0, but "
            "found internal DPLB, which is incompatible.")
        engines_to_handshake = [
            CoreEngine(index=i, local=True)
            for i in range(dp_rank, dp_rank + local_engine_count)
        ]

    # Whether the started engines will handshake only with co-located
    # front-end processes. In external_dp_lb mode, ranks > 0 handshake with
    # their co-located frontend and also the rank 0 front-end, and hence this
    # will be False.
    handshake_local_only = offline_mode or local_engine_count == dp_size

    handshake_address = get_engine_client_zmq_addr(
        handshake_local_only, host, parallel_config.data_parallel_rpc_port)

    if local_engines_only and dp_rank > 0:
        assert not handshake_local_only
        local_handshake_address = get_open_zmq_ipc_path()
        client_handshake_address = local_handshake_address
    else:
        local_handshake_address = handshake_address
        client_handshake_address = None

    with zmq_socket_ctx(local_handshake_address, zmq.ROUTER,
                        bind=True) as handshake_socket:

        from vllm.v1.engine.core import EngineCoreProc

        # Start local engines.
        if local_engine_count:
            local_engine_manager = CoreEngineProcManager(
                EngineCoreProc.run_engine_core,
                vllm_config=vllm_config,
                executor_class=executor_class,
                log_stats=log_stats,
                handshake_address=handshake_address,
                client_handshake_address=client_handshake_address,
                local_client=True,
                local_engine_count=local_engine_count,
                start_index=dp_rank,
                local_start_index=local_start_index or 0)
        else:
            local_engine_manager = None

        yield local_engine_manager, coordinator, addresses

        # Now wait for engines to start.
        wait_for_engine_startup(
            handshake_socket,
            addresses,
            engines_to_handshake,
            parallel_config,
            vllm_config.cache_config,
            local_engine_manager,
            coordinator.proc if coordinator else None,
        )
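
A hedged usage sketch, assuming vllm_config and executor_class already exist; this mirrors how the front-end drives the context manager:

with launch_core_engines(vllm_config, executor_class,
                         log_stats=False) as (engine_manager, coordinator, addresses):
    # Connect the front-end ZMQ sockets using `addresses` here.  When the
    # with-block exits, wait_for_engine_startup blocks until every engine has
    # completed the HELLO/READY handshake (the Ray backend instead returns its
    # actor manager without this extra wait).
    ...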

set_device_control_env_var

set_device_control_env_var(
    vllm_config: VllmConfig, local_dp_rank: int
) -> Iterator[None]

Temporarily set CUDA_VISIBLE_DEVICES or equivalent for engine subprocess.

Source code in vllm/v1/engine/utils.py
@contextlib.contextmanager
def set_device_control_env_var(vllm_config: VllmConfig,
                               local_dp_rank: int) -> Iterator[None]:
    """
    Temporarily set CUDA_VISIBLE_DEVICES or equivalent
    for engine subprocess.
    """
    world_size = vllm_config.parallel_config.world_size
    evar = current_platform.device_control_env_var
    try:
        value = ",".join(
            str(current_platform.device_id_to_physical_device_id(i))
            for i in range(local_dp_rank * world_size, (local_dp_rank + 1) *
                           world_size))
    except IndexError as e:
        raise Exception(f"Error setting {evar}: "
                        f"local range: [{local_dp_rank * world_size}, "
                        f"{(local_dp_rank + 1) * world_size}) "
                        "base value: "
                        f"\"{os.getenv(evar)}\"") from e
    with patch.dict(os.environ, values=((evar, value), )):
        yield
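
A hedged usage sketch, assuming vllm_config.parallel_config.world_size == 2 and a hypothetical engine_proc to launch:

with set_device_control_env_var(vllm_config, local_dp_rank=1):
    # With world_size == 2, the platform's device-control variable
    # (e.g. CUDA_VISIBLE_DEVICES) is limited to the devices mapped from
    # logical indices 2 and 3; the original value is restored on exit.
    engine_proc.start()   # hypothetical engine subprocess inheriting the variable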

wait_for_engine_startup

wait_for_engine_startup(
    handshake_socket: Socket,
    addresses: EngineZmqAddresses,
    core_engines: list[CoreEngine],
    parallel_config: ParallelConfig,
    cache_config: CacheConfig,
    proc_manager: Optional[CoreEngineProcManager],
    coord_process: Optional[Process],
)
Source code in vllm/v1/engine/utils.py
def wait_for_engine_startup(
    handshake_socket: zmq.Socket,
    addresses: EngineZmqAddresses,
    core_engines: list[CoreEngine],
    parallel_config: ParallelConfig,
    cache_config: CacheConfig,
    proc_manager: Optional[CoreEngineProcManager],
    coord_process: Optional[Process],
):
    # Wait for engine core process(es) to send ready messages.
    local_count = parallel_config.data_parallel_size_local
    remote_count = len(core_engines) - local_count
    # [local, remote] counts
    conn_pending, start_pending = [local_count, remote_count], [0, 0]
    poller = zmq.Poller()
    poller.register(handshake_socket, zmq.POLLIN)

    remote_should_be_headless = not parallel_config.data_parallel_hybrid_lb \
        and not parallel_config.data_parallel_external_lb

    if proc_manager is not None:
        for sentinel in proc_manager.sentinels():
            poller.register(sentinel, zmq.POLLIN)
    if coord_process is not None:
        poller.register(coord_process.sentinel, zmq.POLLIN)
    while any(conn_pending) or any(start_pending):
        events = poller.poll(STARTUP_POLL_PERIOD_MS)
        if not events:
            if any(conn_pending):
                logger.debug(
                    "Waiting for %d local, %d remote core engine proc(s) "
                    "to connect.", *conn_pending)
            if any(start_pending):
                logger.debug(
                    "Waiting for %d local, %d remote core engine proc(s) "
                    "to start.", *start_pending)
            continue
        if len(events) > 1 or events[0][0] != handshake_socket:
            # One of the local core processes exited.
            finished = proc_manager.finished_procs() if proc_manager else {}
            if coord_process is not None and coord_process.exitcode is not None:
                finished[coord_process.name] = coord_process.exitcode
            raise RuntimeError("Engine core initialization failed. "
                               "See root cause above. "
                               f"Failed core proc(s): {finished}")

        # Receive HELLO and READY messages from the input socket.
        eng_identity, ready_msg_bytes = handshake_socket.recv_multipart()
        eng_index = int.from_bytes(eng_identity, "little")
        engine = next((e for e in core_engines if e.identity == eng_identity),
                      None)
        if engine is None:
            raise RuntimeError(f"Message from engine with unexpected data "
                               f"parallel rank: {eng_index}")
        msg = msgspec.msgpack.decode(ready_msg_bytes)
        status, local, headless = msg["status"], msg["local"], msg["headless"]
        if local != engine.local:
            raise RuntimeError(f"{status} message from "
                               f"{'local' if local else 'remote'} "
                               f"engine {eng_index}, expected it to be "
                               f"{'local' if engine.local else 'remote'}")

        # Remote engines must be headless iff we aren't in hybrid dp lb mode.
        if not local and headless != remote_should_be_headless:
            if headless:
                raise RuntimeError(f"Remote engine {eng_index} must not use "
                                   f"--headless in external or hybrid dp lb "
                                   f"mode")
            else:
                raise RuntimeError(f"Remote engine {eng_index} must use "
                                   f"--headless unless in external or hybrid "
                                   f"dp lb mode")

        if status == "HELLO" and engine.state == CoreEngineState.NEW:

            # Send init message with DP config info.
            init_message = msgspec.msgpack.encode(
                EngineHandshakeMetadata(
                    addresses=addresses,
                    parallel_config={
                        "data_parallel_master_ip":
                        parallel_config.data_parallel_master_ip,
                        "data_parallel_master_port":
                        parallel_config.data_parallel_master_port,
                        "_data_parallel_master_port_list":
                        parallel_config._data_parallel_master_port_list,
                        "data_parallel_size":
                        parallel_config.data_parallel_size,
                    }))
            handshake_socket.send_multipart((eng_identity, init_message),
                                            copy=False)
            conn_pending[0 if local else 1] -= 1
            start_pending[0 if local else 1] += 1
            engine.state = CoreEngineState.CONNECTED
        elif status == "READY" and engine.state == CoreEngineState.CONNECTED:
            # Setup KV cache config with initialization state from
            # engine core process. Sum values from all engines in DP case.
            num_gpu_blocks = cache_config.num_gpu_blocks or 0
            num_gpu_blocks += msg["num_gpu_blocks"]
            cache_config.num_gpu_blocks = num_gpu_blocks

            # In external DP LB mode, the coordinator address that the
            # front-end procs connect to is obtained from rank 0 via
            # one of the engine handshakes, and passed to the local
            # front-end process in the response from the other.
            if addresses.frontend_stats_publish_address is None:
                addresses.frontend_stats_publish_address = msg.get(
                    "dp_stats_address")

            start_pending[0 if local else 1] -= 1
            engine.state = CoreEngineState.READY
        else:
            raise RuntimeError(f"Unexpected {status} message for "
                               f"{'local' if local else 'remote'} engine "
                               f"{eng_index} in {engine.state} state.")

        logger.debug("%s from %s core engine process %s.", status,
                     "local" if local else "remote", eng_index)