vllm.v1.core.sched.async_scheduler

logger module-attribute

logger = init_logger(__name__)

AsyncScheduler

Bases: Scheduler
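
As the source below shows, AsyncScheduler specializes Scheduler for asynchronous scheduling: the next step can be scheduled before the previous step's sampled tokens have been returned, with the still-in-flight tokens tracked through request.num_output_placeholders. Speculative decoding is not yet supported here (see the TODO in _update_after_schedule).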

Source code in vllm/v1/core/sched/async_scheduler.py
class AsyncScheduler(Scheduler):

    def _update_after_schedule(
        self,
        scheduler_output: SchedulerOutput,
    ) -> None:
        super()._update_after_schedule(scheduler_output)
        for req_id in scheduler_output.num_scheduled_tokens:
            request = self.requests[req_id]
            if (request.num_computed_tokens == request.num_tokens +
                    request.num_output_placeholders):
                # The request will generate a new token in this scheduling step.
                # TODO(woosuk): Support speculative decoding.
                request.num_output_placeholders += 1

    def _update_request_with_output(
        self,
        request: Request,
        new_token_ids: list[int],
    ) -> tuple[list[int], bool]:
        status_before_update = request.status
        new_token_ids, stopped = super()._update_request_with_output(
            request, new_token_ids)

        # Update the number of output placeholders.
        request.num_output_placeholders -= len(new_token_ids)
        assert request.num_output_placeholders >= 0

        # Cache the new tokens. Preempted requests should be skipped.
        if status_before_update == RequestStatus.RUNNING:
            self.kv_cache_manager.cache_blocks(
                request,
                request.num_computed_tokens - request.num_output_placeholders)
        return new_token_ids, stopped
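
Taken together, the two overrides implement the placeholder bookkeeping that lets step N+1 be scheduled before step N's sampled tokens arrive. The sketch below walks through that accounting for a single request; _Req is a hypothetical stand-in for the handful of Request fields the code above touches, not vLLM's actual Request class.

from dataclasses import dataclass

@dataclass
class _Req:
    # Hypothetical stand-in for the Request fields read by AsyncScheduler.
    num_tokens: int                   # prompt tokens + output tokens appended so far
    num_computed_tokens: int = 0      # positions scheduled for KV computation
    num_output_placeholders: int = 0  # sampled-but-not-yet-appended tokens

req = _Req(num_tokens=8)  # an 8-token prompt, no outputs yet

# Step 1: prefill is scheduled; the base Scheduler advances num_computed_tokens.
req.num_computed_tokens += 8
# 8 == 8 + 0, so this step samples a token: reserve a placeholder for it.
if req.num_computed_tokens == req.num_tokens + req.num_output_placeholders:
    req.num_output_placeholders += 1

# Step 2 is scheduled before step 1's token has been appended (async
# scheduling); the placeholder position is what gets scheduled.
req.num_computed_tokens += 1
# 9 == 8 + 1, so step 2 also samples a token: reserve another placeholder.
if req.num_computed_tokens == req.num_tokens + req.num_output_placeholders:
    req.num_output_placeholders += 1

# Step 1's output now arrives; the real token replaces one placeholder.
new_token_ids = [42]  # appended to the request by the base class
req.num_tokens += len(new_token_ids)
req.num_output_placeholders -= len(new_token_ids)
assert req.num_output_placeholders >= 0  # step 2's token is still in flight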

_update_after_schedule

_update_after_schedule(
    scheduler_output: SchedulerOutput,
) -> None
Source code in vllm/v1/core/sched/async_scheduler.py
def _update_after_schedule(
    self,
    scheduler_output: SchedulerOutput,
) -> None:
    super()._update_after_schedule(scheduler_output)
    for req_id in scheduler_output.num_scheduled_tokens:
        request = self.requests[req_id]
        if (request.num_computed_tokens == request.num_tokens +
                request.num_output_placeholders):
            # The request will generate a new token in this scheduling step.
            # TODO(woosuk): Support speculative decoding.
            request.num_output_placeholders += 1

_update_request_with_output

_update_request_with_output(
    request: Request, new_token_ids: list[int]
) -> tuple[list[int], bool]
Source code in vllm/v1/core/sched/async_scheduler.py
def _update_request_with_output(
    self,
    request: Request,
    new_token_ids: list[int],
) -> tuple[list[int], bool]:
    status_before_update = request.status
    new_token_ids, stopped = super()._update_request_with_output(
        request, new_token_ids)

    # Update the number of output placeholders.
    request.num_output_placeholders -= len(new_token_ids)
    assert request.num_output_placeholders >= 0

    # Cache the new tokens. Preempted requests should be skipped.
    if status_before_update == RequestStatus.RUNNING:
        self.kv_cache_manager.cache_blocks(
            request,
            request.num_computed_tokens - request.num_output_placeholders)
    return new_token_ids, stopped
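
Since prefix-cache block hashes are derived from token IDs, only positions whose IDs are already known can be cached; subtracting num_output_placeholders from num_computed_tokens excludes the in-flight positions, and requests whose status had already left RUNNING (e.g. after preemption, when their blocks may have been freed) are skipped entirely. A minimal illustration of the watermark with made-up numbers:

# Made-up numbers: two steps scheduled, step 1's token just landed.
num_computed_tokens = 9      # positions scheduled for KV computation so far
num_output_placeholders = 1  # step 2's token is still in flight
# Only the positions with known token IDs are passed to cache_blocks():
assert num_computed_tokens - num_output_placeholders == 8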