vllm.platforms.rocm

_ROCM_DEVICE_ID_NAME_MAP module-attribute

_ROCM_DEVICE_ID_NAME_MAP: dict[str, str] = {
    "0x74a0": "AMD_Instinct_MI300A",
    "0x74a1": "AMD_Instinct_MI300X",
    "0x74b5": "AMD_Instinct_MI300X",
    "0x74a5": "AMD_Instinct_MI325X",
    "0x74b9": "AMD_Instinct_MI325X",
    "0x74a9": "AMD_Instinct_MI300X_HF",
    "0x74bd": "AMD_Instinct_MI300X_HF",
}

_ROCM_PARTIALLY_SUPPORTED_MODELS module-attribute

_ROCM_PARTIALLY_SUPPORTED_MODELS: dict[str, str] = {
    "Qwen2ForCausalLM": _ROCM_SWA_REASON,
    "MistralForCausalLM": _ROCM_SWA_REASON,
    "MixtralForCausalLM": _ROCM_SWA_REASON,
    "PaliGemmaForConditionalGeneration": "ROCm flash attention does not yet fully support 32-bit precision on PaliGemma",
    "Phi3VForCausalLM": "ROCm Triton flash attention may run into compilation errors due to excessive use of shared memory. If this happens, disable Triton FA by setting `VLLM_USE_TRITON_FLASH_ATTN=0`",
}

_ROCM_SWA_REASON module-attribute

_ROCM_SWA_REASON = "Sliding window attention (SWA) is not yet supported in Triton flash attention. For half-precision SWA support, please use CK flash attention by setting `VLLM_USE_TRITON_FLASH_ATTN=0`"

_ROCM_UNSUPPORTED_MODELS module-attribute

_ROCM_UNSUPPORTED_MODELS: list[str] = []

logger module-attribute

logger = init_logger(__name__)

val module-attribute

val = environ['HIP_VISIBLE_DEVICES']

RocmPlatform

Bases: Platform

Source code in vllm/platforms/rocm.py
class RocmPlatform(Platform):
    _enum = PlatformEnum.ROCM
    device_name: str = "rocm"
    device_type: str = "cuda"
    dispatch_key: str = "CUDA"
    ray_device_key: str = "GPU"
    dist_backend: str = "nccl"
    # rocm shares the same device control env var as CUDA
    device_control_env_var: str = "CUDA_VISIBLE_DEVICES"

    supported_quantization: list[str] = [
        "awq", "gptq", "fp8", "compressed-tensors", "fbgemm_fp8", "gguf",
        "quark", "ptpc_fp8", "mxfp4", "petit_nvfp4"
    ]

    @classmethod
    def get_vit_attn_backend(cls, support_fa: bool = False) -> _Backend:
        if support_fa:
            if (envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_MHA
                    and on_gfx9()):
                # Note: AITER FA is only supported for Qwen-VL models.
                # TODO: Add support for other VL models in their model class.
                return _Backend.ROCM_AITER_FA
            if on_gfx9():
                return _Backend.FLASH_ATTN
        return _Backend.TORCH_SDPA

    @classmethod
    def get_attn_backend_cls(cls, selected_backend, head_size, dtype,
                             kv_cache_dtype, block_size, use_v1, use_mla,
                             has_sink) -> str:
        if use_mla:
            from vllm.attention.backends.rocm_aiter_mla import (
                is_aiter_mla_enabled)

            if selected_backend is None:
                selected_backend = (_Backend.ROCM_AITER_MLA if
                                    is_aiter_mla_enabled() or block_size == 1
                                    else _Backend.TRITON_MLA)

            if selected_backend == _Backend.TRITON_MLA:
                if block_size != 1:
                    if use_v1:
                        logger.info_once(
                            "Using Triton MLA backend on V1 engine.")
                        return ("vllm.v1.attention.backends.mla."
                                "triton_mla.TritonMLABackend")
                    else:
                        logger.info("Using Triton MLA backend.")
                        return "vllm.attention.backends.triton_mla.TritonMLABackend"  # noqa: E501
                else:
                    raise ValueError(
                        f"The selected backend, {selected_backend.name}, "
                        f"does not support block size {block_size}.")
            elif selected_backend == _Backend.ROCM_AITER_MLA \
                or selected_backend == _Backend.ROCM_AITER_MLA_VLLM_V1:
                if block_size == 1:
                    if use_v1:
                        logger.info("Using AITER MLA backend on V1 engine.")
                        return "vllm.v1.attention.backends.mla.rocm_aiter_mla.AiterMLABackend"  # noqa: E501
                    else:
                        logger.info("Using AITER MLA backend")
                        return "vllm.attention.backends.rocm_aiter_mla.AiterMLABackend"  # noqa: E501
                else:
                    raise ValueError(
                        f"The selected backend, {selected_backend.name}, "
                        f"does not support block size {block_size} "
                        "(currently only block size 1 is supported).")
            else:
                raise ValueError(
                    f"The selected backend, {selected_backend.name}, "
                    "is not an MLA backend, but MLA was requested.")

        if selected_backend is None or selected_backend == _Backend.FLASH_ATTN:
            selected_backend = _Backend.ROCM_FLASH

        if envs.VLLM_USE_V1:
            if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_MHA \
                and on_gfx9():
                logger.info("Using Flash Attention backend on V1 engine.")
                return ("vllm.v1.attention.backends."
                        "rocm_aiter_fa.AiterFlashAttentionBackend")
            else:
                logger.info("Using Triton Attention backend on V1 engine.")
                return ("vllm.v1.attention.backends."
                        "triton_attn.TritonAttentionBackend")
        if selected_backend == _Backend.ROCM_FLASH:
            if not cls.has_device_capability(90):
                # not Instinct series GPUs.
                logger.info("flash_attn is not supported on NAVI GPUs.")
        else:
            logger.info("%s is not supported in AMD GPUs.", selected_backend)
        logger.info("Using ROCmFlashAttention backend.")
        return "vllm.attention.backends.rocm_flash_attn.ROCmFlashAttentionBackend"  # noqa: E501

    @classmethod
    def set_device(cls, device: torch.device) -> None:
        """
        Set the device for the current platform.
        """
        torch.cuda.set_device(device)

    @classmethod
    @lru_cache(maxsize=8)
    def get_device_capability(cls,
                              device_id: int = 0
                              ) -> Optional[DeviceCapability]:
        major, minor = torch.cuda.get_device_capability(device_id)
        return DeviceCapability(major=major, minor=minor)

    @classmethod
    @with_amdsmi_context
    def is_fully_connected(cls, physical_device_ids: list[int]) -> bool:
        """
        Query if the set of gpus are fully connected by xgmi (1 hop)
        """
        handles = [
            amdsmi_get_processor_handles()[i] for i in physical_device_ids
        ]
        for i, handle in enumerate(handles):
            for j, peer_handle in enumerate(handles):
                if i < j:
                    try:
                        link_type = amdsmi_topo_get_link_type(
                            handle, peer_handle)
                        # type is 2 for XGMI
                        if link_type["hops"] != 1 or link_type["type"] != 2:
                            return False
                    except AmdSmiException as error:
                        logger.error("AMD 1 hop XGMI detection failed.",
                                     exc_info=error)
                        return False
        return True

    @classmethod
    @with_amdsmi_context
    @lru_cache(maxsize=8)
    def get_device_name(cls, device_id: int = 0) -> str:
        physical_device_id = cls.device_id_to_physical_device_id(device_id)
        handle = amdsmi_get_processor_handles()[physical_device_id]
        asic_info = amdsmi_get_gpu_asic_info(handle)
        device_name: str = asic_info["device_id"]
        if device_name in _ROCM_DEVICE_ID_NAME_MAP:
            return _ROCM_DEVICE_ID_NAME_MAP[device_name]
        return asic_info["market_name"]

    @classmethod
    def get_device_total_memory(cls, device_id: int = 0) -> int:
        device_props = torch.cuda.get_device_properties(device_id)
        return device_props.total_memory

    @classmethod
    def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool:
        if enforce_eager and not envs.VLLM_USE_V1:
            logger.warning(
                "To see the benefits of async output processing, enable CUDA "
                "graphs. Since enforce-eager is enabled, the async output "
                "processor cannot be used.")
            return False
        return True

    @classmethod
    def check_and_update_config(cls, vllm_config: "VllmConfig") -> None:
        cache_config = vllm_config.cache_config
        if cache_config and cache_config.block_size is None:
            cache_config.block_size = 16

        parallel_config = vllm_config.parallel_config
        if parallel_config.worker_cls == "auto":
            if vllm_config.speculative_config:
                if not envs.VLLM_USE_V1:
                    raise NotImplementedError(
                        "Speculative decoding is not supported on vLLM V0.")
                parallel_config.worker_cls = "vllm.v1.worker.gpu_worker.Worker"
            else:
                if envs.VLLM_USE_V1:
                    parallel_config.worker_cls = \
                        "vllm.v1.worker.gpu_worker.Worker"
                else:
                    parallel_config.worker_cls = "vllm.worker.worker.Worker"

    @classmethod
    def verify_model_arch(cls, model_arch: str) -> None:
        if model_arch in _ROCM_UNSUPPORTED_MODELS:
            raise ValueError(f"Model architecture '{model_arch}' is not "
                             "supported by ROCm for now.")

        if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
            msg = _ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]
            logger.warning(
                "Model architecture '%s' is partially "
                "supported by ROCm: %s", model_arch, msg)

    @classmethod
    def verify_quantization(cls, quant: str) -> None:
        super().verify_quantization(quant)
        if quant == "awq" and not envs.VLLM_USE_TRITON_AWQ:
            logger.warning(
                "Using AWQ quantization with ROCm, but VLLM_USE_TRITON_AWQ"
                " is not set, enabling VLLM_USE_TRITON_AWQ.")
            envs.VLLM_USE_TRITON_AWQ = True

    @classmethod
    def get_punica_wrapper(cls) -> str:
        return "vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU"

    @classmethod
    def get_current_memory_usage(cls,
                                 device: Optional[torch.types.Device] = None
                                 ) -> float:
        torch.cuda.reset_peak_memory_stats(device)
        return torch.cuda.mem_get_info(device)[1] - torch.cuda.mem_get_info(
            device)[0]

    @classmethod
    def get_device_communicator_cls(cls) -> str:
        return "vllm.distributed.device_communicators.cuda_communicator.CudaCommunicator"  # noqa

    @classmethod
    def supports_mx(cls) -> bool:
        gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
        return any(gfx in gcn_arch for gfx in ["gfx95"])

    @classmethod
    def supports_fp8(cls) -> bool:
        gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
        return any(gfx in gcn_arch for gfx in ['gfx94', 'gfx95', 'gfx12'])

    @classmethod
    def is_fp8_fnuz(cls) -> bool:
        # Only device 0 is checked; this assumes MI300 platforms are homogeneous.
        return 'gfx94' in torch.cuda.get_device_properties(0).gcnArchName

    @classmethod
    def fp8_dtype(cls) -> torch.dtype:
        if cls.is_fp8_fnuz():
            return torch.float8_e4m3fnuz
        else:
            return torch.float8_e4m3fn

    @classmethod
    def supports_v1(cls, model_config: "ModelConfig") -> bool:
        # V1 support on AMD gpus is experimental
        return True

    @classmethod
    def use_custom_allreduce(cls) -> bool:
        # We only enable custom allreduce for MI300 series
        gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
        supported_archs = ['gfx94', 'gfx95']
        return any(gfx in gcn_arch for gfx in supported_archs)

    @classmethod
    def get_cu_count(cls, device_id: int = 0) -> int:
        return torch.cuda.get_device_properties(
            device_id).multi_processor_count

    @classmethod
    def is_navi(cls) -> bool:
        return 'gfx1' in torch.cuda.get_device_properties(0).gcnArchName

    @classmethod
    def get_static_graph_wrapper_cls(cls) -> str:
        return "vllm.compilation.cuda_graph.CUDAGraphWrapper"

    @classmethod
    def stateless_init_device_torch_dist_pg(
        cls,
        backend: str,
        prefix_store: PrefixStore,
        group_rank: int,
        group_size: int,
        timeout: timedelta,
    ) -> ProcessGroup:
        assert is_nccl_available()
        pg: ProcessGroup = ProcessGroup(
            prefix_store,
            group_rank,
            group_size,
        )
        from torch.distributed.distributed_c10d import ProcessGroupNCCL

        backend_options = ProcessGroupNCCL.Options()
        backend_options._timeout = timeout

        backend_class = ProcessGroupNCCL(prefix_store, group_rank, group_size,
                                         backend_options)
        backend_type = ProcessGroup.BackendType.NCCL
        device = torch.device("cuda")
        pg._set_default_backend(backend_type)
        backend_class._set_sequence_number_for_group()

        pg._register_backend(device, backend_type, backend_class)
        return pg

    @classmethod
    def device_count(cls) -> int:
        return cuda_device_count_stateless()

    @classmethod
    def is_kv_cache_dtype_supported(cls, kv_cache_dtype: str,
                                    model_config: "ModelConfig") -> bool:
        return True

    @classmethod
    def check_if_supports_dtype(cls, torch_dtype: torch.dtype):
        if torch_dtype == torch.bfloat16:  # noqa: SIM102
            if not cls.has_device_capability(80):
                capability = cls.get_device_capability()
                gpu_name = cls.get_device_name()

                if capability is None:
                    compute_str = "does not have a compute capability"
                else:
                    version_str = capability.as_version_str()
                    compute_str = f"has compute capability {version_str}"

                raise ValueError(
                    "Bfloat16 is only supported on GPUs "
                    "with compute capability of at least 8.0. "
                    f"Your {gpu_name} GPU {compute_str}. "
                    "You can use float16 instead by explicitly setting the "
                    "`dtype` flag in CLI, for example: --dtype=half.")

_enum class-attribute instance-attribute

_enum = ROCM

device_control_env_var class-attribute instance-attribute

device_control_env_var: str = 'CUDA_VISIBLE_DEVICES'

device_name class-attribute instance-attribute

device_name: str = 'rocm'

device_type class-attribute instance-attribute

device_type: str = 'cuda'

dispatch_key class-attribute instance-attribute

dispatch_key: str = 'CUDA'

dist_backend class-attribute instance-attribute

dist_backend: str = 'nccl'

ray_device_key class-attribute instance-attribute

ray_device_key: str = 'GPU'

supported_quantization class-attribute instance-attribute

supported_quantization: list[str] = [
    "awq",
    "gptq",
    "fp8",
    "compressed-tensors",
    "fbgemm_fp8",
    "gguf",
    "quark",
    "ptpc_fp8",
    "mxfp4",
    "petit_nvfp4",
]

check_and_update_config classmethod

check_and_update_config(vllm_config: VllmConfig) -> None
Source code in vllm/platforms/rocm.py
@classmethod
def check_and_update_config(cls, vllm_config: "VllmConfig") -> None:
    cache_config = vllm_config.cache_config
    if cache_config and cache_config.block_size is None:
        cache_config.block_size = 16

    parallel_config = vllm_config.parallel_config
    if parallel_config.worker_cls == "auto":
        if vllm_config.speculative_config:
            if not envs.VLLM_USE_V1:
                raise NotImplementedError(
                    "Speculative decoding is not supported on vLLM V0.")
            parallel_config.worker_cls = "vllm.v1.worker.gpu_worker.Worker"
        else:
            if envs.VLLM_USE_V1:
                parallel_config.worker_cls = \
                    "vllm.v1.worker.gpu_worker.Worker"
            else:
                parallel_config.worker_cls = "vllm.worker.worker.Worker"

check_if_supports_dtype classmethod

check_if_supports_dtype(torch_dtype: dtype)
Source code in vllm/platforms/rocm.py
@classmethod
def check_if_supports_dtype(cls, torch_dtype: torch.dtype):
    if torch_dtype == torch.bfloat16:  # noqa: SIM102
        if not cls.has_device_capability(80):
            capability = cls.get_device_capability()
            gpu_name = cls.get_device_name()

            if capability is None:
                compute_str = "does not have a compute capability"
            else:
                version_str = capability.as_version_str()
                compute_str = f"has compute capability {version_str}"

            raise ValueError(
                "Bfloat16 is only supported on GPUs "
                "with compute capability of at least 8.0. "
                f"Your {gpu_name} GPU {compute_str}. "
                "You can use float16 instead by explicitly setting the "
                "`dtype` flag in CLI, for example: --dtype=half.")

device_count classmethod

device_count() -> int
Source code in vllm/platforms/rocm.py
@classmethod
def device_count(cls) -> int:
    return cuda_device_count_stateless()

fp8_dtype classmethod

fp8_dtype() -> dtype
Source code in vllm/platforms/rocm.py
@classmethod
def fp8_dtype(cls) -> torch.dtype:
    if cls.is_fp8_fnuz():
        return torch.float8_e4m3fnuz
    else:
        return torch.float8_e4m3fn
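
A small sketch, assuming a visible ROCm GPU, that shows how the two FP8 helpers pair up: MI300-class (gfx94x) devices report the FNUZ variant, everything else torch.float8_e4m3fn.

import torch
from vllm.platforms.rocm import RocmPlatform

fp8 = RocmPlatform.fp8_dtype()
print(fp8)                         # torch.float8_e4m3fnuz or torch.float8_e4m3fn
print(RocmPlatform.is_fp8_fnuz())  # True on gfx94x (MI300-class) hardware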

get_attn_backend_cls classmethod

get_attn_backend_cls(
    selected_backend,
    head_size,
    dtype,
    kv_cache_dtype,
    block_size,
    use_v1,
    use_mla,
    has_sink,
) -> str
Source code in vllm/platforms/rocm.py
@classmethod
def get_attn_backend_cls(cls, selected_backend, head_size, dtype,
                         kv_cache_dtype, block_size, use_v1, use_mla,
                         has_sink) -> str:
    if use_mla:
        from vllm.attention.backends.rocm_aiter_mla import (
            is_aiter_mla_enabled)

        if selected_backend is None:
            selected_backend = (_Backend.ROCM_AITER_MLA if
                                is_aiter_mla_enabled() or block_size == 1
                                else _Backend.TRITON_MLA)

        if selected_backend == _Backend.TRITON_MLA:
            if block_size != 1:
                if use_v1:
                    logger.info_once(
                        "Using Triton MLA backend on V1 engine.")
                    return ("vllm.v1.attention.backends.mla."
                            "triton_mla.TritonMLABackend")
                else:
                    logger.info("Using Triton MLA backend.")
                    return "vllm.attention.backends.triton_mla.TritonMLABackend"  # noqa: E501
            else:
                raise ValueError(
                    f"The selected backend, {selected_backend.name}, "
                    f"does not support block size {block_size}.")
        elif selected_backend == _Backend.ROCM_AITER_MLA \
            or selected_backend == _Backend.ROCM_AITER_MLA_VLLM_V1:
            if block_size == 1:
                if use_v1:
                    logger.info("Using AITER MLA backend on V1 engine.")
                    return "vllm.v1.attention.backends.mla.rocm_aiter_mla.AiterMLABackend"  # noqa: E501
                else:
                    logger.info("Using AITER MLA backend")
                    return "vllm.attention.backends.rocm_aiter_mla.AiterMLABackend"  # noqa: E501
            else:
                raise ValueError(
                    f"The selected backend, {selected_backend.name}, "
                    f"does not support block size {block_size} "
                    "(currently only block size 1 is supported).")
        else:
            raise ValueError(
                f"The selected backend, {selected_backend.name}, "
                "is not an MLA backend, but MLA was requested.")

    if selected_backend is None or selected_backend == _Backend.FLASH_ATTN:
        selected_backend = _Backend.ROCM_FLASH

    if envs.VLLM_USE_V1:
        if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_MHA \
            and on_gfx9():
            logger.info("Using Flash Attention backend on V1 engine.")
            return ("vllm.v1.attention.backends."
                    "rocm_aiter_fa.AiterFlashAttentionBackend")
        else:
            logger.info("Using Triton Attention backend on V1 engine.")
            return ("vllm.v1.attention.backends."
                    "triton_attn.TritonAttentionBackend")
    if selected_backend == _Backend.ROCM_FLASH:
        if not cls.has_device_capability(90):
            # not Instinct series GPUs.
            logger.info("flash_attn is not supported on NAVI GPUs.")
    else:
        logger.info("%s is not supported in AMD GPUs.", selected_backend)
    logger.info("Using ROCmFlashAttention backend.")
    return "vllm.attention.backends.rocm_flash_attn.ROCmFlashAttentionBackend"  # noqa: E501

get_cu_count classmethod

get_cu_count(device_id: int = 0) -> int
Source code in vllm/platforms/rocm.py
@classmethod
def get_cu_count(cls, device_id: int = 0) -> int:
    return torch.cuda.get_device_properties(
        device_id).multi_processor_count

get_current_memory_usage classmethod

get_current_memory_usage(
    device: Optional[Device] = None,
) -> float
Source code in vllm/platforms/rocm.py
@classmethod
def get_current_memory_usage(cls,
                             device: Optional[torch.types.Device] = None
                             ) -> float:
    torch.cuda.reset_peak_memory_stats(device)
    return torch.cuda.mem_get_info(device)[1] - torch.cuda.mem_get_info(
        device)[0]
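
A short sketch, assuming a visible ROCm GPU: the returned value is device-wide used memory in bytes, i.e. total minus free as reported by torch.cuda.mem_get_info.

import torch
from vllm.platforms.rocm import RocmPlatform

free, total = torch.cuda.mem_get_info(0)
used = RocmPlatform.get_current_memory_usage(0)
print(used, total - free)  # both report device-wide used bytes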

get_device_capability cached classmethod

get_device_capability(
    device_id: int = 0,
) -> Optional[DeviceCapability]
Source code in vllm/platforms/rocm.py
@classmethod
@lru_cache(maxsize=8)
def get_device_capability(cls,
                          device_id: int = 0
                          ) -> Optional[DeviceCapability]:
    major, minor = torch.cuda.get_device_capability(device_id)
    return DeviceCapability(major=major, minor=minor)

get_device_communicator_cls classmethod

get_device_communicator_cls() -> str
Source code in vllm/platforms/rocm.py
@classmethod
def get_device_communicator_cls(cls) -> str:
    return "vllm.distributed.device_communicators.cuda_communicator.CudaCommunicator"  # noqa

get_device_name cached classmethod

get_device_name(device_id: int = 0) -> str
Source code in vllm/platforms/rocm.py
@classmethod
@with_amdsmi_context
@lru_cache(maxsize=8)
def get_device_name(cls, device_id: int = 0) -> str:
    physical_device_id = cls.device_id_to_physical_device_id(device_id)
    handle = amdsmi_get_processor_handles()[physical_device_id]
    asic_info = amdsmi_get_gpu_asic_info(handle)
    device_name: str = asic_info["device_id"]
    if device_name in _ROCM_DEVICE_ID_NAME_MAP:
        return _ROCM_DEVICE_ID_NAME_MAP[device_name]
    return asic_info["market_name"]

get_device_total_memory classmethod

get_device_total_memory(device_id: int = 0) -> int
Source code in vllm/platforms/rocm.py
@classmethod
def get_device_total_memory(cls, device_id: int = 0) -> int:
    device_props = torch.cuda.get_device_properties(device_id)
    return device_props.total_memory

get_punica_wrapper classmethod

get_punica_wrapper() -> str
Source code in vllm/platforms/rocm.py
@classmethod
def get_punica_wrapper(cls) -> str:
    return "vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU"

get_static_graph_wrapper_cls classmethod

get_static_graph_wrapper_cls() -> str
Source code in vllm/platforms/rocm.py
@classmethod
def get_static_graph_wrapper_cls(cls) -> str:
    return "vllm.compilation.cuda_graph.CUDAGraphWrapper"

get_vit_attn_backend classmethod

get_vit_attn_backend(support_fa: bool = False) -> _Backend
Source code in vllm/platforms/rocm.py
@classmethod
def get_vit_attn_backend(cls, support_fa: bool = False) -> _Backend:
    if support_fa:
        if (envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_MHA
                and on_gfx9()):
            # Note: AITER FA is only supported for Qwen-VL models.
            # TODO: Add support for other VL models in their model class.
            return _Backend.ROCM_AITER_FA
        if on_gfx9():
            return _Backend.FLASH_ATTN
    return _Backend.TORCH_SDPA
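
A sketch of how the ViT backend choice depends on the AITER env flags and the GPU architecture, assuming a gfx9 Instinct GPU: with both flags enabled this returns ROCM_AITER_FA, otherwise FLASH_ATTN on gfx9, and TORCH_SDPA everywhere else.

import os

# Hedged: env flags are read lazily by vllm.envs, so set them before the call.
os.environ["VLLM_ROCM_USE_AITER"] = "1"
os.environ["VLLM_ROCM_USE_AITER_MHA"] = "1"

from vllm.platforms.rocm import RocmPlatform

print(RocmPlatform.get_vit_attn_backend(support_fa=True))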

is_async_output_supported classmethod

is_async_output_supported(
    enforce_eager: Optional[bool],
) -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool:
    if enforce_eager and not envs.VLLM_USE_V1:
        logger.warning(
            "To see the benefits of async output processing, enable CUDA "
            "graphs. Since enforce-eager is enabled, the async output "
            "processor cannot be used.")
        return False
    return True

is_fp8_fnuz classmethod

is_fp8_fnuz() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def is_fp8_fnuz(cls) -> bool:
    # Only device 0 is checked; this assumes MI300 platforms are homogeneous.
    return 'gfx94' in torch.cuda.get_device_properties(0).gcnArchName

is_fully_connected classmethod

is_fully_connected(physical_device_ids: list[int]) -> bool

Query whether the set of GPUs is fully connected by XGMI (1 hop).

Source code in vllm/platforms/rocm.py
@classmethod
@with_amdsmi_context
def is_fully_connected(cls, physical_device_ids: list[int]) -> bool:
    """
    Query if the set of gpus are fully connected by xgmi (1 hop)
    """
    handles = [
        amdsmi_get_processor_handles()[i] for i in physical_device_ids
    ]
    for i, handle in enumerate(handles):
        for j, peer_handle in enumerate(handles):
            if i < j:
                try:
                    link_type = amdsmi_topo_get_link_type(
                        handle, peer_handle)
                    # type is 2 for XGMI
                    if link_type["hops"] != 1 or link_type["type"] != 2:
                        return False
                except AmdSmiException as error:
                    logger.error("AMD 1 hop XGMI detection failed.",
                                 exc_info=error)
                    return False
    return True
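
A minimal sketch, assuming at least four visible Instinct GPUs and the amdsmi library: check whether the first four physical GPUs form a fully connected, single-hop XGMI clique.

from vllm.platforms.rocm import RocmPlatform

if RocmPlatform.is_fully_connected([0, 1, 2, 3]):
    print("GPUs 0-3 are one XGMI hop apart")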

is_kv_cache_dtype_supported classmethod

is_kv_cache_dtype_supported(
    kv_cache_dtype: str, model_config: ModelConfig
) -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def is_kv_cache_dtype_supported(cls, kv_cache_dtype: str,
                                model_config: "ModelConfig") -> bool:
    return True

is_navi classmethod

is_navi() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def is_navi(cls) -> bool:
    return 'gfx1' in torch.cuda.get_device_properties(0).gcnArchName

set_device classmethod

set_device(device: device) -> None

Set the device for the current platform.

Source code in vllm/platforms/rocm.py
@classmethod
def set_device(cls, device: torch.device) -> None:
    """
    Set the device for the current platform.
    """
    torch.cuda.set_device(device)

stateless_init_device_torch_dist_pg classmethod

stateless_init_device_torch_dist_pg(
    backend: str,
    prefix_store: PrefixStore,
    group_rank: int,
    group_size: int,
    timeout: timedelta,
) -> ProcessGroup
Source code in vllm/platforms/rocm.py
@classmethod
def stateless_init_device_torch_dist_pg(
    cls,
    backend: str,
    prefix_store: PrefixStore,
    group_rank: int,
    group_size: int,
    timeout: timedelta,
) -> ProcessGroup:
    assert is_nccl_available()
    pg: ProcessGroup = ProcessGroup(
        prefix_store,
        group_rank,
        group_size,
    )
    from torch.distributed.distributed_c10d import ProcessGroupNCCL

    backend_options = ProcessGroupNCCL.Options()
    backend_options._timeout = timeout

    backend_class = ProcessGroupNCCL(prefix_store, group_rank, group_size,
                                     backend_options)
    backend_type = ProcessGroup.BackendType.NCCL
    device = torch.device("cuda")
    pg._set_default_backend(backend_type)
    backend_class._set_sequence_number_for_group()

    pg._register_backend(device, backend_type, backend_class)
    return pg
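
A sketch of the store wiring this method expects; the address, port and rank values are placeholders, and in practice each participating rank runs the same code with its own group_rank (vLLM performs this wiring inside its stateless process-group helpers).

from datetime import timedelta

from torch.distributed import PrefixStore, TCPStore
from vllm.platforms.rocm import RocmPlatform

rank, world_size = 0, 2                      # placeholders for illustration
store = TCPStore("127.0.0.1", 29500, world_size, is_master=(rank == 0))
pg = RocmPlatform.stateless_init_device_torch_dist_pg(
    backend="nccl",
    prefix_store=PrefixStore("my_group/", store),
    group_rank=rank,
    group_size=world_size,
    timeout=timedelta(minutes=5),
)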

supports_fp8 classmethod

supports_fp8() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def supports_fp8(cls) -> bool:
    gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
    return any(gfx in gcn_arch for gfx in ['gfx94', 'gfx95', 'gfx12'])

supports_mx classmethod

supports_mx() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def supports_mx(cls) -> bool:
    gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
    return any(gfx in gcn_arch for gfx in ["gfx95"])

supports_v1 classmethod

supports_v1(model_config: ModelConfig) -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def supports_v1(cls, model_config: "ModelConfig") -> bool:
    # V1 support on AMD gpus is experimental
    return True

use_custom_allreduce classmethod

use_custom_allreduce() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def use_custom_allreduce(cls) -> bool:
    # We only enable custom allreduce for MI300 series
    gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
    supported_archs = ['gfx94', 'gfx95']
    return any(gfx in gcn_arch for gfx in supported_archs)

verify_model_arch classmethod

verify_model_arch(model_arch: str) -> None
Source code in vllm/platforms/rocm.py
@classmethod
def verify_model_arch(cls, model_arch: str) -> None:
    if model_arch in _ROCM_UNSUPPORTED_MODELS:
        raise ValueError(f"Model architecture '{model_arch}' is not "
                         "supported by ROCm for now.")

    if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
        msg = _ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]
        logger.warning(
            "Model architecture '%s' is partially "
            "supported by ROCm: %s", model_arch, msg)

verify_quantization classmethod

verify_quantization(quant: str) -> None
Source code in vllm/platforms/rocm.py
@classmethod
def verify_quantization(cls, quant: str) -> None:
    super().verify_quantization(quant)
    if quant == "awq" and not envs.VLLM_USE_TRITON_AWQ:
        logger.warning(
            "Using AWQ quantization with ROCm, but VLLM_USE_TRITON_AWQ"
            " is not set, enabling VLLM_USE_TRITON_AWQ.")
        envs.VLLM_USE_TRITON_AWQ = True

on_gfx1x cached

on_gfx1x() -> bool
Source code in vllm/platforms/rocm.py
@cache
def on_gfx1x() -> bool:
    GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
    return any(arch in GPU_ARCH for arch in ["gfx11", "gfx12"])

on_gfx9 cached

on_gfx9() -> bool
Source code in vllm/platforms/rocm.py
@cache
def on_gfx9() -> bool:
    GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
    return any(arch in GPU_ARCH for arch in ["gfx90a", "gfx942", "gfx950"])

on_mi3xx cached

on_mi3xx() -> bool
Source code in vllm/platforms/rocm.py
@cache
def on_mi3xx() -> bool:
    GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
    return any(arch in GPU_ARCH for arch in ["gfx942", "gfx950"])
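
All three predicates key off the gcnArchName string of the visible device, e.g. "gfx942:sramecc+:xnack-" on MI300X. A short sketch, assuming a visible ROCm GPU:

import torch
from vllm.platforms.rocm import on_gfx1x, on_gfx9, on_mi3xx

arch = torch.cuda.get_device_properties(0).gcnArchName
print(arch, on_gfx9(), on_gfx1x(), on_mi3xx())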

use_rocm_custom_paged_attention cached

use_rocm_custom_paged_attention(
    qtype: dtype,
    head_size: int,
    block_size: int,
    gqa_ratio: int,
    max_seq_len: int,
    sliding_window: int,
    kv_cache_dtype: str,
    alibi_slopes: Optional[Tensor] = None,
    sinks: Optional[Tensor] = None,
) -> bool
Source code in vllm/platforms/rocm.py
@cache
def use_rocm_custom_paged_attention(
        qtype: torch.dtype,
        head_size: int,
        block_size: int,
        gqa_ratio: int,
        max_seq_len: int,
        sliding_window: int,
        kv_cache_dtype: str,
        alibi_slopes: Optional[torch.Tensor] = None,
        sinks: Optional[torch.Tensor] = None) -> bool:

    GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
    ON_GFX9 = any(arch in GPU_ARCH for arch in ["gfx90a", "gfx942", "gfx950"])
    ON_GFX11_GFX12 = any(arch in GPU_ARCH for arch in ["gfx11", "gfx12"])

    # Custom paged attention is always supported on V0. On V1 it requires the
    # sliding window to be disabled due to an observed numerical discrepancy.
    if ON_GFX9:
        return ((not envs.VLLM_USE_V1 or sliding_window == 0
                 or sliding_window == (-1, -1))
                and (qtype == torch.half or qtype == torch.bfloat16)
                and (head_size == 64 or head_size == 128)
                and (block_size == 16 or block_size == 32)
                and (gqa_ratio >= 1 and gqa_ratio <= 16)
                and max_seq_len <= 128 * 1024
                and (envs.VLLM_ROCM_CUSTOM_PAGED_ATTN)
                and not (envs.VLLM_ROCM_USE_AITER_PAGED_ATTN
                         and envs.VLLM_ROCM_USE_AITER) and sinks is None)

    else:
        return (ON_GFX11_GFX12 and (not envs.VLLM_USE_V1 or sliding_window == 0
                                    or sliding_window == (-1, -1))
                and (qtype == torch.half or qtype == torch.bfloat16)
                and head_size == 128 and block_size == 16
                and (gqa_ratio >= 3 and gqa_ratio <= 16)
                and max_seq_len <= 128 * 1024 and alibi_slopes is None
                and kv_cache_dtype == "auto"
                and envs.VLLM_ROCM_CUSTOM_PAGED_ATTN and sinks is None)
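
A sketch of a typical decode-time query, assuming an MI300-class (gfx942) GPU with the custom paged-attention env flag left at its default; arguments outside the ranges checked above make the function fall back to the generic paged-attention path.

import torch
from vllm.platforms.rocm import use_rocm_custom_paged_attention

ok = use_rocm_custom_paged_attention(
    qtype=torch.bfloat16,
    head_size=128,
    block_size=16,
    gqa_ratio=8,
    max_seq_len=32 * 1024,
    sliding_window=0,
    kv_cache_dtype="auto",
)
print(ok)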

with_amdsmi_context

with_amdsmi_context(fn)
Source code in vllm/platforms/rocm.py
def with_amdsmi_context(fn):

    @wraps(fn)
    def wrapper(*args, **kwargs):
        amdsmi_init()
        try:
            return fn(*args, **kwargs)
        finally:
            amdsmi_shut_down()

    return wrapper
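
A minimal sketch, assuming the amdsmi package that ships with ROCm is installed: the decorator initializes and shuts down amdsmi around any call, mirroring how get_device_name and is_fully_connected use it.

from amdsmi import amdsmi_get_processor_handles
from vllm.platforms.rocm import with_amdsmi_context

@with_amdsmi_context
def count_amd_gpus() -> int:
    # amdsmi is initialized before this body runs and shut down afterwards.
    return len(amdsmi_get_processor_handles())

print(count_amd_gpus())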