[Misc] Remove lru_cache in NvmlCudaPlatform (#14156)

Signed-off-by: Cody Yu <hao.yu.cody@gmail.com>
Author: Cody Yu
Date:   2025-03-03 19:09:34 -08:00 (committed by GitHub)
Parent: bb5b640359
Commit: 989f4f430c

@@ -4,7 +4,7 @@ pynvml. However, it should not initialize cuda context.
 """
 
 import os
-from functools import lru_cache, wraps
+from functools import wraps
 from typing import (TYPE_CHECKING, Callable, List, Optional, Tuple, TypeVar,
                     Union)
 
@@ -284,7 +284,6 @@ class CudaPlatformBase(Platform):
 class NvmlCudaPlatform(CudaPlatformBase):
 
     @classmethod
-    @lru_cache(maxsize=8)
     @with_nvml_context
     def get_device_capability(cls,
                               device_id: int = 0
@@ -298,7 +297,6 @@ class NvmlCudaPlatform(CudaPlatformBase):
             return None
 
     @classmethod
-    @lru_cache(maxsize=8)
     @with_nvml_context
     def has_device_capability(
         cls,
@@ -311,14 +309,12 @@ class NvmlCudaPlatform(CudaPlatformBase):
             return False
 
     @classmethod
-    @lru_cache(maxsize=8)
     @with_nvml_context
     def get_device_name(cls, device_id: int = 0) -> str:
         physical_device_id = device_id_to_physical_device_id(device_id)
         return cls._get_physical_device_name(physical_device_id)
 
     @classmethod
-    @lru_cache(maxsize=8)
     @with_nvml_context
     def get_device_uuid(cls, device_id: int = 0) -> str:
         physical_device_id = device_id_to_physical_device_id(device_id)
@@ -326,7 +322,6 @@ class NvmlCudaPlatform(CudaPlatformBase):
         return pynvml.nvmlDeviceGetUUID(handle)
 
     @classmethod
-    @lru_cache(maxsize=8)
     @with_nvml_context
     def get_device_total_memory(cls, device_id: int = 0) -> int:
         physical_device_id = device_id_to_physical_device_id(device_id)
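For context, below is a minimal, self-contained sketch of the decorator stacking this commit removes. The class and method names mirror the diff; the stub with_nvml_context decorator and the fake device lookup are placeholders so the example runs without pynvml, and the caching behaviour described is standard functools.lru_cache semantics rather than a rationale stated in the commit. With lru_cache in the stack, each result is memoized on (cls, device_id) for the lifetime of the process; removing it means every call goes through with_nvml_context and queries NVML again.

from functools import lru_cache, wraps


def with_nvml_context(fn):
    # Stand-in for the real decorator, which brackets the call with
    # pynvml.nvmlInit() / pynvml.nvmlShutdown().
    @wraps(fn)
    def wrapper(*args, **kwargs):
        return fn(*args, **kwargs)

    return wrapper


class NvmlCudaPlatform:

    @classmethod
    @lru_cache(maxsize=8)  # the decorator removed by this commit
    @with_nvml_context
    def get_device_name(cls, device_id: int = 0) -> str:
        # Placeholder for the NVML query; with lru_cache above, this body
        # runs at most once per (cls, device_id) and the cached result is
        # reused for the rest of the process.
        return f"device-{device_id}"


# The first call runs the body and caches the result; the second is a hit.
NvmlCudaPlatform.get_device_name(0)
NvmlCudaPlatform.get_device_name(0)
print(NvmlCudaPlatform.get_device_name.cache_info())  # hits=1, misses=1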