Mirror of https://github.com/pytorch/pytorch.git — synced 2025-10-20 21:14:14 +08:00
Fix SEMI_STRUCTURED_SUPPORTED_BACKENDS selection on CUDA and ROCm (#163223)
It should work with the current CUDA/ROCm device_capability enumeration anyway, but this change will help avoid unexpected triggering in the future.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/163223
Approved by: https://github.com/jeffdaily
This commit is contained in:
Committed by: PyTorch MergeBot
Parent: 708dc6e3cd
Commit: 6f9b4ccf8f
@@ -50,8 +50,8 @@ _IS_SM9X = False
 _IS_HIPSPARSELT_AVAILABLE = False

 if torch.cuda.is_available():
-    _IS_SM8X = torch.cuda.get_device_capability(0)[0] == 8
-    _IS_SM9X = torch.cuda.get_device_capability(0)[0] == 9
+    _IS_SM8X = torch.version.cuda is not None and (torch.cuda.get_device_capability(0)[0] == 8)
+    _IS_SM9X = torch.version.cuda is not None and (torch.cuda.get_device_capability(0)[0] == 9)
     _IS_HIPSPARSELT_AVAILABLE = torch.version.hip is not None and tuple(int(v) for v in torch.version.hip.split('.')[:2]) > (6, 4)

 # CUTLASS kernels only work for Ampere
 if _IS_SM8X:
Reference in New Issue
Block a user