Revert "Add xpu to getAccelerator (#129205)"

This reverts commit 3e2df3ca9d0a593e09bc94c14bbf2b213413cbf3.

Reverted https://github.com/pytorch/pytorch/pull/129205 on behalf of https://github.com/kit1980 due to Need to revert https://github.com/pytorch/pytorch/pull/129463 which breaks Meta builds ([comment](https://github.com/pytorch/pytorch/pull/129205#issuecomment-2207514346))
Author: PyTorch MergeBot
Date: 2024-07-03 23:37:24 +00:00
Parent: a9a744e442
Commit: 8a9725bedb

3 changed files with 33 additions and 32 deletions

aten/src/ATen/DeviceAccelerator.cpp

@@ -1,37 +1,39 @@
-#include <ATen/Context.h>
 #include <ATen/DeviceAccelerator.h>
+#include <ATen/Context.h>
 
 namespace at {
 
 C10_API std::optional<DeviceType> getAccelerator(bool checked) {
-#define DETECT_AND_ASSIGN_ACCELERATOR(device_name) \
-  if (at::has##device_name()) {                    \
-    device_type = k##device_name;                  \
-    TORCH_CHECK(                                   \
-        !is_accelerator_detected,                  \
-        "Cannot have ",                            \
-        device_type.value(),                       \
-        " with other accelerators.");              \
-    is_accelerator_detected = true;                \
-  }
+#define CHECK_NO_CUDA \
+  TORCH_CHECK(!at::hasCUDA(), "Cannot have both CUDA and PrivateUse1");
 
-  if (is_privateuse1_backend_registered()) {
-    // We explicitly allow PrivateUse1 and another device at the same time as we
-    // use this for testing. Whenever a PrivateUse1 device is registered, use it
-    // first.
-    return kPrivateUse1;
-  }
-  std::optional<DeviceType> device_type = std::nullopt;
-  bool is_accelerator_detected = false;
-  DETECT_AND_ASSIGN_ACCELERATOR(CUDA)
-  DETECT_AND_ASSIGN_ACCELERATOR(MTIA)
-  DETECT_AND_ASSIGN_ACCELERATOR(XPU)
-  if (checked) {
-    TORCH_CHECK(
-        device_type, "Cannot access accelerator device when none is available.")
-  }
-  return device_type;
+#define CHECK_NO_PU1 \
+  TORCH_CHECK(!is_privateuse1_backend_registered(), "Cannot have both CUDA and PrivateUse1");
 
-#undef DETECT_AND_ASSIGN_ACCELERATOR
+#define CHECK_NO_MTIA \
+  TORCH_CHECK(!at::hasMTIA(), "Cannot have MTIA with other devices");
+
+  if (is_privateuse1_backend_registered()) {
+    // We explicitly allow PrivateUse1 and another device at the same time
+    // as we use this for testing.
+    // Whenever a PrivateUse1 device is registered, use it first.
+    return kPrivateUse1;
+  } else if (at::hasCUDA()) {
+    CHECK_NO_PU1
+    CHECK_NO_MTIA
+    return kCUDA;
+  } else if (at::hasMTIA()) {
+    CHECK_NO_CUDA
+    CHECK_NO_PU1
+    return kMTIA;
+  } else {
+    TORCH_CHECK(!checked, "Cannot access accelerator device when none is available.")
+    return std::nullopt;
+  }
+
+#undef CHECK_NO_CUDA
+#undef CHECK_NO_PU1
 }
 
 } // namespace at
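
For readers skimming the revert: the removed DETECT_AND_ASSIGN_ACCELERATOR macro relies on token pasting (##) to stamp out one detection block per backend. A sketch of what DETECT_AND_ASSIGN_ACCELERATOR(CUDA) expands to inside getAccelerator (illustrative only, not part of this diff):

// Expansion of DETECT_AND_ASSIGN_ACCELERATOR(CUDA): "##" pastes the argument
// into both the availability query (at::hasCUDA) and the DeviceType constant
// (kCUDA); is_accelerator_detected enforces mutual exclusion across backends.
if (at::hasCUDA()) {
  device_type = kCUDA;
  TORCH_CHECK(
      !is_accelerator_detected,
      "Cannot have ",
      device_type.value(),
      " with other accelerators.");
  is_accelerator_detected = true;
}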

aten/src/ATen/DeviceAccelerator.h

@@ -13,9 +13,9 @@
 // - It provides a set of common APIs as defined by AcceleratorHooksInterface
 //
 // As of today, accelerator devices are (in no particular order):
-// CUDA, MTIA, XPU, PrivateUse1
+// CUDA, MTIA, PrivateUse1
 // We want to add once all the proper APIs are supported and tested:
-// HIP, MPS
+// HIP, MPS, XPU
 
 namespace at {
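
For context on the API this commit touches, a minimal caller sketch of at::getAccelerator as restored here (assumes a translation unit compiled and linked against libtorch; the example is not part of the commit):

#include <ATen/DeviceAccelerator.h>
#include <iostream>

int main() {
  // With checked=false, getAccelerator returns std::nullopt when no
  // accelerator backend (CUDA, MTIA, PrivateUse1) is available; with
  // checked=true it raises a c10::Error via TORCH_CHECK instead.
  if (auto acc = at::getAccelerator(/*checked=*/false)) {
    std::cout << "accelerator: " << *acc << '\n'; // prints e.g. "cuda"
  } else {
    std::cout << "no accelerator available\n";
  }
  return 0;
}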

test/test_cpp_extensions_stream_and_event.py

@@ -15,7 +15,6 @@ from torch.testing._internal.common_utils import (
     skipIfTorchDynamo,
     TEST_CUDA,
     TEST_PRIVATEUSE1,
-    TEST_XPU,
 )
 from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
 
@@ -37,7 +36,7 @@ def remove_build_path():
 # Since we use a fake MTIA device backend to test generic Stream/Event, device backends are mutual exclusive to each other.
 # The test will be skipped if any of the following conditions are met:
 @unittest.skipIf(
-    IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_XPU or TEST_PRIVATEUSE1 or TEST_ROCM,
+    IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1 or TEST_ROCM,
     "Only on linux platform and mutual exclusive to other backends",
 )
 @torch.testing._internal.common_utils.markDynamoStrictTest
@torch.testing._internal.common_utils.markDynamoStrictTest