Summary: FBGEMM uses `self.iter.is_cuda` to check whether a tensor lives on a CUDA device. This diff adds the analogous check, `self.iter.is_mtia`, for tensors with the MTIA device key.

Test Plan: See diff D48693225
Reviewed By: jackm321
Differential Revision: D48809191
Pull Request resolved: https://github.com/pytorch/pytorch/pull/108310
Approved by: https://github.com/albanD
Committed by: PyTorch MergeBot
Parent: d569e506ab
Commit: 8289ad8e5e
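For context, the device check the summary refers to follows the same pattern as the existing `is_cuda` property. The sketch below is a minimal illustration of that pattern, not FBGEMM's actual code; the MTIA branch only returns True on a build where the MTIA backend is available.

import torch

def dispatch_by_device(t: torch.Tensor) -> str:
    # Same shape of check FBGEMM performs with `self.iter.is_cuda`,
    # extended with the `is_mtia` property this PR adds.
    if t.is_cuda:
        return "cuda path"
    if t.is_mtia:
        return "mtia path"
    return "cpu path"

print(dispatch_by_device(torch.zeros(4)))  # -> "cpu path" for a CPU tensor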
@@ -1035,6 +1035,7 @@ def gen_pyi(
         "is_quantized": ["is_quantized: _bool"],
         "is_meta": ["is_meta: _bool"],
         "is_mps": ["is_mps: _bool"],
+        "is_mtia": ["is_mtia: _bool"],
         "is_ort": ["is_ort: _bool"],
         "is_mkldnn": ["is_mkldnn: _bool"],
         "is_vulkan": ["is_vulkan: _bool"],
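The gen_pyi hunk only affects the generated type stub; the new annotation lands next to the neighboring is_* entries. A rough sketch of the result (the stub file and class names below are assumptions inferred from the surrounding entries):

# Approximate shape of the generated stub (file/class names are assumptions):
#
#   # torch/_C/__init__.pyi
#   class TensorBase:
#       ...
#       is_mps: _bool
#       is_mtia: _bool   # produced by the mapping added above
#       is_ort: _bool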
@@ -160,6 +160,7 @@ PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused) {
       .value("Metal", c10::DeviceType::Metal)
       .value("XPU", c10::DeviceType::XPU)
       .value("MPS", c10::DeviceType::MPS)
+      .value("MTIA", c10::DeviceType::MTIA)
       .value("Meta", c10::DeviceType::Meta)
       .value("HPU", c10::DeviceType::HPU)
       .value("VE", c10::DeviceType::VE)
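With `MTIA` registered on the bound DeviceType enum, the device type can be referred to symbolically from Python. A small sketch, assuming the enum is exposed on `torch._C._autograd` the same way the existing values are:

import torch
from torch._C._autograd import DeviceType  # module the hunk above binds into

assert DeviceType.MTIA.name == "MTIA"
dev = torch.device("mtia")  # constructing the device object needs no MTIA runtime
print(dev.type)             # -> "mtia"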
@@ -1253,6 +1253,16 @@ PyObject* THPVariable_is_cuda(THPVariable* self, void* unused) {
   END_HANDLE_TH_ERRORS
 }
 
+PyObject* THPVariable_is_mtia(THPVariable* self, void* unused) {
+  HANDLE_TH_ERRORS
+  if (check_has_torch_function((PyObject*)self)) {
+    return handle_torch_function_getter(self, "is_mtia");
+  }
+  auto& self_ = THPVariable_Unpack(self);
+  return torch::autograd::utils::wrap(self_.is_mtia());
+  END_HANDLE_TH_ERRORS
+}
+
 PyObject* THPVariable_is_xla(THPVariable* self, void* unused) {
   HANDLE_TH_ERRORS
   if (check_has_torch_function((PyObject*)self)) {
@@ -1533,6 +1543,7 @@ static struct PyGetSetDef THPVariable_properties[] = {
     {"name", (getter)THPVariable_get_name, nullptr, nullptr, nullptr},
     {"shape", (getter)THPVariable_get_shape, nullptr, nullptr, nullptr},
     {"is_cuda", (getter)THPVariable_is_cuda, nullptr, nullptr, nullptr},
+    {"is_mtia", (getter)THPVariable_is_mtia, nullptr, nullptr, nullptr},
     {"is_cpu", (getter)THPVariable_is_cpu, nullptr, nullptr, nullptr},
     {"is_xla", (getter)THPVariable_is_xla, nullptr, nullptr, nullptr},
     {"is_xpu", (getter)THPVariable_is_xpu, nullptr, nullptr, nullptr},
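The two hunks above define the C-level getter and register it in THPVariable_properties, so `is_mtia` behaves like the existing `is_cuda`/`is_xla` read-only attributes on torch.Tensor. A quick sanity check, assuming a build that contains this change:

import torch

# The getter lives on the Tensor type itself, so it is present even without
# MTIA hardware; it simply reports False for non-MTIA tensors.
assert hasattr(torch.Tensor, "is_mtia")
print(torch.empty(2).is_mtia)  # -> False for a CPU tensor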
@@ -120,6 +120,7 @@ std::shared_ptr<SugaredValue> SimpleValue::attr(
       {"is_sparse_csr", "prim"},
       {"is_mkldnn", "prim"},
       {"is_mps", "prim"},
+      {"is_mtia", "prim"},
       {"is_quantized", "prim"},
       {"is_vulkan", "prim"},
       {"is_ipu", "prim"},
@@ -1190,6 +1190,14 @@ static const std::vector<OperatorGeneratorArgs> opGenArgs{
           push(stack, a.is_xla());
         },
         aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_mtia(Tensor a) -> bool"),
+        [](Stack& stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_mtia());
+        },
+        aliasAnalysisFromSchema()),
     OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("prim::is_xpu(Tensor a) -> bool"),
         [](Stack& stack) {
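Because `is_mtia` is listed in SimpleValue::attr as a prim attribute and backed by the `prim::is_mtia` operator registered above, the property is also usable from TorchScript. A minimal sketch, assuming a build that contains this change:

import torch

@torch.jit.script
def on_mtia(x: torch.Tensor) -> bool:
    # Lowers to the prim::is_mtia op added above.
    return x.is_mtia

print(on_mtia(torch.ones(3)))  # -> False on CPU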
@@ -1228,6 +1228,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
         Tensor.retains_grad.__get__: lambda self: -1,
         Tensor.is_meta.__get__: lambda self: -1,
         Tensor.is_mps.__get__: lambda self: -1,
+        Tensor.is_mtia.__get__: lambda self: -1,
         Tensor.is_nested.__get__: lambda self: -1,
         Tensor.is_ort.__get__: lambda self: -1,
         Tensor.is_mkldnn.__get__: lambda self: -1,
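The get_testing_overrides entry marks the getter as overridable, matching the check_has_torch_function branch in the C++ getter above. A minimal sketch of what that enables; the subclass and its behavior are purely illustrative:

import torch

class FakeMTIATensor(torch.Tensor):
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        # Method wrappers are re-created on each attribute access, so compare
        # with == rather than `is`.
        if func == torch.Tensor.is_mtia.__get__:
            return True  # pretend to live on MTIA
        return super().__torch_function__(func, types, args, kwargs)

t = torch.zeros(2).as_subclass(FakeMTIATensor)
print(t.is_mtia)  # -> True, answered by __torch_function__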