Summary: FBGEMM uses `self.iter.is_cuda` to check whether a tensor is on a CUDA device. This diff enables a similar check, `self.iter.is_mtia`, for tensors with the MTIA device key.

Test Plan: See diff D48693225

Reviewed By: jackm321

Differential Revision: D48809191

Pull Request resolved: https://github.com/pytorch/pytorch/pull/108310
Approved by: https://github.com/albanD
Committed by: PyTorch MergeBot
Parent: d569e506ab
Commit: 8289ad8e5e
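For context, here is a minimal usage sketch (not part of this PR) of how the new operator would be exercised from TorchScript, mirroring the `self.iter.is_cuda` pattern mentioned in the summary. The function name `device_kind` is illustrative, and the example assumes a PyTorch build that includes this change.

```python
# Minimal sketch: in a scripted function, `x.is_mtia` lowers to the new
# prim::is_mtia operator, just as `x.is_cuda` lowers to prim::is_cuda.
import torch

@torch.jit.script
def device_kind(x: torch.Tensor) -> str:
    # On builds without an MTIA backend the property simply returns False.
    if x.is_mtia:
        return "mtia"
    if x.is_cuda:
        return "cuda"
    return "other"

print(device_kind(torch.randn(2, 2)))  # prints "other" for a CPU tensor
```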
@@ -1190,6 +1190,14 @@ static const std::vector<OperatorGeneratorArgs> opGenArgs{
           push(stack, a.is_xla());
         },
         aliasAnalysisFromSchema()),
+    OperatorGeneratorArgs(
+        TORCH_SELECTIVE_SCHEMA("prim::is_mtia(Tensor a) -> bool"),
+        [](Stack& stack) {
+          at::Tensor a;
+          pop(stack, a);
+          push(stack, a.is_mtia());
+        },
+        aliasAnalysisFromSchema()),
     OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("prim::is_xpu(Tensor a) -> bool"),
         [](Stack& stack) {