Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-21 05:34:18 +08:00)
Enable test_operators on the MPS device. Pull Request resolved: https://github.com/pytorch/pytorch/pull/151166. Approved by: https://github.com/dcci
from typing import Any

import torch.library
from torch import Tensor
from torch.autograd import Function


if not torch._running_with_deploy():
    # Define a pt2_compliant custom op "_inductor_test::realize" used by
    # inductor tests; it simply clones its input so the value is materialized.
    _test_lib_def = torch.library.Library("_inductor_test", "DEF")
    _test_lib_def.define(
        "realize(Tensor self) -> Tensor", tags=torch.Tag.pt2_compliant_tag
    )

    # Register the same clone-based implementation for each backend.
    _test_lib_impl = torch.library.Library("_inductor_test", "IMPL")
    for dispatch_key in ("CPU", "CUDA", "MPS", "Meta"):
        _test_lib_impl.impl("realize", lambda x: x.clone(), dispatch_key)

    class Realize(Function):
        @staticmethod
        def forward(ctx: object, x: Tensor) -> Tensor:
            return torch.ops._inductor_test.realize(x)

        @staticmethod
        # types need to stay consistent with _SingleLevelFunction
        def backward(ctx: Any, *grad_output: Any) -> Any:
            # realize is identity-like, so the gradient passes straight through.
            return grad_output[0]

    def realize(x: Tensor) -> Tensor:
        return Realize.apply(x)
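A minimal usage sketch, assuming this file is importable as torch._inductor.test_operators (its location in the pytorch source tree): the forward pass behaves like a clone of the input, and the backward pass returns the incoming gradient unchanged.

    import torch
    from torch._inductor.test_operators import realize  # assumed module path

    x = torch.randn(4, requires_grad=True)
    y = realize(x)                    # forward: a clone of x via the custom op
    assert torch.equal(y, x)
    y.sum().backward()                # backward: gradient passes straight through
    assert torch.allclose(x.grad, torch.ones_like(x))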