Scoped extension building for C++ backed custom ops tests (#136695)

FIXES #125579 #131103 #133197 #133283 #134738 #135369 #135685

Tests that create C++ extensions can cause flakiness in CI due to library namespace conflicts and test ordering. Building them in temporary directories ensures each test is isolated.

An alternative would be to compile these extensions as part of the regular build process, so that any failures surface as build-time errors instead of flaky test failures.
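For illustration only, a minimal sketch of how a scoped helper could route each build into a throwaway directory. The diff below imports the real helper, scoped_load_inline, from torch.testing._internal.common_utils; everything in this sketch except torch.utils.cpp_extension.load_inline and its build_directory argument is a hypothetical stand-in, not the actual implementation.

# Sketch only: names other than torch.utils.cpp_extension.load_inline and its
# build_directory argument are hypothetical.
import functools
import tempfile

import torch.utils.cpp_extension


def scoped_load_inline_sketch(test_fn):
    """Hand the test a load_inline that builds into a fresh temp dir."""

    @functools.wraps(test_fn)
    def wrapper(self, *args, **kwargs):
        def load_inline(*inline_args, **inline_kwargs):
            # A unique build directory per call, so previously built extensions
            # with clashing names cannot collide with this one.
            build_dir = tempfile.mkdtemp(prefix="scoped_inline_build_")
            inline_kwargs.setdefault("build_directory", build_dir)
            return torch.utils.cpp_extension.load_inline(*inline_args, **inline_kwargs)

        return test_fn(self, load_inline, *args, **kwargs)

    return wrapper

The decorated test then receives the injected load_inline as an argument (as in the diff below) and uses a unique, test-specific TORCH_LIBRARY namespace so loaded libraries cannot clash across tests.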

Pull Request resolved: https://github.com/pytorch/pytorch/pull/136695
Approved by: https://github.com/zou3519
Simon Fan
2024-10-25 13:19:47 -07:00
committed by PyTorch MergeBot
parent 10e2840ce3
commit 99608ceed6
5 changed files with 73 additions and 34 deletions


@@ -33,6 +33,7 @@ from torch.testing._internal.common_utils import (
     IS_WINDOWS,
     parametrize,
     run_tests,
+    scoped_load_inline,
     skipIfTorchDynamo,
     subtest,
     TestCase,
@@ -2088,7 +2089,8 @@ dynamic shape operator: _torch_testing.numpy_nonzero.default
         with self.assertRaisesRegex(RuntimeError, "Expected one of cpu, cuda"):
             torch.library.impl("blah::blah", "somethingsomething")
 
-    def test_autograd_function_backed_op(self):
+    @scoped_load_inline
+    def test_autograd_function_backed_op(self, load_inline):
         cpp_source = """
 struct CustomOpAutogradFunction : public torch::autograd::Function<CustomOpAutogradFunction> {
   static constexpr bool is_traceable = true;
@@ -2110,13 +2112,13 @@ torch::Tensor custom_op_backed_by_autograd_fn(const torch::Tensor& x) {
   return CustomOpAutogradFunction::apply(x);
 }
 
-TORCH_LIBRARY(mylib, m) {
+TORCH_LIBRARY(test_autograd_function_backed_op, m) {
     m.def("custom_op_backed_by_autograd_fn", custom_op_backed_by_autograd_fn);
 }
         """
 
-        module = torch.utils.cpp_extension.load_inline(
-            name="mylib",
+        module = load_inline(
+            name="test_autograd_function_backed_op",
             cpp_sources=cpp_source,
             functions="custom_op_backed_by_autograd_fn",
             verbose=True,
@@ -2124,7 +2126,11 @@ TORCH_LIBRARY(mylib, m) {
 
         x = torch.ones(2, 2, requires_grad=True)
         temp = x.clone().detach()
-        out = torch.ops.mylib.custom_op_backed_by_autograd_fn(x)
+        out = (
+            torch.ops.test_autograd_function_backed_op.custom_op_backed_by_autograd_fn(
+                x
+            )
+        )
         loss = out.sum()
         loss.backward()
         self.assertEqual(x.grad, temp)