Compare commits


3 Commits

SHA1 Message Date
e85d494707 make valgrind_toggle and valgrind_supported_platform private (#46718) 2020-10-23 12:31:23 -07:00
a6e96b190e Avoid leaking has_torch_function and handle_torch_function in torch namespace (#46680) (#46719)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/46680

Reviewed By: zou3519

Differential Revision: D24459823

Pulled By: albanD

fbshipit-source-id: 4ff6925afcf14214dc45921bca0d2f33ca1944a1
2020-10-23 12:31:12 -07:00
f9df694843 Make add_relu an internal function (#46676) (#46765)
Summary:
Cleanup for 1.7

Pull Request resolved: https://github.com/pytorch/pytorch/pull/46676

Reviewed By: gchanan

Differential Revision: D24458565

Pulled By: albanD

fbshipit-source-id: b1e4b4630233d3f1a4bac20e3077411d1ae17f7b

# Conflicts:
#	test/backward_compatibility/check_backward_compatibility.py
2020-10-23 10:15:09 -07:00
13 changed files with 31 additions and 31 deletions
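Taken together, the three commits above move these symbols behind underscore-prefixed, internal names. A minimal sketch of the user-visible effect, assuming a build that includes these commits (the torch._VF spelling comes from the updated TestAddRelu diff below; everything else is shown in the diffs):

import torch

a, b = torch.randn(8), torch.randn(8)

# The fused add+relu op is internal now: torch.add_relu is gone and the
# op is reached through torch._VF, as the updated tests below do.
fused = torch._VF._add_relu(a, b)
assert torch.allclose(fused, torch.relu(a + b))

# The Valgrind/Callgrind bindings likewise gained a leading underscore.
if torch._C._valgrind_supported_platform():
    torch._C._valgrind_toggle()  # start/stop Callgrind collection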

View File

@@ -37,9 +37,9 @@ TORCH_LIBRARY_IMPL(aten, Named, m) {
   m.impl("add.out", CppFunction::makeFallthrough());
   m.impl("add_.Scalar", CppFunction::makeFallthrough());
   m.impl("add_.Tensor", CppFunction::makeFallthrough());
-  m.impl("add_relu.Tensor", CppFunction::makeFallthrough());
-  m.impl("add_relu.out", CppFunction::makeFallthrough());
-  m.impl("add_relu_.Tensor", CppFunction::makeFallthrough());
+  m.impl("_add_relu.Tensor", CppFunction::makeFallthrough());
+  m.impl("_add_relu.out", CppFunction::makeFallthrough());
+  m.impl("_add_relu_.Tensor", CppFunction::makeFallthrough());
   m.impl("addcdiv", CppFunction::makeFallthrough());
   m.impl("addcdiv.out", CppFunction::makeFallthrough());
   m.impl("addcdiv_", CppFunction::makeFallthrough());

View File

@@ -368,19 +368,19 @@
     SparseCUDA: add_out_sparse_cuda
     MkldnnCPU: mkldnn_add_out

-- func: add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+- func: _add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
   use_c10_dispatcher: full
   variants: function
   dispatch:
     CPU: add_relu

-- func: add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+- func: _add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
   use_c10_dispatcher: full
   variants: function
   dispatch:
     CPU: add_relu_

-- func: add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+- func: _add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
   variants: function
   dispatch:
     CPU: add_relu_out

View File

@@ -118,6 +118,8 @@ allow_list = [
     ('aten::contiguous', datetime.date(2020, 12, 1)),
     ('aten::to', datetime.date(2020, 12, 1)),
     ("tensorexpr::Group", datetime.date(2020, 12, 1)),
+    ("aten::add_relu", datetime.date(2020, 12, 1)),
+    ("aten::add_relu_", datetime.date(2020, 12, 1)),
 ]

View File

@@ -581,7 +581,7 @@ class TestJit(JitTestCase):
         m = torch.jit.load(buffer)
         new_res = m(a, b, c)
         FileCheck().check_not("aten::relu(") \
-            .check("aten::add_relu(") \
+            .check("aten::_add_relu(") \
             .run(m.graph)
         torch.testing.assert_allclose(orig_res, new_res)
@@ -600,7 +600,7 @@ class TestJit(JitTestCase):
         m = torch.jit.load(buffer)
         new_res = m(a, b, c)
         FileCheck().check_not("aten::relu_(") \
-            .check("aten::add_relu(") \
+            .check("aten::_add_relu(") \
             .run(m.graph)
         torch.testing.assert_allclose(orig_res, new_res)
@@ -631,10 +631,10 @@ class TestJit(JitTestCase):
         new_res = m(a_copy, b)
         FileCheck().check_not("aten::add_(") \
             .check_not("aten::relu_(") \
-            .check("aten::add_relu_(") \
+            .check("aten::_add_relu_(") \
             .run(m.graph)
         torch.testing.assert_allclose(orig_res, new_res)
-        # Since add_relu_ does inplace mutation ensure
+        # Since _add_relu_ does inplace mutation ensure
         # a_copy is modified
         torch.testing.assert_allclose(orig_res, a_copy)
@@ -669,10 +669,10 @@ class TestJit(JitTestCase):
         new_res = m(a_copy, b)
         FileCheck().check_not("aten::add(") \
             .check_not("aten::relu_(") \
-            .check("aten::add_relu(") \
+            .check("aten::_add_relu(") \
             .run(m.graph)
         torch.testing.assert_allclose(orig_res, new_res)
-        # Since add_relu_ with out=a does inplace mutation ensure
+        # Since _add_relu_ with out=a does inplace mutation ensure
         # a_copy is modified
         torch.testing.assert_allclose(orig_res, a_copy)

View File

@@ -95,7 +95,7 @@ class TestOptimizer(unittest.TestCase):
             .check_count("prepacked::linear_clamp_run", 1, exactly=True) \
             .check_not("aten::add(") \
             .check_not("aten::relu(") \
-            .check_count("aten::add_relu(", 1, exactly=True) \
+            .check_count("aten::_add_relu(", 1, exactly=True) \
             .run(optimized_scripted_model.graph)
         torch.testing.assert_allclose(initial_result, optimized_result, rtol=1e-2, atol=1e-3)

View File

@@ -9152,7 +9152,7 @@ class TestAddRelu(TestCase):
         a = a + 5
         add_res = a + b
         relu_res = torch.relu(add_res)
-        add_relu_res = torch.add_relu(a, b)
+        add_relu_res = torch._VF._add_relu(a, b)
         self.assertTrue(torch.allclose(add_relu_res, relu_res))

View File

@@ -1903,7 +1903,7 @@
   - name: aten::resize_as_
   - name: aten::scalar_tensor
   - name: aten::to
-- name: aten::add_relu
+- name: aten::_add_relu
   depends:
   - name: aten::as_strided_
   - name: aten::copy_
@@ -1915,7 +1915,7 @@
   - name: aten::resize_
   - name: aten::resize_as_
   - name: aten::to
-- name: aten::add_relu_
+- name: aten::_add_relu_
   depends:
   - name: aten::as_strided_
   - name: aten::copy_

View File

@@ -381,8 +381,8 @@ def _log_api_usage_once(str) -> None: ...  # LogAPIUsageOnceFromPython
 def _demangle(str) -> str: ...  # c10::demangle

 # Defined in `valgrind.h` and `callgrind.h` respecitively.
-def valgrind_supported_platform() -> _bool: ...  # NVALGRIND
-def valgrind_toggle() -> None: ...  # CALLGRIND_TOGGLE_COLLECT
+def _valgrind_supported_platform() -> _bool: ...  # NVALGRIND
+def _valgrind_toggle() -> None: ...  # CALLGRIND_TOGGLE_COLLECT

 has_openmp: _bool
 has_mkl: _bool

View File

@@ -614,11 +614,12 @@ from ._lobpcg import lobpcg
 quantized_lstm = torch.ops.aten.quantized_lstm
 quantized_gru = torch.ops.aten.quantized_gru

-from .overrides import has_torch_function, handle_torch_function

 def Assert(condition, message):
     r"""A wrapper around Python's assert which is symbolically traceable.
     """
+    from .overrides import has_torch_function, handle_torch_function
+
     if type(condition) is not torch.Tensor and has_torch_function((condition,)):
         return handle_torch_function(Assert, (condition,), condition, message)
     assert condition, message
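The fix works because a module-level "from .overrides import ..." binds the imported helpers as attributes of the torch module itself; moving the import into the function body keeps the binding local to each call. A quick check of the intended behavior, assuming a build that includes this commit (later releases may re-export these names deliberately):

import torch

# The supported home of these helpers is torch.overrides:
from torch.overrides import has_torch_function, handle_torch_function

# With this change they are no longer re-exported at the top level:
assert not hasattr(torch, "has_torch_function")
assert not hasattr(torch, "handle_torch_function")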

View File

@@ -828,7 +828,7 @@ Call this whenever a new thread is created in order to propagate values from
   ASSERT_TRUE(set_module_attr("has_lapack", at::hasLAPACK() ? Py_True : Py_False));

   py_module.def(
-      "valgrind_supported_platform", [](){
+      "_valgrind_supported_platform", [](){
 #if defined(NVALGRIND)
         return false;
 #else
@@ -838,7 +838,7 @@ Call this whenever a new thread is created in order to propagate values from
   );

   py_module.def(
-      "valgrind_toggle", [](){
+      "_valgrind_toggle", [](){
 #if defined(NVALGRIND)
         TORCH_CHECK(false, "Valgrind is not supported.");
 #else

View File

@@ -17,7 +17,7 @@ void fuseAddReluImpl(std::shared_ptr<Graph>& graph) {
       return (%res))";
   std::string add_relu_fused = R"(
     graph(%a, %b, %alpha):
-        %res = aten::add_relu(%a, %b, %alpha)
+        %res = aten::_add_relu(%a, %b, %alpha)
         return (%res))";

   rewriter.RegisterRewritePattern(add_relu_0, add_relu_fused);
@@ -35,7 +35,7 @@ void fuseAddReluImpl(std::shared_ptr<Graph>& graph) {
       return (%res))";
   std::string add_inplace_relu_fused = R"(
     graph(%a, %b, %alpha):
-        %res = aten::add_relu_(%a, %b, %alpha)
+        %res = aten::_add_relu_(%a, %b, %alpha)
         return (%res))";

   rewriter.RegisterRewritePattern(add_inplace_relu_1, add_inplace_relu_fused);
@@ -46,7 +46,7 @@ void fuseAddReluImpl(std::shared_ptr<Graph>& graph) {
       return (%res))";
   std::string add_out_relu_fused = R"(
     graph(%a, %b, %alpha, %out):
-        %res = aten::add_relu(%a, %b, %alpha, %out)
+        %res = aten::_add_relu(%a, %b, %alpha, %out)
         return (%res))";

   rewriter.RegisterRewritePattern(add_out_relu, add_out_relu_fused);
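For reference, these rewrite patterns are applied by the JIT's add/relu fusion pass. A minimal sketch of invoking it from Python, assuming the pass is exposed as torch._C._jit_pass_fuse_add_relu the way the JIT tests exercise it (that binding is an assumption here, not part of this diff):

import torch

@torch.jit.script
def f(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    return torch.relu(a + b)

# Run the fusion pass over the scripted graph; the separate add and relu
# nodes should be rewritten to the internal fused op aten::_add_relu.
torch._C._jit_pass_fuse_add_relu(f.graph)
print(f.graph)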

View File

@@ -156,8 +156,6 @@ def get_ignored_functions() -> Set[Callable]:
         torch.is_deterministic,
         torch.set_deterministic,
         torch.unify_type_list,
-        torch.valgrind_supported_platform,
-        torch.valgrind_toggle,
         Tensor.__delitem__,
         Tensor.__dir__,
         Tensor.__getattribute__,
@@ -211,7 +209,6 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
         torch.arccos: lambda input, out=None: -1,
         torch.acosh: lambda input, out=None: -1,
         torch.arccosh: lambda input, out=None: -1,
-        torch.add_relu: lambda input, other, out=None: -1,
         torch.add: lambda input, other, out=None: -1,
         torch.addbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
         torch.addcdiv: lambda input, tensor1, tensor2, value=1, out=None: -1,

View File

@@ -176,7 +176,7 @@ class CallgrindStats(object):
 class _ValgrindWrapper(object):
     def __init__(self) -> None:
         self._commands_available: Dict[str, bool] = {}
-        if torch._C.valgrind_supported_platform():
+        if torch._C._valgrind_supported_platform():
             # Only bother checking on supported platforms.
             for cmd in ("valgrind", "callgrind_control", "callgrind_annotate"):
                 self._commands_available[cmd] = not subprocess.run(
@@ -193,7 +193,7 @@ class _ValgrindWrapper(object):
         self._baseline_cache: Dict[Tuple[int, int], Tuple[Tuple[FunctionCount, ...], Tuple[FunctionCount, ...]]] = {}

     def _validate(self) -> None:
-        if not torch._C.valgrind_supported_platform():
+        if not torch._C._valgrind_supported_platform():
             raise OSError("Valgrind is not supported on this platform.")

         missing_cmds = [cmd for cmd, available in self._commands_available.items() if not available]
@@ -444,13 +444,13 @@ class _ValgrindWrapper(object):
 # =============================================================================
 # == User code block ==========================================================
 # =============================================================================
-torch._C.valgrind_toggle()
+torch._C._valgrind_toggle()
 {blocked_stmt}

 # Sleep is to allow the interpreter to catch up before we stop collecting in
 # order to reduce jitter.
 time.sleep(0.01)
-torch._C.valgrind_toggle()
+torch._C._valgrind_toggle()
 """).strip().format(
     indented_stmt=textwrap.indent(stmt, " " * 4),
     blocked_stmt=blocked_stmt,
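These toggles are driven by the benchmark utilities rather than called directly. A short usage sketch, assuming the torch.utils.benchmark Timer API from this release and a machine with valgrind, callgrind_control, and callgrind_annotate on the PATH:

from torch.utils.benchmark import Timer

timer = Timer(
    stmt="x + y",
    setup="import torch; x = torch.ones(64); y = torch.ones(64)",
)

# Internally this wraps the measured statement between the two
# torch._C._valgrind_toggle() calls (the user code block in the template
# above) and parses the callgrind_annotate output into CallgrindStats.
stats = timer.collect_callgrind()
print(stats)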