Revert "[BE][6/16] fix typos in torch/ (#156316)"

This reverts commit b210cf1ea56bcd9f937a2805d9e70d8684d25ee4.

Reverted https://github.com/pytorch/pytorch/pull/156316 on behalf of https://github.com/atalman due to export/test_torchbind.py::TestCompileTorchbind::test_compile_error_on_input_aliasing_contents_backend_aot_eager [GH job link](https://github.com/pytorch/pytorch/actions/runs/15804799771/job/44548489912) [HUD commit link](c95f7fa874) ([comment](https://github.com/pytorch/pytorch/pull/156313#issuecomment-2994171213))
This commit is contained in:
PyTorch MergeBot
2025-06-22 12:31:57 +00:00
parent 035a68d25a
commit 3f44fdc03d
32 changed files with 59 additions and 58 deletions


@@ -1,6 +1,6 @@
 # mypy: ignore-errors
-r"""Importing this file includes common utility methods and base classes for
+r"""Importing this file includes common utility methods and base clases for
 checking quantization api and properties of resulting modules.
 """
@@ -2806,7 +2806,7 @@ class ModelWithFunctionals(torch.nn.Module):
         self.myadd = nnq.FloatFunctional()
         self.myadd_relu = nnq.FloatFunctional()
         self.mymatmul = nnq.FloatFunctional()
-        # Tracing doesn't work yet for c10 ops with scalar inputs
+        # Tracing doesnt work yet for c10 ops with scalar inputs
         # https://github.com/pytorch/pytorch/issues/27097
         # self.my_scalar_add = nnq.FloatFunctional()
         # self.my_scalar_mul = nnq.FloatFunctional()
@@ -2816,7 +2816,7 @@ class ModelWithFunctionals(torch.nn.Module):
         z = self.myadd.add(y, y)
         w = self.myadd_relu.add_relu(z, z)
         u = self.mymatmul.matmul(w, w.T)
-        # Tracing doesn't work yet for c10 ops with scalar inputs
+        # Tracing doesnt work yet for c10 ops with scalar inputs
         # https://github.com/pytorch/pytorch/issues/27097
         # w = self.my_scalar_add.add_scalar(w, -0.5)
         # w = self.my_scalar_mul.mul_scalar(w, 0.5)