[numpy] torch.erf{c}: promote integer inputs to float (#48472)

Summary:
Reference: https://github.com/pytorch/pytorch/issues/42515
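
The user-visible effect, as a minimal sketch (assuming the default float dtype is float32; previously an integral input raised a RuntimeError about an unimplemented dtype):

import torch

t = torch.arange(3)            # int64 input
print(torch.erf(t).dtype)      # torch.float32: integer inputs now promote to float
print(torch.erfc(t).dtype)     # torch.float32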

Pull Request resolved: https://github.com/pytorch/pytorch/pull/48472

Reviewed By: ngimel

Differential Revision: D25192324

Pulled By: mruberry

fbshipit-source-id: 6ef2fec8a27425f9c4c917fc3ae25ac1e1f5f454
Author: kshitij12345
Date: 2020-11-27 15:06:48 -08:00
Committed by: Facebook GitHub Bot
Parent: 7df8445242
Commit: f95af7a79a
5 changed files with 23 additions and 19 deletions


@@ -258,12 +258,12 @@ Tensor& expm1_out(Tensor& result, const Tensor& self) { return unary_op_impl_out
 Tensor expm1(const Tensor& self) { return unary_op_impl(self, at::expm1_out); }
 Tensor& expm1_(Tensor& self) { return unary_op_impl_(self, at::expm1_out); }
-Tensor& erf_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, erf_stub); }
-Tensor erf(const Tensor& self) { return unary_op_impl(self, at::erf_out); }
+Tensor& erf_out(Tensor& result, const Tensor& self) { return unary_op_impl_float_out(result, self, erf_stub); }
+Tensor erf(const Tensor& self) { return unary_op_impl_float(self, erf_stub); }
 Tensor& erf_(Tensor& self) { return unary_op_impl_(self, at::erf_out); }
-Tensor& erfc_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, erfc_stub); }
-Tensor erfc(const Tensor& self) { return unary_op_impl(self, at::erfc_out); }
+Tensor& erfc_out(Tensor& result, const Tensor& self) { return unary_op_impl_float_out(result, self, erfc_stub); }
+Tensor erfc(const Tensor& self) { return unary_op_impl_float(self, erfc_stub); }
 Tensor& erfc_(Tensor& self) { return unary_op_impl_(self, at::erfc_out); }
 Tensor& frac_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, frac_stub); }
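
The *_float helpers build the TensorIterator with integer-to-float promotion, so the stub runs on a floating computation dtype. Behaviorally, a sketch assuming the standard out= casting rules:

import torch

t = torch.tensor([0, 1, 2])        # int64, CPU
out = torch.empty(3)               # float32
torch.erf(t, out=out)              # ok: the computation dtype is float
# An integral out= tensor should still be rejected, because the float
# result cannot be safely cast back to an integer dtype:
# torch.erf(t, out=torch.empty(3, dtype=torch.long))  # expected RuntimeError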


@@ -131,7 +131,7 @@ void logit_kernel_cuda(TensorIterator& iter, Scalar eps_scalar) {
 }
 void erf_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "erf_cuda", [&]() {
+  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
       return ::erf(a);
     });
@@ -139,7 +139,7 @@ void erf_kernel_cuda(TensorIterator& iter) {
 }
 void erfc_kernel_cuda(TensorIterator& iter) {
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfc_cuda", [&]() {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfc_cuda", [&]() {
     gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
       return ::erfc(a);
     });
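
iter.dtype() is the output dtype, while iter.common_dtype() is the dtype the kernel actually computes in; once integral inputs are promoted, the two differ (integer in, float out), so the dispatch must key on the common dtype. A quick check of the promoted CUDA path (a sketch; requires a CUDA device):

import torch

if torch.cuda.is_available():
    t = torch.arange(4, device="cuda")   # int64 CUDA tensor
    print(torch.erfc(t).dtype)           # torch.float32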


@@ -919,9 +919,9 @@ class TestTypePromotion(TestCase):
         t = torch.tensor((1), dtype=dtypes[0], device=device)
         out = torch.empty(0, dtype=dtypes[1], device=device)
-        ops = (torch.neg, torch.floor, torch.ceil, torch.erf)
-        float_only_ops = {torch.floor, torch.ceil, torch.erf}
-        real_only_ops = {torch.floor, torch.ceil, torch.erf}
+        ops = (torch.neg, torch.floor, torch.ceil)
+        float_only_ops = {torch.floor, torch.ceil}
+        real_only_ops = {torch.floor, torch.ceil}
         for op in ops:
             if dtypes[0] is not dtypes[1]:
                 with self.assertRaises(RuntimeError):
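
torch.erf drops out of float_only_ops and real_only_ops because it no longer raises on integral inputs; it promotes them instead. The property the test used to assert for erf, sketched:

import torch

t = torch.tensor(1, dtype=torch.int32)
# Before this change torch.erf(t) raised a RuntimeError (erf was float-only);
# after it, the input is promoted and the call succeeds:
print(torch.erf(t))        # tensor(0.8427)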


@@ -984,13 +984,15 @@ Tensor* TensorExprKernel::computeValue(const torch::jit::Value* v) {
     } break;
     case aten::erf: {
-      return computeOneOperand(
-          "aten_erf", v, [](const ExprHandle& a) { return erf(a); });
+      return computeOneOperand("aten_erf", v, [](const ExprHandle& a) {
+        return erf(promoteIntegerToFloat(a));
+      });
     } break;
     case aten::erfc: {
-      return computeOneOperand(
-          "aten_erfc", v, [](const ExprHandle& a) { return erfc(a); });
+      return computeOneOperand("aten_erfc", v, [](const ExprHandle& a) {
+        return erfc(promoteIntegerToFloat(a));
+      });
     } break;
     case aten::cos: {
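
With promoteIntegerToFloat in the lowering, a fused erf/erfc kernel computes in float for integral operands, matching eager mode. A sketch of the end-to-end expectation (whether the tensorexpr fuser actually compiles this depends on the backend and fuser settings; the output dtype is float32 either way):

import torch

@torch.jit.script
def f(x):
    return torch.erf(x) + torch.erfc(x)

t = torch.arange(3)        # int64
print(f(t).dtype)          # torch.float32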


@@ -478,16 +478,18 @@ if TEST_SCIPY:
                    ref=scipy.special.erf,
                    decorators=(precisionOverride({torch.float16: 1e-2,
                                                   torch.bfloat16: 1e-2}),),
-                   dtypes=floating_types(),
-                   dtypesIfCPU=floating_types_and(torch.bfloat16),
-                   dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16)),
+                   dtypes=all_types_and(torch.bool),
+                   dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
+                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
+                   promotes_integers_to_float=True),
     UnaryUfuncInfo('erfc',
                    ref=scipy.special.erfc,
                    decorators=(precisionOverride({torch.float16: 1e-2,
                                                   torch.bfloat16: 1e-2}),),
-                   dtypes=floating_types(),
-                   dtypesIfCPU=floating_types_and(torch.bfloat16),
-                   dtypesIfCUDA=floating_types_and(torch.half)),
+                   dtypes=all_types_and(torch.bool),
+                   dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),
+                   dtypesIfCUDA=all_types_and(torch.bool, torch.half),
+                   promotes_integers_to_float=True),
 ]
 op_db = op_db + op_db_scipy_reference
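
promotes_integers_to_float=True tells the shared OpInfo machinery to expect floating-point results for the newly enabled integral and bool dtypes. In spirit, the check amounts to the following (a sketch, not the actual test harness):

import torch

for dtype in (torch.bool, torch.uint8, torch.int8, torch.int16,
              torch.int32, torch.int64):
    t = torch.ones(3, dtype=dtype)
    assert torch.erf(t).is_floating_point()
    assert torch.erfc(t).is_floating_point()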