Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[codemod][lint][fbcode/c*] Enable BLACK by default
Test Plan: manual inspection & sandcastle

Reviewed By: zertosh

Differential Revision: D30279364

fbshipit-source-id: c1ed77dfe43a3bde358f92737cd5535ae5d13c9a
Committed by: Facebook GitHub Bot
Parent: aac3c7bd06
Commit: b004307252
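For context, a minimal sketch of the reformatting this codemod applies, assuming the `black` package is installed (`black.format_str` and `black.FileMode` are its public API; the snippet is illustrative, not part of the commit). Feeding Black a call that overflows its 88-character line limit splits the call across lines, exactly as in the diff below:

```python
# Reformat a too-long call with Black, the same transformation the diff applies.
import black

src = (
    "for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:\n"
    "    self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)\n"
)
print(black.format_str(src, mode=black.FileMode()))
# for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
#     self._run_autocast_outofplace(
#         op, args, torch.float32, module=None, out_type=out_type
#     )
```

Black also normalizes string quotes, which is why `torch.device('cpu')` becomes `torch.device("cpu")` in the first hunk.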
@@ -1,18 +1,22 @@
 import collections
+
 import torch
-from torch.testing._internal.common_utils import TestCase, run_tests
 from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists
+from torch.testing._internal.common_utils import TestCase, run_tests
 
+
 class TestAutocastCPU(TestCase):
     def setUp(self):
         super(TestAutocastCPU, self).setUp()
-        self.autocast_lists = AutocastCPUTestLists(torch.device('cpu'))
+        self.autocast_lists = AutocastCPUTestLists(torch.device("cpu"))
 
     def tearDown(self):
         del self.autocast_lists
         super(TestAutocastCPU, self).tearDown()
 
-    def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
+    def _run_autocast_outofplace(
+        self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None
+    ):
         # helper to cast args
         def cast(val, to_type):
             if isinstance(val, torch.Tensor):
@@ -35,20 +39,29 @@ class TestAutocastCPU(TestCase):
             if module is not None and hasattr(module, op):
                 output = getattr(module, op)(*args, **add_kwargs)
                 if isinstance(output, torch.Tensor):
-                    self.assertTrue(out_type == output.dtype,
-                                    "autocast for torch.{} produced {}, should produce {}"
-                                    .format(op, output.dtype, out_type))
+                    self.assertTrue(
+                        out_type == output.dtype,
+                        "autocast for torch.{} produced {}, should produce {}".format(
+                            op, output.dtype, out_type
+                        ),
+                    )
             # Try Tensor.* variant:
             if hasattr(torch.Tensor, op):
                 output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
                 if isinstance(output_method, torch.Tensor):
-                    self.assertTrue(out_type == output_method.dtype,
-                                    "autocast for torch.{} produced {}, should produce torch.{}"
-                                    .format(op, output_method.dtype, out_type))
+                    self.assertTrue(
+                        out_type == output_method.dtype,
+                        "autocast for torch.{} produced {}, should produce torch.{}".format(
+                            op, output_method.dtype, out_type
+                        ),
+                    )
 
-            self.assertTrue((output is not None) or (output_method is not None),
-                            "{} not found as an attribute on either Tensor or the requested module {}".format(
-                                op, module))
+            self.assertTrue(
+                (output is not None) or (output_method is not None),
+                "{} not found as an attribute on either Tensor or the requested module {}".format(
+                    op, module
+                ),
+            )
 
             # Accounts for ops that return Tensors, iterables, and other non-Tensors.
             # For example, lstm_cell returns a tuple and equal returns bool.
@@ -64,7 +77,10 @@ class TestAutocastCPU(TestCase):
             if (output is not None) and (output_method is not None):
                 self.assertTrue(type(output) == type(output_method))
                 comparison = compare(output, output_method)
-                self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
+                self.assertTrue(
+                    comparison,
+                    "torch.{0} result did not match Tensor.{0} result".format(op),
+                )
 
             # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
             # as the C++-side autocasting, and should be bitwise accurate.
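The comment closing the hunk above states the test's strategy, and the next hunk implements it: recompute the op with inputs cast manually on the Python side and require the result to match the autocast output bitwise. A hypothetical distillation of that pattern (not the test's own helpers), assuming a PyTorch build of this vintage where `torch.cpu.amp.autocast` exists and `torch.mm` is on the CPU bf16 cast list:

```python
import torch

# Run an op under CPU autocast, then re-run it with manually cast
# inputs and compare the two results bitwise.
a, b = torch.randn(4, 4), torch.randn(4, 4)

with torch.cpu.amp.autocast():
    output = torch.mm(a, b)  # autocast computes this in bfloat16

with torch.cpu.amp.autocast(enabled=False):
    control = torch.mm(a.to(torch.bfloat16), b.to(torch.bfloat16))

assert output.dtype == control.dtype
assert torch.equal(output, control)  # expected to be bitwise identical
```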
@@ -73,12 +89,18 @@ class TestAutocastCPU(TestCase):
                 self.assertFalse(torch.is_autocast_cpu_enabled())
 
                 if module is not None and hasattr(module, op):
-                    control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
+                    control = getattr(module, op)(
+                        *cast(args, run_as_type), **add_kwargs
+                    )
                 else:
-                    control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
+                    control = getattr(args[0].to(run_as_type), op)(
+                        *cast(args[1:], run_as_type), **add_kwargs
+                    )
                 self.assertTrue(type(output_to_compare) == type(control))
                 comparison = compare(output_to_compare, control)
-                self.assertTrue(comparison, "torch.{} result did not match control".format(op))
+                self.assertTrue(
+                    comparison, "torch.{} result did not match control".format(op)
+                )
             self.assertTrue(torch.is_autocast_cpu_enabled())
         self.assertFalse(torch.is_autocast_cpu_enabled())
 
@@ -94,12 +116,16 @@ class TestAutocastCPU(TestCase):
 
     def test_autocast_methods_expect_builtin_promote(self):
         for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
-            self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
+            self._run_autocast_outofplace(
+                op, args, torch.float32, module=None, out_type=out_type
+            )
 
     def test_autocast_torch_bf16(self):
         for op_with_args in self.autocast_lists.torch_bf16:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
-            self._run_autocast_outofplace(op, args, torch.bfloat16, add_kwargs=maybe_kwargs)
+            self._run_autocast_outofplace(
+                op, args, torch.bfloat16, add_kwargs=maybe_kwargs
+            )
 
     def test_autocast_nn_bf16(self):
         for op, args in self.autocast_lists.nn_bf16:
@@ -108,16 +134,21 @@ class TestAutocastCPU(TestCase):
     def test_autocast_torch_fp32(self):
         for op_with_args in self.autocast_lists.torch_fp32:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
-            self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
+            self._run_autocast_outofplace(
+                op, args, torch.float32, add_kwargs=maybe_kwargs
+            )
 
     def test_autocast_nn_fp32(self):
         for op_with_args in self.autocast_lists.nn_fp32:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
-            self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs)
+            self._run_autocast_outofplace(
+                op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs
+            )
 
     def test_autocast_torch_need_autocast_promote(self):
         for op, args in self.autocast_lists.torch_need_autocast_promote:
             self._run_autocast_outofplace(op, args, torch.float32)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     run_tests()
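For readers unfamiliar with the subject of these tests: the assertions above check both the dtype autocast chooses and the enabled-state flag it toggles. A minimal sketch of that behavior, again assuming a build where `torch.cpu.amp.autocast`, `torch.is_autocast_cpu_enabled`, and bf16 `torch.mm` are available:

```python
import torch

# The flag the tests query flips on inside the context and off outside it,
# and bf16-eligible ops produce bfloat16 outputs while it is on.
a, b = torch.randn(4, 4), torch.randn(4, 4)

assert not torch.is_autocast_cpu_enabled()
with torch.cpu.amp.autocast():
    assert torch.is_autocast_cpu_enabled()
    out = torch.mm(a, b)
assert not torch.is_autocast_cpu_enabled()

print(out.dtype)  # expected: torch.bfloat16
```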