Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Revert D30279364: [codemod][lint][fbcode/c*] Enable BLACK by default
Test Plan: revert-hammer
Differential Revision: D30279364 (b004307252)
Original commit changeset: c1ed77dfe43a
fbshipit-source-id: eab50857675c51e0088391af06ec0ecb14e2347e
This commit is contained in:
committed by Facebook GitHub Bot
parent ed0b8a3e83
commit 1022443168
@@ -1,8 +1,5 @@
-from torch.testing._check_kernel_launches import (
-    check_cuda_kernel_launches,
-    check_code_for_cuda_kernel_launches,
-)
 from torch.testing._internal.common_utils import TestCase, run_tests
+from torch.testing._check_kernel_launches import check_cuda_kernel_launches, check_code_for_cuda_kernel_launches
 
 
 class AlwaysCheckCudaLaunchTest(TestCase):
@@ -10,10 +7,7 @@ class AlwaysCheckCudaLaunchTest(TestCase):
         """Verifies that the regex works for a few different situations"""
 
         # Try some different spacings
-        self.assertEqual(
-            2,
-            check_code_for_cuda_kernel_launches(
-                """
+        self.assertEqual(2, check_code_for_cuda_kernel_launches("""
 some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
 C10_CUDA_KERNEL_LAUNCH_CHECK();
 some_function_call<TemplateArg><<<1,2,0,stream>>>(arg1,arg2,arg3);
@@ -29,15 +23,10 @@ C10_CUDA_KERNEL_LAUNCH_CHECK();
 some_function_call<TemplateArg><<<1,2,0,stream>>> ( arg1 , arg2 , arg3 ) ;
 
 C10_CUDA_KERNEL_LAUNCH_CHECK();
-"""
-            ),
-        )
+"""))
 
         # Does it work for macros?
-        self.assertEqual(
-            0,
-            check_code_for_cuda_kernel_launches(
-                r"""
+        self.assertEqual(0, check_code_for_cuda_kernel_launches(r"""
 #define SOME_MACRO(x) some_function_call<<<1,2>>> ( x ) ; \
   C10_CUDA_KERNEL_LAUNCH_CHECK();
 
@@ -47,15 +36,10 @@ some_function_call<TemplateArg><<<1,2,0,stream>>> ( arg1 , arg2 , arg3 ) ;
       selfInfo, sourceInfo, indexInfo, \
       selfAddDim, sourceAddDim, sliceSize, selfAddDimSize); \
   C10_CUDA_KERNEL_LAUNCH_CHECK();
-"""
-            ),
-        )
+"""))
 
         # Does it work for lambdas?
-        self.assertEqual(
-            1,
-            check_code_for_cuda_kernel_launches(
-                r"""
+        self.assertEqual(1, check_code_for_cuda_kernel_launches(r"""
 rrelu_with_noise_cuda_kernel<scalar_t, 2><<<grid, block, 0, stream>>>(
     numel,
     rng_engine_inputs,
@@ -82,14 +66,12 @@ some_function_call<TemplateArg><<<1,2,0,stream>>> ( arg1 , arg2 , arg3 ) ;
 });
 uh oh;
 C10_CUDA_KERNEL_LAUNCH_CHECK();
-"""
-            ),
-        )
+"""))
 
     def test_check_cuda_launches(self):
         unsafeLaunchesCount = check_cuda_kernel_launches()
         self.assertTrue(unsafeLaunchesCount == 0)
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     run_tests()
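For reference, the test touched by this revert exercises check_code_for_cuda_kernel_launches, which scans a string of CUDA source and returns how many kernel launches (<<<...>>>) are not followed by C10_CUDA_KERNEL_LAUNCH_CHECK(). A minimal usage sketch, assuming the private torch.testing._check_kernel_launches module is importable in your PyTorch build (function and import names are taken from the diff above; the example source string and expected count are illustrative, not part of the commit):

# Minimal sketch, not part of this commit: uses the private helper imported by the test above.
from torch.testing._check_kernel_launches import check_code_for_cuda_kernel_launches

# Two launches: the first is followed by another launch (unguarded),
# the second is immediately followed by the launch check (guarded).
cuda_source = """
first_kernel<<<grid, block, 0, stream>>>(arg1, arg2);
second_kernel<<<grid, block>>>(arg1);
C10_CUDA_KERNEL_LAUNCH_CHECK();
"""

unguarded = check_code_for_cuda_kernel_launches(cuda_source)
print(unguarded)  # expected: 1 (only the first launch lacks a check)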