mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-21 21:49:24 +08:00
Fix code descriptions in the test package. (#148145)
Some parameter and function descriptions in the test package were incorrect; this change corrects them. Pull Request resolved: https://github.com/pytorch/pytorch/pull/148145 Approved by: https://github.com/janeyx99
This commit is contained in:
@ -222,7 +222,7 @@ class ModuleInfo:
|
||||
# channels last output
|
||||
train_and_eval_differ=False, # whether the module has differing behavior between train and eval
|
||||
module_error_inputs_func=None, # Function to generate module inputs that error
|
||||
gradcheck_fast_mode=None, # Whether to use the fast implmentation for gradcheck/gradgradcheck.
|
||||
gradcheck_fast_mode=None, # Whether to use the fast implementation for gradcheck/gradgradcheck.
|
||||
# When set to None, defers to the default value provided by the wrapper
|
||||
# function around gradcheck (testing._internal.common_utils.gradcheck)
|
||||
):
|
||||
@ -3575,7 +3575,7 @@ module_db: list[ModuleInfo] = [
|
||||
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'),
|
||||
# Failure on ROCM for float32 issue #70125
|
||||
DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
|
||||
# Not implmented for chalf on CPU
|
||||
# Not implemented for chalf on CPU
|
||||
DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity',
|
||||
dtypes=(torch.chalf,), device_type='cuda'),
|
||||
# See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32'
|
||||
@ -3640,7 +3640,7 @@ module_db: list[ModuleInfo] = [
|
||||
# These fail only on ROCm
|
||||
DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda',
|
||||
dtypes=[torch.complex32, torch.complex64], active_if=TEST_WITH_ROCM),
|
||||
# Not implmented for chalf on CPU
|
||||
# Not implemented for chalf on CPU
|
||||
DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity',
|
||||
dtypes=(torch.chalf,), device_type='cuda'),
|
||||
),
|
||||
|
Reference in New Issue
Block a user