[ROCm][CI] unskip some test_memory_format tests (#162766)
Fixes #70125. Much of the work was done by #161687. This PR is additional test cleanup.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/162766
Approved by: https://github.com/jeffdaily

Co-authored-by: Jeff Daily <jeff.daily@amd.com>
Committed by: PyTorch MergeBot
Parent: 03798b0f91
Commit: 7357eb66c5
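The tests being unskipped check that modules preserve the requested memory format. As a rough illustration of what test_memory_format exercises (a minimal sketch, not the test's actual code; the module, shapes, and CPU device here are arbitrary choices):

import torch

# Convert the module and its input to the channels_last (NHWC) layout.
conv = torch.nn.Conv2d(3, 8, kernel_size=3).to(memory_format=torch.channels_last)
x = torch.randn(2, 3, 32, 32).to(memory_format=torch.channels_last)

out = conv(x)
# The core expectation: the output keeps the channels_last layout.
print(out.is_contiguous(memory_format=torch.channels_last))  # expected: True where Conv2d propagates channels_last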
@@ -15,10 +15,16 @@ from torch.testing._internal.common_device_type import (
 from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
 from torch.testing._internal.common_utils import (
     TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
-    gradgradcheck, parametrize, wrapSwapTensorsTest)
+    gradgradcheck, parametrize, wrapSwapTensorsTest, TEST_WITH_ROCM)
 from unittest.mock import patch, call
 
+if TEST_WITH_ROCM:
+    import os
+    os.environ["PYTORCH_MIOPEN_SUGGEST_NHWC"] = "1"
+    os.environ["PYTORCH_MIOPEN_SUGGEST_NHWC_BATCHNORM"] = "1"
+
+
 class TestModule(TestCase):
     _do_cuda_memory_leak_check = True
     _do_cuda_non_default_stream = True
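The two environment variables ask PyTorch's MIOpen integration to prefer NHWC (channels_last) kernels for convolution and batch norm; the test file sets them at import time, before any module under test runs, and only when TEST_WITH_ROCM is true. A standalone sketch of the same gating pattern (the torch.version.hip guard and the tiny check below are illustrative assumptions, not part of the PR):

import os

# Set before running any ROCm convolution/batchnorm so the preference applies
# to the whole process, mirroring the gating added to the test file above.
os.environ["PYTORCH_MIOPEN_SUGGEST_NHWC"] = "1"
os.environ["PYTORCH_MIOPEN_SUGGEST_NHWC_BATCHNORM"] = "1"

import torch

# torch.version.hip is non-None only on ROCm builds.
if torch.cuda.is_available() and torch.version.hip is not None:
    conv = torch.nn.Conv2d(3, 8, 3, device="cuda").to(memory_format=torch.channels_last)
    x = torch.randn(2, 3, 32, 32, device="cuda").to(memory_format=torch.channels_last)
    # With NHWC suggested, the convolution output should stay channels_last.
    print(conv(x).is_contiguous(memory_format=torch.channels_last))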
@@ -16,7 +16,7 @@ from torch.testing._internal.common_dtype import (
     floating_types, floating_and_complex_types_and, get_all_fp_dtypes)
 from torch.testing._internal.common_device_type import (
     _TestParametrizer, _update_param_kwargs, expectedFailureMPS, toleranceOverride, tol,
-    skipCUDAIfRocm, precisionOverride, skipMeta, skipMPS)
+    precisionOverride, skipMeta, skipMPS)
 from torch.testing._internal.common_methods_invocations import DecorateInfo
 from torch.testing._internal.common_nn import (
     cosineembeddingloss_reference, cross_entropy_loss_reference, ctcloss_reference,
@@ -3497,8 +3497,6 @@ module_db: list[ModuleInfo] = [
     gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
     module_memformat_affects_out=True,
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32'
         # xfail does not work due to Fatal Python error: Aborted
         DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format",
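Each removed pair of lines here and in the hunks below encodes the same rule: skip test_memory_format for float32 when running on ROCm. Outside the ModuleInfo/DecorateInfo machinery, the rule is roughly equivalent to a plain unittest.skipIf (a hedged sketch; the environment variable below is what TEST_WITH_ROCM is derived from, and the test body is a placeholder):

import os
import unittest

# common_utils.TEST_WITH_ROCM is driven by this environment variable.
TEST_WITH_ROCM = os.getenv("PYTORCH_TEST_WITH_ROCM", "0") == "1"

class MemoryFormatSketch(unittest.TestCase):
    # Roughly what DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format',
    # dtypes=[torch.float32]) attached to the generated float32 CUDA test.
    @unittest.skipIf(TEST_WITH_ROCM, "Failure on ROCM for float32 issue #70125")
    def test_memory_format_float32(self):
        pass  # placeholder for the generated test body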
@@ -3514,8 +3512,6 @@ module_db: list[ModuleInfo] = [
     gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
     module_memformat_affects_out=True,
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # This was wrongly being skipped before and needs investigation.
         # See https://github.com/pytorch/pytorch/issues/80247
         DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format",
@@ -3538,8 +3534,6 @@ module_db: list[ModuleInfo] = [
     gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
     module_memformat_affects_out=True,
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # Conv3d is not supported on MPS backend
         DecorateInfo(skipMPS, device_type="mps"),
         # This was wrongly being skipped before and needs investigation.
@@ -3555,8 +3549,6 @@ module_db: list[ModuleInfo] = [
     module_memformat_affects_out=True,
     dtypes=floating_and_complex_types_and(torch.chalf),
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # Not implemented for chalf on CPU
         DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity',
                      dtypes=(torch.chalf,), device_type='cuda'),
@@ -3576,8 +3568,6 @@ module_db: list[ModuleInfo] = [
     module_memformat_affects_out=True,
     dtypes=floating_and_complex_types_and(torch.chalf),
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # Fails on backward check because ViewAsRealBackward apply contiguous for grad
         DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format',
                      dtypes=(torch.complex32, torch.complex64, torch.complex128)),
@@ -3608,16 +3598,11 @@ module_db: list[ModuleInfo] = [
     gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
     module_memformat_affects_out=True,
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # ConvTranspose3d is not supported on MPS backend
         DecorateInfo(skipMPS),
         # This was wrongly being skipped before and needs investigation.
         # See https://github.com/pytorch/pytorch/issues/80247
         DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),
-        # These fail only on ROCm
-        DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda',
-                     dtypes=[torch.complex32, torch.complex64], active_if=TEST_WITH_ROCM),
         # Not implemented for chalf on CPU
         DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity',
                      dtypes=(torch.chalf,), device_type='cuda'),
@@ -3677,8 +3662,6 @@ module_db: list[ModuleInfo] = [
     gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
     module_memformat_affects_out=True,
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # Lazy modules don't currently play well with ModuleInfo tests on the meta device.
         # See https://github.com/pytorch/pytorch/issues/70505 for more info.
         DecorateInfo(skipMeta),
@@ -3697,8 +3680,6 @@ module_db: list[ModuleInfo] = [
     gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
     module_memformat_affects_out=True,
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # Lazy modules don't currently play well with ModuleInfo tests on the meta device.
         # See https://github.com/pytorch/pytorch/issues/70505 for more info.
         DecorateInfo(skipMeta),
@@ -3724,8 +3705,6 @@ module_db: list[ModuleInfo] = [
     gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
     module_memformat_affects_out=True,
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # Lazy modules don't currently play well with ModuleInfo tests on the meta device.
         # See https://github.com/pytorch/pytorch/issues/70505 for more info.
         DecorateInfo(skipMeta),
@@ -3743,8 +3722,6 @@ module_db: list[ModuleInfo] = [
     gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
     module_memformat_affects_out=True,
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # Lazy modules don't currently play well with ModuleInfo tests on the meta device.
         # See https://github.com/pytorch/pytorch/issues/70505 for more info.
         DecorateInfo(skipMeta),
@@ -3763,8 +3740,6 @@ module_db: list[ModuleInfo] = [
     gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
     module_memformat_affects_out=True,
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # Lazy modules don't currently play well with ModuleInfo tests on the meta device.
         # See https://github.com/pytorch/pytorch/issues/70505 for more info.
         DecorateInfo(skipMeta),
@@ -3790,8 +3765,6 @@ module_db: list[ModuleInfo] = [
     gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
     module_memformat_affects_out=True,
     skips=(
-        # Failure on ROCM for float32 issue #70125
-        DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]),
         # Lazy modules don't currently play well with ModuleInfo tests on the meta device.
         # See https://github.com/pytorch/pytorch/issues/70505 for more info.
         DecorateInfo(skipMeta),