Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 12:54:11 +08:00
Run only listed tests on s390x (#140265)
Skip tests that are failing. This was previously part of https://github.com/pytorch/pytorch/pull/125401
Pull Request resolved: https://github.com/pytorch/pytorch/pull/140265
Approved by: https://github.com/malfet
Co-authored-by: Nikita Shulga <2453524+malfet@users.noreply.github.com>
Committed by: PyTorch MergeBot
Parent: 701e06b643
Commit: a82bab6419
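The change combines two mechanisms: tests known to fail on s390x are marked with a new xfailIfS390X decorator, and test/run_test.py gains a whitelist (S390X_TESTLIST) so that only listed test files run on that platform by default. Below is a minimal, self-contained sketch of the decorator pattern, assuming only the standard library; it mirrors the definitions this diff adds to torch.testing._internal.common_utils (the import path used throughout the hunks), and ExampleTest is purely illustrative, not part of the PR.

import platform
import unittest

# Same definitions this diff introduces in common_utils, reproduced standalone here.
IS_S390X = platform.machine() == "s390x"

def xfailIfS390X(func):
    # On s390x the test becomes an expected failure; elsewhere it runs normally.
    return unittest.expectedFailure(func) if IS_S390X else func

# Illustrative usage only; the real call sites are the test methods in the hunks below.
class ExampleTest(unittest.TestCase):
    @xfailIfS390X
    def test_known_bad_on_s390x(self):
        self.assertEqual(1 + 1, 2)

if __name__ == "__main__":
    unittest.main()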
@@ -14,7 +14,7 @@ from torch.ao.quantization.quantize_fx import (
prepare_fx,
prepare_qat_fx,
)
-from torch.testing._internal.common_utils import TestCase
+from torch.testing._internal.common_utils import TestCase, xfailIfS390X


logging.basicConfig(

@@ -75,6 +75,7 @@ class TestComposability(TestCase):
# This test checks whether performing quantization prepare before sparse prepare
# causes any issues and verifies that the correct observers are inserted and that
# the quantized model works as expected
+@xfailIfS390X
def test_q_prep_before_s_prep(self):
(
mod,

@@ -104,6 +105,7 @@ class TestComposability(TestCase):
# the post sparse prepare module names (adding parametrizations changes the module class names)
# which would result in those parametrized modules not being quantized. This test verifies that
# the fix for this was successful.
+@xfailIfS390X
def test_s_prep_before_q_prep(self):
(
mod,

@@ -135,6 +137,7 @@ class TestComposability(TestCase):
# that the problem outlined in test_s_prep_before_q_prep would occur. This test verifies
# both that the fix to the convert flow avoids this issue and that the resulting quantized
# module uses the sparse version of the weight value.
+@xfailIfS390X
def test_convert_without_squash_mask(self):
(
mod,

@@ -175,6 +178,7 @@ class TestComposability(TestCase):
# This tests whether performing sparse prepare before fusion causes any issues. The
# worry was that the link created between the sparsifier and the modules that need to
# be sparsified would be broken.
+@xfailIfS390X
def test_s_prep_before_fusion(self):
(
mod,

@@ -204,6 +208,7 @@ class TestComposability(TestCase):

# This tests whether performing fusion before sparse prepare causes and issues. The
# main worry was that the links to the modules in the sparse config would be broken by fusion.
+@xfailIfS390X
def test_fusion_before_s_prep(self):
(
mod,

@@ -258,6 +263,7 @@ class TestComposability(TestCase):
# The primary worries were that qat_prep wouldn't recognize the parametrized
# modules and that the convert step for qat would remove the parametrizations
# from the modules.
+@xfailIfS390X
def test_s_prep_before_qat_prep(self):
(
mod,

@@ -285,6 +291,7 @@ class TestComposability(TestCase):
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])

# This tests whether performing qat prepare before sparse prepare causes issues.
+@xfailIfS390X
def test_qat_prep_before_s_prep(self):
mod, sparsifier, _ = _get_model_and_sparsifier_and_sparse_config(
tq.get_default_qat_qconfig("fbgemm")

@@ -338,6 +345,7 @@ class TestFxComposability(TestCase):
compose cleanly despite variation in sequencing.
"""

+@xfailIfS390X
def test_q_prep_fx_before_s_prep(self):
r"""
This test checks that the ordering of prepare_fx -> sparse prepare -> convert_fx

@@ -403,6 +411,7 @@ class TestFxComposability(TestCase):
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])

+@xfailIfS390X
def test_q_prep_fx_s_prep_ref_conv(self):
r"""
This checks that the ordering: prepare_fx -> sparse prepare -> convert_to_reference_fx

@@ -470,6 +479,7 @@ class TestFxComposability(TestCase):
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])

+@xfailIfS390X
def test_s_prep_before_q_prep_fx(self):
r"""
This test checks that the ordering of sparse prepare -> prepare_fx -> convert_fx

@@ -521,6 +531,7 @@ class TestFxComposability(TestCase):
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])

+@xfailIfS390X
def test_s_prep_before_qat_prep_fx(self):
r"""
This test checks that the ordering of sparse prepare -> prepare_qat_fx -> convert_fx

@@ -575,6 +586,7 @@ class TestFxComposability(TestCase):
)
self.assertGreaterAlmostEqual(cur_sparsity, sparse_config[0]["sparsity_level"])

+@xfailIfS390X
def test_s_prep_q_prep_fx_ref(self):
r"""
This checks that the ordering: sparse prepare -> prepare_fx -> convert_to_reference_fx
@@ -70,6 +70,7 @@ from torch.testing._internal.common_utils import (
skipIfTorchDynamo,
TestCase,
xfail_inherited_tests,
+xfailIfS390X,
xfailIfTorchDynamo,
)
from torch.testing._internal.custom_tensor import ConstantExtraMetadataTensor

@@ -6525,6 +6526,7 @@ class TestEagerFusionOpInfo(AOTTestCase):
def test_aot_autograd_exhaustive(self, device, dtype, op):
_test_aot_autograd_helper(self, device, dtype, op)

+@xfailIfS390X
@ops(op_db + hop_db, allowed_dtypes=(torch.float,))
@patch("functorch.compile.config.debug_assert", True)
@skipOps(

@@ -6571,11 +6573,13 @@ symbolic_aot_autograd_module_failures = {


class TestEagerFusionModuleInfo(AOTTestCase):
+@xfailIfS390X
@modules(module_db, allowed_dtypes=(torch.float,))
@decorateForModules(unittest.expectedFailure, aot_autograd_module_failures)
def test_aot_autograd_module_exhaustive(self, device, dtype, training, module_info):
_test_aot_autograd_module_helper(self, device, dtype, training, module_info)

+@xfailIfS390X
@modules(module_db, allowed_dtypes=(torch.float,))
@decorateForModules(
unittest.expectedFailure,
@@ -56,6 +56,7 @@ from torch.testing._internal.common_utils import (
TEST_WITH_ROCM,
TestCase,
unMarkDynamoStrictTest,
+xfailIfS390X,
)
from torch.testing._internal.opinfo.core import SampleInput
from torch.utils import _pytree as pytree

@@ -1036,6 +1037,12 @@ class TestOperators(TestCase):
xfail("_native_batch_norm_legit"),
# TODO: implement batching rule
xfail("_batch_norm_with_update"),
+decorate("linalg.tensorsolve", decorator=xfailIfS390X),
+decorate("nn.functional.max_pool1d", decorator=xfailIfS390X),
+decorate("nn.functional.max_unpool2d", decorator=xfailIfS390X),
+decorate(
+"nn.functional.multilabel_margin_loss", decorator=xfailIfS390X
+),
}
),
)
@@ -24,7 +24,11 @@ from torch._dynamo.device_interface import get_interface_for_device
from torch._dynamo.utils import counters
from torch._inductor import config as inductor_config
from torch._inductor.test_case import run_tests, TestCase
-from torch.testing._internal.common_utils import scoped_load_inline, skipIfWindows
+from torch.testing._internal.common_utils import (
+scoped_load_inline,
+skipIfWindows,
+xfailIfS390X,
+)
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CPU, HAS_CUDA, HAS_GPU
from torch.testing._internal.logging_utils import logs_to_string

@@ -2753,6 +2757,7 @@ TORCH_LIBRARY(test_cudagraphs_cpu_scalar_used_in_cpp_custom_op, m) {
not in logs.getvalue()
)

+@xfailIfS390X
def test_verbose_logs_graph(self):
def fn():
model = torch.nn.Sequential(

@@ -2965,6 +2970,7 @@ TORCH_LIBRARY(test_cudagraphs_cpu_scalar_used_in_cpp_custom_op, m) {
)

@skipIfWindows(msg="AssertionError: Scalars are not equal!")
+@xfailIfS390X
def test_verbose_logs_cpp(self):
torch._logging.set_logs(compiled_autograd_verbose=True)
@@ -36,6 +36,7 @@ from torch.testing._internal.common_utils import (
skipIfRocm,
slowTest,
TEST_MKL,
+xfailIfS390X,
)
from torch.utils._python_dispatch import TorchDispatchMode

@@ -3036,6 +3037,7 @@ class CPUReproTests(TestCase):
kernel_profile_events.append(e.name)
assert len(kernel_profile_events) > 0

+@xfailIfS390X
@requires_vectorization
def test_channel_shuffle_cl_output(self):
"""code and shape extracted from shufflenet_v2_x1_0"""

@@ -3817,6 +3819,7 @@ class CPUReproTests(TestCase):
self.assertTrue("cvt_lowp_fp_to_fp32" not in code)
self.assertTrue("cvt_fp32_to_lowp_fp" not in code)

+@xfailIfS390X
def test_concat_inner_vec(self):
def fn(x, y):
return F.relu(torch.cat([x, y], dim=1))
@@ -32,7 +32,7 @@ from torch._inductor.codegen.common import (
get_wrapper_codegen_for_device,
register_backend_for_device,
)
-from torch.testing._internal.common_utils import IS_FBCODE, IS_MACOS
+from torch.testing._internal.common_utils import IS_FBCODE, IS_MACOS, xfailIfS390X


try:

@@ -50,6 +50,7 @@ run_and_get_cpp_code = test_torchinductor.run_and_get_cpp_code
TestCase = test_torchinductor.TestCase


+@xfailIfS390X
class BaseExtensionBackendTests(TestCase):
module = None
@@ -91,6 +91,7 @@ from torch.testing._internal.common_utils import (
subtest,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
+xfailIfS390X,
)
from torch.utils import _pytree as pytree
from torch.utils._python_dispatch import TorchDispatchMode

@@ -1901,6 +1902,7 @@ class CommonTemplate:

@skip_if_gpu_halide
@skipCPUIf(IS_MACOS, "fails on macos")
+@xfailIfS390X
def test_multilayer_var(self):
def fn(a):
return torch.var(a)

@@ -1920,6 +1922,7 @@ class CommonTemplate:

@skipCPUIf(IS_MACOS, "fails on macos")
@skip_if_halide # accuracy 4.7% off
+@xfailIfS390X
def test_multilayer_var_lowp(self):
def fn(a):
return torch.var(a)

@@ -9124,6 +9127,7 @@ class CommonTemplate:
"TODO: debug this with asan",
)
@skip_if_gpu_halide
+@xfailIfS390X
def test_tmp_not_defined_issue2(self):
def forward(arg38_1, arg81_1, getitem_17, new_zeros_default_4):
div_tensor_7 = torch.ops.aten.div.Tensor(getitem_17, arg81_1)

@@ -10312,6 +10316,7 @@ class CommonTemplate:
# Calling div only torch.SymInt arguments is not yet supported.
# To support this behavior, we need to allow const-propping tensors that store symint data.
# For now, dynamo will explicitly graph break when it encounters user code with this behavior.
+@xfailIfS390X
@expectedFailureCodegenDynamic
@skip_if_gpu_halide # accuracy error
def test_AllenaiLongformerBase_repro(self):
test/run_test.py (342 changed lines)
@@ -5,6 +5,7 @@ import copy
import glob
import json
import os
+import platform
import re
import shutil
import signal

@@ -84,6 +85,7 @@ RERUN_DISABLED_TESTS = os.getenv("PYTORCH_TEST_RERUN_DISABLED_TESTS", "0") == "1"
DISTRIBUTED_TEST_PREFIX = "distributed"
INDUCTOR_TEST_PREFIX = "inductor"
IS_SLOW = "slow" in TEST_CONFIG or "slow" in BUILD_ENVIRONMENT
+IS_S390X = platform.machine() == "s390x"


# Note [ROCm parallel CI testing]

@@ -183,6 +185,335 @@ ROCM_BLOCKLIST = [
"distributed/_tensor/test_attention",
]

+# whitelist of tests for s390x
+S390X_TESTLIST = [
+"backends/xeon/test_launch.py",
+"benchmark_utils/test_benchmark_utils.py",
+"cpp/apply_utils_test",
+"cpp/atest",
+"cpp/basic",
+"cpp/broadcast_test",
+"cpp/cpu_generator_test",
+"cpp/Dict_test",
+"cpp/Dimname_test",
+"cpp/dlconvertor_test",
+"cpp/extension_backend_test",
+"cpp/lazy_tensor_test",
+"cpp/legacy_vmap_test",
+"cpp/NamedTensor_test",
+"cpp/native_test",
+"cpp/operators_test",
+"cpp/scalar_tensor_test",
+"cpp/scalar_test",
+"cpp/tensor_iterator_test",
+"cpp/test_api",
+"cpp/undefined_tensor_test",
+"cpp/wrapdim_test",
+"distributions/test_constraints",
+"doctests",
+"dynamo/test_activation_checkpointing",
+"dynamo/test_after_aot",
+"dynamo/test_aot_autograd",
+"dynamo/test_aot_autograd_cache",
+"dynamo/test_autograd_function",
+"dynamo/test_backends",
+"dynamo/test_backward_higher_order_ops",
+"dynamo/test_base_output",
+"dynamo/test_bytecode_utils",
+"dynamo/test_compile",
+"dynamo/test_comptime",
+"dynamo/test_config",
+"dynamo/test_ctx_manager",
+"dynamo/test_cudagraphs",
+"dynamo/test_cudagraphs_expandable_segments",
+"dynamo/test_debug_utils",
+"dynamo/test_decorators",
+"dynamo/test_deviceguard",
+"dynamo/test_export",
+"dynamo/test_export_mutations",
+"dynamo/test_frame_init",
+"dynamo/test_fx_passes_pre_grad",
+"dynamo/test_global",
+"dynamo/test_guard_manager",
+"dynamo/test_higher_order_ops",
+"dynamo/test_hooks",
+"dynamo/test_input_attr_tracking",
+"dynamo/test_interop",
+"dynamo/test_logging",
+"dynamo/test_minifier",
+"dynamo/test_model_output",
+"dynamo/test_modes",
+"dynamo/test_modules",
+"dynamo/test_nops",
+"dynamo/test_optimizers",
+"dynamo/test_pre_dispatch",
+"dynamo/test_profiler",
+"dynamo/test_python_autograd",
+"dynamo/test_recompiles",
+"dynamo/test_recompile_ux",
+"dynamo/test_reconstruct",
+"dynamo/test_reorder_logs",
+"dynamo/test_repros",
+"dynamo/test_resume",
+"dynamo/test_sdpa",
+"dynamo/test_skip_non_tensor",
+"dynamo/test_sources",
+"dynamo/test_structured_trace",
+"dynamo/test_subclasses",
+"dynamo/test_subgraphs",
+"dynamo/test_torchrec",
+"dynamo/test_unspec",
+"dynamo/test_utils",
+"dynamo/test_verify_correctness",
+"dynamo/test_view",
+"export/test_db",
+"export/test_experimental",
+"export/test_export",
+"export/test_export_nonstrict",
+"export/test_export_training_ir_to_run_decomp",
+"export/test_functionalized_assertions",
+"export/test_hop",
+"export/test_lift_unlift",
+"export/test_passes",
+"export/test_pass_infra",
+"export/test_retraceability",
+"export/test_schema",
+"export/test_serdes",
+"export/test_serialize",
+"export/test_sparse",
+"export/test_swap",
+"export/test_tools",
+"export/test_torchbind",
+"export/test_tree_utils",
+"export/test_unflatten",
+"export/test_unflatten_training_ir",
+"export/test_verifier",
+"functorch/test_ac",
+"functorch/test_control_flow",
+"functorch/test_eager_transforms",
+"functorch/test_logging",
+"functorch/test_minifier",
+"higher_order_ops/test_with_effects.py",
+"inductor/test_auto_functionalize",
+"inductor/test_autoheuristic",
+"inductor/test_b2b_gemm",
+"inductor/test_benchmarking",
+"inductor/test_ck_backend",
+"inductor/test_codecache",
+"inductor/test_codegen_triton",
+"inductor/test_combo_kernels",
+"inductor/test_compiled_autograd",
+"inductor/test_compiled_optimizers",
+"inductor/test_compile_worker",
+"inductor/test_config",
+"inductor/test_control_flow",
+"inductor/test_coordinate_descent_tuner",
+"inductor/test_cpp_wrapper_hipify",
+"inductor/test_cpu_cpp_wrapper",
+"inductor/test_cuda_cpp_wrapper",
+"inductor/test_cudagraph_trees",
+"inductor/test_cudagraph_trees_expandable_segments",
+"inductor/test_cuda_repro",
+"inductor/test_custom_lowering",
+"inductor/test_cutlass_backend",
+"inductor/test_debug_trace",
+"inductor/test_decompose_mem_bound_mm",
+"inductor/test_dependencies",
+"inductor/test_distributed_patterns",
+"inductor/test_efficient_conv_bn_eval",
+"inductor/test_extension_backend",
+"inductor/test_external_callables",
+"inductor/test_flex_attention",
+"inductor/test_flex_decoding",
+"inductor/test_foreach",
+"inductor/test_fp8",
+"inductor/test_fx_fusion",
+"inductor/test_graph_transform_observer",
+"inductor/test_group_batch_fusion",
+"inductor/test_halide",
+"inductor/test_indexing",
+"inductor/test_inductor_freezing",
+"inductor/test_loop_ordering",
+"inductor/test_memory",
+"inductor/test_memory_planning",
+"inductor/test_metrics",
+"inductor/test_minifier",
+"inductor/test_minifier_isolate",
+"inductor/test_mmdecomp",
+"inductor/test_padding",
+"inductor/test_pad_mm",
+"inductor/test_profiler",
+"inductor/test_scatter_optimization",
+"inductor/test_smoke",
+"inductor/test_standalone_compile",
+"inductor/test_torchbind",
+"inductor/test_triton_cpu_backend",
+"inductor/test_triton_extension_backend",
+"inductor/test_triton_heuristics",
+"inductor/test_triton_kernels",
+"inductor/test_utils",
+"inductor/test_xpu_basic",
+"lazy/test_bindings",
+"lazy/test_debug_util",
+"lazy/test_extract_compiled_graph",
+"lazy/test_functionalization",
+"lazy/test_generator",
+"lazy/test_reuse_ir",
+"lazy/test_step_closures",
+"lazy/test_ts_opinfo",
+"nn/test_convolution.py",
+"nn/test_dropout.py",
+"nn/test_embedding.py",
+"nn/test_init.py",
+"nn/test_lazy_modules.py",
+"nn/test_load_state_dict.py",
+"nn/test_module_hooks.py",
+"nn/test_multihead_attention.py",
+"nn/test_packed_sequence.py",
+"nn/test_parametrization.py",
+"nn/test_pooling.py",
+"nn/test_pruning.py",
+"optim/test_lrscheduler",
+"optim/test_swa_utils",
+"profiler/test_cpp_thread",
+"profiler/test_execution_trace",
+"profiler/test_memory_profiler",
+"profiler/test_record_function",
+"profiler/test_torch_tidy",
+"test_autocast",
+"test_autograd",
+"test_autograd_fallback",
+"test_autoload",
+"test_autoload_disable",
+"test_autoload_enable",
+"test_bundled_inputs",
+"test_comparison_utils",
+"test_compile_benchmark_util",
+"test_complex",
+"test_content_store",
+"test_cpp_api_parity",
+"test_cpp_extensions_aot_ninja",
+"test_cpp_extensions_aot_no_ninja",
+"test_cpp_extensions_jit",
+"test_cpp_extensions_mtia_backend",
+"test_cpp_extensions_stream_and_event",
+"test_cuda",
+"test_cuda_expandable_segments",
+"test_cuda_multigpu",
+"test_cuda_nvml_based_avail",
+"test_cuda_primary_ctx",
+"test_cuda_sanitizer",
+"test_cuda_trace",
+"test_custom_ops",
+"test_datapipe",
+"test_deploy",
+"test_dispatch",
+"test_dlpack",
+"test_dynamic_shapes",
+"test_expanded_weights",
+"test_fake_tensor",
+"test_file_check",
+"test_flop_counter",
+"test_functionalization",
+"test_functionalization_of_rng_ops",
+"test_functional_optim",
+"test_function_schema",
+"test_futures",
+"test_hub",
+"test_import_stats",
+"test_indexing",
+"test_itt",
+"test_legacy_vmap",
+"test_logging",
+"test_masked",
+"test_maskedtensor",
+"test_matmul_cuda",
+"test_mkldnn",
+"test_mkldnn_fusion",
+"test_mkldnn_verbose",
+"test_mkl_verbose",
+"test_mobile_optimizer",
+"test_model_dump",
+"test_model_exports_to_core_aten",
+"test_module_tracker",
+"test_monitor",
+"test_namedtuple_return_api",
+"test_native_mha",
+"test_nestedtensor",
+"test_numba_integration",
+"test_numpy_interop",
+"test_openmp",
+"test_out_dtype_op",
+"test_overrides",
+"test_package",
+"test_per_overload_api",
+"test_prims",
+"test_pruning_op",
+"test_python_dispatch",
+"test_scatter_gather_ops",
+"test_segment_reductions",
+"test_serialization",
+"test_set_default_mobile_cpu_allocator",
+"test_shape_ops",
+"test_show_pickle",
+"test_sort_and_select",
+"test_spectral_ops",
+"test_stateless",
+"test_subclass",
+"test_tensorboard",
+"test_tensor_creation_ops",
+"test_tensorexpr",
+"test_tensorexpr_pybind",
+"test_torch",
+"test_transformers",
+"test_type_hints",
+"test_type_info",
+"test_type_promotion",
+"test_typing",
+"test_utils",
+"test_utils_internal",
+"test_view_ops",
+"test_vulkan",
+"test_weak",
+"test_xnnpack_integration",
+"torch_np/numpy_tests/core/test_dlpack",
+"torch_np/numpy_tests/core/test_dtype",
+"torch_np/numpy_tests/core/test_einsum",
+"torch_np/numpy_tests/core/test_getlimits",
+"torch_np/numpy_tests/core/test_indexing",
+"torch_np/numpy_tests/core/test_numeric",
+"torch_np/numpy_tests/core/test_numerictypes",
+"torch_np/numpy_tests/core/test_scalar_ctors",
+"torch_np/numpy_tests/core/test_scalarinherit",
+"torch_np/numpy_tests/core/test_scalarmath",
+"torch_np/numpy_tests/core/test_scalar_methods",
+"torch_np/numpy_tests/core/test_shape_base",
+"torch_np/numpy_tests/fft/test_helper",
+"torch_np/numpy_tests/fft/test_pocketfft",
+"torch_np/numpy_tests/lib/test_arraypad",
+"torch_np/numpy_tests/lib/test_arraysetops",
+"torch_np/numpy_tests/lib/test_function_base",
+"torch_np/numpy_tests/lib/test_histograms",
+"torch_np/numpy_tests/lib/test_index_tricks",
+"torch_np/numpy_tests/lib/test_shape_base_",
+"torch_np/numpy_tests/lib/test_twodim_base",
+"torch_np/numpy_tests/lib/test_type_check",
+"torch_np/numpy_tests/linalg/test_linalg",
+"torch_np/test_basic",
+"torch_np/test_binary_ufuncs",
+"torch_np/test_dtype",
+"torch_np/test_function_base",
+"torch_np/test_ndarray_methods",
+"torch_np/test_nep50_examples",
+"torch_np/test_random",
+"torch_np/test_reductions",
+"torch_np/test_scalars_0D_arrays",
+"torch_np/test_ufuncs_basic",
+"torch_np/test_unary_ufuncs",
+"xpu/test_conv.py",
+"xpu/test_gemm.py",
+]

XPU_BLOCKLIST = [
"test_autograd",
"profiler/test_cpp_thread",

@@ -1394,6 +1725,10 @@ def can_run_in_pytest(test):
def get_selected_tests(options) -> List[str]:
selected_tests = options.include

+# for s390x, override defaults
+if IS_S390X and selected_tests == TESTS:
+selected_tests = S390X_TESTLIST
+
# filter if there's JIT only and distributed only test options
if options.jit:
selected_tests = list(

@@ -1496,6 +1831,13 @@ def get_selected_tests(options) -> List[str]:
elif TEST_WITH_ROCM:
selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, "on ROCm")

+elif IS_S390X:
+selected_tests = exclude_tests(
+DISTRIBUTED_TESTS,
+selected_tests,
+"Skip distributed tests on s390x",
+)
+
# skip all distributed tests if distributed package is not available.
if not dist.is_available():
selected_tests = exclude_tests(
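A condensed sketch of how the new selection logic in test/run_test.py behaves on s390x. It assumes the existing TESTS and DISTRIBUTED_TESTS lists and the exclude_tests helper (simplified here to a list comprehension); the stand-in list values are illustrative, not the real contents.

import platform

IS_S390X = platform.machine() == "s390x"

# Illustrative stand-ins for the real module-level lists in test/run_test.py.
TESTS = ["test_autograd", "test_torch", "distributed/test_store"]
S390X_TESTLIST = ["test_autograd", "test_torch"]
DISTRIBUTED_TESTS = ["distributed/test_store"]

def get_selected_tests(include):
    selected_tests = include
    # When no explicit selection was made, fall back to the s390x whitelist.
    if IS_S390X and selected_tests == TESTS:
        selected_tests = S390X_TESTLIST
    # Distributed tests are additionally excluded on s390x.
    if IS_S390X:
        selected_tests = [t for t in selected_tests if t not in DISTRIBUTED_TESTS]
    return selected_tests

print(get_selected_tests(TESTS))  # on s390x: ['test_autograd', 'test_torch']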
@@ -77,6 +77,7 @@ from torch.testing._internal.common_utils import (
skipIfWindows,
slowTest,
TestCase,
+xfailIfS390X,
xfailIfTorchDynamo,
)
from torch.utils._mode_utils import no_dispatch

@@ -3178,6 +3179,7 @@ class TestAutograd(TestCase):
with self.assertRaises(RuntimeError):
b.add_(5)

+@xfailIfS390X
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad

@@ -6770,6 +6772,7 @@ for shape in [(1,), ()]:
IS_MACOS,
"Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941",
)
+@xfailIfS390X
def test_deep_reentrant(self):
class DeepReentrant(Function):
@staticmethod

@@ -7172,6 +7175,7 @@ for shape in [(1,), ()]:
out = checkpoint(fn, a, use_reentrant=False, debug=True)
out.backward()

+@xfailIfS390X
def test_access_saved_tensor_twice_without_recomputation_works(self):
count = [0]

@@ -8294,6 +8298,7 @@ for shape in [(1,), ()]:
c = Func.apply(a)
self.assertEqual(repr(c), "tensor([2.], grad_fn=<FuncBackward>)")

+@xfailIfS390X
def test_autograd_inplace_view_of_view(self):
x = torch.zeros(2)
with torch.no_grad():
@@ -14,6 +14,7 @@ from torch.testing._internal.common_utils import (
TestCase,
TestGradients,
unMarkDynamoStrictTest,
+xfailIfS390X,
)
from torch.testing._internal.custom_op_db import custom_op_db
from torch.testing._internal.hop_db import hop_db

@@ -28,6 +29,7 @@ _gradcheck_ops = partial(
@unMarkDynamoStrictTest
class TestBwdGradients(TestGradients):
# Tests that gradients are computed correctly
+@xfailIfS390X
@_gradcheck_ops(op_db + hop_db + custom_op_db)
def test_fn_grad(self, device, dtype, op):
# This is verified by test_dtypes in test_ops.py
@@ -44,6 +44,7 @@ from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
+xfailIfS390X,
)


@@ -580,6 +581,7 @@ class TestOptimRenewed(TestCase):
self.assertEqual(complex_steps, real_steps)

@skipMPS
+@xfailIfS390X
@optims([o for o in optim_db if o.supports_complex], dtypes=[torch.complex64])
def test_complex_2d(self, device, dtype, optim_info):
optim_cls = optim_info.optim_cls
@@ -45,6 +45,7 @@ from torch.testing._internal.common_utils import (
run_tests,
TEST_WITH_CROSSREF,
TestCase,
+xfailIfS390X,
skipIfTorchDynamo,
)

@@ -410,6 +411,7 @@ class TestTensorBoardSummary(BaseTestCase):
summary.video('dummy', np.random.rand(20, 7, 1, 8, 8))

@unittest.skipIf(IS_MACOS, "Skipping on mac, see https://github.com/pytorch/pytorch/pull/109349 ")
+@xfailIfS390X
def test_audio(self):
self.assertTrue(compare_proto(summary.audio('dummy', tensor_N(shape=(42,))), self))
@@ -1358,6 +1358,7 @@ IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
IS_X86 = platform.machine() in ('x86_64', 'i386')
IS_ARM64 = platform.machine() in ('arm64', 'aarch64')
+IS_S390X = platform.machine() == "s390x"

def is_avx512_vnni_supported():
if sys.platform != 'linux':

@@ -1861,6 +1862,9 @@ def runOnRocmArch(arch: Tuple[str, ...]):
return wrap_fn
return dec_fn

+def xfailIfS390X(func):
+return unittest.expectedFailure(func) if IS_S390X else func
+
def skipIfXpu(func=None, *, msg="test doesn't currently work on the XPU stack"):
def dec_fn(fn):
reason = f"skipIfXpu: {msg}"