Revert "[inductor] turn on windows inductor UTs (#160161)"

This reverts commit f0980fc0bbd656d6c02d23ad97e945353b314f35.

Reverted https://github.com/pytorch/pytorch/pull/160161 on behalf of https://github.com/clee2000 because it broke some inductor tests on Windows, e.g. inductor\test_codecache.py::TestStandaloneCompile::test_different_process [GH job link](https://github.com/pytorch/pytorch/actions/runs/16853706010/job/47748778757) [HUD commit link](f0980fc0bb). Note to self: bad TD ([comment](https://github.com/pytorch/pytorch/pull/160161#issuecomment-3172784292))
PyTorch MergeBot
2025-08-10 17:33:19 +00:00
parent 0e3e377bd5
commit 7ae0629d64
5 changed files with 10 additions and 17 deletions

View File

@@ -123,10 +123,9 @@ jobs:
runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
test-matrix: |
{ include: [
{ config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
{ config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
{ config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
{ config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
{ config: "default", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
{ config: "default", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
{ config: "default", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
]}
secrets: inherit
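For context on the matrix above: each entry hands one slice of the test set to its own Windows runner, so dropping back from four shards to three redistributes the same tests across fewer machines. A minimal sketch of round-robin sharding, assuming a plain (shard, num_shards) split; the real partitioning in PyTorch's test runner is more elaborate:

    # Illustrative only -- not the actual run_test.py sharding logic.
    def select_shard(tests: list[str], shard: int, num_shards: int) -> list[str]:
        # shard is 1-based in the workflow matrix above, hence the "- 1"
        return [t for i, t in enumerate(sorted(tests)) if i % num_shards == shard - 1]

    # With num_shards back at 3, shard 1 picks every third test.
    print(select_shard(["test_a", "test_b", "test_c", "test_d"], shard=1, num_shards=3))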

View File

@@ -10,7 +10,6 @@ import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.exc import IncorrectUsage, Unsupported
from torch._dynamo.utils import counters
- from torch.testing._internal.common_utils import skipIfWindows
def my_custom_function(x):
@@ -893,9 +892,6 @@ class DecoratorTests(torch._dynamo.test_case.TestCase):
self.assertEqual(gn(inp), inp + 3)
self.assertEqual(cnts.frame_count, 1)
- @skipIfWindows(
-     msg="TODO: (xuhancn), confirm if torch.compiler.disable work on Windows."
- )
def test_disable_recursive_false(self):
def fn2(x):
return x + 1
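For reference, the decorator stripped in this hunk is a small factory from torch.testing._internal.common_utils that skips a test on Windows and records the given reason. A minimal sketch of its usage, based only on the import and call shown above (the test class and body are illustrative):

    import unittest

    from torch.testing._internal.common_utils import skipIfWindows

    class ExampleTests(unittest.TestCase):
        @skipIfWindows(msg="known failure on Windows, tracked separately")
        def test_example(self):
            self.assertEqual(1 + 1, 2)

    if __name__ == "__main__":
        unittest.main()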

View File

@@ -21,10 +21,8 @@ from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_cuda import SM90OrLater
from torch.testing._internal.common_utils import (
find_free_port,
- IS_WINDOWS,
munge_exc,
skipIfTorchDynamo,
- skipIfWindows,
TEST_XPU,
xfailIf,
)
@@ -530,7 +528,7 @@ LoweringException: AssertionError:
"import torch",
env=env,
)
- lines = stderr.decode().split("\r\n" if IS_WINDOWS else "\n")
+ lines = stderr.decode().split("\n")
# This is a sanity assert that our error is not spammy.
# As of this test creation this was 18.
# See this issue for the purpose o this test:
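Side note on the split() change above: the removed line made the delimiter platform-aware because child-process stderr on Windows can arrive with CRLF line endings, and a plain split("\n") leaves a trailing "\r" on every element there. A small sketch of the ending-agnostic alternative (not what either version of the file uses; shown only to make the difference concrete):

    # str.splitlines() recognizes "\n", "\r\n" and other terminators, so no
    # IS_WINDOWS check is needed to get clean lines out of raw CRLF output.
    stderr = b"first line\r\nsecond line\r\n"
    lines = stderr.decode().splitlines()
    assert lines == ["first line", "second line"]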
@@ -546,7 +544,6 @@ LoweringException: AssertionError:
self.assertEqual(lines[-4], "Valid settings:")
@requires_distributed()
- @skipIfWindows(msg="TODO: (xuhancn), Can't reproduce locally")
def test_distributed_rank_logging(self):
env = dict(os.environ)
env["TORCH_LOGS"] = "dynamo"

View File

@@ -26,7 +26,6 @@ from torch.testing._internal.common_quantized import (
)
from torch.testing._internal.common_utils import (
IS_MACOS,
- IS_WINDOWS,
parametrize,
skipIfWindows,
TEST_MKL,
@@ -3095,5 +3094,5 @@ instantiate_device_type_tests(
if __name__ == "__main__":
from torch.testing._internal.inductor_utils import HAS_CPU
- if HAS_CPU and not (IS_MACOS or IS_WINDOWS):
+ if HAS_CPU and not IS_MACOS:
run_tests()
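The platform flags compared in the guard above are plain constants; roughly equivalent definitions are sketched below (the real ones live in torch.testing._internal.common_utils, which is why the revert can also drop the IS_WINDOWS import once this check no longer mentions Windows):

    import sys

    # Rough equivalents of the common_utils constants used in the guard.
    IS_WINDOWS = sys.platform == "win32"
    IS_MACOS = sys.platform == "darwin"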

View File

@@ -41,9 +41,11 @@ def run_tests(needs: Union[str, tuple[str, ...]] = ()) -> None:
if TEST_WITH_TORCHDYNAMO or TEST_WITH_CROSSREF:
return # skip testing
- # Enable Inductor UTs on Windows for CPU.
- # CUDA on Windows is not verified, NVDA developer can continue to enable CUDA based on CPU path.
- if torch.cuda.is_available() and IS_WINDOWS:
+ if (
+     not torch.xpu.is_available()
+     and IS_WINDOWS
+     and os.environ.get("TORCHINDUCTOR_WINDOWS_TESTS", "0") == "0"
+ ):
return
if isinstance(needs, str):
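In practical terms, the condition restored above makes Windows inductor UTs opt-in again: on a Windows CPU machine run_tests returns early unless the environment variable named in the hunk is set. A condensed sketch of the restored gate, with the xpu branch elided (the variable name and its "0" default come from the hunk; everything else is illustrative):

    import os
    import sys

    IS_WINDOWS = sys.platform == "win32"  # mirrors the common_utils constant

    def skip_inductor_uts_on_windows() -> bool:
        # Default "0" means skip; exporting TORCHINDUCTOR_WINDOWS_TESTS=1
        # before running a test file opts back in on Windows.
        return IS_WINDOWS and os.environ.get("TORCHINDUCTOR_WINDOWS_TESTS", "0") == "0"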