Add einops x torch.compile testing in PyTorch CI (#157416)

Fixes #146782. This PR adds testing for multiple einops versions in
PyTorch CI. The testing runs in a new "einops" CI job for both
Python 3.9 and 3.13 (i.e., the Python versions we test Dynamo on).
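
For context, the property the new job guards is that einops' public APIs can be traced by Dynamo without graph breaks. A minimal sketch of that usage (illustrative only, not part of this PR; assumes einops is installed):

# Illustrative only: compile a function that calls einops APIs with
# fullgraph=True, so any Dynamo graph break would raise an error.
import torch
from einops import rearrange, reduce

def f(x):
    y = rearrange(x, "b (h w) c -> b h w c", h=4)
    return reduce(y, "b h w c -> b c", "mean")

compiled = torch.compile(f, fullgraph=True)
x = torch.randn(2, 16, 3)
torch.testing.assert_close(compiled(x), f(x))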

Test Plan:
- wait for CI

Pull Request resolved: https://github.com/pytorch/pytorch/pull/157416
Approved by: https://github.com/guilhermeleobas, https://github.com/arogozhnikov, https://github.com/anijain2305
Author: rzou
Date: 2025-07-03 05:39:51 -07:00
Committed by: PyTorch MergeBot
Parent: 660dbea909
Commit: e3fe001d9e
4 changed files with 191 additions and 0 deletions


@@ -368,6 +368,16 @@ test_dynamo_wrapped_shard() {
  assert_git_not_dirty
}

test_einops() {
  pip install einops==0.6.1
  time python test/run_test.py --einops --verbose --upload-artifacts-while-running
  pip install einops==0.7.0
  time python test/run_test.py --einops --verbose --upload-artifacts-while-running
  pip install einops==0.8.1
  time python test/run_test.py --einops --verbose --upload-artifacts-while-running
  assert_git_not_dirty
}

test_inductor_distributed() {
  # Smuggle a few multi-gpu tests here so that we don't have to request another large node
@@ -1690,6 +1700,8 @@ elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
      test_inductor_distributed
    fi
  fi
elif [[ "${TEST_CONFIG}" == *einops* ]]; then
  test_einops
elif [[ "${TEST_CONFIG}" == *dynamo_wrapped* ]]; then
  install_torchvision
  test_dynamo_wrapped_shard "${SHARD_NUMBER}"
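
The CI loop above can be reproduced locally. A rough sketch (assumptions: a built PyTorch checkout as the working directory, and that temporarily overwriting the locally installed einops version is acceptable):

# Rough local equivalent of test_einops() above; the pinned versions come
# from the CI script, everything else here is an assumption.
import subprocess
import sys

for version in ("0.6.1", "0.7.0", "0.8.1"):
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", f"einops=={version}"]
    )
    subprocess.check_call(
        [sys.executable, "test/run_test.py", "--einops", "--verbose"]
    )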


@@ -201,6 +201,7 @@ jobs:
{ config: "dynamo_wrapped", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
{ config: "dynamo_wrapped", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
{ config: "dynamo_wrapped", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
{ config: "einops", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }
]}
secrets: inherit
@@ -236,6 +237,7 @@ jobs:
{ config: "dynamo_wrapped", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
{ config: "dynamo_wrapped", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
{ config: "dynamo_wrapped", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
{ config: "einops", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" }
]}
secrets: inherit

test/dynamo/test_einops.py (new file, 158 lines)

@@ -0,0 +1,158 @@
# Owner(s): ["module: dynamo"]
import importlib
import subprocess
import sys
import unittest

import torch
import torch._dynamo.config
import torch._dynamo.test_case
from torch import nn
from torch._dynamo.test_case import TestCase
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
)


HAS_EINOPS = importlib.util.find_spec("einops")
if HAS_EINOPS:
    import einops

    einops_version = einops.__version__
else:
    einops_version = "none"
einops_version_sanitized = einops_version.replace(".", "_")


@unittest.skipIf(not HAS_EINOPS, "these tests require einops")
class TestEinops(TestCase):
    """
    These tests adapted from similar tests in the einops repo.
    https://github.com/arogozhnikov/einops/blob/main/einops/tests/test_other.py#L254

    The goal of this test suite is to test torch.compile x einops for multiple
    versions of einops. Our goal is to prevent regressions in einops from changes
    in PyTorch.
    """

    @unittest.skipIf(
        einops_version == "0.6.1", "https://github.com/pytorch/pytorch/issues/157417"
    )
    @parametrize("version", [einops_version_sanitized])
    def test_functions(self, version):
        from einops import einsum, pack, rearrange, reduce, repeat, unpack

        class TorchModuleWithOperations(nn.Module):
            def __init__(self) -> None:
                super().__init__()

            def forward(self, x_abc, suffix=""):
                a, b, c = x_abc.shape

                def suf(pattern):
                    parts = pattern.split()
                    return " ".join(
                        [p if p[-1] not in "acd" else p + suffix for p in parts]
                    )

                # patterns look a bit strange because names a, c, d will be modified on every run
                # by suf function
                x_abcd = repeat(x_abc, suf("a b c -> a b c 4"))
                x_abc = reduce(x_abcd, suf("a b c d -> a b c"), "min")
                x_abdc, ps = pack([x_abc] * (2 + len(suffix)), suf("a b * c"))
                x_array = unpack(
                    rearrange(x_abdc, suf("a b d c -> (a b ) 1 c d")), ps, "ab one1 c *"
                )
                x1 = x_array[0] + len(x_array)
                x1 = rearrange(x1, suf("(a b ) 1 c -> a b c"), b=b)
                addition = einsum(x_abc, x_abcd, suf("a b c , a b c d -> d"))[0]
                return x1 + addition

        original = TorchModuleWithOperations()
        # Einops only interacts with Dynamo but we test backend="inductor" just in case
        compiled = torch.compile(original, backend="inductor", fullgraph=True)

        for size in [10, 20, 40]:
            x = torch.rand([size, size + 1, size + 2])
            for suffix in ["", "suf1", "other_suffix"]:
                result1 = compiled(x, suffix)
                result2 = original(x.double(), suffix).float()
                self.assertEqual(result1, result2)

    @parametrize("version", [einops_version_sanitized])
    def test_layers(self, version):
        from einops.layers.torch import EinMix, Rearrange, Reduce

        original = nn.Sequential(
            Rearrange("b (t c) -> b t c", c=16),
            EinMix(
                "b t c -> qkv b t cout",
                weight_shape="qkv c cout",
                bias_shape="qkv cout",
                qkv=3,
                c=16,
                cout=8,
            ),
            Reduce("qkv b t cout -> b t qkv", "min", cout=8),
        )
        # Einops only interacts with Dynamo but we test backend="inductor" just in case
        compiled = torch.compile(original, backend="inductor", fullgraph=True)

        for size in [16, 32, 64]:
            x = torch.rand([size, size])
            result1 = original(x)
            result2 = compiled(x.double()).float()
            self.assertEqual(result1, result2)

    @parametrize("version", [einops_version_sanitized])
    def test_no_recompile_on_lazy_state(self, version):
        """einops has some lazy state that gets initialized the first time an API
        is called. This should not trigger a recompile."""
        script = """\
import torch
import torch.nn as nn
from einops import einsum, pack, reduce, repeat, unpack, rearrange

class TorchModuleWithOperations(nn.Module):
    def __init__(self) -> None:
        super().__init__()

    def forward(self, x_abc, suffix=""):
        a, b, c = x_abc.shape

        def suf(pattern):
            parts = pattern.split()
            return " ".join([p if p[-1] not in "acd" else p + suffix for p in parts])

        # patterns look a bit strange because names a, c, d will be modified on every run
        # by suf function
        x_abcd = repeat(x_abc, suf("a b c -> a b c 4"))
        x_abc = reduce(x_abcd, suf("a b c d -> a b c"), "min")
        x_abdc, ps = pack([x_abc] * (2 + len(suffix)), suf("a b * c"))
        x_array = unpack(rearrange(x_abdc, suf("a b d c -> (a b ) 1 c d")), ps, "ab one1 c *")
        x1 = x_array[0] + len(x_array)
        x1 = rearrange(x1, suf("(a b ) 1 c -> a b c"), b=b)
        addition = einsum(x_abc, x_abcd, suf("a b c , a b c d -> d"))[0]
        return x1 + addition

compiled_fn = torch.compile(TorchModuleWithOperations(), fullgraph=True)
x = torch.arange(2 * 3 * 5).view(2, 3, 5)
y = compiled_fn(x)

# Should not recompile!
with torch.compiler.set_stance("fail_on_recompile"):
    z = compiled_fn(x)
"""
        subprocess.check_output([sys.executable, "-c", script])


instantiate_parametrized_tests(
    TestEinops,
)

if __name__ == "__main__":
    from torch._dynamo.test_case import run_tests

    run_tests()
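
Note on the version-parametrized names: sanitizing the einops version and passing it through parametrize makes each generated test name carry the einops release it ran against, so the uploaded CI artifacts stay distinguishable across versions. A small sketch of that effect (illustrative only, not part of the diff above; the exact generated names depend on PyTorch's parametrization utilities):

# Illustrative only: a stand-in class showing how a sanitized version string
# ends up in the instantiated test names (roughly "test_functions_version_0_8_1").
from torch._dynamo.test_case import TestCase
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
)

class DemoEinopsNaming(TestCase):
    @parametrize("version", ["0_8_1"])
    def test_functions(self, version):
        pass

instantiate_parametrized_tests(DemoEinopsNaming)
print(sorted(n for n in dir(DemoEinopsNaming) if n.startswith("test_functions")))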


@@ -1295,6 +1295,16 @@ def parse_args():
            "(including functorch tests)."
        ),
    )
    parser.add_argument(
        "--einops",
        "--einops",
        action="store_true",
        help=(
            "If this flag is present, we will only run einops tests. "
            "If this flag is not present, we will run all tests "
            "(including einops tests)."
        ),
    )
    parser.add_argument(
        "--mps",
        "--mps",
@@ -1546,6 +1556,15 @@ def get_selected_tests(options) -> list[str]:
            filter(lambda test_name: test_name in FUNCTORCH_TESTS, selected_tests)
        )

    # Filter to only run einops tests when --einops option is specified
    if options.einops:
        selected_tests = list(
            filter(
                lambda test_name: test_name.startswith("test/dynamo/test_einops"),
                selected_tests,
            )
        )

    if options.cpp:
        selected_tests = list(
            filter(lambda test_name: test_name in CPP_TESTS, selected_tests)
        )