[inductor] Increase tolerance for test_comprehensive_nn_functional_linear_cuda_float16 (#156962)

Fixes #156514

Pull Request resolved: https://github.com/pytorch/pytorch/pull/156962
Approved by: https://github.com/jamesjwu
Author: Jason Ansel
Date: 2025-06-26 09:00:34 -07:00
Committed by: PyTorch MergeBot
Parent: c27f83dd91
Commit: 12cb06e574


@@ -429,6 +429,7 @@ inductor_override_kwargs["cuda"] = {
     ("nn.functional.batch_norm.without_cudnn", f16): {"reference_in_float": True},
     ("nn.functional.cosine_similarity", f16): {"reference_in_float": True},
     ("nn.functional.instance_norm", f16): {"reference_in_float": True},
+    ("nn.functional.linear", f16): {"atol": 3e-4, "rtol": 0.01},
     ("nn.functional.local_response_norm", f16): {"reference_in_float": True},
     ("nn.functional.normalize", f16): {"atol": 1e-3, "rtol": 0.05},
     ("nn.functional.rms_norm", f16): {"reference_in_float": True},