Update torch-xpu-ops commit pin (#140277)

Update the torch-xpu-ops commit to [01f4e29](01f4e293fa), which includes:
- Improve XPU operator coverage
- Fix build issues related to `-Werror=comments`

Pull Request resolved: https://github.com/pytorch/pytorch/pull/140277
Approved by: https://github.com/EikanWang, https://github.com/atalman
This commit is contained in:
Yutao Xu
2024-11-13 23:38:51 +00:00
committed by PyTorch MergeBot
parent 2f1dbfea02
commit f1e045eb75
2 changed files with 9 additions and 4 deletions

View File

@ -349,8 +349,8 @@ inductor_expected_failures_single_sample["xpu"] = {
# a deconvolution forward propagation primitive
"nn.functional.conv_transpose2d": {f32, f64},
"nn.functional.conv_transpose3d": {f32, f64},
# rrelu not supported on XPU now
"nn.functional.rrelu": {f16, f32, f64},
# frexp not supported on XPU now
"frexp": {f16, f32, f64},
# not implemented for 'Half'
"sort": {b8},
"argsort": {b8},
@ -559,7 +559,12 @@ inductor_override_kwargs["xpu"] = {
("cumsum", f16): {"reference_in_float": True},
"cumprod": {"reference_in_float": True, "atol": 7e-5, "rtol": 0.002},
("dot", f16): {"atol": 1e-5, "rtol": 0.002},
"logcumsumexp": {"grad_atol": 8e-4, "grad_rtol": 0.001},
"logcumsumexp": {
"atol": 5e-5,
"rtol": 0.005,
"grad_atol": 8e-4,
"grad_rtol": 0.001,
},
"exponential": {"reference_in_float": True},
"geometric": {"reference_in_float": True},
("kron", f16): {"reference_in_float": True},

2
third_party/xpu.txt vendored
View File

@ -1 +1 @@
5e2983143e1485d651227bb992ffbc07d8539370
01f4e293fa39818bd0d018e9bb82d4e2cf54be48