[BC-Breaking] Remove long-deprecated casting functions from native_functions.yaml (#164641)

This PR removes the `torch._cast_XXX` functions from the generated ops. They have been deprecated since PyTorch 1.
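For migration, each removed `_cast_*` op maps directly onto the public `Tensor.to` API; a minimal Python sketch of the replacement (the dtype mapping below is the standard one, e.g. `_cast_Byte` corresponds to `torch.uint8`):

```python
import torch

x = torch.rand(2, 2) * 100.0

# Removed by this PR:  y = torch._cast_Byte(x)
# Public replacement:  cast through Tensor.to with the matching dtype.
y = x.to(torch.uint8)

# The same substitution covers the other seven ops:
# _cast_Char   -> torch.int8      _cast_Short -> torch.int16
# _cast_Int    -> torch.int32     _cast_Long  -> torch.int64
# _cast_Half   -> torch.float16   _cast_Float -> torch.float32
# _cast_Double -> torch.float64
```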

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164641
Approved by: https://github.com/albanD, https://github.com/justinchuby
Author: Yuanyuan Chen
Date: 2025-10-08 08:27:55 +00:00
Committed by: PyTorch MergeBot
Parent: c855f8632e
Commit: 64108bdbed
15 changed files with 15 additions and 261 deletions


@@ -1111,14 +1111,6 @@
     "_amp_update_scale_",
     "_assert_async",
     "_batch_norm_impl_index",
-    "_cast_Byte",
-    "_cast_Char",
-    "_cast_Double",
-    "_cast_Float",
-    "_cast_Half",
-    "_cast_Int",
-    "_cast_Long",
-    "_cast_Short",
     "_choose_qparams_per_tensor",
     "_coalesce",
     "_compute_linear_combination",


@@ -135,84 +135,6 @@ TEST_F(LazyOpsTest, TestIsSigned) {
   });
 }
-
-TEST_F(LazyOpsTest, TestCastByte) {
-  torch::Tensor a =
-      torch::rand(
-          {2, 2}, torch::TensorOptions(torch::kFloat).device(DefaultDevice())) *
-      100.0;
-  torch::Tensor b = torch::_cast_Byte(a);
-  ForEachDevice([&](const torch::Device& device) {
-    torch::Tensor lazy_a = CopyToDevice(a, device);
-    torch::Tensor lazy_b = torch::_cast_Byte(lazy_a);
-    AllEqual(b, lazy_b);
-  });
-}
-
-TEST_F(LazyOpsTest, TestCastChar) {
-  torch::Tensor a =
-      torch::rand(
-          {2, 2}, torch::TensorOptions(torch::kFloat).device(DefaultDevice())) *
-      100.0;
-  torch::Tensor b = torch::_cast_Char(a);
-  ForEachDevice([&](const torch::Device& device) {
-    torch::Tensor lazy_a = CopyToDevice(a, device);
-    torch::Tensor lazy_b = torch::_cast_Char(lazy_a);
-    AllEqual(b, lazy_b);
-  });
-}
-
-TEST_F(LazyOpsTest, TestCastShort) {
-  torch::Tensor a =
-      torch::rand(
-          {2, 2}, torch::TensorOptions(torch::kFloat).device(DefaultDevice())) *
-      100.0;
-  torch::Tensor b = torch::_cast_Short(a);
-  ForEachDevice([&](const torch::Device& device) {
-    torch::Tensor lazy_a = CopyToDevice(a, device);
-    torch::Tensor lazy_b = torch::_cast_Short(lazy_a);
-    AllEqual(b, lazy_b);
-  });
-}
-
-TEST_F(LazyOpsTest, TestCastInt) {
-  torch::Tensor a =
-      torch::rand(
-          {2, 2}, torch::TensorOptions(torch::kFloat).device(DefaultDevice())) *
-      100.0;
-  torch::Tensor b = torch::_cast_Int(a);
-  ForEachDevice([&](const torch::Device& device) {
-    torch::Tensor lazy_a = CopyToDevice(a, device);
-    torch::Tensor lazy_b = torch::_cast_Int(lazy_a);
-    AllEqual(b, lazy_b);
-  });
-}
-
-TEST_F(LazyOpsTest, TestCastLong) {
-  torch::Tensor a =
-      torch::rand(
-          {2, 2}, torch::TensorOptions(torch::kFloat).device(DefaultDevice())) *
-      100.0;
-  torch::Tensor b = torch::_cast_Long(a);
-  ForEachDevice([&](const torch::Device& device) {
-    torch::Tensor lazy_a = CopyToDevice(a, device);
-    torch::Tensor lazy_b = torch::_cast_Long(lazy_a);
-    AllEqual(b, lazy_b);
-  });
-}
-
-TEST_F(LazyOpsTest, TestCastFloat) {
-  torch::Tensor a =
-      torch::rand(
-          {2, 2}, torch::TensorOptions(torch::kFloat).device(DefaultDevice())) *
-      100.0;
-  torch::Tensor b = torch::_cast_Float(a);
-  ForEachDevice([&](const torch::Device& device) {
-    torch::Tensor lazy_a = CopyToDevice(a, device);
-    torch::Tensor lazy_b = torch::_cast_Float(lazy_a);
-    AllEqual(b, lazy_b);
-  });
-}
 
 TEST_F(LazyOpsTest, TestRetainType) {
   torch::Tensor lazy_a = torch::zeros(
       {2, 2}, torch::TensorOptions(torch::kByte).device(torch::kLazy));
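Each deleted test exercised one deprecated cast on the lazy backend and compared it against the eager result. A rough Python equivalent of that invariant, using only the public API (a sketch, not code from this PR; `check_cast_matches_eager` and its `device` parameter are illustrative stand-ins for the C++ harness, which iterated over lazy devices):

```python
import torch

def check_cast_matches_eager(dtype: torch.dtype, device: str = "cpu") -> None:
    # Eager reference result.
    a = torch.rand(2, 2) * 100.0
    expected = a.to(dtype)

    # Same cast executed on the device under test
    # (the C++ harness used torch::kLazy here).
    actual = a.to(device).to(dtype)

    torch.testing.assert_close(expected, actual.cpu())

# The six dtypes covered by the removed tests.
for dt in (torch.uint8, torch.int8, torch.int16,
           torch.int32, torch.int64, torch.float32):
    check_cast_matches_eager(dt)
```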


@@ -141,6 +141,15 @@ ALLOW_LIST = [
     ("c10d::.*", datetime.date(9999, 1, 1)),
     # Previously MPS_only did not support backward
     ("aten::_fused_rms_norm", datetime.date(2025, 12, 30)),
+    # These casting ops were deprecated in PyTorch 1
+    ("aten::_cast_Half", datetime.date(9999, 1, 1), None, True),
+    ("aten::_cast_Short", datetime.date(9999, 1, 1), None, True),
+    ("aten::_cast_Long", datetime.date(9999, 1, 1), None, True),
+    ("aten::_cast_Int", datetime.date(9999, 1, 1), None, True),
+    ("aten::_cast_Float", datetime.date(9999, 1, 1), None, True),
+    ("aten::_cast_Double", datetime.date(9999, 1, 1), None, True),
+    ("aten::_cast_Char", datetime.date(9999, 1, 1), None, True),
+    ("aten::_cast_Byte", datetime.date(9999, 1, 1), None, True),
 ]
 ALLOW_LIST_COMPILED = [
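The `datetime.date(9999, 1, 1)` expiry effectively allowlists these schemas forever, so the compatibility checker will never flag their removal. A minimal sketch of how entries like these can be consumed, assuming a 4-tuple layout of (name pattern, expiry date, optional schema pattern, flag); the real logic in the checker script is more involved:

```python
import datetime
import re

# Assumed entry layout: (name_pattern, expiry_date, schema_pattern_or_None, flag)
ALLOW_LIST = [
    ("aten::_cast_Byte", datetime.date(9999, 1, 1), None, True),
]

# Precompile patterns, keeping only entries that have not expired yet.
ALLOW_LIST_COMPILED = [
    (re.compile(name), expiry)
    for name, expiry, _schema, _flag in ALLOW_LIST
    if expiry >= datetime.date.today()
]

def allow_listed(op_name: str) -> bool:
    """True if a missing or changed schema for op_name should be tolerated."""
    return any(pattern.search(op_name) for pattern, _ in ALLOW_LIST_COMPILED)

assert allow_listed("aten::_cast_Byte")
```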