Fixed crash when calling pad_packed_sequence on sequences packed from CUDA tensors with enforce_sorted=false, caused by indexing with tensors on different devices (#115028)

Fixes #115027

The fix is applied in csrc, mirroring the Python code [here](https://github.com/pytorch/pytorch/blob/main/torch/nn/utils/rnn.py#L338).
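
For context, the failure mode is that the padded data and `unsorted_indices` live on the GPU when the sequence was packed from CUDA tensors with `enforce_sorted=false`, while `lengths` is always a CPU tensor, so indexing `lengths` directly with `unsorted_indices` mixes devices. A minimal sketch of the device-handling pattern the Python code uses (the `restore_order` helper name and signature below are illustrative, not the actual csrc function):

```cpp
// Illustrative sketch only, not the actual patch: the hypothetical
// restore_order() shows the pattern of moving the index tensor to CPU
// before indexing the CPU lengths tensor.
#include <torch/torch.h>
#include <tuple>

std::tuple<torch::Tensor, torch::Tensor> restore_order(
    const torch::Tensor& padded_output,    // on CUDA when the input was on CUDA
    const torch::Tensor& lengths,          // always a CPU tensor
    const torch::Tensor& unsorted_indices, // same device as padded_output
    bool batch_first) {
  const int64_t batch_dim = batch_first ? 0 : 1;
  return std::make_tuple(
      // The data itself is reordered on its own device...
      padded_output.index_select(batch_dim, unsorted_indices),
      // ...but the CPU lengths tensor must be indexed with a CPU index,
      // otherwise the indexing kernel sees tensors on different devices.
      lengths.index({unsorted_indices.cpu()}));
}
```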

Pull Request resolved: https://github.com/pytorch/pytorch/pull/115028
Approved by: https://github.com/drisspg
Shaltiel Shmidman
2023-12-07 18:09:14 +00:00
committed by PyTorch MergeBot
parent 686a3e0bf0
commit ee8b33f7d5
2 changed files with 14 additions and 1 deletion


@@ -797,3 +797,16 @@ TEST_F(RNNTest, CheckErrorInfos) {
    ASSERT_THROWS_WITH(GRU(options), "num_layers must be greater than zero");
  }
}

// This test assures that pad_packed_sequence does not crash when packed with
// cuda tensors, https://github.com/pytorch/pytorch/issues/115027
TEST_F(RNNTest, CheckPadPackedSequenceWithCudaTensors_CUDA) {
  // Create a 5x5 input on the GPU (sequence length 5, batch size 5)
  auto input = torch::randn({5, 5}).to(at::ScalarType::Float).cuda();
  auto lengths = torch::full({5}, 5);

  auto packed = torch::nn::utils::rnn::pack_padded_sequence(
      input, lengths, /*batch_first=*/false, /*enforce_sorted=*/false);

  // Must not crash even though unsorted_indices lives on the GPU
  auto error = torch::nn::utils::rnn::pad_packed_sequence(packed);
}