Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
Fixed crash when calling pad_packed_sequence when packed with CUDA tensors and enforce_sorted=false, due to indexing with tensors on different devices (#115028)
Fixes #115027. The fix in csrc mirrors what the Python code already does [here](https://github.com/pytorch/pytorch/blob/main/torch/nn/utils/rnn.py#L338). Pull Request resolved: https://github.com/pytorch/pytorch/pull/115028 Approved by: https://github.com/drisspg
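The crash comes from indexing a CPU-resident `lengths` tensor with an `unsorted_indices` tensor that lives on a CUDA device. A minimal sketch of the pattern the fix applies, with a hypothetical helper name for illustration (the actual change lives inside the csrc `pad_packed_sequence` implementation):

```cpp
#include <torch/torch.h>

// Sketch: reorder CPU-resident lengths by indices that may live on a CUDA
// device. Indexing `lengths` directly with a CUDA index tensor throws a
// device-mismatch error; moving the indices to the CPU first, as the Python
// code in torch/nn/utils/rnn.py does, avoids it. `reorder_lengths` is a
// hypothetical name, not a function from the commit.
torch::Tensor reorder_lengths(
    const torch::Tensor& lengths,            // int64 tensor on the CPU
    const torch::Tensor& unsorted_indices) { // possibly on a CUDA device
  return lengths.index_select(0, unsorted_indices.cpu());
}
```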
Commit ee8b33f7d5 (parent 686a3e0bf0), committed by PyTorch MergeBot
```diff
@@ -797,3 +797,16 @@ TEST_F(RNNTest, CheckErrorInfos) {
     ASSERT_THROWS_WITH(GRU(options), "num_layers must be greater than zero");
   }
 }
+
+// This test assures that pad_packed_sequence does not crash when packed with
+// cuda tensors, https://github.com/pytorch/pytorch/issues/115027
+TEST_F(RNNTest, CheckPadPackedSequenceWithCudaTensors_CUDA) {
+  // Create input on the GPU, sample 5x5
+  auto input = torch::randn({5, 5}).to(at::ScalarType::Float).cuda();
+  auto lengths = torch::full({5}, 5);
+
+  auto packed =
+      torch::nn::utils::rnn::pack_padded_sequence(input, lengths, false, false);
+
+  auto error = torch::nn::utils::rnn::pad_packed_sequence(packed);
+}
```
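The new test only checks that the call no longer throws. For reference, a sketch (not part of the commit) that additionally verifies the pack/unpack round trip restores the padded input, using the same public libtorch API; it assumes a CUDA device is available:

```cpp
#include <torch/torch.h>

// Round-trip sanity check: pack a padded CUDA input with enforce_sorted=false,
// unpack it, and compare against the original. Tensor shapes mirror the test
// above; since every length equals the full 5 timesteps, the padded output
// should reproduce the input exactly.
void CheckPadPackedRoundTrip() {
  auto input = torch::randn({5, 5}).cuda();
  auto lengths = torch::full({5}, 5, torch::kInt64);

  auto packed = torch::nn::utils::rnn::pack_padded_sequence(
      input, lengths, /*batch_first=*/false, /*enforce_sorted=*/false);

  // pad_packed_sequence returns a (padded tensor, lengths) pair.
  auto unpacked = torch::nn::utils::rnn::pad_packed_sequence(packed);
  TORCH_CHECK(torch::allclose(std::get<0>(unpacked), input));
}
```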