Revert "fix torch.sparse.log_softmax on CPU (#161959)"
This reverts commit 002e59440afe8711019e68df500f5e18b9a43f3c.
Reverted https://github.com/pytorch/pytorch/pull/161959 on behalf of https://github.com/davidberard98 due to test failure: test_sparse.py::TestSparseMPS::test_log_softmax_float_mps_float32 [GH job link](https://github.com/pytorch/pytorch/actions/runs/17573794461/job/49915138287) [HUD commit link](002e59440a) ([comment](https://github.com/pytorch/pytorch/pull/161959#issuecomment-3270509418))
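For context, the reverted change was validated by a test_log_softmax_float test (removed in the last hunk below) that compares float32 sparse log_softmax against a float64 reference on large-magnitude inputs. A standalone sketch of that check, lifted out of the TestSparse harness, follows; the device string and the use of torch.testing.assert_close in place of the harness's assertEqual are illustrative assumptions, and whether the assertion passes depends on whether the reverted CPU fix is present in the build.

```python
import torch

# Sketch of the check performed by the removed test: build a sparse float32
# tensor with large-magnitude negative entries, then compare sparse
# log_softmax computed in float32 against a float64 reference.
device = "cpu"  # assumption: stands in for the device under test in the original harness
x = (torch.rand(4, 3, dtype=torch.float32, device=device) - 10000000.0).to_sparse()

out = torch.sparse.log_softmax(x, dim=1).to_dense()
out_double = torch.sparse.log_softmax(x.double(), dim=1).to_dense()

# The removed test asserted equality after casting the float64 result back
# to float32; assert_close stands in for the harness's assertEqual here.
torch.testing.assert_close(out, out_double.to(dtype=torch.float32))
```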
@@ -2,7 +2,6 @@
 #include <ATen/core/Tensor.h>
 #include <ATen/Config.h>
 #include <ATen/Dispatch.h>
-#include <ATen/AccumulateType.h>
 #include <ATen/NamedTensorUtils.h>
 #include <ATen/native/sparse/ParamUtils.h>
 #include <ATen/native/SparseTensorUtils.h>
@@ -296,7 +295,6 @@ void cpu_sparse_coo_softmax(Tensor output, const Tensor& input, const int64_t di
      to exp functions as well as reuse of softmax implementation for
      log_softmax.
    */
-  using accscalar_t = at::acc_type<scalar_t, false>;
   auto sparse_dim = input.sparse_dim();
   auto indices = input._indices().contiguous();
   auto values = input._values().contiguous();
@@ -342,14 +340,14 @@ void cpu_sparse_coo_softmax(Tensor output, const Tensor& input, const int64_t di
         continue;
 
       /* Prepare scratch space */
-      std::vector<accscalar_t> mx_row(nvalues, -std::numeric_limits<accscalar_t>::infinity());
+      std::vector<scalar_t> mx_row(nvalues, -std::numeric_limits<scalar_t>::infinity());
       std::vector<scalar_t> exp_sums_row(nvalues, 0);
 
       /* Compute mx */
       for (int64_t i : pool_indices) {
         auto values_row = values_accessor[i];
         for (const auto j : c10::irange(nvalues)) {
-          mx_row[j] = std::max(mx_row[j], accscalar_t(values_row[j]));
+          mx_row[j] = std::max(mx_row[j], values_row[j]);
         }
       }
 
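The hunk above undoes the accumulation-type part of the fix: the per-column running max goes back from at::acc_type<scalar_t, false> (which widens float to double on the CPU path) to plain scalar_t. A rough Python model of the per-pool computation is sketched below to show where that accumulation dtype enters; it is a simplification for illustration, not the kernel itself, and the pool layout and names are assumptions.

```python
import torch

def pool_log_softmax(pool_values: torch.Tensor, acc_dtype: torch.dtype) -> torch.Tensor:
    # Rough model of the per-pool work in cpu_sparse_coo_softmax: a running
    # max per column ("Compute mx"), then the max-shifted exp-sum that the
    # log_softmax path reuses. acc_dtype stands in for the accumulation type:
    # at::acc_type<scalar_t, false> in the reverted fix, scalar_t after the revert.
    v = pool_values.to(acc_dtype)
    mx = v.max(dim=0).values              # mx_row
    exp_sums = (v - mx).exp().sum(dim=0)  # exp_sums_row
    out = v - mx - exp_sums.log()         # log_softmax via the softmax machinery
    return out.to(pool_values.dtype)

# One pool of float32 rows, evaluated with and without widened accumulation.
pool = torch.rand(5, 3) - 10000000.0
with_widened_acc = pool_log_softmax(pool, torch.float64)
plain_float32 = pool_log_softmax(pool, torch.float32)
```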
@@ -3694,14 +3694,6 @@ class TestSparse(TestSparseBase):
         self._check_zero_nnz_softmax_op(torch.sparse.log_softmax, 1, device, dtype)
         self._check_zero_nnz_softmax_op(torch.sparse.log_softmax, 10, device, dtype)
 
-    @dtypes(torch.float)
-    def test_log_softmax_float(self, device, dtype):
-        x = (torch.rand(4, 3, dtype=dtype, device=device) - 10000000.0).to_sparse()
-        out = torch.sparse.log_softmax(x, dim=1).to_dense()
-        x_double = x.double()
-        out_double = torch.sparse.log_softmax(x_double, dim=1).to_dense()
-        self.assertEqual(out, out_double.to(dtype=dtype))
-
     # TODO: Check after why ROCm's cusparseXcsrgemm2Nnz function doesn't return the same nnz value as CUDA
     @coalescedonoff
     @dtypes(*floating_and_complex_types())