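// Full-tensor reductions: min()/max() over every element of a tensor (the
// overloads that take no `dim` argument), plus their out-variants and the
// deprecated _aminmax_all shim.
//
// A minimal usage sketch (hypothetical call site, not part of this file):
//   at::Tensor t  = at::randn({3, 4});
//   at::Tensor lo = at::min(t);  // 0-dim tensor holding the smallest element
//   at::Tensor hi = at::max(t);  // 0-dim tensor holding the largest element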
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/ReduceAllOps.h>
#include <ATen/native/Resize.h>

#include <ATen/core/Tensor.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_aminmax_native.h>
#include <ATen/ops/aminmax.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/max.h>
#include <ATen/ops/max_native.h>
#include <ATen/ops/min.h>
#include <ATen/ops/min_native.h>
#endif

namespace at::native {
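
// Dispatch stubs for the full-tensor (no `dim` argument) reductions; the
// per-device kernels are registered against these stubs elsewhere via
// REGISTER_DISPATCH.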
DEFINE_DISPATCH(min_all_stub);
DEFINE_DISPATCH(max_all_stub);
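
// Reduces over every element of `self` and returns the minimum as a 0-dim
// tensor. Empty inputs are rejected because the reduction has no identity
// value to fall back on.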
Tensor min(const Tensor &self) {
  TORCH_CHECK(self.numel() > 0,
              "min(): Expected reduction dim to be specified for input.numel() == 0. Specify the reduction dim with the 'dim' argument.");
  Tensor result = at::empty({}, self.options());
  min_all_stub(self.device().type(), result, self.contiguous());
  return result;
}
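
// Out-variant of the full min reduction: validates that `self` and `out` live
// on the same device and that `self`'s dtype can be cast to `out`'s dtype,
// then resizes `out` to a 0-dim tensor and fills it in place.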
Tensor& min_unary_out(const Tensor &self, Tensor& out) {
  // First check if the devices match (CPU vs GPU)
  TORCH_CHECK(self.device() == out.device());

  TORCH_CHECK(canCast(
      typeMetaToScalarType(self.dtype()),
      typeMetaToScalarType(out.dtype())));

  at::native::resize_output(out, {});

  min_all_stub(self.device().type(), out, self.contiguous());
  return out;
}
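
// Mirror of min() above: returns the maximum of all elements as a 0-dim
// tensor, rejecting empty inputs.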
Tensor max(const Tensor &self) {
  TORCH_CHECK(self.numel() > 0,
              "max(): Expected reduction dim to be specified for input.numel() == 0. Specify the reduction dim with the 'dim' argument.");
  Tensor result = at::empty({}, self.options());
  max_all_stub(self.device().type(), result, self.contiguous());
  return result;
}
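
// Out-variant of the full max reduction; same device and dtype checks as
// min_unary_out above.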
Tensor& max_unary_out(const Tensor &self, Tensor& out) {
  // First check if the devices match (CPU vs GPU)
  TORCH_CHECK(self.device() == out.device());

  TORCH_CHECK(canCast(
      typeMetaToScalarType(self.dtype()),
      typeMetaToScalarType(out.dtype())));

  at::native::resize_output(out, {});

  max_all_stub(self.device().type(), out, self.contiguous());
  return out;
}

// DEPRECATED: Use at::aminmax instead
std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
                  " This warning will only appear once per process.");
  return at::aminmax(self);
}

} // namespace at::native