Remove deprecated _aminmax operator (#125995)
It has been deprecated for a long time.

Co-authored-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125995
Approved by: https://github.com/ezyang
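Migration note: callers of the removed private operator can switch to the public torch.aminmax API, mirroring the test changes in this patch. A minimal sketch of the migration (the example tensor is illustrative, not taken from the patch):

    import torch

    x = torch.tensor([[1.0, -2.0, 3.0], [0.5, 4.0, -1.0]])

    # Previously: torch._aminmax(x) and torch._aminmax(x, dim, keepdim).
    # Now: torch.aminmax returns a (min, max) named tuple; dim is keyword-only.
    mn, mx = torch.aminmax(x)          # reduce over all elements -> -2.0, 4.0
    per_row = torch.aminmax(x, dim=1)  # reduce along dim 1
    print(per_row.min, per_row.max)

On the C++ side, at::_aminmax(x) and at::_aminmax(x, dim) become at::aminmax(x) and at::aminmax(x, dim), as in the fake-quantize kernel changes below.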
@@ -8,7 +8,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/aminmax.h>
 #include <ATen/ops/empty.h>
 #include <ATen/ops/max.h>
@@ -66,11 +65,4 @@ Tensor& max_unary_out(const Tensor &self, Tensor& out) {
   return out;
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax_all(const Tensor &self) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-      " This warning will only appear once per process.");
-  return at::aminmax(self);
-}
-
 } // namespace at::native
@@ -20,7 +20,6 @@
 #include <ATen/Functions.h>
 #include <ATen/NativeFunctions.h>
 #else
-#include <ATen/ops/_aminmax_native.h>
 #include <ATen/ops/_assert_async_native.h>
 #include <ATen/ops/_functional_assert_async_native.h>
 #include <ATen/ops/_print_native.h>
@@ -682,13 +681,6 @@ std::tuple<Tensor, Tensor> qmin(const Tensor& self, int64_t dim, bool keepdim) {
       at::_make_per_tensor_quantized_tensor(min, self.q_scale(), self.q_zero_point()), min_indices);
 }
 
-// DEPRECATED: Use at::aminmax instead
-std::tuple<Tensor, Tensor> _aminmax(const Tensor& self, int64_t dim, bool keepdim) {
-  TORCH_WARN_ONCE("_aminmax is deprecated as of PyTorch 1.11 and will be removed in a future release. Use aminmax instead."
-      " This warning will only appear once per process.");
-  return at::aminmax(self, dim, keepdim);
-}
-
 TORCH_IMPL_FUNC(clamp_out)
 (
 const Tensor& /*self*/,
@@ -3762,18 +3762,6 @@
 # This function should be deprecated in favor of differential_analytic_matrix_function in FunctionsManual.cpp
 - func: matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
 
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax(Tensor self) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax_all
-  autogen: _aminmax.out
-
-# DEPRECATED: Use torch.aminmax instead
-- func: _aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
-  dispatch:
-    CPU, CUDA: _aminmax
-  autogen: _aminmax.dim_out
-
 - func: aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
   device_check: NoCheck # TensorIterator
   structured_delegate: aminmax.out
@@ -7,7 +7,7 @@
 #ifndef AT_PER_OPERATOR_HEADERS
 #include <ATen/Functions.h>
 #else
-#include <ATen/ops/_aminmax.h>
+#include <ATen/ops/aminmax.h>
 #include <ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h>
 #include <ATen/ops/fake_quantize_per_channel_affine.h>
 #include <ATen/ops/fake_quantize_per_channel_affine_cachemask.h>
@@ -148,7 +148,7 @@ void _calculate_moving_average(
   cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
 
   if (per_row_fq) {
-    std::tie(x_min, x_max) = at::_aminmax(x, 1);
+    std::tie(x_min, x_max) = at::aminmax(x, 1);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     int num_threads = std::min(size, (int64_t)512);
@@ -165,7 +165,7 @@ void _calculate_moving_average(
         size);
     C10_CUDA_KERNEL_LAUNCH_CHECK();
   } else {
-    std::tie(x_min, x_max) = at::_aminmax(x);
+    std::tie(x_min, x_max) = at::aminmax(x);
     float* x_min_data = x_min.data_ptr<float>();
     float* x_max_data = x_max.data_ptr<float>();
     // Moving Average Min/Max observer for activations
@@ -1104,7 +1104,6 @@
     "_add_relu",
     "_add_relu_",
     "_addmm_activation",
-    "_aminmax",
     "_amp_foreach_non_finite_check_and_unscale_",
     "_amp_update_scale_",
     "_assert_async",
@@ -1314,6 +1313,7 @@
     "_values_copy",
     "_weight_norm",
     "_weight_norm_interface",
+    "aminmax",
     "autocast",
     "broadcast_shapes",
     "candidate",
@@ -18,10 +18,6 @@ aten::_add_relu.Tensor
 aten::_add_relu.out
 aten::_add_relu_.Scalar
 aten::_add_relu_.Tensor
-aten::_aminmax
-aten::_aminmax.dim
-aten::_aminmax.dim_out
-aten::_aminmax.out
 aten::_amp_foreach_non_finite_check_and_unscale
 aten::_amp_foreach_non_finite_check_and_unscale.out
 aten::_amp_foreach_non_finite_check_and_unscale_
@@ -9,6 +9,9 @@ import torch
 from torch._C import parse_schema
 
 
+# Run by backwards_compat CI job
+
+
 # How to run this test locally:
 # 1 Have two virtual environments (eg conda env), one without PyTorch installed (venv_nightly)
 #   one with your local changes (venv_yours).
@@ -140,6 +143,10 @@ ALLOW_LIST = [
     ("onednn::qconv2d_pointwise", datetime.date(2024, 12, 31)),
     ("onednn::qconv3d_pointwise", datetime.date(2024, 12, 31)),
     ("onednn::qconv2d_pointwise.binary", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.dim", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.out", datetime.date(2024, 12, 31)),
+    ("aten::_aminmax.dim_out", datetime.date(2024, 12, 31)),
 ]
 
 ALLOW_LIST_COMPILED = [
@@ -1048,7 +1048,6 @@ uncovered_ops:
   aten::__is__: 83
   aten::__isnot__: 81
   aten::__not__: 32
-  aten::_aminmax: 4
   aten::_convolution: 12
   aten::_convolution.deprecated: 3
   aten::_make_per_tensor_quantized_tensor: 2
@@ -30,7 +30,6 @@ root_operators:
   aten::__range_length: 106
   aten::__rshift__.int: 2
   aten::__xor__.bool: 16
-  aten::_aminmax: 18
   aten::_convolution: 27
   aten::_convolution.deprecated: 3
   aten::_infer_size: 9
@@ -1219,18 +1219,10 @@ class TestReductions(TestCase):
     def test_aminmax(self, device, dtype):
 
         def _amin_wrapper(x, dim=None, keepdims=False):
-            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
-                if dim is None:
-                    return torch._aminmax(x)[0]
-                else:
-                    return torch._aminmax(x, dim, keepdims)[0]
+            return torch.aminmax(x, dim=dim, keepdim=keepdims)[0]
 
         def _amax_wrapper(x, dim=None, keepdims=False):
-            with self.assertWarnsOnceRegex(UserWarning, "_aminmax is deprecated"):
-                if dim is None:
-                    return torch._aminmax(x)[1]
-                else:
-                    return torch._aminmax(x, dim, keepdims)[1]
+            return torch.aminmax(x, dim=dim, keepdim=keepdims)[1]
 
         self._test_minmax_helper(_amin_wrapper, np.amin, device, dtype)
         self._test_minmax_helper(_amax_wrapper, np.amax, device, dtype)
@@ -373,7 +373,6 @@ torch_c_binding_in_graph_functions = dict.fromkeys(
     "torch._add_relu_",
     "torch._add_relu",
     "torch._addmm_activation",
-    "torch._aminmax",
     "torch._amp_foreach_non_finite_check_and_unscale_",
     "torch._amp_update_scale_",
     "torch._assert_async",