Remove all uses of AT_CHECK and replace them with TORCH_CHECK (#34846)

Summary:
AT_CHECK has been deprecated and provides no functionality beyond
TORCH_CHECK, so this change replaces all remaining uses with TORCH_CHECK.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/34846

Differential Revision: D20481339

Pulled By: mrshenli

fbshipit-source-id: 1777e769a069a78e03118270294e5e273d516ca7
Hong Xu
2020-03-17 08:55:17 -07:00
committed by Facebook GitHub Bot
parent 76d9e76b4a
commit a8ca340ad6
5 changed files with 25 additions and 37 deletions
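For context, a minimal sketch (not part of this commit) of how the replacement reads at a call site: TORCH_CHECK takes a boolean condition followed by any number of message arguments and throws c10::Error when the condition is false, which is exactly what the deprecated AT_CHECK alias forwarded to.

// Minimal illustrative sketch (not from this commit): TORCH_CHECK usage.
// The variadic message arguments are concatenated into the error string.
#include <c10/util/Exception.h>
#include <cstdint>

void check_positive(int64_t n) {
  // Throws c10::Error with the concatenated message if n <= 0.
  TORCH_CHECK(n > 0, "expected a positive value, got ", n);
}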

View File

@@ -447,7 +447,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> miopen_rnn(
   fn.tensors.set(input.sizes(), fn_batch_sizes, batch_first);
   if (fn.rnn.rnn_mode != miopenLSTM) {
-    AT_CHECK(!cx.defined(), "miopen_rnn: illegal defined cx for non-LSTM RNN.");
+    TORCH_CHECK(!cx.defined(), "miopen_rnn: illegal defined cx for non-LSTM RNN.");
   }
   auto is_input_packed = fn.tensors.batch_sizes.size() != 0;
@@ -458,8 +458,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> miopen_rnn(
   auto hidden_size = _hidden_size(fn.rnn, fn.tensors);
   auto output_size = _output_size(fn.rnn, fn.tensors);
-  AT_CHECK(hx.is_contiguous(), "miopen_rnn : hx is not contiguous.");
-  AT_CHECK(!cx.defined() || cx.is_contiguous(), "miopen_rnn : cx is not contiguous.");
+  TORCH_CHECK(hx.is_contiguous(), "miopen_rnn : hx is not contiguous.");
+  TORCH_CHECK(!cx.defined() || cx.is_contiguous(), "miopen_rnn : cx is not contiguous.");
   auto x = input.contiguous();
   auto output = at::empty(output_size, input.options());
@@ -493,7 +493,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> miopen_rnn(
   _copyParams_and_permute(MatrixRef<Tensor>{weight, static_cast<size_t>(weight_stride0)},
                           MatrixRef<Tensor>{params, params_stride0}, fn_mode);
-  AT_CHECK(!cx.defined() || cx.sizes().equals(hidden_size), "Expected cell size ", IntArrayRef{hidden_size}, ", got", cx.sizes());
+  TORCH_CHECK(!cx.defined() || cx.sizes().equals(hidden_size), "Expected cell size ", IntArrayRef{hidden_size}, ", got", cx.sizes());
   size_t workspace_size;
   auto x_descs_arr = descs.get_x_descs();
@@ -563,7 +563,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> miopen_rnn_backward_input(
   auto handle = getMiopenHandle();
   if(fn.rnn.rnn_mode != miopenLSTM) {
-    AT_CHECK(!cx.defined(), "rnn: illegal defined cx for non-LSTM RNN");
+    TORCH_CHECK(!cx.defined(), "rnn: illegal defined cx for non-LSTM RNN");
   }
   auto is_input_packed = fn_batch_sizes.size() != 0;
@@ -577,8 +577,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> miopen_rnn_backward_input(
   auto hidden_size = _hidden_size(fn.rnn, fn.tensors);
   auto output_size = _output_size(fn.rnn, fn.tensors);
-  AT_CHECK(hx.is_contiguous(), "rnn: hx is not contiguous");
-  AT_CHECK(!cx.defined() || cx.is_contiguous(), "rnn: cx is not contiguous");
+  TORCH_CHECK(hx.is_contiguous(), "rnn: hx is not contiguous");
+  TORCH_CHECK(!cx.defined() || cx.is_contiguous(), "rnn: cx is not contiguous");
   auto x = input.contiguous();
   auto dy = grad_output.contiguous();
@@ -591,23 +591,23 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> miopen_rnn_backward_input(
   AT_ASSERTM(cx.defined() || !output_mask[2], "illegally required grad of cx for non-LSTM RNN");
   auto dcx = cx.defined() ? at::empty(hidden_size, cx.options()) : Tensor();
-  AT_CHECK(fn_train, "miopen RNN backward can only be called in training mode");
+  TORCH_CHECK(fn_train, "miopen RNN backward can only be called in training mode");
-  AT_CHECK(input.sizes().equals(input_size),
+  TORCH_CHECK(input.sizes().equals(input_size),
     "Expected input size ", IntArrayRef{input_size}, ", got ", input.sizes());
-  AT_CHECK(output.sizes().equals(output_size),
+  TORCH_CHECK(output.sizes().equals(output_size),
     "Expected output size ", IntArrayRef{output_size}, ", got ", output.sizes());
-  AT_CHECK(!hx.defined() || hx.sizes().equals(hidden_size),
+  TORCH_CHECK(!hx.defined() || hx.sizes().equals(hidden_size),
     "Expected hidden size ", IntArrayRef{hidden_size}, ", got ", hx.sizes());
-  AT_CHECK(!cx.defined() || cx.sizes().equals(hidden_size),
+  TORCH_CHECK(!cx.defined() || cx.sizes().equals(hidden_size),
     "Expected cell size ", IntArrayRef{hidden_size}, ", got ", cx.sizes());
-  AT_CHECK(!dhy.defined() || dhy.sizes().equals(hidden_size),
+  TORCH_CHECK(!dhy.defined() || dhy.sizes().equals(hidden_size),
     "Expected d_hidden size ", IntArrayRef{hidden_size}, ", got ", dhy.sizes());
-  AT_CHECK(!dcy.defined() || dcy.sizes().equals(hidden_size),
+  TORCH_CHECK(!dcy.defined() || dcy.sizes().equals(hidden_size),
     "Expected d_cell size ", IntArrayRef{hidden_size}, ", got ", dcy.sizes());
-  AT_CHECK(dhy.is_cuda() && dy.is_cuda() && (!dcy.defined() || dcy.is_cuda()),
+  TORCH_CHECK(dhy.is_cuda() && dy.is_cuda() && (!dcy.defined() || dcy.is_cuda()),
     "Gradients aren't HIP tensors");
   miopenRNNAlgo_t algo = miopenRNNdefault;
@@ -679,7 +679,7 @@ std::vector<Tensor> miopen_rnn_backward_weight(
   auto handle = getMiopenHandle();
   if (fn.rnn.rnn_mode != miopenLSTM) {
-    AT_CHECK(!cx.defined(), "rnn: illegal defined cx for non-LSTM RNN");
+    TORCH_CHECK(!cx.defined(), "rnn: illegal defined cx for non-LSTM RNN");
   }
   auto is_input_packed = fn_batch_sizes.size() != 0;
@@ -691,15 +691,15 @@ std::vector<Tensor> miopen_rnn_backward_weight(
   auto input_size = _input_size(fn.tensors);
   auto hidden_size = _hidden_size(fn.rnn, fn.tensors);
-  AT_CHECK(fn_train, "miopen RNN backward can only be called in training mode");
+  TORCH_CHECK(fn_train, "miopen RNN backward can only be called in training mode");
-  AT_CHECK(input.sizes().equals(input_size),
+  TORCH_CHECK(input.sizes().equals(input_size),
     "Expected input size ", IntArrayRef{input_size}, ", got ", input.sizes());
-  AT_CHECK(!hx.defined() || hx.sizes().equals(hidden_size),
+  TORCH_CHECK(!hx.defined() || hx.sizes().equals(hidden_size),
     "Expected hidden size ", IntArrayRef{hidden_size}, ", got ", hx.sizes());
-  AT_CHECK(hx.is_contiguous(), "rnn: hx is not contiguous");
+  TORCH_CHECK(hx.is_contiguous(), "rnn: hx is not contiguous");
-  AT_CHECK(!cx.defined() || cx.is_contiguous(), "rnn: cx is not contiguous");
+  TORCH_CHECK(!cx.defined() || cx.is_contiguous(), "rnn: cx is not contiguous");
   auto x = input.contiguous();
   const auto& y = output;
@@ -808,7 +808,7 @@ std::pair<Tensor, hidden_type> _miopen_impl(
   std::tie(hx, cx) = unpack_hidden(hidden);
   int64_t hidden_size = hx.size(2);
-  AT_CHECK(_batch_sizes.dim() == 1, "batch_sizes tensor should be 1D");
+  TORCH_CHECK(_batch_sizes.dim() == 1, "batch_sizes tensor should be 1D");
   IntArrayRef batch_sizes { _batch_sizes.data_ptr<int64_t>(), static_cast<size_t>(_batch_sizes.size(0)) };
   Tensor dropout_state = at::empty({0}, input.options());

View File

@@ -369,9 +369,6 @@ C10_DEPRECATED_MESSAGE("AT_INDEX_ERROR(msg) is deprecated, use TORCH_CHECK_INDEX
  */
 inline void deprecated_AT_INDEX_ERROR() {}
-C10_DEPRECATED_MESSAGE("AT_CHECK is deprecated, use TORCH_CHECK instead.")
-inline void deprecated_AT_CHECK() {}
 /*
 // Deprecation disabled until we fix sites in our codebase
 C10_DEPRECATED_MESSAGE("AT_ASSERT is deprecated, if you mean to indicate an internal invariant failure, use " \
@@ -390,15 +387,6 @@ inline void deprecated_AT_ASSERTM() {}
 }} // namespace c10::detail
-// Deprecated alias; this alias was deprecated because it wasn't clear to
-// people that you should use a macro with AT_ prefix inside the torch/csrc
-// directory. Use TORCH_CHECK instead.
-#define AT_CHECK(...) \
-  do { \
-    ::c10::detail::deprecated_AT_CHECK(); \
-    C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(__VA_ARGS__)); \
-  } while (false)
 // Deprecated alias; this alias was deprecated because people kept mistakenly
 // using it for user error checking. Use TORCH_INTERNAL_ASSERT or TORCH_CHECK
 // instead. See https://github.com/pytorch/pytorch/issues/20287 for more details.
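The block removed above is the usual deprecation-shim pattern: the alias macro first calls an empty function carrying a deprecation attribute (so every remaining use site gets a compile-time warning) and then expands to the new macro. A generic sketch of the same idea with made-up names (OLD_CHECK/NEW_CHECK are placeholders, not PyTorch macros):

// Generic sketch of the deprecation-shim pattern; names are hypothetical.
#include <cstdio>

[[deprecated("OLD_CHECK is deprecated, use NEW_CHECK instead.")]]
inline void warn_old_check() {}

// The "new" macro: report when the condition is false.
#define NEW_CHECK(cond, msg) \
  do { if (!(cond)) std::fprintf(stderr, "check failed: %s\n", msg); } while (false)

// The deprecated alias: trigger the compile-time warning, then defer to NEW_CHECK.
#define OLD_CHECK(cond, msg) \
  do { warn_old_check(); NEW_CHECK(cond, msg); } while (false)

Calling OLD_CHECK(x > 0, "bad x") still compiles and behaves like NEW_CHECK, but on GCC/Clang each call site surfaces a -Wdeprecated-declarations warning pointing users at the replacement.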

View File

@@ -173,7 +173,7 @@ ScalarType infer_scalar_type(PyObject *obj) {
     switch (torch::tensors::get_default_scalar_type()) {
       case ScalarType::Float: return ScalarType::ComplexFloat;
       case ScalarType::Double: return ScalarType::ComplexDouble;
-      default: AT_CHECK(0, "invalid default scalar type for complex");
+      default: TORCH_CHECK(false, "invalid default scalar type for complex");
     }
   }
   if (THPVariable_Check(obj)) {
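One behavioral note on this hunk: TORCH_CHECK expects a boolean condition, so an unconditional failure in a default branch is written as TORCH_CHECK(false, ...) rather than AT_CHECK(0, ...), which leaned on the implicit int-to-bool conversion. A small sketch of the same pattern with a hypothetical enum (not from this commit):

// Hypothetical example: TORCH_CHECK(false, ...) to reject a value that
// should be unreachable; it always throws c10::Error with the given message.
#include <c10/util/Exception.h>

enum class Precision { Single, Double };

void validate(Precision p) {
  switch (p) {
    case Precision::Single:
    case Precision::Double:
      return;
    default:
      TORCH_CHECK(false, "invalid precision ", static_cast<int>(p));
  }
}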

View File

@@ -954,7 +954,7 @@ class AsyncSparseAllreduceWork : public ProcessGroupGloo::AsyncWork {
         continue;
       }
       const auto actual = metadata[i].sizes();
-      AT_CHECK(actual == expected, "Sparse dimensions do not match");
+      TORCH_CHECK(actual == expected, "Sparse dimensions do not match");
     }
   }

View File

@@ -450,7 +450,7 @@ void ProcessGroupNCCL::broadcastUniqueNCCLID(ncclUniqueId* ncclID) {
     store_->set(storeKey, vec);
   } else {
     auto vec = store_->get(storeKey);
-    AT_CHECK(vec.size() == NCCL_UNIQUE_ID_BYTES);
+    TORCH_CHECK(vec.size() == NCCL_UNIQUE_ID_BYTES);
     std::memcpy(ncclID, vec.data(), vec.size());
   }
 }
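As the hunk above shows, TORCH_CHECK can also be called with only a condition; with no message arguments it throws c10::Error with a generic description of the failed expression. A minimal sketch with an assumed helper (require_size is not a real PyTorch function):

// Minimal sketch: condition-only TORCH_CHECK, no custom message.
#include <c10/util/Exception.h>
#include <cstddef>
#include <cstdint>
#include <vector>

void require_size(const std::vector<uint8_t>& buf, size_t expected) {
  TORCH_CHECK(buf.size() == expected);  // default error text if sizes differ
}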