Remove deprecated alias macro (1/3) (#137556)

**Detailed Description:**
- Remove the AT_ERROR macro

Pull Request resolved: https://github.com/pytorch/pytorch/pull/137556
Approved by: https://github.com/ezyang
Authored by: FFFrog
Date: 2024-10-21 14:55:07 +08:00
Committed by: PyTorch MergeBot
Parent commit: 16caa8c1b3
Commit: af0bc75460
153 changed files with 521 additions and 418 deletions
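
The change is mechanical: every `AT_ERROR(msg...)` call site becomes `TORCH_CHECK(false, msg...)`, which unconditionally throws a `c10::Error` built from the concatenated message arguments, so runtime behaviour is preserved while the deprecated alias can be deleted. Below is a minimal sketch of the rewrite; the helper function and message are made up for illustration and do not appear in the diff.

```cpp
#include <cstdint>
#include <c10/util/Exception.h> // defines TORCH_CHECK

// Hypothetical helper showing the pattern applied throughout this commit.
void report_bad_dim(int64_t ndim) {
  // Previously: AT_ERROR("expected a 1D tensor, but got ", ndim, " dimensions");
  // The literal false condition makes TORCH_CHECK throw unconditionally.
  TORCH_CHECK(false, "expected a 1D tensor, but got ", ndim, " dimensions");
}
```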

View File

@ -136,7 +136,7 @@ inline bool _apply_preamble(ArrayRef<Tensor> tensors) {
checkDeviceType("CPU_tensor_apply", tensors, kCPU);
checkLayout("CPU_tensor_apply", tensors, kStrided);
if (!_all_equal_numel(tensors))
AT_ERROR(_all_equal_numel_error(tensors));
TORCH_CHECK(false, _all_equal_numel_error(tensors));
// An empty tensor has no elements
for (auto& t : tensors)
if (t.numel() == 0)

View File

@ -12,11 +12,11 @@
namespace at {
static cpu_fixed_malloc(void*, ptrdiff_t) {
AT_ERROR("attempting to resize a tensor view of an external blob");
TORCH_CHECK(false, "attempting to resize a tensor view of an external blob");
}
static cpu_fixed_realloc(void*, void*, ptrdiff_t) {
AT_ERROR("attempting to resize a tensor view of an external blob");
TORCH_CHECK(false, "attempting to resize a tensor view of an external blob");
}
static cpu_fixed_free(void* state, void* allocation) {

View File

@ -189,7 +189,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
double_normal_sample = std::optional<double>(legacy_pod->normal_y);
}
} else {
AT_ERROR("Expected either a CPUGeneratorImplStateLegacy of size ", size_legacy,
TORCH_CHECK(false, "Expected either a CPUGeneratorImplStateLegacy of size ", size_legacy,
" or a CPUGeneratorImplState of size ", size_current,
" but found the input RNG state size to be ", new_state_size);
}

View File

@ -67,8 +67,10 @@ class TORCH_API Context {
} else if (device_type == at::kHIP) {
return at::detail::getHIPHooks();
} else {
AT_ERROR(
c10::DeviceTypeName(device_type), " device type not an accelerator.");
TORCH_CHECK(
false,
c10::DeviceTypeName(device_type),
" device type not an accelerator.");
}
}

View File

@ -55,7 +55,8 @@ TORCH_API void record_kernel_function_dtype(std::string name);
do { \
if constexpr (!at::should_include_kernel_dtype( \
at_dispatch_name, enum_type)) { \
AT_ERROR( \
TORCH_CHECK( \
false, \
"dtype '", \
toString(enum_type), \
"' not selected for kernel tag ", \
@ -220,7 +221,8 @@ inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
switch (_st) { \
__VA_ARGS__ \
default: \
AT_ERROR( \
TORCH_CHECK( \
false, \
'"', \
at_dispatch_name, \
"\" not implemented for '", \

View File

@ -78,7 +78,7 @@ inline void check_defined(
const char* api_name) {
for (auto& t : tensors) {
if (!t.get().defined()) {
AT_ERROR(api_name, "(...) called with an undefined Tensor");
TORCH_CHECK(false, api_name, "(...) called with an undefined Tensor");
}
}
}

View File

@ -33,7 +33,7 @@ inline void infer_size_impl(
} else if (shape[dim] >= 0) {
newsize *= shape[dim];
} else {
AT_ERROR("invalid shape dimension ", shape[dim]);
TORCH_CHECK(false, "invalid shape dimension ", shape[dim]);
}
}

View File

@ -45,15 +45,15 @@ struct TORCH_API OpaqueTensorImpl : public TensorImpl {
}
void set_size(int64_t dim, int64_t new_size) override {
AT_ERROR("opaque tensors do not have set_size");
TORCH_CHECK(false, "opaque tensors do not have set_size");
}
void set_stride(int64_t dim, int64_t new_stride) override {
AT_ERROR("opaque tensors do not have set_stride");
TORCH_CHECK(false, "opaque tensors do not have set_stride");
}
void set_storage_offset(int64_t storage_offset) override {
AT_ERROR("opaque tensors do not have set_storage_offset");
TORCH_CHECK(false, "opaque tensors do not have set_storage_offset");
}
#ifdef DEBUG

View File

@ -23,7 +23,8 @@
case kSparseBsc: \
return __VA_ARGS__(); \
default: \
AT_ERROR( \
TORCH_CHECK( \
false, \
NAME, \
" expected sparse compressed tensor layout but got ", \
the_layout); \
@ -42,7 +43,8 @@
case kSparseBsc: \
return (COLUMN_DIM_ACTION)(); \
default: \
AT_ERROR( \
TORCH_CHECK( \
false, \
NAME, \
" expected sparse compressed tensor layout but got ", \
the_layout); \
@ -61,7 +63,8 @@
case kSparseBsc: \
return (BLOCK_ACTION)(); \
default: \
AT_ERROR( \
TORCH_CHECK( \
false, \
NAME, \
" expected sparse compressed tensor layout but got ", \
the_layout); \
@ -77,7 +80,8 @@
case kSparseBsr: \
return (ROW_DIM_ACTION)(); \
default: \
AT_ERROR( \
TORCH_CHECK( \
false, \
NAME, \
" expected sparse row compressed tensor layout but got ", \
the_layout); \
@ -93,7 +97,8 @@
case kSparseBsc: \
return (COL_DIM_ACTION)(); \
default: \
AT_ERROR( \
TORCH_CHECK( \
false, \
NAME, \
" expected sparse column compressed tensor layout but got ", \
the_layout); \
@ -108,7 +113,8 @@
case kSparseCsc: \
return (ACTION)(); \
default: \
AT_ERROR( \
TORCH_CHECK( \
false, \
NAME, \
" expected sparse compressed (non-block) tensor layout but got ", \
the_layout); \
@ -123,7 +129,8 @@
case kSparseBsc: \
return (ACTION)(); \
default: \
AT_ERROR( \
TORCH_CHECK( \
false, \
NAME, \
" expected sparse compressed block tensor layout but got ", \
the_layout); \

View File

@ -57,13 +57,13 @@ void SparseTensorImpl::release_resources() {
}
void SparseTensorImpl::set_size(int64_t dim, int64_t new_size) {
AT_ERROR("sparse tensors do not have set_size");
TORCH_CHECK(false, "sparse tensors do not have set_size");
}
void SparseTensorImpl::set_stride(int64_t dim, int64_t new_stride) {
AT_ERROR("sparse tensors do not have set_stride");
TORCH_CHECK(false, "sparse tensors do not have set_stride");
}
void SparseTensorImpl::set_storage_offset(int64_t storage_offset) {
AT_ERROR("sparse tensors do not have set_storage_offset");
TORCH_CHECK(false, "sparse tensors do not have set_storage_offset");
}
#ifdef DEBUG
bool SparseTensorImpl::has_storage() const {

View File

@ -155,7 +155,7 @@ void checkSameGPU(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
}
oss << "but expected " << ((!t1->is_cpu() && !t2->is_cpu()) ? "them" : "it")
<< " to be on GPU (while checking arguments for " << c << ")";
AT_ERROR(oss.str());
TORCH_CHECK(false, oss.str());
}
TORCH_CHECK(
t1->get_device() == t2->get_device(),
@ -200,7 +200,7 @@ void checkScalarTypes(CheckedFrom c, const TensorArg& t,
}
oss << "; but got " << t->toString()
<< " instead (while checking arguments for " << c << ")";
AT_ERROR(oss.str());
TORCH_CHECK(false, oss.str());
}
}
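
Several of the call sites above build the error message in a `std::ostringstream` and hand the finished string to the check. Because the condition is the literal `false`, the message argument is always evaluated and the call always throws, matching the old `AT_ERROR` behaviour. A small standalone sketch, with an illustrative function and message that are not part of the diff:

```cpp
#include <cstdint>
#include <sstream>
#include <c10/util/Exception.h>

// Illustrative check: assemble the message eagerly, then throw with it.
void check_positive(int64_t value, const char* name) {
  if (value <= 0) {
    std::ostringstream oss;
    oss << name << " should be greater than zero but got " << value;
    TORCH_CHECK(false, oss.str()); // always throws a c10::Error carrying oss' contents
  }
}
```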

View File

@ -36,7 +36,8 @@ inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
for (const auto i : c10::irange(tensors.size())) {
const auto& expr = tensors[i];
if (expr.layout() != Layout::Strided) {
AT_ERROR(
TORCH_CHECK(
false,
"Expected dense tensor but got ",
expr.layout(),
" for sequence element ",
@ -48,7 +49,8 @@ inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
"'");
}
if (expr.device().type() != device_type) {
AT_ERROR(
TORCH_CHECK(
false,
"Expected object of device type ",
device_type,
" but got device type ",
@ -62,7 +64,8 @@ inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
"'");
}
if (expr.scalar_type() != scalar_type) {
AT_ERROR(
TORCH_CHECK(
false,
"Expected object of scalar type ",
scalar_type,
" but got scalar type ",
@ -96,7 +99,8 @@ std::array<int64_t, N> check_intlist(
return res;
}
if (list.size() != N) {
AT_ERROR(
TORCH_CHECK(
false,
"Expected a list of ",
N,
" ints but got ",

View File

@ -149,7 +149,7 @@ Banned functions
*******************************/
static Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const std::optional<Tensor>&, int64_t) {
AT_ERROR("torch.nn.functional.binary_cross_entropy and torch.nn.BCELoss are unsafe to autocast.\n"
TORCH_CHECK(false, "torch.nn.functional.binary_cross_entropy and torch.nn.BCELoss are unsafe to autocast.\n"
"Many models use a sigmoid layer right before the binary cross entropy layer.\n"
"In this case, combine the two layers using torch.nn.functional.binary_cross_entropy_with_logits\n"
"or torch.nn.BCEWithLogitsLoss. binary_cross_entropy_with_logits and BCEWithLogits are\n"

View File

@ -211,7 +211,7 @@ inline at::ScalarType prioritize(
const Tensor& nextArg,
c10::DeviceType device_type = c10::DeviceType::CUDA) {
if (current == at::kDouble) {
AT_ERROR("promote type is double in at::autocast::prioritize");
TORCH_CHECK(false, "promote type is double in at::autocast::prioritize");
return current;
}
at::ScalarType lower_precision_fp =
@ -225,7 +225,8 @@ inline at::ScalarType prioritize(
} else if (current == lower_precision_fp && next == lower_precision_fp) {
return lower_precision_fp;
} else {
AT_ERROR("Unexpected floating ScalarType in at::autocast::prioritize");
TORCH_CHECK(
false, "Unexpected floating ScalarType in at::autocast::prioritize");
return current;
}
} else {

View File

@ -28,7 +28,7 @@ struct TORCH_API EnumType : public NamedType {
std::move(enum_names_values),
std::move(cu)));
default:
AT_ERROR(
TORCH_CHECK(false,
"Cannot create Enum with value type '",
value->str(),
"', only int, float and string are supported");

View File

@ -55,7 +55,7 @@ inline void FunctionSchema::checkAndNormalizeInputs(
inputs.push_back(*argument.default_value());
continue;
}
AT_ERROR(
TORCH_CHECK(false,
name(),
"() is missing value for argument '",
argument.name(),

View File

@ -756,7 +756,7 @@ IValueComparator getLessThanComparator(const IValue& v) {
torch::jit::Function* lt_func =
checkObjectSortSchema(v.type()->expect<ClassType>(), why_not);
if (!lt_func) {
AT_ERROR(why_not.str());
TORCH_CHECK(false, why_not.str());
}
return [lt_func](const IValue& a, const IValue& b) {
@ -772,7 +772,7 @@ IValueComparator getLessThanComparator(const IValue& v) {
};
}
AT_ERROR("IValues of type: ", v.tagKind(), " are not comparable");
TORCH_CHECK(false, "IValues of type: ", v.tagKind(), " are not comparable");
}
IValueComparator getGreaterThanComparator(const IValue& v) {
@ -967,7 +967,7 @@ IValue IValue::deepcopy(
copy = *this;
} break;
default: {
AT_ERROR("Can't deepcopy IValue with tag: ", tagKind());
TORCH_CHECK(false, "Can't deepcopy IValue with tag: ", tagKind());
}
}
// NB: this doesn't work if an object contains itself, and it may
@ -1050,7 +1050,7 @@ c10::intrusive_ptr<ivalue::Object> ivalue::Object::deepcopy(
}
err << ". Please define serialization methods via def_pickle() for "
"this class.";
AT_ERROR(err.str());
TORCH_CHECK(false, err.str());
}
object->setSlot(i, slots_[i].deepcopy(memo, device));
}

View File

@ -938,7 +938,7 @@ struct TORCH_API DictType : public SharedType {
case TypeKind::DeviceObjType:
return DictTypePtr(new DictType(std::move(key), std::move(value)));
default:
AT_ERROR(
TORCH_CHECK(false,
"Cannot create dict for key type '",
key->str(),
"', only int, float, complex, Tensor, device and string keys are supported");

View File

@ -585,7 +585,7 @@ struct TORCH_API Type {
virtual TypePtr createWithContained(
// NOLINTNEXTLINE(performance-unnecessary-value-param)
std::vector<TypePtr> /*contained_types*/) const {
AT_ERROR(
TORCH_CHECK(false,
"type with contained types did not overload createWithContained: ",
str());
}

View File

@ -629,7 +629,7 @@ MatchTypeReturn matchTypeVariables(
}
}
AT_ERROR("Unhandled free variable container: ", formal->repr_str());
TORCH_CHECK(false, "Unhandled free variable container: ", formal->repr_str());
}
// change return types like List[List[t]] into List[List[int]]

View File

@ -34,7 +34,7 @@ static rocblas_operation hipOperationToRocOperation(hipblasOperation_t op)
case HIPBLAS_OP_C:
return rocblas_operation_conjugate_transpose;
}
AT_ERROR("HIPBLAS_STATUS_INVALID_ENUM");
TORCH_CHECK(false, "HIPBLAS_STATUS_INVALID_ENUM");
}
static hipblasStatus_t rocBLASStatusToHIPStatus(rocblas_status error)
{
@ -57,7 +57,7 @@ static hipblasStatus_t rocBLASStatusToHIPStatus(rocblas_status error)
case rocblas_status_internal_error:
return HIPBLAS_STATUS_INTERNAL_ERROR;
}
AT_ERROR("HIPBLAS_STATUS_INVALID_ENUM");
TORCH_CHECK(false, "HIPBLAS_STATUS_INVALID_ENUM");
}
// hipblas does not have hipblasSetMathMode
#define hipblasSetMathMode(handle, flags) HIPBLAS_STATUS_SUCCESS
@ -116,7 +116,7 @@ static cublasOperation_t _cublasOpFromChar(char op) {
case 'C':
return CUBLAS_OP_C;
}
AT_ERROR(
TORCH_CHECK(false,
"_cublasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`");
}
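
The BLAS wrappers above show another recurring shape: a `switch` over an operation code returns from every handled case, and the unconditional check after the switch reports any value that fell through. A standalone sketch with a made-up enum and converter, not taken from the diff:

```cpp
#include <c10/util/Exception.h>

// Hypothetical operation enum; the real code maps cuBLAS/rocBLAS/hipBLAS enums.
enum class Op { None, Transpose, ConjTranspose };

char opToChar(Op op) {
  switch (op) {
    case Op::None:
      return 'n';
    case Op::Transpose:
      return 't';
    case Op::ConjTranspose:
      return 'c';
  }
  // Reached only for an out-of-range value; report it instead of returning garbage.
  TORCH_CHECK(false, "opToChar got an unexpected Op value ", static_cast<int>(op));
}
```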

View File

@ -165,9 +165,9 @@ constexpr const char* _cusolver_backend_suggestion = \
[[maybe_unused]] CUresult get_error_str_err = \
at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \
if (get_error_str_err != CUDA_SUCCESS) { \
AT_ERROR("CUDA driver error: unknown error"); \
TORCH_CHECK(false, "CUDA driver error: unknown error"); \
} else { \
AT_ERROR("CUDA driver error: ", err_str); \
TORCH_CHECK(false, "CUDA driver error: ", err_str); \
} \
} \
} while (0)
@ -178,7 +178,7 @@ constexpr const char* _cusolver_backend_suggestion = \
do { \
CUresult __err = EXPR; \
if (__err != CUDA_SUCCESS) { \
AT_ERROR("CUDA driver error: ", static_cast<int>(__err)); \
TORCH_CHECK(false, "CUDA driver error: ", static_cast<int>(__err)); \
} \
} while (0)
@ -198,9 +198,9 @@ constexpr const char* _cusolver_backend_suggestion = \
nvrtcResult __err = EXPR; \
if (__err != NVRTC_SUCCESS) { \
if (static_cast<int>(__err) != 7) { \
AT_ERROR("CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); \
TORCH_CHECK(false, "CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); \
} else { \
AT_ERROR("CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); \
TORCH_CHECK(false, "CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); \
} \
} \
} while (0)
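
Inside macro definitions such as the driver and NVRTC checks above, the rewrite keeps the backslash continuations and supplies `false` as the first argument, frequently on its own line. A standalone sketch of the same shape; the macro name and error source are assumptions for illustration only:

```cpp
#include <c10/util/Exception.h>

// Hypothetical status-checking macro mirroring the pattern above: the body is
// wrapped in do { ... } while (0) so it acts as a single statement, and the
// literal false condition makes TORCH_CHECK throw whenever the branch is taken.
#define MY_DRIVER_CHECK(EXPR)                                      \
  do {                                                             \
    int __status = (EXPR);                                         \
    if (__status != 0) {                                           \
      TORCH_CHECK(                                                 \
          false, "driver call failed with error code ", __status); \
    }                                                              \
  } while (0)
```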

View File

@ -300,7 +300,7 @@ long CUDAHooks::versionCuDNN() const {
#if AT_CUDNN_ENABLED()
return CUDNN_VERSION;
#else
AT_ERROR("Cannot query CuDNN version if ATen_cuda is not built with CuDNN");
TORCH_CHECK(false, "Cannot query CuDNN version if ATen_cuda is not built with CuDNN");
#endif
}
@ -408,7 +408,7 @@ double CUDAHooks::batchnormMinEpsilonCuDNN() const {
#if AT_CUDNN_ENABLED()
return CUDNN_BN_MIN_EPSILON;
#else
AT_ERROR(
TORCH_CHECK(false,
"Cannot query CUDNN_BN_MIN_EPSILON if ATen_cuda is not built with CuDNN");
#endif
}

View File

@ -310,7 +310,7 @@ static hipblasOperation_t _hipblasOpFromChar(char op) {
case 'C':
return HIPBLAS_OP_C;
}
AT_ERROR(
TORCH_CHECK(false,
"_hipblasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`");
}
@ -323,7 +323,7 @@ static char _charFromhipblasOp(hipblasOperation_t op) {
case HIPBLAS_OP_C:
return 'C';
}
AT_ERROR(
TORCH_CHECK(false,
"_charFromhipblasOp input should be HIPBLAS_OP_N/T/C but got `", op, "`");
}

View File

@ -130,7 +130,7 @@ static rocblas_operation _rocblasOpFromChar(char op) {
case 'C':
return rocblas_operation_conjugate_transpose;
}
AT_ERROR(
TORCH_CHECK(false,
"_rocblasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`");
}

View File

@ -113,7 +113,7 @@ _cudnn_rnn_cast_reflatten(const Tensor & input,
batch_sizes,
dropout_state);
#else // AT_CUDNN_ENABLED()
AT_ERROR("autocast::_cudnn_rnn_cast_reflatten: ATen not compiled with cuDNN support");
TORCH_CHECK(false, "autocast::_cudnn_rnn_cast_reflatten: ATen not compiled with cuDNN support");
return {Tensor{}, Tensor{}, Tensor{}, Tensor{}, Tensor{}}; // never reached, placates the compiler
#endif // AT_CUDNN_ENABLED()
}
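
Stubs like `_cudnn_rnn_cast_reflatten` above contain nothing but the throw, yet keep a placeholder return value; as the in-tree comment notes, the return is never reached and only placates compilers that would otherwise warn about a missing return in a non-void function. A sketch of that shape under assumed names (the backend macro, function, and kernel below are hypothetical):

```cpp
#include <ATen/core/Tensor.h>
#include <c10/util/Exception.h>

// Hypothetical stub compiled when the backend is unavailable.
at::Tensor my_backend_op(const at::Tensor& input) {
#if MY_BACKEND_ENABLED
  return run_my_backend_kernel(input); // assumed real implementation
#else
  TORCH_CHECK(false, "my_backend_op: ATen not compiled with MyBackend support");
  return at::Tensor{}; // never reached, placates the compiler
#endif
}
```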

View File

@ -42,7 +42,7 @@ struct TORCH_API HIPHooksInterface : AcceleratorHooksInterface {
}
Allocator* getPinnedMemoryAllocator() const override {
AT_ERROR("Pinned memory requires HIP.");
TORCH_CHECK(false, "Pinned memory requires HIP.");
}
virtual int getNumGPUs() const {
@ -50,7 +50,7 @@ struct TORCH_API HIPHooksInterface : AcceleratorHooksInterface {
}
bool hasPrimaryContext(DeviceIndex device_index) const override {
AT_ERROR("Cannot check primary context without ATen_hip library.");
TORCH_CHECK(false, "Cannot check primary context without ATen_hip library.");
}
};

View File

@ -224,7 +224,7 @@ static Tensor one_hot_decomposition_hack(const Tensor &self, int64_t num_classes
// but shape inference is not possible.
if (self.sym_numel() == 0) {
if (num_classes <= 0) {
AT_ERROR("Can not infer total number of classes from empty tensor.");
TORCH_CHECK(false, "Can not infer total number of classes from empty tensor.");
} else {
shape.emplace_back(num_classes);
return at::empty_symint(shape, self.options());

View File

@ -16,7 +16,7 @@ at::Tensor& metal_copy_(at::Tensor& self, const at::Tensor& src) {
if (p) {
return p->metal_copy_(self, src);
}
AT_ERROR("Metal backend was not linked to the build");
TORCH_CHECK(false, "Metal backend was not linked to the build");
}
} // namespace at::metal

View File

@ -46,7 +46,7 @@ miopen_rnn(const Tensor & input_r,
fn_dropout_state_opt);
#else
AT_ERROR("autocast::miopen_rnn: ATen not compiled with ROCm enabled");
TORCH_CHECK(false, "autocast::miopen_rnn: ATen not compiled with ROCm enabled");
return {Tensor{}, Tensor{}, Tensor{}, Tensor{}, Tensor{}}; // placate the compiler
#endif

View File

@ -189,7 +189,7 @@ void MPSProfiler::initialize() {
currentSigint.sa_flags = SA_RESTART;
sigfillset(&currentSigint.sa_mask);
if (sigaction(SIGINT, &currentSigint, &previousSigint) == -1) {
AT_ERROR("Cannot install SIGINT handler for MPSProfiler.");
TORCH_CHECK(false, "Cannot install SIGINT handler for MPSProfiler.");
}
}
}
@ -207,7 +207,7 @@ void MPSProfiler::StartTrace(const std::string& mode, bool waitUntilCompleted) {
} else if (token == "event") {
m_profile_options |= ProfileOptions::ALL_SIGNPOST_EVENTS;
} else {
AT_ERROR("Invalid Signpost trace mode: ", token);
TORCH_CHECK(false, "Invalid Signpost trace mode: ", token);
}
}
}
@ -654,7 +654,7 @@ bool MPSProfiler::isProfileInfoLoggingEnabled(BaseInfo::Type infoType, bool isEx
isInfoLoggingEnabled = (m_log_options & LogOptions::CPU_FALLBACK_INFO);
break;
default:
AT_ERROR("invalid profiling info type");
TORCH_CHECK(false, "invalid profiling info type");
}
if (!isInfoLoggingEnabled) {
return false;
@ -685,7 +685,7 @@ void MPSProfiler::emitSignpostEvent(SignpostTypes signpost_type,
os_signpost_event_emit(m_os_log_events, signpost_id, kEvtSignpostCPUFallbacksStr, "%s", msg);
break;
default:
AT_ERROR("unknown SignpostType in MPS profiler");
TORCH_CHECK(false, "unknown SignpostType in MPS profiler");
}
}
@ -709,7 +709,7 @@ void MPSProfiler::beginSignpostInterval(SignpostTypes signpost_type,
os_signpost_interval_begin(m_os_log_intervals, signpost_id, kIntSignpostCPUFallbacksStr, "%s", msg);
break;
default:
AT_ERROR("unknown SignpostType in MPS profiler");
TORCH_CHECK(false, "unknown SignpostType in MPS profiler");
}
}
@ -728,7 +728,7 @@ void MPSProfiler::endSignpostInterval(SignpostTypes signpost_type, os_signpost_i
os_signpost_interval_end(m_os_log_intervals, signpost_id, kIntSignpostCPUFallbacksStr);
break;
default:
AT_ERROR("unknown SignpostType in MPS profiler");
TORCH_CHECK(false, "unknown SignpostType in MPS profiler");
}
}
@ -750,7 +750,7 @@ MPSProfiler::SignpostTypes MPSProfiler::getSignpostType(BaseInfo::Type infoType)
case BaseInfo::Type::CPU_FALLBACK:
return SignpostTypes::CPU_FALLBACK;
default:
AT_ERROR("invalid profiling info type");
TORCH_CHECK(false, "invalid profiling info type");
}
}

View File

@ -1624,7 +1624,7 @@ Tensor inverse(const Tensor& A) {
template<typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, Tensor& infos) {
#if !AT_BUILD_WITH_LAPACK()
AT_ERROR("cholesky_solve: LAPACK library not found in compilation");
TORCH_CHECK(false, "cholesky_solve: LAPACK library not found in compilation");
#else
char uplo = upper ? 'U' : 'L';

View File

@ -168,7 +168,7 @@ static void check_args(CheckedFrom c, IntArrayRef args, size_t expected_size, co
ss << arg_name << " should be greater than zero but got (";
std::copy(args.begin(), args.end() - 1, std::ostream_iterator<int>(ss,", "));
ss << args.back() << ")" << " (while checking arguments for " << c << ")";
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
}

View File

@ -719,7 +719,7 @@ static void check_shape_forward(const at::Tensor& input,
separator = " x ";
}
AT_ERROR("Calculated padded input size per channel: (", input_ss.str(), "). "
TORCH_CHECK(false, "Calculated padded input size per channel: (", input_ss.str(), "). "
"Kernel size: (", kernel_ss.str(), "). Kernel size can't be greater than actual input size");
}
} else { // transposed
@ -1304,7 +1304,7 @@ ConvBackend _select_conv_backend(
}
// Error out if no suitable backend was found.
AT_ERROR("unsupported ConvNd parameters");
TORCH_CHECK(false, "unsupported ConvNd parameters");
}
// Selects a backend for convolution based on the inputs and params.

View File

@ -262,7 +262,7 @@ void* DispatchStubImpl::get_call_ptr(
false, "DispatchStub: missing kernel for ", device_type);
return nullptr;
case ErrorType::DeviceNotSupported:
AT_ERROR("DispatchStub: unsupported device type", device_type);
TORCH_CHECK(false, "DispatchStub: unsupported device type", device_type);
}
}

View File

@ -81,7 +81,7 @@ Tensor embedding_sparse_backward(
// TODO: implement scale_grad_by_freq
if (scale_grad_by_freq) {
AT_ERROR(
TORCH_CHECK(false,
"embedding_backward: scale_grad_by_freq not supported with sparse gradients");
}

View File

@ -104,7 +104,7 @@ Tensor& fill_diagonal_(Tensor& self, const Scalar& fill_value, bool wrap) {
int64_t dim1 = height;
for (const auto i : c10::irange(1, nDims)) {
if (self.size(i) != dim1) {
AT_ERROR("all dimensions of input must be of equal length");
TORCH_CHECK(false, "all dimensions of input must be of equal length");
}
}
}

View File

@ -269,7 +269,7 @@ inline double _get_epsilon(const ScalarType& sc_type) {
case at::ScalarType::Double:
return std::numeric_limits<double>::epsilon();
default:
AT_ERROR("This function doesn't handle types other than float and double");
TORCH_CHECK(false, "This function doesn't handle types other than float and double");
}
}

View File

@ -136,7 +136,7 @@ static void max_unpooling3d_shape_check(
if (gradOutput.defined()) {
if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) ||
oW != gradOutput.size(dimw)) {
AT_ERROR(
TORCH_CHECK(false,
"Inconsistent gradOutput size. oT= ",
oT,
", oH= ",

View File

@ -85,7 +85,7 @@ static inline void slow_conv_transpose2d_shape_check(
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
TORCH_CHECK(false, "weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
@ -112,7 +112,7 @@ static inline void slow_conv_transpose2d_shape_check(
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_width < 1 || output_height < 1) {
AT_ERROR(
TORCH_CHECK(false,
"Given input size per channel: (",
input_height,
" x ",

View File

@ -107,7 +107,7 @@ static inline void slow_conv_transpose3d_shape_check(
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
TORCH_CHECK(false, "weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
@ -142,7 +142,7 @@ static inline void slow_conv_transpose3d_shape_check(
output_padding_width;
if (output_depth < 1 || output_width < 1 || output_height < 1) {
AT_ERROR(
TORCH_CHECK(false,
"Given input size per channel: (",
input_depth,
" x ",

View File

@ -573,12 +573,12 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t> _batch_norm_impl_index(
if (running_mean.defined()) {
check_dims_match_num_input_features("running_mean", num_features, running_mean.sym_numel());
} else if (!training) {
AT_ERROR("running_mean must be defined in evaluation mode");
TORCH_CHECK(false, "running_mean must be defined in evaluation mode");
}
if (running_var.defined()) {
check_dims_match_num_input_features("running_var", num_features, running_var.sym_numel());
} else if (!training) {
AT_ERROR("running_var must be defined in evaluation mode");
TORCH_CHECK(false, "running_var must be defined in evaluation mode");
}
if (weight.defined()) {
check_dims_match_num_input_features("weight", num_features, weight.sym_numel());

View File

@ -34,7 +34,7 @@ Tensor one_hot(const Tensor &self, int64_t num_classes) {
// but shape inference is not possible.
if (self.numel() == 0) {
if (num_classes <= 0) {
AT_ERROR("Can not infer total number of classes from empty tensor.");
TORCH_CHECK(false, "Can not infer total number of classes from empty tensor.");
} else {
shape.push_back(num_classes);
return at::empty(shape, self.options());

View File

@ -51,7 +51,7 @@ std::tuple<Tensor, Tensor> _pack_padded_sequence(const Tensor& _input, const Ten
// NB: enforce_sorted is implemented at a Python level, but the sortedness
// check lives here. If enforce_sorted=False then this error should never
// get called.
AT_ERROR("`lengths` array must be sorted in decreasing order when "
TORCH_CHECK(false, "`lengths` array must be sorted in decreasing order when "
"`enforce_sorted` is True. You can pass `enforce_sorted=False` "
"to pack_padded_sequence and/or pack_sequence to sidestep this "
"requirement if you do not need ONNX exportability.");

View File

@ -83,7 +83,7 @@ Tensor repeat_interleave_symint(
repeats.sym_size(0), " and input.size(", dim.value(), ") = ", input.sym_size(dim.value())
);
} else {
AT_ERROR("repeats must be 0-dim or 1-dim tensor");
TORCH_CHECK(false, "repeats must be 0-dim or 1-dim tensor");
}
auto ret = input.index_select(

View File

@ -881,12 +881,12 @@ Tensor stft(const Tensor& self, const int64_t n_fft, const std::optional<int64_t
if (!at::isFloatingType(self.scalar_type()) && !at::isComplexType(self.scalar_type())) {
std::ostringstream ss;
REPR(ss) << ": expected a tensor of floating point or complex values";
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
if (self.dim() > 2 || self.dim() < 1) {
std::ostringstream ss;
REPR(ss) << ": expected a 1D or 2D tensor";
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
Tensor input = self;
if (self.dim() == 1) {
@ -911,24 +911,24 @@ Tensor stft(const Tensor& self, const int64_t n_fft, const std::optional<int64_t
std::ostringstream ss;
REPR(ss) << ": expected 0 < n_fft < " << len
<< ", but got n_fft=" << win_length;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
if (hop_length <= 0) {
std::ostringstream ss;
REPR(ss) << ": expected hop_length > 0, but got hop_length=" << hop_length;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
if (win_length <= 0 || win_length > n_fft) {
std::ostringstream ss;
REPR(ss) << ": expected 0 < win_length <= n_fft, but got win_length="
<< win_length;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
if (window.defined() && (window.dim() != 1 || window.size(0) != win_length)) {
std::ostringstream ss;
REPR(ss) << ": expected a 1D window tensor of size equal to win_length="
<< win_length << ", but got window with size " << window.sizes();
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
#undef REPR
auto window_ = window;
@ -1063,17 +1063,17 @@ Tensor istft(const Tensor& self, const int64_t n_fft, const std::optional<int64_
if (input.numel() == 0) {
std::ostringstream ss;
REPR(ss) << ": input tensor cannot be empty.";
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
if (input_dim != 3 && input_dim != 4) {
std::ostringstream ss;
REPR(ss) << ": expected a tensor with 3 or 4 dimensions, but got " << input_dim;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
if (input.size(-1) != 2) {
std::ostringstream ss;
REPR(ss) << ": expected the last dimension to be 2 (corresponding to real and imaginary parts), but got " << self.size(-1);
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
const bool onesided = onesidedOpt.value_or(fft_size != n_fft);
@ -1081,32 +1081,32 @@ Tensor istft(const Tensor& self, const int64_t n_fft, const std::optional<int64_
if (n_fft / 2 + 1 != fft_size) {
std::ostringstream ss;
REPR(ss) << ": expected the frequency dimension (3rd to the last) of the input tensor to match n_fft / 2 + 1 when onesided=True, but got " << fft_size;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
} else {
if (n_fft != fft_size) {
std::ostringstream ss;
REPR(ss) << ": expected the frequency dimension (3rd to the last) of the input tensor to match n_fft when onesided=False, but got " << fft_size;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
}
if (!(0 < hop_length && hop_length <= win_length)) {
std::ostringstream ss;
REPR(ss) << ": expected 0 < hop_length <= win_length";
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
if (!(0 < win_length && win_length <= n_fft)) {
std::ostringstream ss;
REPR(ss) << ": expected 0 < win_length <= n_fft";
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
if (window.defined()) {
if (window.dim() != 1 || window.size(0) != win_length) {
std::ostringstream ss;
REPR(ss) << ": Invalid window shape. window has to be 1D and length of `win_length`";
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
}
@ -1175,7 +1175,7 @@ Tensor istft(const Tensor& self, const int64_t n_fft, const std::optional<int64_
if (at::is_scalar_tensor_true(window_envelop_lowest)) {
std::ostringstream ss;
REPR(ss) << "window overlap add min: " << window_envelop_lowest;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
y = (y / window_envelop); // size: (channel, expected_output_signal_len)

View File

@ -63,7 +63,7 @@ inline int64_t infer_ft_complex_to_real_onesided_size(int64_t complex_size,
std::ostringstream ss;
ss << "expected real signal size " << expected_size << " is incompatible "
<< "with onesided complex frequency size " << complex_size;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
}

View File

@ -26,19 +26,19 @@ Tensor _bincount_cpu_template(
const Tensor& weights,
int64_t minlength) {
if (minlength < 0) {
AT_ERROR("minlength should be >= 0");
TORCH_CHECK(false, "minlength should be >= 0");
}
if (self.dim() == 1 && self.numel() == 0) {
return at::zeros({minlength}, kLong);
}
if (self.dim() != 1 || *self.min().data_ptr<input_t>() < 0) {
AT_ERROR("bincount only supports 1-d non-negative integral inputs.");
TORCH_CHECK(false, "bincount only supports 1-d non-negative integral inputs.");
}
// Ensure max_val < 2 ^ 63 - 1 (9223372036854775807)
auto max_val = *self.max().data_ptr<input_t>();
if (max_val >= std::numeric_limits<int64_t>::max()) {
AT_ERROR(
TORCH_CHECK(false,
"maximum value of input overflowed, it should be < ",
std::numeric_limits<int64_t>::max(),
" but got ",
@ -48,7 +48,7 @@ Tensor _bincount_cpu_template(
bool has_weights = weights.defined();
if (has_weights && (weights.dim() != 1 || weights.size(0) != self.size(0))) {
AT_ERROR("weights should be 1-d and have the same length as input");
TORCH_CHECK(false, "weights should be 1-d and have the same length as input");
}
Tensor output;

View File

@ -588,7 +588,7 @@ Tensor to_dense_backward(const Tensor& grad, const Tensor& input_, std::optional
case kMkldnn:
return grad.to_mkldnn(input_.scalar_type());
default:
AT_ERROR("to_dense_backward: Unsupported input layout: ", input_layout);
TORCH_CHECK(false, "to_dense_backward: Unsupported input layout: ", input_layout);
return Tensor{};
}
}
@ -928,23 +928,23 @@ void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self,
auto layout_from_valid = layout_from == kStrided || layout_from == kSparse || at::sparse_csr::is_sparse_compressed(layout_from);
if (!layout_from_valid) {
AT_ERROR(funcname, ": unexpected source layout ", layout_from);
TORCH_CHECK(false, funcname, ": unexpected source layout ", layout_from);
}
if (layout_from == kStrided) {
if (sparse_dim == 0 && self.dim() > 0) {
AT_ERROR(funcname, ": sparse_dim argument must be in >0 when self.dim()>0");
TORCH_CHECK(false, funcname, ": sparse_dim argument must be in >0 when self.dim()>0");
}
if (sparse_dim < 0 || sparse_dim > self.dim()) {
AT_ERROR(funcname, ": sparse_dim argument must be in [0,", self.dim(), "] range, but ", sparse_dim, " is given");
TORCH_CHECK(false, funcname, ": sparse_dim argument must be in [0,", self.dim(), "] range, but ", sparse_dim, " is given");
}
} else if (layout_from == kSparse) {
if (sparse_dim != self.sparse_dim()) {
AT_ERROR(funcname, ": conversion from ", layout_from, " to ", kSparse, " with sparse_dim argument !=self.sparse_dim() is not supported");
TORCH_CHECK(false, funcname, ": conversion from ", layout_from, " to ", kSparse, " with sparse_dim argument !=self.sparse_dim() is not supported");
}
} else if (at::sparse_csr::is_sparse_compressed(layout_from)) {
if (sparse_dim != 2) {
AT_ERROR(funcname, ": conversion from ", layout_from, " to ", kSparse, " with sparse_dim argument !=2 is not supported");
TORCH_CHECK(false, funcname, ": conversion from ", layout_from, " to ", kSparse, " with sparse_dim argument !=2 is not supported");
}
}
}
@ -956,40 +956,40 @@ void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self,
auto layout_from_valid = layout_from == kStrided || layout_from == kSparse || at::sparse_csr::is_sparse_compressed(layout_from);
if (!layout_from_valid) {
AT_ERROR(funcname, ": unexpected source layout ", layout_from);
TORCH_CHECK(false, funcname, ": unexpected source layout ", layout_from);
}
auto layout_to_valid = layout_to == kStrided || layout_to == kSparse || at::sparse_csr::is_sparse_compressed(layout_to);
if (!layout_to_valid) {
AT_ERROR(funcname, ": unexpected source layout ", layout_from);
TORCH_CHECK(false, funcname, ": unexpected source layout ", layout_from);
}
if (layout_from == kSparse && layout_to != kSparse) {
if (self.sparse_dim() != 2) {
AT_ERROR(funcname, ": conversion from ", layout_from, " to ", layout_to, " for input tensors with sparse_dim()!=2 is not supported");
TORCH_CHECK(false, funcname, ": conversion from ", layout_from, " to ", layout_to, " for input tensors with sparse_dim()!=2 is not supported");
}
}
if ((layout_from == kSparseCsr || layout_from == kSparseCsc) &&
(layout_to == kSparseBsr || layout_to == kSparseBsc)) {
if (sparse_csr::numBatchDimensions(self) > 0) {
AT_ERROR(funcname, ": conversion from ", layout_from, " to ", layout_to, " for batched inputs is not supported");
TORCH_CHECK(false, funcname, ": conversion from ", layout_from, " to ", layout_to, " for batched inputs is not supported");
}
}
if (blocksize.has_value()) {
if (blocksize.value().size() != 2) {
AT_ERROR(funcname, ": blocksize needs to be a tuple of size 2, but got ", blocksize.value().size());
TORCH_CHECK(false, funcname, ": blocksize needs to be a tuple of size 2, but got ", blocksize.value().size());
}
auto blocksize_to = *blocksize;
if (blocksize_to[0] <= 0 || blocksize_to[1] <= 0) {
AT_ERROR(funcname, ": blocksize needs to be positive, but got ", blocksize_to);
TORCH_CHECK(false, funcname, ": blocksize needs to be positive, but got ", blocksize_to);
}
if (layout_to == kSparseBsr || layout_to == kSparseBsc) {
if (layout_from == kSparseBsr || layout_from == kSparseBsc) {
auto blocksize_from = at::sparse_csr::getBlockSize(self);
if (!(blocksize_to == blocksize_from)) {
AT_ERROR(funcname, ": conversion from ", layout_from, " to ", layout_to, " with blocksize changed from ", blocksize_from, " to ", blocksize_to, " is not supported");
TORCH_CHECK(false, funcname, ": conversion from ", layout_from, " to ", layout_to, " with blocksize changed from ", blocksize_from, " to ", blocksize_to, " is not supported");
}
} else {
auto dense_dim = (layout_from == kStrided) ? dense_dim_opt.value_or(0) : self.dense_dim();
@ -997,35 +997,35 @@ void _to_sparse_check_arguments(const std::string& funcname, const Tensor& self,
auto sparse_col_dim = -(dense_dim + 1);
if ((self.size(sparse_row_dim) % blocksize_to[0] != 0) ||
(self.size(sparse_col_dim) % blocksize_to[1] != 0)) {
AT_ERROR(funcname, ": tensor sparse size (", self.size(sparse_row_dim), ",", self.size(sparse_row_dim), ") must be divisible by given blocksize (", blocksize_to[0], ",", blocksize_to[1], ")");
TORCH_CHECK(false, funcname, ": tensor sparse size (", self.size(sparse_row_dim), ",", self.size(sparse_row_dim), ") must be divisible by given blocksize (", blocksize_to[0], ",", blocksize_to[1], ")");
}
}
} else {
AT_ERROR(funcname, ": conversion from ", layout_from, " to ", layout_to, " with blocksize argument given is not supported");
TORCH_CHECK(false, funcname, ": conversion from ", layout_from, " to ", layout_to, " with blocksize argument given is not supported");
}
} else {
if ((layout_to == kSparseBsr || layout_to == kSparseBsc) &&
!(layout_from == kSparseBsr && layout_from == kSparseBsc)) {
AT_ERROR(funcname, ": conversion from ", layout_from, " to ", layout_to, " without blocksize argument given is not supported");
TORCH_CHECK(false, funcname, ": conversion from ", layout_from, " to ", layout_to, " without blocksize argument given is not supported");
}
}
if (dense_dim_opt.has_value()) {
if (layout_from != kStrided) {
AT_ERROR(funcname, ": conversion from ", layout_from, " to ", layout_to, " with dense_dim argument given is not supported");
TORCH_CHECK(false, funcname, ": conversion from ", layout_from, " to ", layout_to, " with dense_dim argument given is not supported");
}
auto dense_dim = *dense_dim_opt;
if (layout_to == kSparse) {
if (dense_dim == self.dim() && self.dim() > 0) {
AT_ERROR(funcname, ": dense_dim argument must be !=self.dim() when self.dim()>0");
TORCH_CHECK(false, funcname, ": dense_dim argument must be !=self.dim() when self.dim()>0");
}
if (dense_dim < 0 || dense_dim > self.dim()) {
AT_ERROR(funcname, ": dense_dim argument must be in [0,", self.dim(), "] range, but ", dense_dim, " is given");
TORCH_CHECK(false, funcname, ": dense_dim argument must be in [0,", self.dim(), "] range, but ", dense_dim, " is given");
}
} else {
if (dense_dim < 0 || dense_dim > self.dim() - 2) {
AT_ERROR(funcname, ": dense_dim argument must be in [0,", self.dim() - 2, "] range, but ", dense_dim, " is given");
TORCH_CHECK(false, funcname, ": dense_dim argument must be in [0,", self.dim() - 2, "] range, but ", dense_dim, " is given");
}
}
}
@ -1129,7 +1129,7 @@ Tensor dense_to_sparse_with_mask(const Tensor& self, const Tensor& mask, std::op
break;
}
AT_ERROR("dense_to_sparse_with_mask: ", self.layout(), " to ", layout_to, " conversion not supported");
TORCH_CHECK(false, "dense_to_sparse_with_mask: ", self.layout(), " to ", layout_to, " conversion not supported");
return Tensor{};
}
@ -1181,7 +1181,7 @@ Tensor dense_to_sparse(const Tensor& self, std::optional<c10::Layout> layout, Op
break;
}
AT_ERROR("dense_to_sparse: ", self.layout(), " to ", layout_to, " conversion not supported");
TORCH_CHECK(false, "dense_to_sparse: ", self.layout(), " to ", layout_to, " conversion not supported");
return Tensor{};
}
@ -1440,7 +1440,7 @@ Tensor sparse_compressed_to_sparse_csr(const Tensor& self, std::optional<int64_t
return sparse_compressed_to_flipped(self, std::nullopt, "to_sparse_csr");
}
AT_ERROR("sparse_compressed_to_sparse_csr: expected SparseCsr or SparseCsc layout but got ", self.layout());
TORCH_CHECK(false, "sparse_compressed_to_sparse_csr: expected SparseCsr or SparseCsc layout but got ", self.layout());
return Tensor{};
}
@ -1453,7 +1453,7 @@ Tensor sparse_compressed_to_sparse_csc(const Tensor& self, std::optional<int64_t
return sparse_compressed_to_flipped(self, std::nullopt, "to_sparse_csc");
}
AT_ERROR("sparse_compressed_to_sparse_csc: expected SparseCsr or SparseCsc layout but got ", self.layout());
TORCH_CHECK(false, "sparse_compressed_to_sparse_csc: expected SparseCsr or SparseCsc layout but got ", self.layout());
return Tensor{};
}
@ -1828,7 +1828,7 @@ Tensor sparse_compressed_to_sparse_bsr(const Tensor& self, IntArrayRef blocksize
return self.to_sparse_csr(dense_dim_opt).to_sparse_bsr(blocksize);
}
AT_ERROR("sparse_compressed_to_sparse_bsr: expected SparseCsr, SparseCsc, SparseBsr or SparseBsc layout but got ", self.layout());
TORCH_CHECK(false, "sparse_compressed_to_sparse_bsr: expected SparseCsr, SparseCsc, SparseBsr or SparseBsc layout but got ", self.layout());
return Tensor{};
}
@ -1850,14 +1850,14 @@ Tensor sparse_compressed_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize
return self.to_sparse_csc(dense_dim_opt).to_sparse_bsc(blocksize);
}
AT_ERROR("sparse_compressed_to_sparse_bsc: expected SparseCsr, SparseCsc, SparseBsr or SparseBsc layout but got ", self.layout());
TORCH_CHECK(false, "sparse_compressed_to_sparse_bsc: expected SparseCsr, SparseCsc, SparseBsr or SparseBsc layout but got ", self.layout());
return Tensor{};
}
Tensor sparse_coo_to_sparse(const Tensor& self, const int64_t sparse_dim) {
_to_sparse_check_arguments("sparse_coo_to_sparse", self, sparse_dim);
AT_ERROR("sparse_coo_to_sparse: ", self.layout(), " to ", kSparse, " conversion not supported");
TORCH_CHECK(false, "sparse_coo_to_sparse: ", self.layout(), " to ", kSparse, " conversion not supported");
return Tensor{};
}
@ -1927,7 +1927,7 @@ Tensor sparse_compressed_to_sparse(const Tensor& self, std::optional<c10::Layout
break;
}
AT_ERROR("sparse_compressed_to_sparse: ", self.layout(), " to ", layout_to, " conversion not supported");
TORCH_CHECK(false, "sparse_compressed_to_sparse: ", self.layout(), " to ", layout_to, " conversion not supported");
return Tensor{};
}
@ -1951,7 +1951,7 @@ Tensor sparse_coo_to_sparse(const Tensor& self, std::optional<c10::Layout> layou
break;
}
AT_ERROR("sparse_coo_to_sparse: ", self.layout(), " to ", layout_to, " conversion not supported");
TORCH_CHECK(false, "sparse_coo_to_sparse: ", self.layout(), " to ", layout_to, " conversion not supported");
return Tensor{};
}

View File

@ -101,7 +101,7 @@ bool cudnn_is_acceptable(const Tensor& self) {
Tensor & detach_(Tensor & self) {
// this just exists to give us a hook in VariableType and an entry in Declarations.yaml
//AT_ERROR("detach_ is not implemented for Tensor");
//TORCH_CHECK(false, "detach_ is not implemented for Tensor");
return self;
}

View File

@ -83,11 +83,11 @@ void cpu_max_unpool(
if (optional_error_index) {
if constexpr (is_3d) {
AT_ERROR("Found an invalid max index: ", optional_error_index.value(),
TORCH_CHECK(false, "Found an invalid max index: ", optional_error_index.value(),
" (output volumes are of size ", output_depth,
"x", output_height, "x", output_width);
} else {
AT_ERROR("Found an invalid max index: ", optional_error_index.value(),
TORCH_CHECK(false, "Found an invalid max index: ", optional_error_index.value(),
" (output volumes are of size ", output_height,
"x", output_width);
}
@ -151,7 +151,7 @@ void cpu_max_unpool_channels_last(
});
if (optional_error_index) {
AT_ERROR("Found an invalid max index: ", optional_error_index.value(),
TORCH_CHECK(false, "Found an invalid max index: ", optional_error_index.value(),
" (output volumes are of size ", output_height,
"x", output_width, ")");
}
@ -223,12 +223,12 @@ void cpu_max_unpool_backward(
if (optional_error_index) {
if (is_3d) {
AT_ERROR("invalid max index ", optional_error_index.value(),
TORCH_CHECK(false, "invalid max index ", optional_error_index.value(),
", odepth= ", output_depth,
", owidth= ", output_width,
", oheight= ", output_height);
} else {
AT_ERROR("invalid max index ", optional_error_index.value(),
TORCH_CHECK(false, "invalid max index ", optional_error_index.value(),
", owidth= ", output_width,
", oheight= ", output_height);
}

View File

@ -66,7 +66,7 @@ static inline void CUFFT_CHECK(cufftResult error)
if (error != CUFFT_SUCCESS) {
std::ostringstream ss;
ss << "cuFFT error: " << _cudaGetErrorEnum(error);
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
}

View File

@ -462,7 +462,7 @@ Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &ind
padding_idx);
default:
AT_ERROR(
TORCH_CHECK(false,
"Unknown mode for embedding_bag_backward_cuda ", mode);
}
}

View File

@ -267,7 +267,7 @@ static void max_unpooling3d_shape_check(
if (gradOutput.defined()) {
if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) ||
oW != gradOutput.size(dimw)) {
AT_ERROR(
TORCH_CHECK(false,
"Inconsistent gradOutput size. oT= ",
oT,
", oH= ",
@ -447,7 +447,7 @@ at::Tensor& max_unpooling2d_backward_out_cuda(const Tensor& grad_output_,
nInputRows = self.size(dimh);
if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) {
AT_ERROR(
TORCH_CHECK(false,
"Inconsistent gradOutput size. output height: ",
oheight,
", output width= ",

View File

@ -164,7 +164,7 @@ mixed_dtypes_linear_dispatch_bias_activation(
ElementInputB,
fastertransformer::EpilogueOpNoBias>(input, weight, scale, bias);
}
AT_ERROR("mixed_dtypes_linear_dispatch_bias_activation: Activation \"",
TORCH_CHECK(false, "mixed_dtypes_linear_dispatch_bias_activation: Activation \"",
activation, "\" is not supported");
return Tensor{};
}
@ -185,7 +185,7 @@ mixed_dtypes_linear_dispatch_bias_activation(
ElementInputB,
fastertransformer::EpilogueOpBiasSilu>(input, weight, scale, bias);
}
AT_ERROR("mixed_dtypes_linear_dispatch_bias_activation: Activation \"",
TORCH_CHECK(false, "mixed_dtypes_linear_dispatch_bias_activation: Activation \"",
activation, "\" is not supported");
return Tensor{};
}
@ -198,7 +198,7 @@ _mixed_dtypes_linear(const Tensor& input, const Tensor& weight,
const std::optional<Tensor>& bias_opt,
const std::optional<c10::string_view> activation_opt) {
#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
AT_ERROR("_mixed_dtypes_linear: not compiled for this platform");
TORCH_CHECK(false, "_mixed_dtypes_linear: not compiled for this platform");
return Tensor{};
#else
const auto bias = bias_opt.has_value() ? *bias_opt : Tensor{};

View File

@ -88,7 +88,7 @@ static inline void slow_conv_transpose2d_shape_check(
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
TORCH_CHECK(false, "weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
@ -115,7 +115,7 @@ static inline void slow_conv_transpose2d_shape_check(
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_width < 1 || output_height < 1) {
AT_ERROR(
TORCH_CHECK(false,
"Given input size per channel: (",
input_height,
" x ",

View File

@ -106,7 +106,7 @@ static inline void slow_conv_transpose3d_shape_check(
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
TORCH_CHECK(false, "weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
@ -140,7 +140,7 @@ static inline void slow_conv_transpose3d_shape_check(
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_depth < 1 || output_width < 1 || output_height < 1) {
AT_ERROR(
TORCH_CHECK(false,
"Given input size per channel: (",
input_depth,
" x ",

View File

@ -184,7 +184,7 @@ struct KthValueLauncher {
int64_t slice_size) {
dim3 grid;
if (!getGridFromTiles(num_slices, grid)) {
AT_ERROR("slices are too many");
TORCH_CHECK(false, "slices are too many");
}
dim3 block(std::min(
@ -221,7 +221,7 @@ struct MedianLauncher {
int64_t slice_size) {
dim3 grid;
if (!getGridFromTiles(num_slices, grid)) {
AT_ERROR("slices are too many");
TORCH_CHECK(false, "slices are too many");
}
dim3 block(std::min(

View File

@ -12,10 +12,10 @@ namespace at::native {
// sparse, sparse, sparse, dense, real, real -> sparse
Tensor& _sspaddmm_out_only_sparse_cuda(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("tensor.sspaddmm(...) can only be called on sparse tensors");
TORCH_CHECK(false, "tensor.sspaddmm(...) can only be called on sparse tensors");
}
Tensor& _sspaddmm_out_cuda(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("NYI: CUDA sspaddmm is not implemented");
TORCH_CHECK(false, "NYI: CUDA sspaddmm is not implemented");
}
} // namespace at::native

View File

@ -251,7 +251,7 @@ Tensor _bincount_cuda_template(
const Tensor& weights,
int64_t minlength) {
if (minlength < 0) {
AT_ERROR("minlength should be >= 0");
TORCH_CHECK(false, "minlength should be >= 0");
}
if (self.dim() == 1 && self.numel() == 0) {
return at::zeros(
@ -264,12 +264,12 @@ Tensor _bincount_cuda_template(
if (self.dim() != 1 ||
(!std::is_same_v<input_t, uint8_t> &&
*self.min().cpu().const_data_ptr<input_t>() < 0)) {
AT_ERROR("bincount only supports 1-d non-negative integral inputs.");
TORCH_CHECK(false, "bincount only supports 1-d non-negative integral inputs.");
}
bool has_weights = weights.defined();
if (has_weights && (weights.dim() != 1 || weights.size(0) != self.size(0))) {
AT_ERROR("weights should be 1-d and have the same length as input");
TORCH_CHECK(false, "weights should be 1-d and have the same length as input");
}
const int64_t nbins =
@ -312,7 +312,7 @@ Tensor _histc_cuda_template(
at::acc_type<input_t, /*is_cuda=*/true> min,
at::acc_type<input_t, /*is_cuda=*/true> max) {
if (nbins <= 0) {
AT_ERROR("bins must be > 0");
TORCH_CHECK(false, "bins must be > 0");
}
Tensor output = at::zeros(
{nbins},
@ -387,7 +387,7 @@ Tensor _histc_cuda(
const Scalar& min,
const Scalar& max) {
if (self.scalar_type() == ScalarType::Half) {
AT_ERROR("HalfTensor is not supported");
TORCH_CHECK(false, "HalfTensor is not supported");
}
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage

View File

@ -1158,7 +1158,7 @@ REGISTER_CUDA_DISPATCH(ldl_solve_stub, &ldl_solve_kernel)
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#if !AT_MAGMA_ENABLED()
AT_ERROR("cholesky_solve: MAGMA library not found in "
TORCH_CHECK(false, "cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
@ -1476,7 +1476,7 @@ template <typename scalar_t>
static void apply_lu_factor_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#if !AT_MAGMA_ENABLED()
// This should never be thrown if the calling functions are correct.
AT_ERROR("linalg.lu_factor: PyTorch was not compiled with MAGMA support.");
TORCH_CHECK(false, "linalg.lu_factor: PyTorch was not compiled with MAGMA support.");
#else
// magmaLu and magmaLuNoPiv require infos and pivots tensor to be on CPU
// the data is later copied back to the appropriate output tensor
@ -1677,7 +1677,7 @@ REGISTER_CUDA_DISPATCH(lu_factor_stub, &lu_factor);
template <typename scalar_t>
static void apply_triangular_solve_batched_magma(const Tensor& A, const Tensor& b, bool left, bool upper, TransposeType transpose, bool unitriangular) {
#if !AT_MAGMA_ENABLED()
AT_ERROR("triangular_solve: MAGMA library not found in "
TORCH_CHECK(false, "triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
@ -2106,7 +2106,7 @@ static void apply_svd_magma(const Tensor& A,
const Tensor& Vh,
const Tensor& info) {
#if !AT_MAGMA_ENABLED()
AT_ERROR("linalg.svd: MAGMA library not found in "
TORCH_CHECK(false, "linalg.svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;

View File

@ -59,7 +59,7 @@ struct MAGMAQueue {
static inline magma_int_t magma_int_cast(int64_t value, const char* varname) {
auto result = static_cast<magma_int_t>(value);
if (static_cast<int64_t>(result) != value) {
AT_ERROR("magma: The value of ", varname, "(", (long long)value,
TORCH_CHECK(false, "magma: The value of ", varname, "(", (long long)value,
") is too large to fit into a magma_int_t (", sizeof(magma_int_t), " bytes)");
}
return result;

View File

@ -25,7 +25,8 @@ Tensor cudnn_affine_grid_generator_forward(
int64_t C,
int64_t H,
int64_t W) {
AT_ERROR(
TORCH_CHECK(
false,
"cudnn_affine_grid_generator_forward: ATen not compiled with cuDNN support");
}
@ -35,7 +36,8 @@ Tensor cudnn_affine_grid_generator_backward(
int64_t C,
int64_t H,
int64_t W) {
AT_ERROR(
TORCH_CHECK(
false,
"cudnn_affine_grid_generator_backward: ATen not compiled with cuDNN support");
}

View File

@ -25,7 +25,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> cudnn_batch_norm(
bool training,
double exponential_average_factor,
double epsilon) {
AT_ERROR("cudnn_batch_norm: ATen not compiled with cuDNN support");
TORCH_CHECK(false, "cudnn_batch_norm: ATen not compiled with cuDNN support");
}
std::tuple<Tensor, Tensor, Tensor> cudnn_batch_norm_backward(
@ -38,13 +38,15 @@ std::tuple<Tensor, Tensor, Tensor> cudnn_batch_norm_backward(
const std::optional<Tensor>& save_var_opt,
double epsilon,
const Tensor& reservedSpace) {
AT_ERROR("cudnn_batch_norm_backward: ATen not compiled with cuDNN support");
TORCH_CHECK(
false, "cudnn_batch_norm_backward: ATen not compiled with cuDNN support");
}
size_t _get_cudnn_batch_norm_reserve_space_size(
const Tensor& input_t,
bool training) {
AT_ERROR(
TORCH_CHECK(
false,
"_get_cudnn_batch_norm_reserve_space_size: ATen not compiled with cuDNN support");
}

View File

@ -35,7 +35,7 @@ at::Tensor cudnn_convolution(
bool benchmark,
bool deterministic,
bool allow_tf32) {
AT_ERROR("cudnn_convolution: ATen not compiled with cuDNN support");
TORCH_CHECK(false, "cudnn_convolution: ATen not compiled with cuDNN support");
}
at::Tensor& cudnn_convolution_out(
@ -49,7 +49,8 @@ at::Tensor& cudnn_convolution_out(
bool deterministic,
bool allow_tf32,
Tensor& output_t) {
AT_ERROR("cudnn_convolution_out: ATen not compiled with cuDNN support");
TORCH_CHECK(
false, "cudnn_convolution_out: ATen not compiled with cuDNN support");
}
at::Tensor cudnn_convolution_backward_input(
@ -63,7 +64,8 @@ at::Tensor cudnn_convolution_backward_input(
bool benchmark,
bool deterministic,
bool allow_tf32) {
AT_ERROR(
TORCH_CHECK(
false,
"cudnn_convolution_backward_input: ATen not compiled with cuDNN support");
}
@ -78,7 +80,8 @@ at::Tensor cudnn_convolution_backward_weight(
bool benchmark,
bool deterministic,
bool allow_tf32) {
AT_ERROR(
TORCH_CHECK(
false,
"cudnn_convolution_backward_weight: ATen not compiled with cuDNN support");
}
@ -94,7 +97,9 @@ std::tuple<at::Tensor, at::Tensor> cudnn_convolution_backward(
bool deterministic,
bool allow_tf32,
std::array<bool, 2> output_mask) {
AT_ERROR("cudnn_convolution_backward: ATen not compiled with cuDNN support");
TORCH_CHECK(
false,
"cudnn_convolution_backward: ATen not compiled with cuDNN support");
}
at::Tensor cudnn_convolution_transpose(
@ -108,7 +113,9 @@ at::Tensor cudnn_convolution_transpose(
bool benchmark,
bool deterministic,
bool allow_tf32) {
AT_ERROR("cudnn_convolution_transpose: ATen not compiled with cuDNN support");
TORCH_CHECK(
false,
"cudnn_convolution_transpose: ATen not compiled with cuDNN support");
}
at::Tensor cudnn_convolution_transpose_backward_input(
@ -121,7 +128,8 @@ at::Tensor cudnn_convolution_transpose_backward_input(
bool benchmark,
bool deterministic,
bool allow_tf32) {
AT_ERROR(
TORCH_CHECK(
false,
"cudnn_convolution_transpose_backward: ATen not compiled with cuDNN support");
}
@ -136,7 +144,8 @@ at::Tensor cudnn_convolution_transpose_backward_weight(
bool benchmark,
bool deterministic,
bool allow_tf32) {
AT_ERROR(
TORCH_CHECK(
false,
"cudnn_convolution_transpose_backward_weight: ATen not compiled with cuDNN support");
}
@ -153,7 +162,8 @@ std::tuple<at::Tensor, at::Tensor> cudnn_convolution_transpose_backward(
bool deterministic,
bool allow_tf32,
std::array<bool, 2> output_mask) {
AT_ERROR(
TORCH_CHECK(
false,
"cudnn_convolution_transpose_backward: ATen not compiled with cuDNN support");
}
@ -168,7 +178,8 @@ void raw_cudnn_convolution_forward_out(
bool benchmark,
bool deterministic,
bool allow_tf32) {
AT_ERROR(
TORCH_CHECK(
false,
"raw_cudnn_convolution_forward_out: ATen not compiled with cuDNN support");
}
@ -183,7 +194,8 @@ void raw_cudnn_convolution_backward_input_out(
bool benchmark,
bool deterministic,
bool allow_tf32) {
AT_ERROR(
TORCH_CHECK(
false,
"raw_cudnn_convolution_backward_input_out: ATen not compiled with cuDNN support");
}
@ -198,7 +210,8 @@ void raw_cudnn_convolution_backward_weight_out(
bool benchmark,
bool deterministic,
bool allow_tf32) {
AT_ERROR(
TORCH_CHECK(
false,
"raw_cudnn_convolution_backward_weight_out: ATen not compiled with cuDNN support");
}
@ -210,7 +223,8 @@ Tensor cudnn_convolution_relu(
IntArrayRef padding,
IntArrayRef dilation,
int64_t groups) {
AT_ERROR("cudnn_convolution_relu: ATen not compiled with cuDNN support");
TORCH_CHECK(
false, "cudnn_convolution_relu: ATen not compiled with cuDNN support");
}
Tensor cudnn_convolution_add_relu(
@ -223,7 +237,9 @@ Tensor cudnn_convolution_add_relu(
IntArrayRef padding,
IntArrayRef dilation,
int64_t groups) {
AT_ERROR("cudnn_convolution_add_relu: ATen not compiled with cuDNN support");
TORCH_CHECK(
false,
"cudnn_convolution_add_relu: ATen not compiled with cuDNN support");
}
#endif // AT_CUDNN_ENABLED

View File

@ -21,14 +21,18 @@ namespace native {
// See Note [ATen preprocessor philosophy]
Tensor cudnn_grid_sampler_forward(const Tensor& input_t, const Tensor& grid_t) {
AT_ERROR("cudnn_grid_sampler_forward: ATen not compiled with cuDNN support");
TORCH_CHECK(
false,
"cudnn_grid_sampler_forward: ATen not compiled with cuDNN support");
}
std::tuple<Tensor, Tensor> cudnn_grid_sampler_backward(
const Tensor& input_t,
const Tensor& grid_t,
const Tensor& grad_output_t) {
AT_ERROR("cudnn_grid_sampler_backward: ATen not compiled with cuDNN support");
TORCH_CHECK(
false,
"cudnn_grid_sampler_backward: ATen not compiled with cuDNN support");
}
} // namespace native


@ -55,7 +55,8 @@ std::tuple<Tensor, Tensor> _cudnn_ctc_loss(
int64_t BLANK,
bool deterministic,
bool zero_infinity) {
AT_ERROR("cudnn_ctc_loss: ATen not compiled with cuDNN >= 7 support");
TORCH_CHECK(
false, "cudnn_ctc_loss: ATen not compiled with cuDNN >= 7 support");
}
std::tuple<Tensor, Tensor> _cudnn_ctc_loss_tensor(
@ -66,7 +67,8 @@ std::tuple<Tensor, Tensor> _cudnn_ctc_loss_tensor(
int64_t BLANK,
bool deterministic,
bool zero_infinity) {
AT_ERROR("cudnn_ctc_loss: ATen not compiled with cuDNN >= 8 support");
TORCH_CHECK(
false, "cudnn_ctc_loss: ATen not compiled with cuDNN >= 8 support");
}
} // namespace native


@ -44,7 +44,8 @@ Tensor _cudnn_rnn_flatten_weight(
int64_t fn_num_layers,
bool batch_first,
bool fn_bidirectional) {
AT_ERROR("_cudnn_rnn_flatten_weight: ATen not compiled with cuDNN support");
TORCH_CHECK(
false, "_cudnn_rnn_flatten_weight: ATen not compiled with cuDNN support");
}
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _cudnn_rnn(
@ -64,7 +65,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _cudnn_rnn(
bool fn_bidirectional,
IntArrayRef fn_batch_sizes,
const std::optional<Tensor>& fn_dropout_state_opt) {
AT_ERROR("_cudnn_rnn: ATen not compiled with cuDNN support");
TORCH_CHECK(false, "_cudnn_rnn: ATen not compiled with cuDNN support");
}
std::tuple<Tensor, Tensor, Tensor, std::vector<Tensor>> _cudnn_rnn_backward(
@ -90,7 +91,8 @@ std::tuple<Tensor, Tensor, Tensor, std::vector<Tensor>> _cudnn_rnn_backward(
const std::optional<Tensor>& dropout_state_opt,
const Tensor& reserve,
std::array<bool, 4> output_mask) {
AT_ERROR("_cudnn_rnn_backward: ATen not compiled with cuDNN support");
TORCH_CHECK(
false, "_cudnn_rnn_backward: ATen not compiled with cuDNN support");
}
Tensor _cudnn_init_dropout_state(
@ -105,7 +107,8 @@ Tensor _cudnn_init_dropout_state(
TensorOptions().dtype(dtype).layout(layout).device(device).pinned_memory(
pin_memory);
AT_ERROR("_cudnn_init_dropout_state: ATen not compiled with cuDNN support");
TORCH_CHECK(
false, "_cudnn_init_dropout_state: ATen not compiled with cuDNN support");
}
} // namespace native
@ -181,7 +184,7 @@ struct RNNDescriptorParams {
default: {
std::ostringstream oss;
oss << "unrecognized cuDNN RNN mode " << fn_mode;
AT_ERROR(oss.str());
TORCH_CHECK(false, oss.str());
}
}
}
@ -583,7 +586,7 @@ int64_t _num_linear_layers(cudnnRNNMode_t mode) {
case CUDNN_RNN_TANH:
return 2;
default:
AT_ERROR("unknown cuDNN RNN mode ", mode);
TORCH_CHECK(false, "unknown cuDNN RNN mode ", mode);
}
}


@ -56,7 +56,7 @@ inline void col2im_shape_check(
int64_t n_input_plane = input.size(batch_dim + 1);
if (n_input_plane % (kernel_width * kernel_height) != 0) {
AT_ERROR(
TORCH_CHECK(false,
"Expected size of input's dimension 1 to be divisible by the "
"product of kernel_size, but got input.size(1)=",
n_input_plane,
@ -81,7 +81,7 @@ inline void col2im_shape_check(
1;
if (input_length != (n_blocks_height * n_blocks_width)) {
AT_ERROR(
TORCH_CHECK(false,
"Given output_size=(",
output_height,
", ",
@ -126,7 +126,7 @@ inline void col2im_shape_check(
"which is too small (non-positive)");
if (output_width < 1 || output_height < 1) {
AT_ERROR(
TORCH_CHECK(false,
"Expected output spatial size to be positive, but got: output_size=(",
output_height,
", ",
@ -204,7 +204,7 @@ inline void im2col_shape_check(
1;
if (output_height < 1 || output_width < 1) {
AT_ERROR(
TORCH_CHECK(false,
"Given input with spatial size (",
input_height,
", ",


@ -39,7 +39,7 @@ C10_ALWAYS_INLINE void _check_rms_norm_inputs_symint(
ss << ", " << size;
}
ss << "], but got input of size" << input_shape;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
}
@ -83,7 +83,7 @@ C10_ALWAYS_INLINE std::pair<int64_t, int64_t> _check_layer_norm_inputs(
ss << ", " << size;
}
ss << "], but got input of size" << input_shape;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
const int axis = input_ndim - normalized_ndim;


@ -24,13 +24,13 @@ namespace at { namespace native {
std::tuple<Tensor, Tensor, Tensor> miopen_batch_norm(
const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt,
bool training, double exponential_average_factor, double epsilon) {
AT_ERROR("miopen_batch_norm: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_batch_norm: ATen not compiled with MIOpen support");
}
std::tuple<Tensor, Tensor, Tensor> miopen_batch_norm_backward(
const Tensor& input, const Tensor& grad_output, const Tensor& weight, const std::optional<Tensor>& running_mean_opt, const std::optional<Tensor>& running_var_opt, const std::optional<Tensor>& save_mean_opt, const std::optional<Tensor>& save_var_opt,
double epsilon) {
AT_ERROR("miopen_batch_norm_backward: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_batch_norm_backward: ATen not compiled with MIOpen support");
}
}} // namespace at::native


@ -34,89 +34,89 @@ at::Tensor miopen_convolution(
const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt /* optional */,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_backward_input(
IntArrayRef input_size, const at::Tensor& grad_output, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution_backward_input: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution_backward_input: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_backward_weight(
IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution_backward_weight: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution_backward_weight: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_backward_bias(
const at::Tensor& grad_output) {
AT_ERROR("miopen_convolution_backward_bias: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution_backward_bias: ATen not compiled with MIOpen support");
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_convolution_backward(
const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
AT_ERROR("miopen_convolution_backward: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution_backward: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_transpose(
const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt /* optional */,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution_transpose: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution_transpose: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_transpose_backward_input(
const at::Tensor& grad_output, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution_transpose_backward: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution_transpose_backward: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_transpose_backward_weight(
IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("miopen_convolution_transpose_backward_weight: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution_transpose_backward_weight: ATen not compiled with MIOpen support");
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_convolution_transpose_backward(
const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
AT_ERROR("miopen_convolution_transpose_backward: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution_transpose_backward: ATen not compiled with MIOpen support");
}
at::Tensor miopen_depthwise_convolution(
const Tensor& input, const Tensor& weight, const std::optional<Tensor>& bias_opt /* optional */,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation,
int64_t groups, bool benchmark, bool deterministic) {
AT_ERROR("miopen_depthwise_convolution: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_depthwise_convolution: ATen not compiled with MIOpen support");
}
at::Tensor miopen_depthwise_convolution_backward_input(
IntArrayRef input_size, const at::Tensor& grad_output, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("miopen_depthwise_convolution_backward_input: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_depthwise_convolution_backward_input: ATen not compiled with MIOpen support");
}
at::Tensor miopen_depthwise_convolution_backward_weight(
IntArrayRef weight_size, const at::Tensor& grad_output, const at::Tensor& input,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic) {
AT_ERROR("miopen_depthwise_convolution_backward_weight: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_depthwise_convolution_backward_weight: ATen not compiled with MIOpen support");
}
std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_depthwise_convolution_backward(
const at::Tensor& input, const at::Tensor& grad_output, const at::Tensor& weight,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups,
bool benchmark, bool deterministic, std::array<bool,3> output_mask) {
AT_ERROR("miopen_depthwise_convolution_backward: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_depthwise_convolution_backward: ATen not compiled with MIOpen support");
}
@ -124,13 +124,13 @@ at::Tensor miopen_convolution_add_relu(
const at::Tensor& input, const at::Tensor& weight, const at::Tensor& z,
const std::optional<Scalar>& alpha, const std::optional<Tensor>& bias, IntArrayRef stride,
IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
AT_ERROR("miopen_convolution_add_relu: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution_add_relu: ATen not compiled with MIOpen support");
}
at::Tensor miopen_convolution_relu(
const at::Tensor& input, const at::Tensor& weight, const std::optional<Tensor>& bias,
IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
AT_ERROR("miopen_convolution_relu: ATen not compiled with MIOpen support");
TORCH_CHECK(false, "miopen_convolution_relu: ATen not compiled with MIOpen support");
}
}}
@ -396,7 +396,7 @@ struct algorithm_search<miopenConvFwdAlgorithm_t> {
args.odesc.desc(),
&max_solution_count));
if (max_solution_count > AT_MIOPEN_MAX_SOLUTIONS) {
AT_ERROR("miopenConvFwdAlgorithm_t getSolution max_solution_count > AT_MIOPEN_MAX_SOLUTIONS");
TORCH_CHECK(false, "miopenConvFwdAlgorithm_t getSolution max_solution_count > AT_MIOPEN_MAX_SOLUTIONS");
}
MIOPEN_CHECK(miopenConvolutionForwardGetSolution(
args.handle,
@ -469,7 +469,7 @@ struct algorithm_search<miopenConvBwdDataAlgorithm_t> {
args.idesc.desc(),
&max_solution_count));
if (max_solution_count > AT_MIOPEN_MAX_SOLUTIONS) {
AT_ERROR("miopenConvBwdDataAlgorithm_t getSolution max_solution_count > AT_MIOPEN_MAX_SOLUTIONS");
TORCH_CHECK(false, "miopenConvBwdDataAlgorithm_t getSolution max_solution_count > AT_MIOPEN_MAX_SOLUTIONS");
}
MIOPEN_CHECK(miopenConvolutionBackwardDataGetSolution(
args.handle,
@ -542,7 +542,7 @@ struct algorithm_search<miopenConvBwdWeightsAlgorithm_t> {
args.wdesc.desc(),
&max_solution_count));
if (max_solution_count > AT_MIOPEN_MAX_SOLUTIONS) {
AT_ERROR("miopenConvBwdWeightsAlgorithm_t getSolution max_solution_count > AT_MIOPEN_MAX_SOLUTIONS");
TORCH_CHECK(false, "miopenConvBwdWeightsAlgorithm_t getSolution max_solution_count > AT_MIOPEN_MAX_SOLUTIONS");
}
MIOPEN_CHECK(miopenConvolutionBackwardWeightsGetSolution(
args.handle,


@ -34,7 +34,7 @@ namespace at { namespace native {
bool batch_first, double fn_dropout, bool fn_train, bool fn_bidirectional,
IntArrayRef fn_batch_sizes, const std::optional<Tensor>& fn_dropout_state_opt
) {
AT_ERROR("miopen_rnn : ATen not compiled with MIOpen support.");
TORCH_CHECK(false, "miopen_rnn : ATen not compiled with MIOpen support.");
}
std::tuple<Tensor, Tensor, Tensor, std::vector<Tensor>> miopen_rnn_backward(
@ -43,7 +43,7 @@ namespace at { namespace native {
double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const std::optional<Tensor>& dropout_state_opt,
const Tensor& reserve, std::array<bool, 4> output_mask
) {
AT_ERROR("miopen_rnn_backward: ATen not compiled with MIOpen support.");
TORCH_CHECK(false, "miopen_rnn_backward: ATen not compiled with MIOpen support.");
}
}} //namespace at::native
@ -109,7 +109,7 @@ struct RNNDescriptorParams {
{
std::ostringstream oss;
oss << "unrecognized miopen RNN mode " << fn_mode;
AT_ERROR(oss.str());
TORCH_CHECK(false, oss.str());
}
}
}
@ -323,7 +323,7 @@ int64_t _num_linear_layers(miopenRNNMode_t mode) {
case miopenRNNTANH:
return 2;
default:
AT_ERROR("Unknown miopen RNN mode : ", mode);
TORCH_CHECK(false, "Unknown miopen RNN mode : ", mode);
}
}
@ -804,7 +804,7 @@ std::tuple<Tensor, Tensor> unpack_hidden(const std::tuple<Tensor, Tensor>& hidde
template<typename hidden_type>
hidden_type pack_hidden(const Tensor& hx, const Tensor& cx) {
static_assert(std::is_same<hidden_type, void>::value, "pack_hidden not implemented for this type");
AT_ERROR("NOT IMPLEMENTED");
TORCH_CHECK(false, "NOT IMPLEMENTED");
}
template<>


@ -19,9 +19,9 @@ Tensor& _sparse_mm_mkl_(
const Scalar& alpha,
const Scalar& beta) {
#if __APPLE__ || __MACH__
AT_ERROR("sparse_mm_mkl: MKL support is disabled on macos/iOS.");
TORCH_CHECK(false, "sparse_mm_mkl: MKL support is disabled on macos/iOS.");
#else
AT_ERROR("sparse_mm_mkl: ATen not compiled with MKL support");
TORCH_CHECK(false, "sparse_mm_mkl: ATen not compiled with MKL support");
#endif
return self; // for stopping compiler warnings.
}


@ -241,7 +241,7 @@ T compute_fct(int64_t size, int64_t normalization) {
case fft_norm_mode::by_n: return one / static_cast<T>(size);
case fft_norm_mode::by_root_n: return one / std::sqrt(static_cast<T>(size));
}
AT_ERROR("Unsupported normalization type", normalization);
TORCH_CHECK(false, "Unsupported normalization type", normalization);
}
template<typename T>
@ -578,30 +578,30 @@ namespace at { namespace native {
REGISTER_NO_CPU_DISPATCH(fft_fill_with_conjugate_symmetry_stub);
Tensor _fft_c2r_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
AT_ERROR("fft: ATen not compiled with FFT support");
TORCH_CHECK(false, "fft: ATen not compiled with FFT support");
}
Tensor _fft_r2c_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization, bool onesided) {
AT_ERROR("fft: ATen not compiled with FFT support");
TORCH_CHECK(false, "fft: ATen not compiled with FFT support");
}
Tensor _fft_c2c_mkl(const Tensor& self, IntArrayRef dim, int64_t normalization, bool forward) {
AT_ERROR("fft: ATen not compiled with FFT support");
TORCH_CHECK(false, "fft: ATen not compiled with FFT support");
}
Tensor& _fft_r2c_mkl_out(const Tensor& self, IntArrayRef dim, int64_t normalization,
bool onesided, Tensor& out) {
AT_ERROR("fft: ATen not compiled with FFT support");
TORCH_CHECK(false, "fft: ATen not compiled with FFT support");
}
Tensor& _fft_c2r_mkl_out(const Tensor& self, IntArrayRef dim, int64_t normalization,
int64_t last_dim_size, Tensor& out) {
AT_ERROR("fft: ATen not compiled with FFT support");
TORCH_CHECK(false, "fft: ATen not compiled with FFT support");
}
Tensor& _fft_c2c_mkl_out(const Tensor& self, IntArrayRef dim, int64_t normalization,
bool forward, Tensor& out) {
AT_ERROR("fft: ATen not compiled with FFT support");
TORCH_CHECK(false, "fft: ATen not compiled with FFT support");
}
}} // namespace at::native


@ -41,7 +41,7 @@ const Tensor& input,
bool bidirectional,
bool batch_first,
bool train) {
AT_ERROR("mkldnn_rnn_layer: ATen not compiled with MKLDNN support");
TORCH_CHECK(false, "mkldnn_rnn_layer: ATen not compiled with MKLDNN support");
}
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> mkldnn_rnn_layer_backward(
@ -68,7 +68,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> mkldnn_rnn_la
at::IntArrayRef batch_sizes,
bool batch_first,
const at::Tensor& workspace) {
AT_ERROR("mkldnn_rnn_layer_backward: ATen not compiled with MKLDNN support");
TORCH_CHECK(false, "mkldnn_rnn_layer_backward: ATen not compiled with MKLDNN support");
}
REGISTER_NO_CPU_DISPATCH(lstm_mkldnn_stub);


@ -68,7 +68,7 @@ Tensor& addmm_out(
// complex/double case
if (mat1.is_complex() || mat1.scalar_type() == ScalarType::Double) {
AT_ERROR(
TORCH_CHECK(false,
"Double and complex datatype matmul is not supported in oneDNN");
}
@ -148,7 +148,7 @@ Tensor& mm_out(const Tensor& self, const Tensor& mat2, Tensor& result) {
}
if (self.is_complex() || self.scalar_type() == ScalarType::Double) {
AT_ERROR(
TORCH_CHECK(false,
"Double and complex datatype matmul is not supported in oneDNN");
}
@ -203,7 +203,7 @@ Tensor& baddbmm_out(
// complex and double case
if (batch1.is_complex() || batch2.scalar_type() == ScalarType::Double) {
AT_ERROR(
TORCH_CHECK(false,
"Double and complex datatype matmul is not supported in oneDNN");
}
@ -329,7 +329,7 @@ Tensor& bmm_out(const Tensor& self, const Tensor& batch2, Tensor& result) {
}
if (self.is_complex() || self.scalar_type() == ScalarType::Double) {
AT_ERROR(
TORCH_CHECK(false,
"Double and complex datatype matmul is not supported in oneDNN");
}
onednn::matmul(result, self, batch2, at::Tensor(), true, onednn::Attr());


@ -76,7 +76,7 @@ static void pool2d_template(const Tensor& input,
} else if (suggested_memory_format == at::MemoryFormat::Contiguous) {
TORCH_CHECK((ndims == 3 || ndims == 4), "non-empty 3D or 4D (batch mode) tensor expected for input");
} else {
AT_ERROR("Unsupported memory format. Supports only ChannelsLast, Contiguous");
TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
int padH = safe_downcast<int, int64_t>(padding[0]);


@ -97,7 +97,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> _lstm_mps(const Tenso
// Projections are not currently supported, raise an error if needed
bool has_projections = (hx[0].size(2) != hx[1].size(2));
if (has_projections) {
AT_ERROR("LSTM with projections is not currently supported with MPS.");
TORCH_CHECK(false, "LSTM with projections is not currently supported with MPS.");
}
std::vector<Tensor> kernel_weights;


@ -76,7 +76,7 @@ static void upsample_out_template(const Tensor& input,
centerResults = true;
nearestRoundingMode = MPSGraphResizeNearestRoundingModeRoundPreferCeil;
} else {
AT_ERROR("Unsupported resize mode ", resize_mode_str);
TORCH_CHECK(false, "Unsupported resize mode ", resize_mode_str);
}
const int64_t output_width = output_size.size() > 1 ? output_size[1] : output_size[0];


@ -752,7 +752,7 @@ inline std::tuple<bool, Tensor, Tensor> NestedTensor_compute_size_stride(
}
}
else {
AT_ERROR("invalid shape dimension ", size_reshaped);
TORCH_CHECK(false, "invalid shape dimension ", size_reshaped);
}
}
// See Note [Special size rule for nested tensor]


@ -223,10 +223,10 @@ Tensor matmul_nested(const Tensor& self, const Tensor& mat2) {
return matmul_nested_with_broadcasted_dense(self, mat2);
}
if (self.is_nested() && !mat2.is_nested()) {
AT_ERROR(
TORCH_CHECK(false,
"Expected both to be nested, but got a nested self and non-nested other");
} else if (!self.is_nested() && mat2.is_nested()) {
AT_ERROR(
TORCH_CHECK(false,
"Expected both to be nested, but got a non-nested self and nested other");
}
// to_padded_tensor only supports contiguous inputs


@ -110,7 +110,7 @@ Tensor nested_from_padded_cuda(
padded_contiguous.sizes()[0]);
}
} else {
AT_ERROR("Only support fp32/fp16 for padded input");
TORCH_CHECK(false, "Only support fp32/fp16 for padded input");
}
return at::detail::make_tensor<NestedTensorImpl>(std::move(output), sizes);
} else {


@ -64,7 +64,7 @@ Tensor adaptive_avg_pool2d_quantized_cuda(
auto result_fp32 = at::adaptive_avg_pool2d(input_fp32, output_size);
return at::quantize_per_tensor(result_fp32, input.q_scale(), input.q_zero_point(), input.scalar_type());
#else // USE_CUDA
AT_ERROR("at::native::adaptive_avg_pool2d_quantized_cuda: ATen not compiled with USE_CUDA support");
TORCH_CHECK(false, "at::native::adaptive_avg_pool2d_quantized_cuda: ATen not compiled with USE_CUDA support");
return Tensor{}; // never reached, placates the compiler
#endif
}
@ -209,11 +209,11 @@ Tensor quantized_max_pool2d_cudnn(
// recall we casted our input and output to 4D if qx was 3D, so we recast it back to 3D prior to returning
return (ndim == 3 ? qy.view(std::vector<int64_t>(output_shape.begin() + 1, output_shape.end())) : qy);
#else // AT_CUDNN_ENABLED()
AT_ERROR("at::native::quantized_max_pool2d_cudnn: ATen not compiled with cuDNN support");
TORCH_CHECK(false, "at::native::quantized_max_pool2d_cudnn: ATen not compiled with cuDNN support");
return Tensor{}; // never reached, placates the compiler
#endif // AT_CUDNN_ENABLED()
#else // USE_CUDA
AT_ERROR("at::native::quantized_max_pool2d_cudnn: ATen not compiled with USE_CUDA support");
TORCH_CHECK(false, "at::native::quantized_max_pool2d_cudnn: ATen not compiled with USE_CUDA support");
return Tensor{}; // never reached, placates the compiler
#endif
}


@ -459,7 +459,7 @@ Tensor _sparse_compressed_tensor_unsafe_symint(
std::optional<Device> device,
std::optional<bool> pin_memory) {
if (!layout) {
AT_ERROR("sparse_compressed_tensor_unsafe expected sparse compressed tensor layout but got none");
TORCH_CHECK(false, "sparse_compressed_tensor_unsafe expected sparse compressed tensor layout but got none");
}
Layout layout_ = layout.value();
AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(layout_, "sparse_compressed_tensor_unsafe", [&]{});
@ -587,7 +587,7 @@ Tensor sparse_compressed_tensor(
std::optional<bool> pin_memory) {
if (!layout) {
AT_ERROR("sparse_compressed_tensor expected sparse compressed tensor layout but got none");
TORCH_CHECK(false, "sparse_compressed_tensor expected sparse compressed tensor layout but got none");
}
Layout layout_ = layout.value();
AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(layout_, "sparse_compressed_tensor", [&]{});
@ -616,7 +616,7 @@ Tensor sparse_compressed_tensor(
std::optional<bool> pin_memory) {
if (!layout) {
AT_ERROR("sparse_compressed_tensor expected sparse compressed tensor layout but got none");
TORCH_CHECK(false, "sparse_compressed_tensor expected sparse compressed tensor layout but got none");
}
Layout layout_ = layout.value();
AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(layout_, "sparse_compressed_tensor", [&]{});


@ -588,7 +588,7 @@ SparseTensor& copy_sparse_wrapper_(
{
NoNamesGuard guard;
if (!self.is_sparse() || !src.is_sparse()) {
AT_ERROR(
TORCH_CHECK(false,
"copy_() between dense and sparse Tensors is not implemented! Found self type = ",
self.toString(),
" and src type = ",


@ -1224,9 +1224,9 @@ void s_addmm_out_sparse_dense_worker(int64_t nnz, int64_t dim_i, int64_t dim_j,
r_ptr + row * r_stride0, r_stride1);
} else {
if (col < 0 || col >= dim_j) {
AT_ERROR("addmm: index out of column bound: ", col, " not between 1 and ", dim_j);
TORCH_CHECK(false, "addmm: index out of column bound: ", col, " not between 1 and ", dim_j);
} else {
AT_ERROR("addmm: index out of row bound: ", row, " not between 1 and ", dim_i);
TORCH_CHECK(false, "addmm: index out of row bound: ", row, " not between 1 and ", dim_i);
}
}
}
@ -1577,7 +1577,7 @@ SparseTensor& _sspaddmm_out_cpu(
dense_ptr + col * dense_stride0, dense_stride1,
newv_ptr + p * newv_stride0, 1);
} else {
AT_ERROR("index out of bound. sspmm: ", col, " not between 1 and ", dim_j);
TORCH_CHECK(false, "index out of bound. sspmm: ", col, " not between 1 and ", dim_j);
}
}
// Fill up the indices with the right values
@ -1602,7 +1602,7 @@ SparseTensor& _sspaddmm_out_cpu(
// sparse, sparse, sparse, dense, real, real -> sparse
Tensor& _sspaddmm_out_only_sparse(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("tensor.sspaddmm(...) can only be called on sparse tensors");
TORCH_CHECK(false, "tensor.sspaddmm(...) can only be called on sparse tensors");
}
// sparse, dense -> sparse


@ -88,7 +88,7 @@ cusparseOperation_t convertTransToCusparseOperation(char trans) {
else if (trans == 'n') return CUSPARSE_OPERATION_NON_TRANSPOSE;
else if (trans == 'c') return CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
else {
AT_ERROR("trans must be one of: t, n, c");
TORCH_CHECK(false, "trans must be one of: t, n, c");
}
}


@ -124,7 +124,7 @@ Tensor _sparse_semi_structured_apply_dense(
const Tensor& threads_masks) {
#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
AT_ERROR("_sparse_semi_structured_apply_dense: not supported");
TORCH_CHECK(false, "_sparse_semi_structured_apply_dense: not supported");
return Tensor{};
#else
TORCH_CHECK(


@ -195,7 +195,7 @@ Tensor two_four_sgemm(
meta_dtype = at::kInt;
break;
default:
AT_ERROR("two_four_sgemm: invalid size of meta tensor datatype "
TORCH_CHECK(false, "two_four_sgemm: invalid size of meta tensor datatype "
"encountered");
}
TORCH_CHECK(meta.dtype() == meta_dtype,
@ -215,7 +215,7 @@ Tensor two_four_sgemm(
} else if constexpr (std::is_same_v<ElementOutput, float>) {
tensor_d_dtype = at::kFloat;
} else {
AT_ERROR("two_four_sgemm: invalid datatype for sparse GEMM output ",
TORCH_CHECK(false, "two_four_sgemm: invalid datatype for sparse GEMM output ",
"encountered");
}
if constexpr (use_bias) {
@ -424,7 +424,7 @@ Tensor two_four_sgemm_dispatch_layouts(
}
}
AT_ERROR("two_four_sgemm_dispatch_layouts: Combination of ",
TORCH_CHECK(false, "two_four_sgemm_dispatch_layouts: Combination of ",
tensor_a_row_major ? "row-major" : "column_major", " and ",
tensor_b_row_major ? "row-major" : "column_major",
" layouts for input tensors is not supported");
@ -573,7 +573,7 @@ Tensor two_four_sgemm_dispatch_layouts_bias_activation(
}
}
AT_ERROR("two_four_sgemm_dispatch_layouts: Activation \"", activation,
TORCH_CHECK(false, "two_four_sgemm_dispatch_layouts: Activation \"", activation,
"\" is not supported for given input tensors");
return Tensor{};
}
@ -608,7 +608,7 @@ Tensor _sparse_semi_structured_linear(
"_sparse_semi_structured_mm/_sparse_semi_structured_addmm "
"instead.");
#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
AT_ERROR("_sparse_semi_structured_linear: CUTLASS not supported");
TORCH_CHECK(false, "_sparse_semi_structured_linear: CUTLASS not supported");
return Tensor{};
#else
// No need to check that all tensors are on CUDA device, as this


@ -187,7 +187,7 @@ void spgemm_cutlass(
tensor_e_dtype = at::kInt;
break;
default:
AT_ERROR(__func__, ": invalid size of meta tensor datatype "
TORCH_CHECK(false, __func__, ": invalid size of meta tensor datatype "
"encountered");
}
TORCH_CHECK(tensor_e.dtype() == tensor_e_dtype,
@ -424,7 +424,7 @@ void spgemm_cutlass_dispatch_layouts(
}
}
AT_ERROR(__func__, "_dispatch_layouts: Combination of ",
TORCH_CHECK(false, __func__, "_dispatch_layouts: Combination of ",
tensor_a_row_major ? "row-major" : "column_major", " and ",
tensor_b_row_major ? "row-major" : "column_major",
" layouts for input tensors is not supported");
@ -525,7 +525,7 @@ Tensor sparse_semi_structured_mad_op(
const std::optional<Tensor>& input_opt, const Scalar& alpha,
const Scalar& beta, const std::optional<c10::ScalarType> out_dtype_opt) {
#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
AT_ERROR(__func__, " : CUTLASS not supported");
TORCH_CHECK(false, __func__, " : CUTLASS not supported");
return Tensor{};
#else
// No need to check that all tensors are on CUDA device, as this
@ -846,7 +846,7 @@ static void reorder_meta(cutlass::TensorRef<Element, LayoutDest> dest,
std::tuple<Tensor, Tensor>
_to_sparse_semi_structured(const Tensor& dense) {
#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
AT_ERROR(__func__, " : CUTLASS not supported");
TORCH_CHECK(false, __func__, " : CUTLASS not supported");
return std::make_tuple(Tensor{}, Tensor{});
#else
// Check dimensions of the dense matrix.
@ -871,7 +871,7 @@ _to_sparse_semi_structured(const Tensor& dense) {
ksparse = 2;
dense_elems_per_meta_elem = 8;
} else {
AT_ERROR("_to_sparse_semi_structured: Invalid dense argument datatype ",
TORCH_CHECK(false, "_to_sparse_semi_structured: Invalid dense argument datatype ",
dense.dtype(), " encountered");
}
@ -879,12 +879,12 @@ _to_sparse_semi_structured(const Tensor& dense) {
const auto dense_ncols = dense.size(1);
if (dense_nrows % (meta_dtype == at::kShort ? 32 : 16) != 0) {
AT_ERROR("_to_sparse_semi_structured: Number of rows of dense matrix must "
TORCH_CHECK(false, "_to_sparse_semi_structured: Number of rows of dense matrix must "
"be divisible by ", (meta_dtype == at::kShort ? 32 : 16),
", but it is ", dense_nrows);
}
if (dense_ncols % dense_elems_per_meta_elem != 0) {
AT_ERROR("_to_sparse_semi_structured: Number of columns of dense matrix "
TORCH_CHECK(false, "_to_sparse_semi_structured: Number of columns of dense matrix "
"must be divisible by ", dense_elems_per_meta_elem, ", but it is ",
dense_ncols);
}
@ -925,7 +925,7 @@ _to_sparse_semi_structured(const Tensor& dense) {
} else if (mask_elems == std::make_tuple(0, 0, 1, 1)) {
meta_quadruple = 14; // 1110
} else {
AT_ERROR("_to_sparse_semi_structured: dense argument does not match ",
TORCH_CHECK(false, "_to_sparse_semi_structured: dense argument does not match ",
(dense.dtype() != at::kFloat) ? "2:4" : "1:2",
"sparsity pattern");
}


@ -281,7 +281,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _sparse_semi_structured_tile(
bool use_cutlass)
{
#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
AT_ERROR("_sparse_semi_structured_tile: not supported");
TORCH_CHECK(false, "_sparse_semi_structured_tile: not supported");
return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{}, Tensor{});
#else
std::string algo(algorithm.data(), algorithm.size());


@ -90,7 +90,7 @@ std::tuple<Tensor, Tensor> _sparse_semi_structured_apply_typed(Tensor input, Ten
std::tuple<Tensor, Tensor> _sparse_semi_structured_apply(const Tensor& input, const Tensor& threads_masks) // Returned by `_sparse_semi_structured_tile`
{
#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
AT_ERROR("_sparse_semi_structured_apply: not supported");
TORCH_CHECK(false, "_sparse_semi_structured_apply: not supported");
return std::make_tuple(Tensor{}, Tensor{});
#else
TORCH_CHECK(


@ -18,7 +18,7 @@ inline std::vector<T> _expand_param_if_needed(
ss << "expected " << param_name << " to be a single integer value or a "
<< "list of " << expected_dim << " values to match the convolution "
<< "dimensions, but got " << param_name << "=" << list_param;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
} else {
return list_param.vec();
}


@ -48,7 +48,7 @@ void _check_layer_norm_inputs(
ss << ", " << size;
}
ss << "], but got input of size" << input_shape;
AT_ERROR(ss.str());
TORCH_CHECK(false, ss.str());
}
}


@ -21,7 +21,7 @@ at::Tensor& vulkan_copy_(at::Tensor& self, const at::Tensor& src) {
if (p) {
return p->vulkan_copy_(self, src);
}
AT_ERROR("Vulkan backend was not linked to the build");
TORCH_CHECK(false, "Vulkan backend was not linked to the build");
}
} // namespace vulkan


@ -11,9 +11,9 @@
CUresult get_error_str_err [[maybe_unused]] = \
c10::cuda::DriverAPI::get()->cuGetErrorString_(__err, &err_str); \
if (get_error_str_err != CUDA_SUCCESS) { \
AT_ERROR("CUDA driver error: unknown error"); \
TORCH_CHECK(false, "CUDA driver error: unknown error"); \
} else { \
AT_ERROR("CUDA driver error: ", err_str); \
TORCH_CHECK(false, "CUDA driver error: ", err_str); \
} \
} \
} while (0)


@ -21,7 +21,7 @@ FileAdapter::RAIIFile::RAIIFile(const std::string& file_name) {
auto error_msg =
std::system_category().default_error_condition(old_errno).message();
#endif
AT_ERROR(
TORCH_CHECK(false,
"open file failed because of errno ",
old_errno,
" on fopen: ",


@ -29,7 +29,7 @@ size_t IStreamAdapter::read(uint64_t pos, void* buf, size_t n, const char* what)
void IStreamAdapter::validate(const char* what) const {
if (!*istream_) {
AT_ERROR("istream reader failed: ", what, ".");
TORCH_CHECK(false, "istream reader failed: ", what, ".");
}
}


@ -298,7 +298,8 @@ inline void expectThrows(Functor&& functor, const char* expectMessageContains) {
} catch (const Exception& e) {
if (std::string(e.what()).find(expectMessageContains) ==
std::string::npos) {
AT_ERROR(
TORCH_CHECK(
false,
"Expected error message to contain \"",
expectMessageContains,
"\" but error message was: ",
@ -306,7 +307,8 @@ inline void expectThrows(Functor&& functor, const char* expectMessageContains) {
}
return;
}
AT_ERROR(
TORCH_CHECK(
false,
"Expected to throw exception containing \"",
expectMessageContains,
"\" but didn't throw");

Some files were not shown because too many files have changed in this diff.