Update to replace AT_ERROR with TORCH_CHECK (#52711)

Summary:
Fixes #52699

Pull Request resolved: https://github.com/pytorch/pytorch/pull/52711

Reviewed By: ailzhang

Differential Revision: D26654677

Pulled By: malfet

fbshipit-source-id: 97079250d144c9b1c69028f35e4a23a34481b2a5
This commit is contained in:
Lance Ware
2021-02-25 19:43:41 -08:00
committed by Facebook GitHub Bot
parent a0a1bb074b
commit fdd25f82c9
11 changed files with 29 additions and 29 deletions

View File

@ -147,7 +147,7 @@ static inline Backend dispatchKeyToBackend(DispatchKey t) {
} else if (t == DispatchKey::Undefined) {
return Backend::Undefined;
} else {
AT_ERROR("Unrecognized tensor type ID: ", t);
TORCH_CHECK(false, "Unrecognized tensor type ID: ", t);
}
}
@ -230,9 +230,9 @@ static inline DeviceType backendToDeviceType(Backend b) {
case Backend::MLC:
return DeviceType::MLC;
case Backend::Undefined:
AT_ERROR("Undefined backend is not a valid device type");
TORCH_CHECK(false, "Undefined backend is not a valid device type");
default:
AT_ERROR("Unknown backend");
TORCH_CHECK(false, "Unknown backend");
}
}
@ -272,7 +272,7 @@ static inline Backend backendToCPU(Backend b) {
case Backend::Undefined:
return Backend::Undefined;
default:
AT_ERROR("Unknown backend");
TORCH_CHECK(false, "Unknown backend");
}
}
@ -300,7 +300,7 @@ static inline Backend backendToXPU(Backend b) {
case Backend::Undefined:
return Backend::Undefined;
default:
AT_ERROR("Unknown backend");
TORCH_CHECK(false, "Unknown backend");
}
}
@ -323,7 +323,7 @@ static inline Backend backendToCUDA(Backend b) {
case Backend::Undefined:
return Backend::Undefined;
default:
AT_ERROR("Unknown backend");
TORCH_CHECK(false, "Unknown backend");
}
}
@ -346,7 +346,7 @@ static inline Backend backendToHIP(Backend b) {
case Backend::Undefined:
return Backend::Undefined;
default:
AT_ERROR("Unknown backend");
TORCH_CHECK(false, "Unknown backend");
}
}

View File

@ -57,7 +57,7 @@ DeviceType parse_type(const std::string& device_string) {
if (device != types.end()) {
return device->second;
}
AT_ERROR(
TORCH_CHECK(false,
"Expected one of cpu, cuda, xpu, mkldnn, opengl, opencl, ideep, hip, msnpu, mlc, xla, vulkan device type at start of device string: ",
device_string);
}
@ -77,7 +77,7 @@ Device::Device(const std::string& device_string) : Device(Type::CPU) {
try {
index_ = c10::stoi(match[2].str());
} catch (const std::exception &) {
AT_ERROR(
TORCH_CHECK(false,
"Could not parse device index '", match[2].str(),
"' in device string '", device_string, "'");
}

View File

@ -36,7 +36,7 @@ std::string DeviceTypeName(DeviceType d, bool lower_case) {
case DeviceType::XPU:
return lower_case ? "xpu" : "XPU";
default:
AT_ERROR(
TORCH_CHECK(false,
"Unknown device: ",
static_cast<int16_t>(d),
". If you have recently updated the caffe2.proto file to add a new "

View File

@ -35,7 +35,7 @@ inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) {
case at::kMkldnn:
return stream << "Mkldnn";
default:
AT_ERROR("Unknown layout");
TORCH_CHECK(false, "Unknown layout");
}
}

View File

@ -48,7 +48,7 @@ inline std::ostream& operator<<(
case MemoryFormat::ChannelsLast3d:
return stream << "ChannelsLast3d";
default:
AT_ERROR("Unknown memory format");
TORCH_CHECK(false, "Unknown memory format");
}
}

View File

@ -201,7 +201,7 @@ static inline size_t elementSize(ScalarType t) {
switch (t) {
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(CASE_ELEMENTSIZE_CASE)
default:
AT_ERROR("Unknown ScalarType");
TORCH_CHECK(false, "Unknown ScalarType");
}
#undef CASE_ELEMENTSIZE_CASE
}
@ -367,7 +367,7 @@ static inline ScalarType promoteTypes(ScalarType a, ScalarType b) {
}
if (isQIntType(a) || isQIntType(b)) {
AT_ERROR(
TORCH_CHECK(false,
"promoteTypes with quantized numbers is not handled yet; figure out what the correct rules should be, offending types: ",
toString(a),
" ",

View File

@ -134,7 +134,7 @@ struct C10_API Storage {
size_t capacity,
DeleterFnPtr d = nullptr) {
if (!storage_impl_.unique()) {
AT_ERROR(
TORCH_CHECK(false,
"UniqueStorageShareExternalPointer can only be called when use_count == 1");
}
storage_impl_->UniqueStorageShareExternalPointer(src, capacity, d);
@ -144,7 +144,7 @@ struct C10_API Storage {
at::DataPtr&& data_ptr,
size_t capacity) {
if (!storage_impl_.unique()) {
AT_ERROR(
TORCH_CHECK(false,
"UniqueStorageShareExternalPointer can only be called when use_count == 1");
}
storage_impl_->UniqueStorageShareExternalPointer(

View File

@ -636,7 +636,7 @@ inline DispatchKey computeDispatchKey(c10::optional<ScalarType> dtype, c10::opti
case DeviceType::Metal:
return DispatchKey::Metal;
default:
AT_ERROR("Unsupported device type for dense layout: ", device_.type());
TORCH_CHECK(false, "Unsupported device type for dense layout: ", device_.type());
}
}
case Layout::Sparse:
@ -650,17 +650,17 @@ inline DispatchKey computeDispatchKey(c10::optional<ScalarType> dtype, c10::opti
case DeviceType::XPU:
return DispatchKey::SparseXPU;
default:
AT_ERROR("Unsupported device type for sparse layout: ", device_.type());
TORCH_CHECK(false, "Unsupported device type for sparse layout: ", device_.type());
}
case Layout::Mkldnn:
switch (device_.type()) {
case DeviceType::CPU:
return DispatchKey::MkldnnCPU;
default:
AT_ERROR("Unsupported device type for mkldnn layout: ", device_.type());
TORCH_CHECK(false, "Unsupported device type for mkldnn layout: ", device_.type());
}
default:
AT_ERROR("Unsupported layout: ", layout_);
TORCH_CHECK(false, "Unsupported layout: ", layout_);
}
}

View File

@ -10,11 +10,11 @@ UndefinedTensorImpl::UndefinedTensorImpl()
}
int64_t UndefinedTensorImpl::size(int64_t d) const {
AT_ERROR("size(dim) called on an undefined Tensor");
TORCH_CHECK(false, "size(dim) called on an undefined Tensor");
}
int64_t UndefinedTensorImpl::stride(int64_t d) const {
AT_ERROR("stride(dim) called on an undefined Tensor");
TORCH_CHECK(false, "stride(dim) called on an undefined Tensor");
}
#ifdef DEBUG
@ -25,11 +25,11 @@ bool UndefinedTensorImpl::has_storage() const {
#endif
void UndefinedTensorImpl::set_storage_offset(int64_t) {
AT_ERROR("set_storage_offset() called on an undefined Tensor");
TORCH_CHECK(false, "set_storage_offset() called on an undefined Tensor");
}
IntArrayRef UndefinedTensorImpl::strides() const {
AT_ERROR("strides() called on undefined Tensor");
TORCH_CHECK(false, "strides() called on undefined Tensor");
}
const char* UndefinedTensorImpl::tensorimpl_type_name() const {

View File

@ -603,7 +603,7 @@ class DeviceCachingAllocator {
} else if (&pool == &large_blocks) {
return StatType::LARGE_POOL;
} else {
AT_ERROR("get_stat_type_for_pool: invalid pool");
TORCH_CHECK(false, "get_stat_type_for_pool: invalid pool");
}
}
@ -614,7 +614,7 @@ class DeviceCachingAllocator {
} else if (block->pool == &large_blocks) {
return remaining > kSmallSize;
} else {
AT_ERROR("should_split: invalid pool");
TORCH_CHECK(false, "should_split: invalid pool");
}
}
@ -887,7 +887,7 @@ class THCCachingAllocator {
}
Block* block = get_allocated_block(ptr, true /* remove */);
if (!block) {
AT_ERROR("invalid device pointer: ", ptr);
TORCH_CHECK(false, "invalid device pointer: ", ptr);
}
device_allocator[block->device]->free(block);
}
@ -921,7 +921,7 @@ class THCCachingAllocator {
{
Block* block = get_allocated_block(ptr);
if (!block) {
AT_ERROR("invalid device pointer: ", ptr);
TORCH_CHECK(false, "invalid device pointer: ", ptr);
}
return device_allocator[block->device]->getBaseAllocation(block, outSize);
}

View File

@ -100,6 +100,6 @@ inline TempFile make_tempfile(std::string name_prefix = "torch-file-") {
if (auto tempfile = try_make_tempfile(std::move(name_prefix))) {
return *tempfile;
}
AT_ERROR("Error generating temporary file: ", std::strerror(errno));
TORCH_CHECK(false, "Error generating temporary file: ", std::strerror(errno));
}
} // namespace c10