Fix several typos (#83823)
Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/83823
Approved by: https://github.com/ngimel, https://github.com/kit1980
committed by: PyTorch MergeBot
parent: 7a348a1d4a
commit: 041edeeecb
@@ -200,7 +200,7 @@ inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
 // conditionally compile fragments of the case statements such
 // that the kernel functions are specialized only for the dtypes
 // that are needed. The NAME parameter *must* be a build time
-// cons char* (can't be std::string, etc...)
+// const char* (can't be std::string, etc...)
 //
 // Please ensure that the NAME is unique for every implementation
 // or you run the risk of over-including code for the kernel
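The comment above requires NAME to be a build-time const char*. For context, a minimal sketch of an AT_DISPATCH call passing NAME as a string literal; the function, kernel name, and loop body below are hypothetical and not part of this diff:

```cpp
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>

// Hypothetical example: fill a CPU tensor with ones, dispatching on its dtype.
// "fill_one_cpu" is the build-time const char* NAME the comment refers to.
void fill_one(at::Tensor& t) {
  AT_DISPATCH_ALL_TYPES(t.scalar_type(), "fill_one_cpu", [&] {
    // scalar_t is defined by the macro for the dtype selected at runtime.
    auto* data = t.data_ptr<scalar_t>();
    for (int64_t i = 0; i < t.numel(); ++i) {
      data[i] = static_cast<scalar_t>(1);
    }
  });
}
```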
@@ -103,7 +103,7 @@ struct ScalarTypeToCPPType;
 /* This is a workaround for the CUDA bug which prevents */ \
 /* ::detail::ScalarTypeToCType<T>::type being used directly due to */ \
 /* ambiguous reference which can't to be resolved. For some reason it */ \
-/* cant pick between at::detail and at::cuda::detail. */ \
+/* can't pick between at::detail and at::cuda::detail. */ \
 /* For repro example, please see: */ \
 /* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ \
 /* TODO: remove once the bug is fixed. */ \
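The workaround above exists because, per the comment, the compiler cannot decide between at::detail and at::cuda::detail for an unqualified ::detail reference. A standalone sketch of that class of ambiguity and its fix by full qualification; the ScalarTrait name below is made up, not PyTorch's trait:

```cpp
#include <cstdint>

namespace at { namespace detail {
  template <typename T> struct ScalarTrait { using type = T; };
}} // namespace at::detail

namespace at { namespace cuda { namespace detail {
  template <typename T> struct ScalarTrait { using type = T; };
}}} // namespace at::cuda::detail

namespace at { namespace cuda {
  // Inside at::cuda, an unqualified detail::ScalarTrait<T> is what the buggy
  // compiler reportedly cannot resolve; spelling out ::at::detail (or
  // ::at::cuda::detail) leaves no room for ambiguity.
  template <typename T>
  using resolved_t = typename ::at::detail::ScalarTrait<T>::type;
}} // namespace at::cuda

static_assert(sizeof(at::cuda::resolved_t<std::int64_t>) == 8,
              "fully qualified lookup picks a single trait");
```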
@@ -108,7 +108,7 @@ PyObject* THPDTypeInfo_compare(THPDTypeInfo* a, THPDTypeInfo* b, int op) {
 
 static PyObject* THPDTypeInfo_bits(THPDTypeInfo* self, void*) {
   // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers)
-  int bits = elementSize(self->type) * 8;
+  int64_t bits = elementSize(self->type) * 8;
   return THPUtils_packInt64(bits);
 }
 
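For context on the int to int64_t change above: elementSize returns a size_t, so accumulating the bit count in an int is the narrowing conversion the NOLINT comment suppresses, while int64_t matches what THPUtils_packInt64 takes. A standalone sketch of that reasoning; the helper below is hypothetical, not PyTorch code:

```cpp
#include <cstddef>
#include <cstdint>

// Keeping the product in int64_t avoids the size_t -> int narrowing that the
// NOLINT suppressed, and matches the 64-bit value that gets packed into the
// Python object.
int64_t bits_for_element(std::size_t element_size_bytes) {
  return static_cast<int64_t>(element_size_bytes) * 8;
}
```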
@@ -220,13 +220,10 @@ PyObject* THPFInfo_str(THPFInfo* self) {
 }
 
 PyObject* THPIInfo_str(THPIInfo* self) {
-  auto type = self->type;
-  std::string primary_name, legacy_name;
-  std::tie(primary_name, legacy_name) = torch::utils::getDtypeNames(type);
   std::ostringstream oss;
 
-  oss << "iinfo(min=" << PyFloat_AsDouble(THPIInfo_min(self, nullptr));
-  oss << ", max=" << PyFloat_AsDouble(THPIInfo_max(self, nullptr));
+  oss << "iinfo(min=" << PyLong_AsDouble(THPIInfo_min(self, nullptr));
+  oss << ", max=" << PyLong_AsDouble(THPIInfo_max(self, nullptr));
   oss << ", dtype=" << PyUnicode_AsUTF8(THPIInfo_dtype(self, nullptr)) << ")";
 
   return THPUtils_packString(oss.str().c_str());
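In the hunk above, the min/max bounds of an integer dtype are presumably held as Python ints (hence the switch to PyLong_AsDouble when streaming them into the repr). A self-contained sketch of the same formatting pattern against the plain CPython API; the values and dtype label below are hypothetical:

```cpp
#include <Python.h>
#include <iostream>
#include <sstream>

int main() {
  Py_Initialize();

  // Hypothetical bounds, e.g. those of a 32-bit signed integer dtype.
  PyObject* min_obj = PyLong_FromLongLong(-2147483648LL);
  PyObject* max_obj = PyLong_FromLongLong(2147483647LL);

  std::ostringstream oss;
  oss << "iinfo(min=" << PyLong_AsDouble(min_obj)
      << ", max=" << PyLong_AsDouble(max_obj) << ", dtype=int32)";
  std::cout << oss.str() << std::endl;

  Py_DECREF(min_obj);
  Py_DECREF(max_obj);
  Py_FinalizeEx();
  return 0;
}
```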