[Caffe2] Handle cpuinfo_initialize() failure (#114011)
It can fail on ARM platforms if the `/sys` folder is not accessible. In that case, call `std::thread::hardware_concurrency()`, which is aligned with the thread pool initialization logic of `c10::TaskThreadPoolBase::defaultNumThreads()`.

Further addresses the issue raised in https://github.com/pytorch/pytorch/issues/113568

Pull Request resolved: https://github.com/pytorch/pytorch/pull/114011
Approved by: https://github.com/kit1980
ghstack dependencies: #113771
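Below is a minimal, self-contained sketch of the fallback pattern this commit introduces: prefer cpuinfo's logical processor count, and fall back to `std::thread::hardware_concurrency()` when `cpuinfo_initialize()` fails. It assumes libcpuinfo is available to include and link against, and uses `std::cerr` in place of the glog `LOG(WARNING)` macro used in the actual patch.

// Sketch of the fallback: prefer cpuinfo's logical processor count, but fall
// back to std::thread::hardware_concurrency() if cpuinfo_initialize() fails
// (e.g. on ARM when /sys is not accessible). Assumes <cpuinfo.h> is available;
// std::cerr stands in for the LOG(WARNING) macro from the real code.
#include <algorithm>
#include <iostream>
#include <thread>

#include <cpuinfo.h>

unsigned int getDefaultNumThreads() {
  auto numThreads = 1U;
  if (cpuinfo_initialize()) {
    numThreads = std::max(cpuinfo_get_processors_count(), 1U);
  } else {
    std::cerr << "cpuinfo initialization failed\n";
    // Same source that c10::TaskThreadPoolBase::defaultNumThreads() relies on;
    // hardware_concurrency() may return 0, hence the max with 1U.
    numThreads = std::max(std::thread::hardware_concurrency(), 1U);
  }
  return numThreads;
}

int main() {
  std::cout << "default thread count: " << getDefaultNumThreads() << '\n';
  return 0;
}

Clamping both branches with `std::max(..., 1U)` guards against cpuinfo reporting zero processors and against `hardware_concurrency()` returning 0, which the standard permits when the value is not computable.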
committed by: PyTorch MergeBot
parent: 855a5cf427
commit: 310e3060b7
@@ -46,8 +46,13 @@ namespace {
 
 size_t getDefaultNumThreads() {
 #if !defined(__s390x__) && !defined(__powerpc__)
-  CAFFE_ENFORCE(cpuinfo_initialize(), "cpuinfo initialization failed");
-  int numThreads = cpuinfo_get_processors_count();
+  auto numThreads = 1U;
+  if (cpuinfo_initialize()) {
+    numThreads = std::max(cpuinfo_get_processors_count(), 1U);
+  } else {
+    LOG(WARNING) << "cpuinfo initialization failed";
+    numThreads = std::max(std::thread::hardware_concurrency(), 1U);
+  }
 
   bool applyCap = false;
 #if defined(C10_ANDROID)
@@ -101,7 +106,7 @@ size_t getDefaultNumThreads() {
     }
   }
 #else
-  int numThreads = std::max<int>(std::thread::hardware_concurrency(), 1);
+  auto numThreads = std::max(std::thread::hardware_concurrency(), 1U);
 #endif
 
   if (FLAGS_pthreadpool_size) {
@@ -117,7 +122,7 @@ size_t getDefaultNumThreads() {
    * detect if we are running under tsan, for now capping the default
    * threadcount to the tsan limit unconditionally.
    */
-  int tsanThreadLimit = 63;
+  auto tsanThreadLimit = 63U;
   numThreads = std::min(numThreads, tsanThreadLimit);
 
   return numThreads;