[PowerPC] Disable MKLDNN TF32 on PowerPC to fix build failure (#163454)
Commit f4d8bc46c7706f872abcb4ec41f0b32207d5d826 added TF32 support for x86 CPUs, which causes build failures on PowerPC systems with mkldnn. This patch disables the TF32 paths on PowerPC while keeping x86 TF32 support intact, allowing PyTorch to build successfully on PowerPC.

I have run the mkldnn test suite on PowerPC, and it passed: `pytest test/test_mkldnn.py`: 87 passed, 2 skipped in 1709.02s (0:28:29).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/163454
Approved by: https://github.com/jgong5, https://github.com/malfet
Commit eaeaa08e3a (parent d0c32971b4), committed by PyTorch MergeBot.
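For context, the fix hinges on a standard compile-time architecture guard: the TF32 predicates compile to their real checks only on x86-64 targets and collapse to `return false` everywhere else. A minimal self-contained sketch of the pattern (hypothetical function name, not the actual PyTorch code):

#include <cstdio>

// Sketch of the guard pattern used in the patch: __x86_64__ covers
// GCC/Clang x86-64 targets, _M_X64 covers MSVC. On ppc64le neither
// macro is defined, so the TF32 path is compiled out entirely.
static bool tf32_supported_on_this_arch() {
#if defined(__x86_64__) || defined(_M_X64)
  return true;  // the real predicates additionally check AMX support at runtime
#else
  return false; // e.g. PowerPC: no TF32 support
#endif
}

int main() {
  std::printf("TF32 arch support: %s\n",
              tf32_supported_on_this_arch() ? "yes" : "no");
  return 0;
}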
--- a/aten/src/ATen/native/mkldnn/Conv.cpp
+++ b/aten/src/ATen/native/mkldnn/Conv.cpp
@@ -160,8 +160,12 @@ static bool mkldnn_conv_enabled_fpmath_mode_bf16(){
 }
 
 static bool mkldnn_conv_enabled_fpmath_mode_tf32(){
-  return at::globalContext().float32Precision(at::Float32Backend::MKLDNN, at::Float32Op::CONV) == at::Float32Precision::TF32 &&
-      cpuinfo_has_x86_amx_fp16();
+#if defined(__x86_64__) || defined(_M_X64)
+  return at::globalContext().float32Precision(at::Float32Backend::MKLDNN, at::Float32Op::CONV) == at::Float32Precision::TF32 &&
+      cpuinfo_has_x86_amx_fp16();
+#else
+  return false; //TF32 not supported on power system
+#endif
 }
 
 static inline at::MemoryFormat mkldnn_convolution_memory_format(int64_t dims, bool is_channels_last) {
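For context on what this predicate gates: when it returns true, PyTorch asks oneDNN to run FP32 convolutions with TF32 math. A sketch of how such a flag is typically applied through oneDNN's public API (illustrative only; dnnl::primitive_attr::set_fpmath_mode and dnnl::fpmath_mode::tf32 exist in oneDNN 3.x, but this is not the exact PyTorch call site):

#include <oneapi/dnnl/dnnl.hpp>

// Illustrative: build primitive attributes that request TF32 math for
// FP32 primitives when the architecture-gated predicate allows it.
dnnl::primitive_attr make_conv_attr(bool tf32_enabled) {
  dnnl::primitive_attr attr;
  if (tf32_enabled) {
    // Permits implicit down-conversion of fp32 inputs to tf32 inside
    // the primitive; on PowerPC the patch above keeps this disabled.
    attr.set_fpmath_mode(dnnl::fpmath_mode::tf32);
  }
  return attr;
}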
--- a/aten/src/ATen/native/mkldnn/Linear.cpp
+++ b/aten/src/ATen/native/mkldnn/Linear.cpp
@@ -74,8 +74,12 @@ static bool use_mkldnn_bf32_linear() {
 }
 
 static bool use_mkldnn_tf32_linear() {
-  return at::globalContext().float32Precision(at::Float32Backend::MKLDNN, at::Float32Op::MATMUL) == at::Float32Precision::TF32 &&
-      cpuinfo_has_x86_amx_fp16();
+#if defined(__x86_64__) || defined(_M_X64)
+  return at::globalContext().float32Precision(at::Float32Backend::MKLDNN, at::Float32Op::MATMUL) == at::Float32Precision::TF32 &&
+      cpuinfo_has_x86_amx_fp16();
+#else
+  return false; // TF32 not supported on power system
+#endif
 }
 
 Tensor mkldnn_linear(
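These predicates only fire if the user has opted in to TF32. Assuming the setter and enums are exposed via ATen/Context.h and mirror the float32Precision getter shown in the hunks (hypothetical usage, not taken from this commit), enabling TF32 for MKLDNN matmuls from C++ would look roughly like:

#include <ATen/Context.h>

// Opt in to TF32 math for MKLDNN matmul/linear. On PowerPC builds the
// guarded predicates still return false, so FP32 math is used regardless.
// Assumes setFloat32Precision mirrors the getter used in the diff above.
void enable_mkldnn_tf32_matmul() {
  at::globalContext().setFloat32Precision(
      at::Float32Backend::MKLDNN,
      at::Float32Op::MATMUL,
      at::Float32Precision::TF32);
}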
--- a/aten/src/ATen/native/mkldnn/Matmul.cpp
+++ b/aten/src/ATen/native/mkldnn/Matmul.cpp
@@ -114,8 +114,13 @@ static bool use_mkldnn_bf32_matmul() {
   return use_mkldnn_bf16_matmul() && at::globalContext().float32Precision(at::Float32Backend::MKLDNN, at::Float32Op::MATMUL) == at::Float32Precision::BF16;
 }
 
+
 static bool use_mkldnn_tf32_matmul() {
-  return cpuinfo_has_x86_amx_fp16() && at::globalContext().float32Precision(at::Float32Backend::MKLDNN, at::Float32Op::MATMUL) == at::Float32Precision::TF32;
+#if defined(__x86_64__) || defined(_M_X64)
+  return cpuinfo_has_x86_amx_fp16() && at::globalContext().float32Precision(at::Float32Backend::MKLDNN, at::Float32Op::MATMUL) == at::Float32Precision::TF32;
+#else
+  return false; // TF32 not supported on power system
+#endif
 }
 
 // returns an ideep::tensor