x64: matmul: fix scales types check

Author:    Ovchinnikov Dmitriy
Date:      2025-10-17 04:08:59 -07:00
Committer: Dmitriy Ovchinnikov
Parent:    b453bb09eb
Commit:    024dcf2913

2 changed files with 9 additions and 3 deletions


@@ -477,6 +477,11 @@ inline bool isa_has_bf16(cpu_isa_t isa) {
     return is_superset(isa, avx512_core_bf16);
 }
 
+inline bool isa_has_f16(cpu_isa_t isa) {
+    return is_superset(isa, avx512_core_fp16)
+            || is_superset(isa, avx10_1_512_amx_fp16);
+}
+
 inline bool isa_has_masks(cpu_isa_t isa) {
     return is_superset(isa, avx512_core);
 }

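The new isa_has_f16() helper follows the existing isa_has_bf16()/isa_has_masks() pattern: is_superset(isa, base) reports whether isa provides at least the features of base, and f16 support is accepted from either of two ISA lines. The toy model below is only a sketch of that containment idea; the toy_* identifiers and bit layout are made up and do not reflect oneDNN's real cpu_isa_t encoding.

#include <cstdint>

// Toy bitmask ISA model; oneDNN's real cpu_isa_t layout and values differ.
using isa_mask_t = std::uint64_t;

constexpr isa_mask_t bit_avx512_core = 1u << 0;
constexpr isa_mask_t bit_bf16        = 1u << 1;
constexpr isa_mask_t bit_fp16        = 1u << 2;
constexpr isa_mask_t bit_amx_fp16    = 1u << 3;

constexpr isa_mask_t toy_avx512_core      = bit_avx512_core;
constexpr isa_mask_t toy_avx512_core_bf16 = toy_avx512_core | bit_bf16;
constexpr isa_mask_t toy_avx512_core_fp16 = toy_avx512_core | bit_bf16 | bit_fp16;
constexpr isa_mask_t toy_amx_fp16_line    = toy_avx512_core | bit_fp16 | bit_amx_fp16;

// "isa is a superset of base": every feature bit required by base is set in isa.
constexpr bool toy_is_superset(isa_mask_t isa, isa_mask_t base) {
    return (isa & base) == base;
}

// Shaped like the new isa_has_f16(): accept any ISA line that carries f16,
// testing each known f16-capable line explicitly.
constexpr bool toy_isa_has_f16(isa_mask_t isa) {
    return toy_is_superset(isa, toy_avx512_core_fp16)
            || toy_is_superset(isa, toy_amx_fp16_line);
}

static_assert(toy_isa_has_f16(toy_avx512_core_fp16), "fp16 line has f16");
static_assert(toy_isa_has_f16(toy_amx_fp16_line), "amx fp16 line has f16");
static_assert(!toy_isa_has_f16(toy_avx512_core_bf16), "bf16-only line has no f16");

int main() { return 0; } // the static_asserts above are the whole demo

In this toy encoding neither f16-capable line contains the other, which is why the helper ORs two separate superset tests rather than relying on a single one.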

@@ -1414,9 +1414,10 @@ status_t init_brgemm_matmul_conf(cpu_isa_t isa, brgemm_matmul_conf_t &bgmmc,
                               bgmmc.with_wei_decompression),
                 VERBOSE_UNSUPPORTED_SCALES_CFG);
-        // AVX2 supports f32 scales only
-        VCONDCHECK_BG(IMPLICATION(one_of(isa, avx2, avx2_vnni, avx2_vnni_2),
-                              bgmmc.wei_scales_dt == f32),
+        // Check if isa has support for f16/bf16 weights scales
+        VCONDCHECK_BG(IMPLICATION(bgmmc.wei_scales_dt == f16, isa_has_f16(isa))
+                        && IMPLICATION(
+                                bgmmc.wei_scales_dt == bf16, isa_has_bf16(isa)),
                 VERBOSE_UNSUPPORTED_SCALES_CFG);
     }
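
The reworked check reads as two implications: f16 weights scales are only valid when the ISA has f16 support, and bf16 scales only when it has bf16 support, replacing the old AVX2-specific rule that allowed only f32 scales. Below is a minimal standalone sketch of that validation shape; dt, isa_caps, wei_scales_dt_ok and the implication helper are hypothetical stand-ins rather than oneDNN's API (oneDNN's IMPLICATION(a, b) macro itself expands to !(a) || (b)).

#include <cstdio>

// Hypothetical stand-ins for oneDNN's data types and ISA capabilities.
enum class dt { f32, f16, bf16 };

struct isa_caps {
    bool has_f16;  // e.g. an avx512_core_fp16-like or AMX fp16 line
    bool has_bf16; // e.g. an avx512_core_bf16-like line
};

// Logical implication: "cause implies effect" is !cause || effect.
inline bool implication(bool cause, bool effect) { return !cause || effect; }

// Same shape as the fixed check: each non-f32 scales type must be backed
// by the matching ISA capability; f32 scales always pass.
bool wei_scales_dt_ok(dt wei_scales_dt, const isa_caps &caps) {
    return implication(wei_scales_dt == dt::f16, caps.has_f16)
            && implication(wei_scales_dt == dt::bf16, caps.has_bf16);
}

int main() {
    isa_caps avx2_like {false, false};
    isa_caps fp16_capable {true, true};

    std::printf("%d\n", wei_scales_dt_ok(dt::f32, avx2_like));     // 1: f32 always passes
    std::printf("%d\n", wei_scales_dt_ok(dt::f16, avx2_like));     // 0: no f16 support
    std::printf("%d\n", wei_scales_dt_ok(dt::bf16, fp16_capable)); // 1: bf16 backed by ISA
    return 0;
}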