Mirror of https://github.com/deepspeedai/DeepSpeed.git, synced 2025-10-20 15:33:51 +08:00
Add cpu accelerator fp16 dtype support (#7207)

Signed-off-by: Lai, Yejing <yejing.lai@intel.com>
Co-authored-by: Logan Adams <114770087+loadams@users.noreply.github.com>
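The new `is_fp16_supported` check in the diff below hinges on a private PyTorch hook into oneDNN (mkldnn). As a standalone illustration (my sketch, not code from the commit), the probe amounts to the following; the broad exception guard matters because the op is a private interface that older PyTorch builds may lack:

```python
import torch

def cpu_fp16_available() -> bool:
    # Hypothetical helper: asks PyTorch's oneDNN (mkldnn) backend whether
    # this CPU can run fp16 compute. The op is a private interface and may
    # be missing on older builds, hence the broad guard.
    try:
        return bool(torch.ops.mkldnn._is_mkldnn_fp16_supported())
    except Exception:
        return False

print(cpu_fp16_available())
```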
The change to accelerator/cpu_accelerator.py:

```diff
@@ -229,10 +229,17 @@ class CPU_Accelerator(DeepSpeedAccelerator):
         return True
 
     def is_fp16_supported(self):
-        return False
+        try:
+            if torch.ops.mkldnn._is_mkldnn_fp16_supported():
+                return True
+        except:
+            return False
 
     def supported_dtypes(self):
-        return [torch.float, torch.bfloat16]
+        supported_dtypes = [torch.float, torch.bfloat16]
+        if self.is_fp16_supported():
+            supported_dtypes.append(torch.float16)
+        return supported_dtypes
 
     # Graph operations
     def create_graph(self):
```
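Downstream, nothing else has to change: code that queries the accelerator abstraction picks up fp16 on capable CPUs automatically. A minimal usage sketch (my example, not part of the commit), assuming a DeepSpeed install with the standard `get_accelerator()` entry point:

```python
import torch
from deepspeed.accelerator import get_accelerator

accel = get_accelerator()

# With this commit, torch.float16 shows up here on CPU builds whose
# oneDNN (mkldnn) runtime reports fp16 support.
print(accel.supported_dtypes())

# Prefer fp16 when the accelerator reports support, falling back to bf16.
dtype = torch.float16 if accel.is_fp16_supported() else torch.bfloat16
x = torch.randn(4, 4, dtype=dtype)
print(x.dtype)
```

Building `supported_dtypes()` on top of `is_fp16_supported()` keeps the capability probe in one place, so the dtype list and the boolean check can never disagree.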