Add Independent Memory Efficient and Flash Attention Build Flags (#107985)
# Summary

In an effort to simplify https://github.com/pytorch/pytorch/pull/105602, this PR pulls out independent chunks of code that can be landed prior to FlashV2 landing.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/107985
Approved by: https://github.com/cpuhrsch
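To make the "independent" part concrete, here is a minimal sketch of how two attention backends can be gated by separate CMake options. The flag names come from this PR; the `option()` defaults and the compile definitions are illustrative assumptions, not PyTorch's actual CMakeLists:

```cmake
# Sketch only: two independently togglable build flags.
# Flag names match the PR; defaults and definitions are assumptions.
option(USE_FLASH_ATTENTION "Build FlashAttention kernels" ON)
option(USE_MEM_EFF_ATTENTION "Build memory-efficient attention kernels" ON)

# Each backend is compiled in (or left out) without affecting the other.
if(USE_FLASH_ATTENTION)
  add_compile_definitions(USE_FLASH_ATTENTION)
endif()
if(USE_MEM_EFF_ATTENTION)
  add_compile_definitions(USE_MEM_EFF_ATTENTION)
endif()
```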
Committed by: PyTorch MergeBot
Parent: f0c6e5c91f
Commit: 182a9cf366
```diff
@@ -80,6 +80,7 @@ function(caffe2_print_configuration_summary)
     message(STATUS "  USE_CUSPARSELT        : ${USE_CUSPARSELT}")
     message(STATUS "  CUDA version          : ${CUDA_VERSION}")
     message(STATUS "  USE_FLASH_ATTENTION   : ${USE_FLASH_ATTENTION}")
+    message(STATUS "  USE_MEM_EFF_ATTENTION : ${USE_MEM_EFF_ATTENTION}")
     if(${USE_CUDNN})
       message(STATUS "    cuDNN version       : ${CUDNN_VERSION}")
     endif()
```
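With the new summary line in place, either backend can be switched off without touching the other, and the result shows up in the configure log. A sketch, assuming the flags are exposed as ordinary CMake cache options:

```
cmake -DUSE_FLASH_ATTENTION=ON -DUSE_MEM_EFF_ATTENTION=OFF <source-dir>
```

The configuration summary would then report `USE_FLASH_ATTENTION : ON` and `USE_MEM_EFF_ATTENTION : OFF`, making the chosen configuration easy to confirm from build logs.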