Add Independent Memory Efficient and Flash Attention Build Flags (#107985)

# Summary
In an effort to simplify https://github.com/pytorch/pytorch/pull/105602, this PR pulls out independent chunks of code that can land before FlashV2 does.
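As a rough sketch of the idea, the two attention backends can be exposed as separate, independently toggleable build options. The snippet below is illustrative only; the option names match the flags printed in the summary further down, but the exact dependency conditions and defaults are assumptions, not the actual wiring in PyTorch's CMake.

```cmake
# Illustrative sketch: two independent options so either attention backend
# can be turned off without affecting the other. Conditions/defaults are
# assumed for the example and may differ from PyTorch's real build files.
include(CMakeDependentOption)

cmake_dependent_option(
  USE_FLASH_ATTENTION
  "Build the FlashAttention SDPA kernels" ON
  "USE_CUDA" OFF)

cmake_dependent_option(
  USE_MEM_EFF_ATTENTION
  "Build the memory-efficient SDPA kernels" ON
  "USE_CUDA" OFF)
```

Assuming PyTorch's usual passthrough of `USE_*` environment variables into CMake, either backend should then be skippable at build time, e.g. `USE_MEM_EFF_ATTENTION=0 python setup.py develop`.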

Pull Request resolved: https://github.com/pytorch/pytorch/pull/107985
Approved by: https://github.com/cpuhrsch
Author: drisspg
Date: 2023-08-28 18:39:15 +00:00
Committed by: PyTorch MergeBot
Parent: f0c6e5c91f
Commit: 182a9cf366
8 changed files with 28 additions and 9 deletions


@@ -80,6 +80,7 @@ function(caffe2_print_configuration_summary)
   message(STATUS " USE_CUSPARSELT : ${USE_CUSPARSELT}")
   message(STATUS " CUDA version : ${CUDA_VERSION}")
   message(STATUS " USE_FLASH_ATTENTION : ${USE_FLASH_ATTENTION}")
+  message(STATUS " USE_MEM_EFF_ATTENTION : ${USE_MEM_EFF_ATTENTION}")
   if(${USE_CUDNN})
     message(STATUS " cuDNN version : ${CUDNN_VERSION}")
   endif()
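With the added line, a CUDA configure run should report both flags in the printed configuration summary, roughly like this (values and column alignment are illustrative, not captured output):

```
--   USE_FLASH_ATTENTION   : ON
--   USE_MEM_EFF_ATTENTION : ON
```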