Revert D20683972: [pytorch][PR] Fix PyTorch separate compilation

Test Plan: revert-hammer

Differential Revision: D20683972

Original commit changeset: bc1492aa9d1d

fbshipit-source-id: 8994cbb36877d4338b8677ac6bc807dd16efa67c
This commit is contained in:
Edward Yang
2020-03-27 09:13:56 -07:00
committed by Facebook GitHub Bot
parent 16394a9d3f
commit 77ad3c5aeb
5 changed files with 2 additions and 30 deletions

View File

@@ -32,7 +32,6 @@ if(INTERN_BUILD_ATEN_OPS)
# Add source, includes, and libs to lists
list(APPEND Caffe2_CPU_SRCS ${ATen_CPU_SRCS})
list(APPEND Caffe2_GPU_SRCS ${ATen_CUDA_SRCS})
list(APPEND Caffe2_GPU_SRCS_W_SORT_BY_KEY ${ATen_CUDA_SRCS_W_SORT_BY_KEY})
list(APPEND Caffe2_HIP_SRCS ${ATen_HIP_SRCS})
list(APPEND Caffe2_CPU_TEST_SRCS ${ATen_CPU_TEST_SRCS})
list(APPEND Caffe2_GPU_TEST_SRCS ${ATen_CUDA_TEST_SRCS})
@@ -703,19 +702,7 @@ if(USE_ROCM)
endif()
elseif(USE_CUDA)
set(CUDA_LINK_LIBRARIES_KEYWORD PRIVATE)
if(CUDA_SEPARABLE_COMPILATION)
# Separate compilation fails when kernels using `thrust::sort_by_key`
# are linked with the rest of CUDA code. Work around this by linking them separately.
set(_generated_name "torch_cuda_w_sort_by_key_intermediate_link${CMAKE_C_OUTPUT_EXTENSION}")
set(torch_cuda_w_sort_by_key_link_file "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/torch_cuda.dir/${CMAKE_CFG_INTDIR}/${_generated_name}")
cuda_wrap_srcs(torch_cuda OBJ Caffe2_GPU_W_SORT_BY_KEY_OBJ ${Caffe2_GPU_SRCS_W_SORT_BY_KEY})
CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${torch_cuda_w_sort_by_key_link_file}" torch_cpu "${_options}" "${torch_cuda_SEPARABLE_COMPILATION_OBJECTS}")
set( torch_cuda_SEPARABLE_COMPILATION_OBJECTS )
# Pass compiled sort-by-key object + device-linked fatbin as extra dependencies of torch_cuda
cuda_add_library(torch_cuda ${Caffe2_GPU_SRCS} ${torch_cuda_w_sort_by_key_link_file} ${Caffe2_GPU_W_SORT_BY_KEY_OBJ})
else()
cuda_add_library(torch_cuda ${Caffe2_GPU_SRCS} ${Caffe2_GPU_SRCS_W_SORT_BY_KEY})
endif()
cuda_add_library(torch_cuda ${Caffe2_GPU_SRCS})
set(CUDA_LINK_LIBRARIES_KEYWORD)
torch_compile_options(torch_cuda) # see cmake/public/utils.cmake
if(USE_NCCL)