Update TensorPipe submodule (#42522)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/42522

Main changes:
- Consolidated CMake files to have a single entry point, rather than having a specialized one for PyTorch.
- Changed the way the preprocessor flags are provided (they now come from a generated header) and renamed them (see the sketch after this list).
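
A rough sketch of the new scheme, condensed from the hunks below (the header-generation step happens inside TensorPipe's own CMake and is only summarized here in comments):

    # The superproject forces TensorPipe's cache options before adding it.
    set(TP_BUILD_LIBUV ON CACHE BOOL "" FORCE)
    set(TP_ENABLE_SHM OFF CACHE BOOL "" FORCE)    # surfaces as TENSORPIPE_HAS_SHM_TRANSPORT
    set(TP_ENABLE_CMA OFF CACHE BOOL "" FORCE)    # surfaces as TENSORPIPE_HAS_CMA_CHANNEL
    set(TP_STATIC_OR_SHARED STATIC CACHE STRING "" FORCE)
    add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)
    # TensorPipe turns these options into the generated tensorpipe/tensorpipe.h,
    # so C++ call sites check `#if TENSORPIPE_HAS_SHM_TRANSPORT` rather than
    # the old `#ifdef TP_ENABLE_SHM`.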

There were a few places in PyTorch's CMake files where we added TensorPipe's source directory directly as an include path; that directory does not contain the auto-generated header we now produce. We fix this by linking those targets against the `tensorpipe` CMake target instead, so they inherit the include directories that TensorPipe exports, which do contain the generated header.
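
As a condensed before/after of that pattern (the `test_cpp_rpc` target and the exact line content come from one of the hunks below):

    # Before: raw include path into the source tree, which lacks the
    # generated tensorpipe/tensorpipe.h:
    #   target_include_directories(test_cpp_rpc PRIVATE
    #       ${ATen_CPU_INCLUDE}
    #       ${TORCH_ROOT}/third_party/tensorpipe)
    # After: link against the tensorpipe target and inherit its include
    # directories, which do contain the generated header:
    target_include_directories(test_cpp_rpc PRIVATE ${ATen_CPU_INCLUDE})
    target_link_libraries(test_cpp_rpc PRIVATE torch c10d tensorpipe gtest process_group_agent)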

I'm turning off SHM and CMA for now because they have never been covered by the CI. I'll enable them in a separate PR so that if they turn out to be flaky we can revert that change without reverting this one.

Test Plan: CI

Reviewed By: malfet

Differential Revision: D22959472

fbshipit-source-id: 1959a41c4a66ef78bf0f3bd5e3964969a2a1bf67
Luca Wehrstedt
2020-08-06 02:11:57 -07:00
committed by Facebook GitHub Bot
parent bd458b7d02
commit c30bc6d4d7
8 changed files with 25 additions and 18 deletions

View File

@@ -278,7 +278,6 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
add_library(process_group_agent "${TORCH_SRC_DIR}/csrc/distributed/rpc/process_group_agent.cpp" "${TORCH_SRC_DIR}/csrc/distributed/rpc/process_group_agent.h")
target_link_libraries(process_group_agent PRIVATE torch c10d fmt::fmt-header-only)
add_dependencies(process_group_agent torch c10d)
target_include_directories(process_group_agent PRIVATE ${TORCH_ROOT}/third_party/tensorpipe)
endif()
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)

View File

@@ -46,7 +46,7 @@ endif()
# 3. If MSVC_Z7_OVERRIDE is ON, then /Zi and /ZI will be replaced with /Z7
# for Debug and RelWithDebInfo builds
if(MSVC)
foreach(flag_var
foreach(flag_var
CMAKE_C_FLAGS CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_MINSIZEREL
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL)
if(${flag_var} MATCHES "/Z[iI7]")
@@ -1307,18 +1307,13 @@ if(USE_DISTRIBUTED AND USE_TENSORPIPE)
if(MSVC)
message(WARNING "Tensorpipe cannot be used on Windows.")
else()
set(__PYTORCH_BUILD ${PYTORCH_BUILD})
set(PYTORCH_BUILD ON)
set(__BUILD_TESTING ${BUILD_TESTING})
set(BUILD_TESTING OFF)
set(TP_BUILD_PYTHON OFF)
set(TP_BUILD_LIBUV ON)
set(TP_BUILD_LIBUV ON CACHE BOOL "" FORCE)
set(TP_ENABLE_SHM OFF CACHE BOOL "" FORCE)
set(TP_ENABLE_CMA OFF CACHE BOOL "" FORCE)
set(TP_STATIC_OR_SHARED STATIC CACHE STRING "" FORCE)
add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)
set(PYTORCH_BUILD ${__PYTORCH_BUILD})
set(BUILD_TESING ${__BUILD_TESTING})
list(APPEND Caffe2_DEPENDENCY_LIBS tensorpipe)
endif()
endif()

View File

@@ -872,6 +872,7 @@ if __name__ == '__main__':
'share/cmake/Caffe2/Modules_CUDA_fix/upstream/*.cmake',
'share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/*.cmake',
'share/cmake/Gloo/*.cmake',
'share/cmake/Tensorpipe/*.cmake',
'share/cmake/Torch/*.cmake',
],
'caffe2': [

View File

@@ -9,9 +9,8 @@ set(TORCH_RPC_TEST_SOURCES
add_executable(test_cpp_rpc ${TORCH_RPC_TEST_SOURCES})
target_include_directories(
test_cpp_rpc PRIVATE
${ATen_CPU_INCLUDE}
${TORCH_ROOT}/third_party/tensorpipe)
target_link_libraries(test_cpp_rpc PRIVATE torch c10d gtest process_group_agent)
${ATen_CPU_INCLUDE})
target_link_libraries(test_cpp_rpc PRIVATE torch c10d tensorpipe gtest process_group_agent)
if(USE_CUDA)
target_link_libraries(test_cpp_rpc PRIVATE

View File

@@ -1,4 +1,5 @@
load("@rules_cc//cc:defs.bzl", "cc_library")
load("@//third_party:substitution.bzl", "template_rule")
LIBUV_COMMON_SRCS = [
"third_party/libuv/src/fs-poll.c",
@@ -72,6 +73,16 @@ cc_proto_library(
deps = [":tensorpipe_proto_source"],
)
template_rule(
name = "tensorpipe_header_template",
src = "tensorpipe/tensorpipe.h.in",
out = "tensorpipe/tensorpipe.h",
substitutions = {
"cmakedefine01 TENSORPIPE_HAS_SHM_TRANSPORT": "define TENSORPIPE_HAS_SHM_TRANSPORT 0",
"cmakedefine01 TENSORPIPE_HAS_CMA_CHANNEL": "define TENSORPIPE_HAS_CMA_CHANNEL 0",
},
)
cc_library(
name = "tensorpipe",
srcs = glob(

View File

@@ -70,7 +70,6 @@ set(TORCH_PYTHON_INCLUDE_DIRECTORIES
${TORCH_ROOT}/third_party/gloo
${TORCH_ROOT}/third_party/onnx
${TORCH_ROOT}/third_party/tensorpipe
${pybind11_INCLUDE_DIRS}
${TORCH_SRC_DIR}/csrc
@@ -166,6 +165,9 @@ if(USE_DISTRIBUTED)
endif()
list(APPEND TORCH_PYTHON_LINK_LIBRARIES c10d)
list(APPEND TORCH_PYTHON_COMPILE_DEFINITIONS USE_C10D)
if(USE_TENSORPIPE)
list(APPEND TORCH_PYTHON_LINK_LIBRARIES tensorpipe)
endif()
endif()
endif()

View File

@@ -87,7 +87,7 @@ std::unique_ptr<TransportRegistration> makeUvTransport() {
// libuv (https://github.com/libuv/libuv) in order to be cross-platform.
C10_REGISTER_CREATOR(TensorPipeTransportRegistry, uv, makeUvTransport);
#ifdef TP_ENABLE_SHM
#if TENSORPIPE_HAS_SHM_TRANSPORT
std::string createUniqueShmAddr() {
thread_local uint32_t threadLocalId = 0;
@@ -125,7 +125,7 @@ std::unique_ptr<ChannelRegistration> makeBasicChannel() {
// transport to be used as a channel.
C10_REGISTER_CREATOR(TensorPipeChannelRegistry, basic, makeBasicChannel);
#ifdef TP_ENABLE_CMA
#if TENSORPIPE_HAS_CMA_CHANNEL
std::unique_ptr<ChannelRegistration> makeCmaChannel() {
auto context = std::make_shared<tensorpipe::channel::cma::Context>();