Move RPC agents to libtorch (#59939)

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/59939

Test Plan: CI

Reviewed By: mrshenli

Differential Revision: D28875276

fbshipit-source-id: f2f6970fd74de5f112636e78edaa4410c61d8c45
This commit is contained in:
Luca Wehrstedt
2021-06-15 16:16:54 -07:00
committed by Facebook GitHub Bot
parent 04ec122868
commit fc50f91929
14 changed files with 86 additions and 128 deletions

View File

@@ -1726,7 +1726,7 @@ cc_library(
],
[
":aten",
"@tensorpipe",
"@tensorpipe//:tensorpipe_cpu",
],
),
alwayslink = True,

View File

@@ -344,53 +344,6 @@ endif()
if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
if(USE_DISTRIBUTED)
# Define this target even if we're building without TensorPipe, to make life
# easier to other targets that depend on this. However, in that case, by not
# setting the USE_TENSORPIPE compile definition, this target will just end
# up being empty. Downstream targets should also add a #ifdef guard.
if(NOT WIN32)
add_library(process_group_agent
"${TORCH_SRC_DIR}/csrc/distributed/rpc/agent_utils.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/agent_utils.h"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/process_group_agent.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/process_group_agent.h"
)
target_link_libraries(process_group_agent PRIVATE torch fmt::fmt-header-only)
add_dependencies(process_group_agent torch)
if(USE_TENSORPIPE)
add_library(tensorpipe_agent
"${TORCH_SRC_DIR}/csrc/distributed/rpc/agent_utils.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/agent_utils.h"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/macros.h"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/tensorpipe_agent.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/tensorpipe_agent.h"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/tensorpipe_cuda.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/tensorpipe_utils.cpp"
"${TORCH_SRC_DIR}/csrc/distributed/rpc/tensorpipe_utils.h"
)
target_link_libraries(tensorpipe_agent PRIVATE torch tensorpipe fmt::fmt-header-only)
add_dependencies(tensorpipe_agent torch)
if(USE_CUDA)
target_compile_definitions(tensorpipe_agent PUBLIC USE_CUDA)
endif()
if(USE_ROCM)
target_compile_definitions(tensorpipe_agent PRIVATE
USE_ROCM
__HIP_PLATFORM_HCC__
)
endif()
target_compile_definitions(tensorpipe_agent PUBLIC USE_TENSORPIPE)
target_link_libraries(tensorpipe_agent PRIVATE tensorpipe)
add_dependencies(tensorpipe_agent tensorpipe)
endif()
endif()
endif()
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
# Generate files
@@ -1236,7 +1189,7 @@ endif()
if(USE_DISTRIBUTED)
# Needed to support the inclusion of c10d/Foo.hpp headers.
target_include_directories(torch_cpu PUBLIC ${TORCH_SRC_DIR}/lib)
target_compile_definitions(torch_cpu PRIVATE USE_DISTRIBUTED)
target_compile_definitions(torch_cpu PUBLIC USE_DISTRIBUTED)
if(USE_GLOO AND USE_C10D_GLOO)
target_compile_definitions(torch_cpu PUBLIC USE_C10D_GLOO)
endif()
@@ -1263,16 +1216,12 @@ if(USE_DISTRIBUTED)
# #if defined(USE_DISTRIBUTED) && !defined(_WIN32)
# need to be removed when RPC is supported
if(NOT WIN32)
target_compile_definitions(torch_cpu PRIVATE
USE_RPC
)
target_compile_definitions(torch_cpu PUBLIC USE_RPC)
endif()
# Pass USE_TENSORPIPE to torch_cpu as some parts of rpc/utils.cpp
# can only be compiled with USE_TENSORPIPE is set.
if(USE_TENSORPIPE)
target_compile_definitions(torch_cpu PRIVATE
USE_TENSORPIPE
)
target_compile_definitions(torch_cpu PUBLIC USE_TENSORPIPE)
endif()
endif()

View File

@@ -1377,6 +1377,13 @@ if(USE_DISTRIBUTED AND USE_TENSORPIPE)
add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)
list(APPEND Caffe2_DEPENDENCY_LIBS tensorpipe)
if(USE_CUDA)
list(APPEND Caffe2_CUDA_DEPENDENCY_LIBS tensorpipe_cuda)
elseif(USE_ROCM)
message(WARNING "TensorPipe doesn't yet support ROCm")
# Not yet...
# list(APPEND Caffe2_HIP_DEPENDENCY_LIBS tensorpipe_hip)
endif()
endif()
endif()

View File

@@ -5,7 +5,7 @@ set(TORCH_RPC_TEST_SOURCES
${TORCH_RPC_TEST_DIR}/test_wire_serialization.cpp
)
set(TORCH_RPC_TEST_DEPENDENCY_LIBS
torch gtest process_group_agent
torch gtest
)
if(USE_GLOO)
@@ -20,7 +20,7 @@ if(USE_TENSORPIPE)
${TORCH_RPC_TEST_DIR}/test_tensorpipe_serialization.cpp
)
list(APPEND TORCH_RPC_TEST_DEPENDENCY_LIBS
tensorpipe_agent tensorpipe
tensorpipe
)
endif()

View File

@@ -71,63 +71,82 @@ cc_library(
)
header_template_rule(
name = "tensorpipe_config_header",
name = "tensorpipe_cpu_config_header",
src = "tensorpipe/config.h.in",
out = "tensorpipe/config.h",
substitutions = {
"#cmakedefine01 TENSORPIPE_HAS_SHM_TRANSPORT": "",
"#cmakedefine01 TENSORPIPE_HAS_CMA_CHANNEL": "",
"#cmakedefine01 TENSORPIPE_HAS_CUDA_IPC_CHANNEL": "",
"#cmakedefine01 TENSORPIPE_HAS_CUDA_GDR_CHANNEL": "",
"#cmakedefine01 TENSORPIPE_HAS_IBV_TRANSPORT": "",
"#cmakedefine01 TENSORPIPE_SUPPORTS_CUDA": "",
"#cmakedefine01 TENSORPIPE_HAS_SHM_TRANSPORT": "#define TENSORPIPE_HAS_SHM_TRANSPORT 1",
"#cmakedefine01 TENSORPIPE_HAS_IBV_TRANSPORT": "#define TENSORPIPE_HAS_IBV_TRANSPORT 1",
"#cmakedefine01 TENSORPIPE_HAS_CMA_CHANNEL": "#define TENSORPIPE_HAS_CMA_CHANNEL 1",
},
)
TENSORPIPE_HEADERS = glob([
"tensorpipe/*.h",
"tensorpipe/channel/*.h",
"tensorpipe/channel/*/*.h",
"tensorpipe/common/*.h",
"tensorpipe/core/*.h",
"tensorpipe/transport/*.h",
"tensorpipe/transport/*/*.h",
"tensorpipe/util/*/*.h",
])
header_template_rule(
name = "tensorpipe_cuda_config_header",
src = "tensorpipe/config_cuda.h.in",
out = "tensorpipe/config_cuda.h",
substitutions = {
"#cmakedefine01 TENSORPIPE_HAS_CUDA_IPC_CHANNEL": "#define TENSORPIPE_HAS_CUDA_IPC_CHANNEL 1",
"#cmakedefine01 TENSORPIPE_HAS_CUDA_GDR_CHANNEL": "#define TENSORPIPE_HAS_CUDA_GDR_CHANNEL 1",
},
)
TENSORPIPE_BASE_SRCS = glob([
"tensorpipe/*.cc",
"tensorpipe/channel/*.cc",
"tensorpipe/common/address.cc",
"tensorpipe/common/epoll_loop.cc",
"tensorpipe/common/error.cc",
"tensorpipe/common/fd.cc",
"tensorpipe/common/ibv.cc",
"tensorpipe/common/socket.cc",
"tensorpipe/common/system.cc",
"tensorpipe/core/*.cc",
"tensorpipe/transport/*.cc",
"tensorpipe/util/*/*.cc",
])
# We explicitly list the CUDA headers & sources, and we consider everything else
# as CPU (using a catch-all glob). This is both because there's fewer CUDA files
# (thus making it easier to list them exhaustively) and because it will make it
# more likely to catch a misclassified file: if we forget to mark a file as CUDA
# we'll try to build it on CPU and that's likely to fail.
TENSORPIPE_SRCS = TENSORPIPE_BASE_SRCS + glob([
"tensorpipe/channel/basic/*.cc",
"tensorpipe/channel/mpt/*.cc",
"tensorpipe/channel/xth/*.cc",
"tensorpipe/transport/uv/*.cc",
])
TENSORPIPE_CUDA_HEADERS = [
"tensorpipe/tensorpipe_cuda.h",
"tensorpipe/channel/cuda_basic/*.h",
"tensorpipe/channel/cuda_gdr/*.h",
"tensorpipe/channel/cuda_ipc/*.h",
"tensorpipe/channel/cuda_xth/*.h",
"tensorpipe/common/cuda.h",
"tensorpipe/common/cuda_buffer.h",
"tensorpipe/common/cuda_lib.h",
"tensorpipe/common/cuda_loop.h",
"tensorpipe/common/nvml_lib.h",
]
TENSORPIPE_SRCS_CUDA = TENSORPIPE_SRCS + glob([
"tensorpipe/common/cuda_loop.cc",
TENSORPIPE_CUDA_SOURCES = [
"tensorpipe/channel/cuda_basic/*.cc",
"tensorpipe/channel/cuda_gdr/*.cc",
"tensorpipe/channel/cuda_ipc/*.cc",
"tensorpipe/channel/cuda_xth/*.cc",
])
"tensorpipe/common/cuda_buffer.cc",
"tensorpipe/common/cuda_loop.cc",
]
TENSORPIPE_CPU_HEADERS = glob(
[
"tensorpipe/*.h",
"tensorpipe/channel/*.h",
"tensorpipe/channel/*/*.h",
"tensorpipe/common/*.h",
"tensorpipe/core/*.h",
"tensorpipe/transport/*.h",
"tensorpipe/transport/*/*.h",
],
exclude=TENSORPIPE_CUDA_HEADERS)
TENSORPIPE_CPU_SOURCES = glob(
[
"tensorpipe/*.cc",
"tensorpipe/channel/*.cc",
"tensorpipe/channel/*/*.cc",
"tensorpipe/common/*.cc",
"tensorpipe/core/*.cc",
"tensorpipe/transport/*.cc",
"tensorpipe/transport/*/*.cc",
],
exclude=TENSORPIPE_CUDA_SOURCES)
cc_library(
name = "tensorpipe",
srcs = TENSORPIPE_SRCS + [":tensorpipe_config_header"],
hdrs = TENSORPIPE_HEADERS,
name = "tensorpipe_cpu",
srcs = TENSORPIPE_CPU_SOURCES,
hdrs = TENSORPIPE_CPU_HEADERS + [":tensorpipe_cpu_config_header"],
includes = [
".",
],
@@ -143,8 +162,8 @@ cc_library(
cc_library(
name = "tensorpipe_cuda",
srcs = TENSORPIPE_SRCS_CUDA + [":tensorpipe_config_header"],
hdrs = TENSORPIPE_HEADERS,
srcs = TENSORPIPE_CUDA_SOURCES,
hdrs = TENSORPIPE_CUDA_HEADERS + [":tensorpipe_cuda_config_header"],
includes = [
".",
],
@@ -153,8 +172,7 @@ cc_library(
],
visibility = ["//visibility:public"],
deps = [
":libnop",
":libuv",
":tensorpipe_cpu",
"@cuda",
],
)

View File

@@ -352,12 +352,14 @@ libtorch_distributed_extra_sources = [
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.cpp",
"torch/csrc/distributed/rpc/agent_utils.cpp",
"torch/csrc/distributed/rpc/message.cpp",
"torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp",
"torch/csrc/distributed/rpc/profiler/server_process_global_profiler.cpp",
"torch/csrc/distributed/rpc/python_call.cpp",
"torch/csrc/distributed/rpc/python_remote_call.cpp",
"torch/csrc/distributed/rpc/python_resp.cpp",
"torch/csrc/distributed/rpc/process_group_agent.cpp",
"torch/csrc/distributed/rpc/request_callback.cpp",
"torch/csrc/distributed/rpc/request_callback_no_python.cpp",
"torch/csrc/distributed/rpc/rpc_agent.cpp",
@@ -367,6 +369,9 @@ libtorch_distributed_extra_sources = [
"torch/csrc/distributed/rpc/script_call.cpp",
"torch/csrc/distributed/rpc/script_remote_call.cpp",
"torch/csrc/distributed/rpc/script_resp.cpp",
"torch/csrc/distributed/rpc/tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/tensorpipe_utils.cpp",
"torch/csrc/distributed/rpc/testing/faulty_process_group_agent.cpp",
"torch/csrc/distributed/rpc/torchscript_functions.cpp",
"torch/csrc/distributed/rpc/types.cpp",
"torch/csrc/distributed/rpc/utils.cpp",
@@ -522,6 +527,7 @@ libtorch_cuda_distributed_base_sources = [
# These files are only supported on Linux (and others) but not on Windows.
libtorch_cuda_distributed_extra_sources = [
"torch/csrc/distributed/rpc/tensorpipe_cuda.cpp",
"torch/lib/c10d/NCCLUtils.cpp",
"torch/lib/c10d/ProcessGroupNCCL.cpp",
]
@@ -710,17 +716,11 @@ libtorch_python_distributed_core_sources = [
libtorch_python_distributed_sources = libtorch_python_distributed_core_sources + [
"torch/csrc/distributed/autograd/init.cpp",
"torch/csrc/distributed/rpc/agent_utils.cpp",
"torch/csrc/distributed/rpc/init.cpp",
"torch/csrc/distributed/rpc/process_group_agent.cpp",
"torch/csrc/distributed/rpc/py_rref.cpp",
"torch/csrc/distributed/rpc/python_functions.cpp",
"torch/csrc/distributed/rpc/python_rpc_handler.cpp",
"torch/csrc/distributed/rpc/request_callback_impl.cpp",
"torch/csrc/distributed/rpc/tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/tensorpipe_cuda.cpp",
"torch/csrc/distributed/rpc/tensorpipe_utils.cpp",
"torch/csrc/distributed/rpc/testing/faulty_process_group_agent.cpp",
"torch/csrc/distributed/rpc/testing/init.cpp",
"torch/csrc/distributed/rpc/unpickled_python_call.cpp",
"torch/csrc/distributed/rpc/unpickled_python_remote_call.cpp",

View File

@@ -261,11 +261,9 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
endif()
if(USE_DISTRIBUTED)
list(APPEND TORCH_PYTHON_COMPILE_DEFINITIONS USE_DISTRIBUTED)
if(WIN32)
append_filelist("libtorch_python_distributed_core_sources" TORCH_PYTHON_SRCS)
else()
list(APPEND TORCH_PYTHON_COMPILE_DEFINITIONS USE_RPC)
append_filelist("libtorch_python_distributed_sources" TORCH_PYTHON_SRCS)
endif()
# Disable certain warnings for GCC-9.X
@@ -274,10 +272,6 @@ if(USE_DISTRIBUTED)
set_source_files_properties(${TORCH_SRC_DIR}/csrc/distributed/rpc/testing/init.cpp PROPERTIES COMPILE_FLAGS "-Wno-cast-function-type")
set_source_files_properties(${TORCH_SRC_DIR}/csrc/distributed/c10d/init.cpp PROPERTIES COMPILE_FLAGS "-Wno-cast-function-type")
endif()
if(USE_TENSORPIPE)
list(APPEND TORCH_PYTHON_LINK_LIBRARIES tensorpipe)
list(APPEND TORCH_PYTHON_PUBLIC_COMPILE_DEFINITIONS USE_TENSORPIPE)
endif()
# NCCL is a private dependency of libtorch, but libtorch_python includes
# some private headers of libtorch, which in turn include NCCL. As a hacky
# alternative to making NCCL a public dependency of libtorch, we make it

View File

@@ -1,5 +0,0 @@
#pragma once
#if defined(USE_CUDA) && !defined(__HIP_PLATFORM_HCC__)
#define USE_CUDA_NOT_ROCM
#endif

View File

@@ -10,7 +10,6 @@
#include <tensorpipe/tensorpipe.h>
#include <torch/csrc/distributed/rpc/agent_utils.h>
#include <torch/csrc/distributed/rpc/macros.h>
#include <torch/csrc/distributed/rpc/tensorpipe_utils.h>
#include <torch/csrc/distributed/rpc/utils.h>

View File

@@ -9,7 +9,6 @@
#include <c10d/PrefixStore.hpp>
#include <c10d/ProcessGroup.hpp>
#include <c10d/Store.hpp>
#include <torch/csrc/distributed/rpc/macros.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
// Forward-declare the TensorPipe classes we need, to avoid including its

View File

@@ -1,8 +1,7 @@
#include <torch/csrc/distributed/rpc/macros.h>
#include <torch/csrc/distributed/rpc/tensorpipe_agent.h>
#include <torch/csrc/distributed/rpc/tensorpipe_utils.h>
#if defined(USE_TENSORPIPE) && defined(USE_CUDA_NOT_ROCM)
#if defined(USE_TENSORPIPE) && !defined(__HIP_PLATFORM_HCC__)
#include <c10/cuda/CUDACachingAllocator.h>
#include <c10/cuda/CUDAGuard.h>

View File

@@ -1,4 +1,3 @@
#include <torch/csrc/distributed/rpc/macros.h>
#include <torch/csrc/distributed/rpc/tensorpipe_utils.h>
#ifdef USE_TENSORPIPE

View File

@@ -2,7 +2,6 @@
#ifdef USE_TENSORPIPE
#include <torch/csrc/distributed/rpc/macros.h>
#include <torch/csrc/distributed/rpc/utils.h>
namespace tensorpipe {