mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 12:54:11 +08:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/24876 This contains very basic functionality of adding 'send' autograd function to our autograd graph. The purpose of this change is to validate the basic structure proposed here makes sense. Once this makes sense, we can build upon this to address more complicated scenarios. At a high level we've added the following functionality: 1) Define a very simple 'SendRpcBackwards' autograd function. 2) Attach this function to appropriate tensors when we call an RPC. 3) Store the send function in our distributed autograd context. ghstack-source-id: 89359708 Test Plan: unit tests. Differential Revision: D16903255 fbshipit-source-id: 6c04794a8e58b199795404225fd9da0c1440460e
28 lines
877 B
CMake
28 lines
877 B
CMake
# Location of the distributed-autograd C++ tests within the torch tree.
set(DIST_AUTOGRAD_TEST_DIR "${TORCH_ROOT}/test/cpp/dist_autograd")

# Test sources: the shared gtest main plus the dist_autograd test file.
set(DIST_AUTOGRAD_TEST_SOURCES
  ${TORCH_ROOT}/test/cpp/common/main.cpp
  ${DIST_AUTOGRAD_TEST_DIR}/test_dist_autograd.cpp
)
# Build the distributed-autograd test binary and wire up its
# include paths and link dependencies with explicit PRIVATE scope
# (nothing here is consumed by other targets).
add_executable(test_dist_autograd ${DIST_AUTOGRAD_TEST_SOURCES})
target_include_directories(test_dist_autograd PRIVATE ${ATen_CPU_INCLUDE})
target_link_libraries(test_dist_autograd PRIVATE torch gtest)
# When building with CUDA, link the CUDA runtime/driver libraries and
# torch's CUDA libraries, and define USE_CUDA so the test sources can
# conditionally compile CUDA-specific paths.
if(USE_CUDA)
  target_link_libraries(test_dist_autograd PRIVATE
    ${CUDA_LIBRARIES}
    ${CUDA_NVRTC_LIB}
    ${CUDA_CUDA_LIB}
    ${TORCH_CUDA_LIBRARIES})

  target_compile_definitions(test_dist_autograd PRIVATE "USE_CUDA")
endif()
# Optionally install the test binary (and its PDB debug symbols on
# shared MSVC builds; OPTIONAL keeps install from failing if no PDB
# was produced, e.g. in configurations without debug info).
if(INSTALL_TEST)
  install(TARGETS test_dist_autograd DESTINATION bin)
  # Install PDB files for MSVC builds
  if(MSVC AND BUILD_SHARED_LIBS)
    install(FILES $<TARGET_PDB_FILE:test_dist_autograd> DESTINATION bin OPTIONAL)
  endif()
endif()