Unify caffe2 and libtorch build scripts on Windows (#18683)

Summary:
`scripts/build_windows.bat` is the original way to build caffe2 on Windows, but since it is merged into libtorch, the build scripts should be unified because they actually do the same thing except there are some different flags.

The follow-up is to add the tests. Looks like the CI job for caffe2 windows is defined [here](https://github.com/pytorch/ossci-job-dsl/blob/master/src/jobs/caffe2.groovy#L906). Could we make them a separate file, just like what we've done in `.jenkins/pytorch/win-build.sh`? There's a bunch of things we can do there, like using ninja and sccache to accelerate build.

cc orionr yf225
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18683

Differential Revision: D14730188

Pulled By: ezyang

fbshipit-source-id: ea287d7f213d66c49faac307250c31f9abeb0ebe
This commit is contained in:
peter
2019-04-05 07:44:43 -07:00
committed by Facebook Github Bot
parent 84068f43f2
commit 0829ef00dd
4 changed files with 37 additions and 37 deletions

View File

@@ -5,11 +5,15 @@
#include <cuda_fp16.h>
#ifdef _WIN32
#if !defined(AT_CORE_STATIC_WINDOWS)
# if defined(ATen_cuda_EXPORTS) || defined(caffe2_gpu_EXPORTS) || defined(CAFFE2_CUDA_BUILD_MAIN_LIB)
# define AT_CUDA_API __declspec(dllexport)
# else
# define AT_CUDA_API __declspec(dllimport)
# endif
#else
# define AT_CUDA_API
#endif
#elif defined(__GNUC__)
#if defined(ATen_cuda_EXPORTS) || defined(caffe2_gpu_EXPORTS)
#define AT_CUDA_API __attribute__((__visibility__("default")))

View File

@@ -3,6 +3,7 @@ if (NOT CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO)
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
find_package(Caffe2 REQUIRED)
set(BUILD_TEST ON)
option(BUILD_SHARED_LIBS "Build shared libs." ON)
endif()
if (BUILD_TEST)
@@ -12,7 +13,7 @@ if (BUILD_TEST)
target_link_libraries(caffe2_module_test_dynamic caffe2_library)
install(TARGETS caffe2_module_test_dynamic DESTINATION lib)
if (MSVC)
if (MSVC AND BUILD_SHARED_LIBS)
install(FILES $<TARGET_PDB_FILE:caffe2_module_test_dynamic> DESTINATION lib OPTIONAL)
endif()
endif()

View File

@@ -14,6 +14,18 @@ if NOT DEFINED BUILD_BINARY (
set BUILD_BINARY=OFF
)
if NOT DEFINED BUILD_SHARED_LIBS (
set BUILD_SHARED_LIBS=OFF
)
if NOT DEFINED BUILD_TORCH (
set BUILD_TORCH=OFF
)
IF NOT DEFINED BUILDING_WITH_TORCH_LIBS (
set BUILDING_WITH_TORCH_LIBS=OFF
)
if NOT DEFINED CAFFE2_STATIC_LINK_CUDA (
set CAFFE2_STATIC_LINK_CUDA=OFF
)
@@ -22,6 +34,14 @@ if NOT DEFINED CMAKE_BUILD_TYPE (
set CMAKE_BUILD_TYPE=Release
)
if NOT DEFINED ONNX_NAMESPACE (
set ONNX_NAMESPACE=onnx_c2
)
if NOT DEFINED TORCH_CUDA_ARCH_LIST (
set TORCH_CUDA_ARCH_LIST=5.0
)
if NOT DEFINED USE_CUDA (
set USE_CUDA=OFF
)
@@ -56,36 +76,10 @@ echo CAFFE2_ROOT=%CAFFE2_ROOT%
echo CMAKE_GENERATOR=%CMAKE_GENERATOR%
echo CMAKE_BUILD_TYPE=%CMAKE_BUILD_TYPE%
if not exist %CAFFE2_ROOT%\build mkdir %CAFFE2_ROOT%\build
cd %CAFFE2_ROOT%\build
:: Set up cmake. We will skip building the test files right now.
:: TODO: enable cuda support.
cmake ^
-G%CMAKE_GENERATOR% ^
-DBUILD_TEST=OFF ^
-DBUILD_BINARY=%BUILD_BINARY% ^
-DCMAKE_BUILD_TYPE=%CMAKE_BUILD_TYPE% ^
-DCAFFE2_STATIC_LINK_CUDA=%CAFFE2_STATIC_LINK_CUDA% ^
-DUSE_CUDA=%USE_CUDA% ^
-DTORCH_CUDA_ARCH_LIST=5.0 ^
-DUSE_NNPACK=OFF ^
-DUSE_CUB=OFF ^
-DUSE_GLOG=OFF ^
-DUSE_GFLAGS=OFF ^
-DUSE_LMDB=OFF ^
-DUSE_LEVELDB=OFF ^
-DUSE_OBSERVERS=%USE_OBSERVERS%^
-DUSE_ROCKSDB=OFF ^
-DUSE_OPENCV=OFF ^
-DBUILD_SHARED_LIBS=OFF ^
-DBUILD_PYTHON=OFF^
-DPYTHON_EXECUTABLE=python^
.. ^
|| goto :label_error
:: Actually run the build
cmake --build . --config %CMAKE_BUILD_TYPE% -- /maxcpucount:%NUMBER_OF_PROCESSORS% || goto :label_error
pushd %CAFFE2_ROOT%
python tools\build_libtorch.py || goto :label_error
popd
echo "Caffe2 built successfully"
cd %ORIGINAL_DIR%

View File

@@ -160,10 +160,10 @@ def run_cmake(version,
PYTHON_EXECUTABLE=escape_path(sys.executable),
PYTHON_LIBRARY=escape_path(cmake_python_library),
PYTHON_INCLUDE_DIR=escape_path(distutils.sysconfig.get_python_inc()),
BUILDING_WITH_TORCH_LIBS="ON",
BUILDING_WITH_TORCH_LIBS=os.getenv("BUILDING_WITH_TORCH_LIBS", "ON"),
TORCH_BUILD_VERSION=version,
CMAKE_BUILD_TYPE=build_type,
BUILD_TORCH="ON",
BUILD_TORCH=os.getenv("BUILD_TORCH", "ON"),
BUILD_PYTHON=build_python,
BUILD_SHARED_LIBS=os.getenv("BUILD_SHARED_LIBS", "ON"),
BUILD_BINARY=check_env_flag('BUILD_BINARY'),
@@ -219,7 +219,6 @@ def run_cmake(version,
cmake_defines(cmake_args,
CMAKE_C_COMPILER="{}/gcc".format(expected_wrapper),
CMAKE_CXX_COMPILER="{}/g++".format(expected_wrapper))
pprint(cmake_args)
for env_var_name in my_env:
if env_var_name.startswith('gh'):
# github env vars use utf-8, on windows, non-ascii code may
@@ -236,6 +235,7 @@ def run_cmake(version,
# 1. https://cmake.org/cmake/help/latest/manual/cmake.1.html#synopsis
# 2. https://stackoverflow.com/a/27169347
cmake_args.append(base_dir)
pprint(cmake_args)
check_call(cmake_args, cwd=build_dir, env=my_env)
@@ -258,17 +258,18 @@ def build_caffe2(version,
build_dir,
my_env)
if IS_WINDOWS:
build_cmd = ['cmake', '--build', '.', '--target', 'install', '--config', build_type, '--']
if USE_NINJA:
# sccache will fail if all cores are used for compiling
j = max(1, multiprocessing.cpu_count() - 1)
if max_jobs is not None:
j = min(int(max_jobs), j)
check_call(['cmake', '--build', '.', '--target', 'install', '--config', build_type, '--', '-j', str(j)],
cwd=build_dir, env=my_env)
build_cmd += ['-j', str(j)]
check_call(build_cmd, cwd=build_dir, env=my_env)
else:
j = max_jobs or str(multiprocessing.cpu_count())
check_call(['msbuild', 'INSTALL.vcxproj', '/p:Configuration={} /maxcpucount:{}'.format(build_type, j)],
cwd=build_dir, env=my_env)
build_cmd += ['/maxcpucount:{}'.format(j)]
check_call(build_cmd, cwd=build_dir, env=my_env)
else:
if USE_NINJA:
ninja_cmd = ['ninja', 'install']