Forcing gcc ABI and safer bash scripts, v2 (#20540)
Summary: The first time this was merged, it broke master and was reverted. This time I do not add ```set -u``` to the .circleci/scripts/setup* scripts. There is still a chance that ```set -u``` breaks the binary builds on master, but at least those can be fixed in parallel and don't completely eliminate signal from all merges.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/20540
Differential Revision: D15373444
Pulled By: pjh5
fbshipit-source-id: 0203c20865827366ecd8fa07b2db74d255549ed1
Committed by Facebook Github Bot
parent 66c6133264
commit 5821a76b8e
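The change repeated throughout this diff is twofold: `set -ex` becomes `set -eux -o pipefail`, and CircleCI variables that may be unset are expanded with `${VAR:-}` so that `-u` does not abort the script. A minimal sketch of the pattern (the PR-number check mirrors the scripts below; everything else is illustrative, not code from the repo):

```bash
#!/bin/bash
# -e: exit on any failing command, -u: treat unset variables as errors,
# -x: echo each command, -o pipefail: a failure in any stage of a pipeline
# fails the whole pipeline (plain `set -ex` has neither -u nor pipefail).
set -eux -o pipefail

# Under -u, optional variables must be expanded with a default:
# "${CIRCLE_PR_NUMBER:-}" is empty instead of a hard error when unset.
if [[ -n "${CIRCLE_PR_NUMBER:-}" ]]; then
  echo "smoke build for PR ${CIRCLE_PR_NUMBER}"
fi
```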
@@ -139,6 +139,11 @@ setup_ci_environment: &setup_ci_environment
no_output_timeout: "1h"
command: ~/workspace/.circleci/scripts/setup_ci_environment.sh

# Installs expect and moreutils so that we can call `unbuffer` and `ts`.
# Also installs OpenMP
# !!!!NOTE!!!! this is copied into a binary_macos_brew_update job which is the
# same but does not install libomp. If you are changing this, consider if you
# need to change that step as well.
macos_brew_update: &macos_brew_update
name: Brew update and install moreutils, expect and libomp
no_output_timeout: "1h"
@@ -154,21 +159,6 @@ macos_brew_update: &macos_brew_update
brew install expect
brew install libomp

# In version 2.1 and above we could make this a command and pass a parameter to
# it, but in this version there is no way to pass a parameter to a step
binary_macos_brew_update: &binary_macos_brew_update
name: Brew update and install moreutils and expect
no_output_timeout: "1h"
command: |
set -ex
# moreutils installs a `parallel` executable by default, which conflicts
# with the executable from the GNU `parallel`, so we must unlink GNU
# `parallel` first, and relink it afterwards
brew update
brew unlink parallel
brew install moreutils
brew link parallel --overwrite
brew install expect

##############################################################################
@@ -387,6 +377,7 @@ caffe2_linux_test_defaults: &caffe2_linux_test_defaults

export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./ci_test_script.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts

##############################################################################
# Macos build defaults
##############################################################################
@@ -498,13 +489,13 @@ caffe2_macos_build_defaults: &caffe2_macos_build_defaults
# do not need both the pytorch and builder repos, so this is a little wasteful
# (smoke tests and upload jobs do not need the pytorch repo).
binary_checkout: &binary_checkout
name: Checkout
name: Checkout pytorch/builder repo
command: ~/workspace/.circleci/scripts/binary_checkout.sh

# Parses circleci arguments in a consistent way, essentially routing to the
# correct pythonXgccXcudaXos build we want
binary_populate_env: &binary_populate_env
name: Set up env
name: Set up binary env variables
command: ~/workspace/.circleci/scripts/binary_populate_env.sh

binary_install_miniconda: &binary_install_miniconda
@@ -521,6 +512,25 @@ binary_run_in_docker: &binary_run_in_docker
# This step only runs on circleci linux machine executors that themselves
# need to start docker images
command: ~/workspace/.circleci/scripts/binary_run_in_docker.sh

# This is copied almost verbatim from the macos_brew_update job
# In version 2.1 and above we could make this a command and pass a parameter to
# it, but in this version there is no way to pass a parameter to a step
binary_macos_brew_update: &binary_macos_brew_update
name: Brew update and install moreutils and expect
no_output_timeout: "1h"
command: |
set -eux -o pipefail
# moreutils installs a `parallel` executable by default, which conflicts
# with the executable from the GNU `parallel`, so we must unlink GNU
# `parallel` first, and relink it afterwards
brew update
brew unlink parallel
brew install moreutils
brew link parallel --overwrite
brew install expect

# binary linux build defaults
##############################################################################
binary_linux_build: &binary_linux_build
@@ -535,14 +545,14 @@ binary_linux_build: &binary_linux_build
- run:
name: Install unbuffer and ts
command: |
set -ex
set -eux -o pipefail
source /env
retry yum -q -y install epel-release
retry yum -q -y install expect moreutils
- run:
name: Upgrade gcc version (based on env var)
command: |
set -ex
set -eux -o pipefail
source /env
if [[ "$DESIRED_DEVTOOLSET" == 'devtoolset7' ]]; then
source "/builder/upgrade_gcc_abi.sh"
@@ -550,6 +560,11 @@ binary_linux_build: &binary_linux_build
# Env variables are not persisted into the next step
echo "export PATH=$PATH" >> /env
echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH" >> /env

# We need to set this variable manually because
# https://github.com/pytorch/pytorch/blob/master/torch/abi-check.cpp
# sets the ABI to 0 by default
echo "export _GLIBCXX_USE_CXX11_ABI=1" >> /env
else
echo "Not upgrading gcc version"
fi
@@ -611,6 +626,7 @@ binary_linux_upload: &binary_linux_upload
no_output_timeout: "1h"
command: ~/workspace/.circleci/scripts/binary_linux_upload.sh

##############################################################################
# Macos binary build defaults
# The root of everything is /Users/distiller/pytorch-ci-env/workspace
@@ -634,7 +650,7 @@ binary_mac_build: &binary_mac_build
name: Build
no_output_timeout: "1h"
command: |
set -ex
set -eux -o pipefail
script="/Users/distiller/project/pytorch/.circleci/scripts/binary_macos_build.sh"
cat "$script"
source "$script"
@@ -643,7 +659,7 @@ binary_mac_build: &binary_mac_build
name: Test
no_output_timeout: "1h"
command: |
set -ex
set -eux -o pipefail
script="/Users/distiller/project/pytorch/.circleci/scripts/binary_macos_test.sh"
cat "$script"
source "$script"
@@ -675,6 +691,8 @@ binary_mac_upload: &binary_mac_upload
script="/Users/distiller/project/pytorch/.circleci/scripts/binary_macos_upload.sh"
cat "$script"
source "$script"

# Nighlty build smoke tests defaults
# These are the second-round smoke tests. These make sure that the binaries are
# correct from a user perspective, testing that they exist from the cloud are
@@ -686,10 +704,14 @@ smoke_linux_test: &smoke_linux_test
steps:
- attach_workspace:
at: ~/workspace
- attach_workspace:
at: /home/circleci/project
- run:
<<: *setup_linux_system_environment
- run:
<<: *setup_ci_environment
- run:
<<: *binary_checkout
- run:
<<: *binary_populate_env
- run:
@@ -699,8 +721,7 @@ smoke_linux_test: &smoke_linux_test
set -ex
cat >/home/circleci/project/ci_test_script.sh <<EOL
# The following code will be executed inside Docker container
set -ex
git clone https://github.com/pytorch/builder.git /builder
set -eux -o pipefail
/builder/smoke_test.sh
# The above code will be executed inside Docker container
EOL
@@ -713,18 +734,24 @@ smoke_mac_test: &smoke_mac_test
steps:
- attach_workspace:
at: ~/workspace
- attach_workspace: # TODO - we can `cp` from ~/workspace
at: /Users/distiller/project
- run:
<<: *binary_checkout
- run:
<<: *binary_populate_env
- run:
<<: *binary_macos_brew_update
- run:
<<: *binary_install_miniconda
- run:
name: Build
no_output_timeout: "1h"
command: |
set -ex
source "/Users/distiller/project/env"
git clone https://github.com/pytorch/builder.git
unbuffer ./builder/smoke_test.sh | ts

##############################################################################
# Job specifications job specs
##############################################################################
@@ -969,6 +996,7 @@ jobs:
PYTHON_VERSION: "3.6"
<<: *pytorch_linux_build_defaults

setup:
docker:
- image: circleci/python:3.7.3
@@ -981,6 +1009,8 @@ jobs:
- persist_to_workspace:
root: .
paths: .circleci/scripts

pytorch_short_perf_test_gpu:
environment:
BUILD_ENVIRONMENT: pytorch-short-perf-test-gpu
@@ -1183,6 +1213,7 @@ jobs:
git submodule sync && git submodule update -q --init --recursive
chmod a+x .jenkins/pytorch/macos-build.sh
unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts

caffe2_py2_gcc4_8_ubuntu14_04_build:
environment:
BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-build"
@@ -1326,6 +1357,7 @@ jobs:
PYTHON_VERSION: "2"
<<: *caffe2_macos_build_defaults

# update_s3_htmls job
# These jobs create html files for every cpu/cu## folder in s3. The html
# files just store the names of all the files in that folder (which are
@@ -1364,7 +1396,7 @@ jobs:
echo "declare -x \"AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}\"" >> /home/circleci/project/env
echo "declare -x \"AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}\"" >> /home/circleci/project/env
source /home/circleci/project/env
set -ex
set -eux -o pipefail
retry () {
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}
@@ -1413,7 +1445,7 @@ jobs:
echo "declare -x \"AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}\"" >> /home/circleci/project/env
echo "declare -x \"AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}\"" >> /home/circleci/project/env
source /home/circleci/project/env
set -ex
set -eux -o pipefail

# Not any awscli will work. Most won't. This one will work
export PATH="$MINICONDA_ROOT/bin:$PATH"
@@ -1422,6 +1454,7 @@ jobs:
pip install awscli==1.16.46

"/home/circleci/project/builder/cron/upload_binary_sizes.sh"

##############################################################################
# Binary build specs individual job specifications
##############################################################################
@@ -2037,6 +2070,7 @@ jobs:
resource_class: gpu.medium
<<: *binary_linux_test

# There is currently no testing for libtorch TODO
# binary_linux_libtorch_2.7m_cpu_test:
# environment:
@@ -2942,6 +2976,7 @@ workflows:
- pytorch_macos_10_13_cuda9_2_cudnn7_py3_build:
requires:
- setup

- caffe2_py2_gcc4_8_ubuntu14_04_build:
requires:
- setup
@@ -3051,6 +3086,7 @@ workflows:
# requires:
# - setup
# - binary_linux_conda_3.6_cu90_build

##############################################################################
# Daily smoke test trigger
##############################################################################
@@ -3876,3 +3912,4 @@ workflows:
context: org-member
requires:
- setup
@@ -1,6 +1,5 @@
#!/bin/bash

set -ex
set -eux -o pipefail
# This step runs on multiple executors with different envfile locations
if [[ "$(uname)" == Darwin ]]; then
# macos executor (builds and tests)
@@ -20,13 +19,13 @@ export BUILDER_ROOT="$workdir/builder"
# Clone the Pytorch branch
git clone https://github.com/pytorch/pytorch.git "$PYTORCH_ROOT"
pushd "$PYTORCH_ROOT"
if [[ -n "$CIRCLE_PR_NUMBER" ]]; then
if [[ -n "${CIRCLE_PR_NUMBER:-}" ]]; then
# "smoke" binary build on PRs
git fetch --force origin "pull/${CIRCLE_PR_NUMBER}/head:remotes/origin/pull/${CIRCLE_PR_NUMBER}"
git reset --hard "$CIRCLE_SHA1"
git checkout -q -B "$CIRCLE_BRANCH"
git reset --hard "$CIRCLE_SHA1"
elif [[ -n "$CIRCLE_SHA1" ]]; then
elif [[ -n "${CIRCLE_SHA1:-}" ]]; then
# Scheduled workflows & "smoke" binary build on master on PR merges
git reset --hard "$CIRCLE_SHA1"
git checkout -q -B master

@@ -1,6 +1,6 @@
#!/bin/bash

set -ex
set -eux -o pipefail
# This step runs on multiple executors with different envfile locations
if [[ "$(uname)" == Darwin ]]; then
source "/Users/distiller/project/env"
@@ -22,10 +22,6 @@ chmod +x "$conda_sh"
"$conda_sh" -b -p "$MINICONDA_ROOT"
rm -f "$conda_sh"

# TODO we can probably remove the next two lines
export PATH="$MINICONDA_ROOT/bin:$PATH"
source "$MINICONDA_ROOT/bin/activate"

# We can't actually add miniconda to the PATH in the envfile, because that
# breaks 'unbuffer' in Mac jobs. This is probably because conda comes with
# a tclsh, which then gets inserted before the tclsh needed in /usr/bin

@@ -1,7 +1,7 @@
#!/bin/bash

echo "RUNNING ON $(uname -a) WITH $(nproc) CPUS AND $(free -m)"
set -ex
set -eux -o pipefail
source /env

# Defaults here so they can be changed in one place

@@ -3,7 +3,7 @@
source /home/circleci/project/env
cat >/home/circleci/project/ci_test_script.sh <<EOL
# =================== The following code will be executed inside Docker container ===================
set -ex
set -eux -o pipefail

# Set up Python
if [[ "$PACKAGE_TYPE" == conda ]]; then
@@ -18,18 +18,21 @@ fi

# Install the package
# These network calls should not have 'retry's because they are installing
# locally
# locally and aren't actually network calls
pkg="/final_pkgs/\$(ls /final_pkgs)"
if [[ "$PACKAGE_TYPE" == conda ]]; then
conda install -y "\$pkg" --offline
retry conda install -yq future numpy protobuf six
else
pip install "\$pkg"
retry pip install -q future numpy protobuf six
fi

# Test the package
pushd /pytorch
/builder/run_tests.sh "$PACKAGE_TYPE" "$DESIRED_PYTHON" "$DESIRED_CUDA"
/builder/check_binary.sh
# =================== The above code will be executed inside Docker container ===================
EOL
echo "Prepared script to run in next step"
echo
echo
echo "The script that will run in the next step is:"
cat /home/circleci/project/ci_test_script.sh

@@ -1,6 +1,7 @@
#!/bin/bash
# Do NOT set -x
source /home/circleci/project/env
set -eu -o pipefail
set +x
declare -x "AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}"
declare -x "AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
@@ -17,7 +18,7 @@ chmod +x /home/circleci/project/login_to_anaconda.sh
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
# DO NOT TURN -x ON BEFORE THIS LINE
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
set -ex
set -eux -o pipefail
export PATH="$MINICONDA_ROOT/bin:$PATH"

# Upload the package to the final location

@@ -1,5 +1,5 @@
#!/bin/bash
set -ex
set -eux -o pipefail

source "/Users/distiller/project/env"
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"

@@ -1,5 +1,5 @@
#!/bin/bash
set -ex
set -eux -o pipefail

source "/Users/distiller/project/env"
export "PATH=$workdir/miniconda/bin:$PATH"

@@ -1,5 +1,6 @@
#!/bin/bash
# Do NOT set -x
set -eu -o pipefail
set +x
export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}"
export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
@@ -16,7 +17,7 @@ chmod +x /Users/distiller/project/login_to_anaconda.sh
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
# DO NOT TURN -x ON BEFORE THIS LINE
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
set -ex
set -eux -o pipefail

source "/Users/distiller/project/env"
export "PATH=$workdir/miniconda/bin:$PATH"

@@ -1,6 +1,5 @@
#!/bin/bash

set -ex
set -eux -o pipefail
export TZ=UTC

# We need to write an envfile to persist these variables to following
@@ -24,7 +23,7 @@ configs=($BUILD_ENVIRONMENT)
export PACKAGE_TYPE="${configs[0]}"
export DESIRED_PYTHON="${configs[1]}"
export DESIRED_CUDA="${configs[2]}"
export DESIRED_DEVTOOLSET="${configs[3]}"
export DESIRED_DEVTOOLSET="${configs[3]:-}"
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
export BUILD_PYTHONLESS=1
fi
@@ -63,8 +62,8 @@ echo "Running on $(uname -a) at $(date)"
export PACKAGE_TYPE="$PACKAGE_TYPE"
export DESIRED_PYTHON="$DESIRED_PYTHON"
export DESIRED_CUDA="$DESIRED_CUDA"
export LIBTORCH_VARIANT="$LIBTORCH_VARIANT"
export BUILD_PYTHONLESS="$BUILD_PYTHONLESS"
export LIBTORCH_VARIANT="${LIBTORCH_VARIANT:-}"
export BUILD_PYTHONLESS="${BUILD_PYTHONLESS:-}"
export DESIRED_DEVTOOLSET="$DESIRED_DEVTOOLSET"

export DATE="$DATE"
@@ -87,9 +86,9 @@ export BUILDER_ROOT="$workdir/builder"
export MINICONDA_ROOT="$workdir/miniconda"
export PYTORCH_FINAL_PACKAGE_DIR="$workdir/final_pkgs"

export CIRCLE_TAG="$CIRCLE_TAG"
export CIRCLE_TAG="${CIRCLE_TAG:-}"
export CIRCLE_SHA1="$CIRCLE_SHA1"
export CIRCLE_PR_NUMBER="$CIRCLE_PR_NUMBER"
export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-}"
export CIRCLE_BRANCH="$CIRCLE_BRANCH"
# =================== The above code will be executed inside Docker container ===================
EOL

@@ -9,13 +9,15 @@
source /home/circleci/project/env
echo "Running the following code in Docker"
cat /home/circleci/project/ci_test_script.sh
set -ex -o pipefail
echo
echo
set -eux -o pipefail

# Expect actual code to be written to this file
chmod +x /home/circleci/project/ci_test_script.sh

# Run the docker
if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
export id=$(docker run --runtime=nvidia -t -d "${DOCKER_IMAGE}")
else
export id=$(docker run -t -d "${DOCKER_IMAGE}")

@@ -1,11 +1,11 @@
#!/usr/bin/env bash
set -ex
set -ex -o pipefail

# Check if we should actually run
echo "BUILD_ENVIRONMENT: ${BUILD_ENVIRONMENT}"
echo "CIRCLE_PULL_REQUEST: ${CIRCLE_PULL_REQUEST}"
echo "CIRCLE_PULL_REQUEST: ${CIRCLE_PULL_REQUEST:-}"
if [[ "${BUILD_ENVIRONMENT}" == *-slow-* ]]; then
if ! [ -z "${CIRCLE_PULL_REQUEST}" ]; then
if ! [ -z "${CIRCLE_PULL_REQUEST:-}" ]; then
# It's a PR; test for [slow ci] tag on the TOPMOST commit
topmost_commit=$(git log --format='%B' -n 1 HEAD)
if !(echo $topmost_commit | grep -q -e '\[slow ci\]' -e '\[ci slow\]' -e '\[test slow\]' -e '\[slow test\]'); then
@@ -15,7 +15,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *-slow-* ]]; then
fi
fi
if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
if ! [ -z "${CIRCLE_PULL_REQUEST}" ]; then
if ! [ -z "${CIRCLE_PULL_REQUEST:-}" ]; then
# It's a PR; test for [xla ci] tag on the TOPMOST commit
topmost_commit=$(git log --format='%B' -n 1 HEAD)
if !(echo $topmost_commit | grep -q -e '\[xla ci\]' -e '\[ci xla\]' -e '\[test xla\]' -e '\[xla test\]'); then
@@ -28,7 +28,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
fi
fi
if [[ "${BUILD_ENVIRONMENT}" == *namedtensor* ]]; then
if ! [ -z "${CIRCLE_PULL_REQUEST}" ]; then
if ! [ -z "${CIRCLE_PULL_REQUEST:-}" ]; then
# It's a PR; test for [namedtensor] tag on the TOPMOST commit
topmost_commit=$(git log --format='%B' -n 1 HEAD)
if !(echo $topmost_commit | grep -q -e '\[namedtensor\]' -e '\[ci namedtensor\]' -e '\[namedtensor ci\]'); then
@@ -74,7 +74,7 @@ sudo pkill -SIGHUP dockerd

sudo pip -q install awscli==1.16.35

if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
DRIVER_FN="NVIDIA-Linux-x86_64-410.104.run"
wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
@@ -83,10 +83,10 @@ fi

if [[ "${BUILD_ENVIRONMENT}" == *-build ]]; then
echo "declare -x IN_CIRCLECI=1" > /home/circleci/project/env
echo "declare -x COMMIT_SOURCE=${CIRCLE_BRANCH}" >> /home/circleci/project/env
echo "declare -x PYTHON_VERSION=${PYTHON_VERSION}" >> /home/circleci/project/env
echo "declare -x COMMIT_SOURCE=${CIRCLE_BRANCH:-}" >> /home/circleci/project/env
echo "declare -x PYTHON_VERSION=${PYTHON_VERSION:-}" >> /home/circleci/project/env
echo "declare -x SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> /home/circleci/project/env
if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
echo "declare -x TORCH_CUDA_ARCH_LIST=5.2" >> /home/circleci/project/env
fi
export SCCACHE_MAX_JOBS=`expr $(nproc) - 1`
@@ -97,21 +97,21 @@ if [[ "${BUILD_ENVIRONMENT}" == *-build ]]; then
if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
# This IAM user allows write access to S3 bucket for sccache & bazels3cache
set +x
echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2}" >> /home/circleci/project/env
echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2}" >> /home/circleci/project/env
echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2:-}" >> /home/circleci/project/env
echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2:-}" >> /home/circleci/project/env
set -x
else
# This IAM user allows write access to S3 bucket for sccache
set +x
echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}" >> /home/circleci/project/env
echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}" >> /home/circleci/project/env
echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}" >> /home/circleci/project/env
echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}" >> /home/circleci/project/env
set -x
fi
fi

# This IAM user only allows read-write access to ECR
set +x
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V4}
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V4}
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V4:-}
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V4:-}
eval $(aws ecr get-login --region us-east-1 --no-include-email)
set -x

@@ -1,5 +1,5 @@
#!/usr/bin/env bash
set -ex
set -eux -o pipefail

# Set up CircleCI GPG keys for apt, if needed
curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
@@ -16,6 +16,11 @@ set(CMAKE_CXX_STANDARD 11)
if (NOT MSVC)
set(CMAKE_C_STANDARD 11)
endif()
if (DEFINED GLIBCXX_USE_CXX11_ABI)
if (${GLIBCXX_USE_CXX11_ABI} EQUAL 1)
set(CXX_STANDARD_REQUIRED ON)
endif()
endif()

set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

@@ -709,23 +709,27 @@ if (NOT INTERN_BUILD_MOBILE)

# XXX This ABI check cannot be run with arm-linux-androideabi-g++
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
message(STATUS "${CMAKE_CXX_COMPILER} ${TORCH_SRC_DIR}/abi-check.cpp -o ${CMAKE_BINARY_DIR}/abi-check")
execute_process(
COMMAND
"${CMAKE_CXX_COMPILER}"
"${TORCH_SRC_DIR}/abi-check.cpp"
"-o"
"${CMAKE_BINARY_DIR}/abi-check"
RESULT_VARIABLE ABI_CHECK_COMPILE_RESULT)
if (ABI_CHECK_COMPILE_RESULT)
message(FATAL_ERROR "Could not compile ABI Check: ${ABI_CHECK_COMPILE_RESULT}")
endif()
execute_process(
COMMAND "${CMAKE_BINARY_DIR}/abi-check"
RESULT_VARIABLE ABI_CHECK_RESULT
OUTPUT_VARIABLE GLIBCXX_USE_CXX11_ABI)
if (ABI_CHECK_RESULT)
message(WARNING "Could not run ABI Check: ${ABI_CHECK_RESULT}")
if (DEFINED GLIBCXX_USE_CXX11_ABI)
message(STATUS "_GLIBCXX_USE_CXX11_ABI is already defined as a cmake variable")
else()
message(STATUS "${CMAKE_CXX_COMPILER} ${TORCH_SRC_DIR}/abi-check.cpp -o ${CMAKE_BINARY_DIR}/abi-check")
execute_process(
COMMAND
"${CMAKE_CXX_COMPILER}"
"${TORCH_SRC_DIR}/abi-check.cpp"
"-o"
"${CMAKE_BINARY_DIR}/abi-check"
RESULT_VARIABLE ABI_CHECK_COMPILE_RESULT)
if (ABI_CHECK_COMPILE_RESULT)
message(FATAL_ERROR "Could not compile ABI Check: ${ABI_CHECK_COMPILE_RESULT}")
endif()
execute_process(
COMMAND "${CMAKE_BINARY_DIR}/abi-check"
RESULT_VARIABLE ABI_CHECK_RESULT
OUTPUT_VARIABLE GLIBCXX_USE_CXX11_ABI)
if (ABI_CHECK_RESULT)
message(WARNING "Could not run ABI Check: ${ABI_CHECK_RESULT}")
endif()
endif()
message(STATUS "Determined _GLIBCXX_USE_CXX11_ABI=${GLIBCXX_USE_CXX11_ABI}")
endif()
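The CMake hunk above now skips the compile-and-run probe when GLIBCXX_USE_CXX11_ABI is already defined as a CMake variable; otherwise it still builds and runs torch/abi-check.cpp to detect the ABI. A rough sketch of what such a probe amounts to (an illustration, not the repo's abi-check.cpp):

```bash
# Ask libstdc++ which std::string/std::list ABI the compiler defaults to:
# _GLIBCXX_USE_CXX11_ABI is defined to 0 or 1 by any libstdc++ header.
cat > /tmp/abi-check.cpp <<'EOF'
#include <string>
#include <cstdio>
int main() { std::printf("%d\n", _GLIBCXX_USE_CXX11_ABI); return 0; }
EOF
g++ /tmp/abi-check.cpp -o /tmp/abi-check
/tmp/abi-check   # prints 1 for the new (C++11) ABI, 0 for the old one
```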
@@ -213,6 +213,9 @@ def run_cmake(version,
USE_GFLAGS=os.getenv('USE_GFLAGS'),
WERROR=os.getenv('WERROR'))

if os.getenv('_GLIBCXX_USE_CXX11_ABI'):
cmake_defines(cmake_args, GLIBCXX_USE_CXX11_ABI=os.getenv('_GLIBCXX_USE_CXX11_ABI'))

if os.getenv('USE_OPENMP'):
cmake_defines(cmake_args, USE_OPENMP=check_env_flag('USE_OPENMP'))
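With the run_cmake change above, a _GLIBCXX_USE_CXX11_ABI value set in the environment is forwarded to CMake as GLIBCXX_USE_CXX11_ABI, which is what lets the devtoolset7 binary builds force the new ABI instead of relying on the probe. A hypothetical local invocation (the variable name comes from this diff; the setup.py command is the usual build entry point, not something this commit adds):

```bash
# Pre-seed the ABI so the build forwards it to CMake instead of probing.
export _GLIBCXX_USE_CXX11_ABI=1
python setup.py build
```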