Mirror of https://github.com/pytorch/pytorch.git
Synced 2025-10-29 19:24:55 +08:00

Compare commits (release/1. ... v1.3.0): 91 commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 50c90a22be | |||
| 80ae6d294b | |||
| de394b672d | |||
| 92c6401bb9 | |||
| b4f32dd292 | |||
| 3e451b4796 | |||
| 036a591556 | |||
| 86d9ee8dee | |||
| aa44ffb4c9 | |||
| 162b054e39 | |||
| 7f044f7398 | |||
| 0c81d6ba4b | |||
| d1752f2bf8 | |||
| 49fbeb8cc8 | |||
| f0d3fc70b4 | |||
| fb489555a9 | |||
| b5144f1068 | |||
| a5c08a6abd | |||
| 6cc759269f | |||
| 6742476ba3 | |||
| 067aee5f30 | |||
| 0a7f7e6d30 | |||
| e9fc91cbca | |||
| 23df957e94 | |||
| a7b161c08b | |||
| 6bae48c127 | |||
| c248943743 | |||
| e058a37fe4 | |||
| aa7112a618 | |||
| b728ffabc3 | |||
| d67898a93b | |||
| 9a25673478 | |||
| 17613ad73c | |||
| beaae6a2b6 | |||
| 328f49968c | |||
| 7f9096f868 | |||
| 7e94ee235f | |||
| 318fb8e8b9 | |||
| 43bb1b2356 | |||
| 87fbd27cc0 | |||
| 225c38b719 | |||
| 8074526e7f | |||
| 06a866de94 | |||
| 9b22a55499 | |||
| f8d3eac4c3 | |||
| 68b4d22da7 | |||
| 66b73b0950 | |||
| 32eb3b8d7b | |||
| 5a2a34cd2d | |||
| a8083e18e8 | |||
| bc3fb36ed7 | |||
| e0822f1089 | |||
| cbfd4e05e9 | |||
| b9a2c8ac5c | |||
| 6d7a73c0da | |||
| 15e4827617 | |||
| 024fa34700 | |||
| 95d2c7fc98 | |||
| 7ba2baee00 | |||
| ccf3a6de3d | |||
| 1ba6fc4ca6 | |||
| d4d4bf5686 | |||
| cee965fae9 | |||
| 544c16cdbf | |||
| 494a5563b4 | |||
| ba4c3a1c2c | |||
| c556f9052f | |||
| 5c80dd3c1f | |||
| 2d8ee11139 | |||
| ebc2519bec | |||
| 8c9e4b250d | |||
| 6a6f047fc6 | |||
| deadc27c23 | |||
| 6276fda119 | |||
| 65ee8f2c23 | |||
| 6126cfab2c | |||
| e2f6fed611 | |||
| 667deb92f7 | |||
| 0e88de5580 | |||
| 3c8ce2a57e | |||
| f2080fb3f2 | |||
| 84afb7b0c1 | |||
| b6e976ae2d | |||
| 8626a1cc81 | |||
| 831566ec90 | |||
| f7b3b20457 | |||
| 8ce38cf27d | |||
| a94f9c7246 | |||
| 2fc3bb8571 | |||
| f694d4d872 | |||
| d7b6d945eb |
@@ -340,12 +340,12 @@ Libtorch packages are built in the wheel build scripts: manywheel/build_*.sh for

All linux builds occur in docker images. The docker images are

* pytorch/conda-cuda
* soumith/conda-cuda
  * Has ALL CUDA versions installed. The script pytorch/builder/conda/switch_cuda_version.sh sets /usr/local/cuda to a symlink to e.g. /usr/local/cuda-10.0 to enable different CUDA builds
  * Also used for cpu builds
* pytorch/manylinux-cuda90
* pytorch/manylinux-cuda92
* pytorch/manylinux-cuda100
* soumith/manylinux-cuda90
* soumith/manylinux-cuda92
* soumith/manylinux-cuda100
  * Also used for cpu builds

The Dockerfiles are available in pytorch/builder, but there is no circleci job or script to build these docker images, and they cannot be run locally (unless you have the correct local packages/paths). Only Soumith can build them right now.
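The switch_cuda_version.sh mechanism mentioned in the list above boils down to re-pointing a symlink. A minimal sketch of the same idea, for illustration only (the real helper is a shell script in pytorch/builder and may do additional environment setup):

```python
import os

def switch_cuda(version: str) -> None:
    """Point /usr/local/cuda at one of the preinstalled toolkits, e.g. switch_cuda("10.0")."""
    target = "/usr/local/cuda-" + version   # assumes this toolkit directory exists in the image
    link = "/usr/local/cuda"
    if not os.path.isdir(target):
        raise FileNotFoundError("CUDA toolkit not installed: " + target)
    if os.path.islink(link):
        os.unlink(link)                     # drop the old symlink before re-pointing it
    os.symlink(target, link)
```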
@@ -411,7 +411,7 @@ You can build Linux binaries locally easily using docker.

```
# Run the docker
# Use the correct docker image, pytorch/conda-cuda used here as an example
# Use the correct docker image, soumith/conda-cuda used here as an example
#
# -v path/to/foo:path/to/bar makes path/to/foo on your local machine (the
# machine that you're running the command on) accessible to the docker
@@ -426,7 +426,7 @@ docker run \
-v your/pytorch/repo:/pytorch \
-v your/builder/repo:/builder \
-v where/you/want/packages/to/appear:/final_pkgs \
-it pytorch/conda-cuda /bin/bash
-it soumith/conda-cuda /bin/bash

# Export whatever variables are important to you. All variables that you'd
# possibly need are in .circleci/scripts/binary_populate_env.sh
@@ -466,7 +466,7 @@ But if you want to try, then I’d recommend
# Always install miniconda 3, even if building for Python <3
new_conda="~/my_new_conda"
conda_sh="$new_conda/install_miniconda.sh"
curl -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
curl -o "$conda_sh" https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
chmod +x "$conda_sh"
"$conda_sh" -b -p "$MINICONDA_ROOT"
rm -f "$conda_sh"
@@ -1,3 +1,5 @@
#!/usr/bin/env python3

"""
This module models the tree of configuration variants
for "smoketest" builds.
@@ -34,14 +36,15 @@ def get_processor_arch_name(cuda_version):

LINUX_PACKAGE_VARIANTS = OrderedDict(
manywheel=[
"2.7m",
"2.7mu",
"3.5m",
"3.6m",
"3.7m",
"3.8m",
],
conda=dimensions.STANDARD_PYTHON_VERSIONS,
libtorch=[
"3.7m",
"2.7m",
],
)

@@ -51,21 +54,11 @@ CONFIG_TREE_DATA = OrderedDict(
wheel=dimensions.STANDARD_PYTHON_VERSIONS,
conda=dimensions.STANDARD_PYTHON_VERSIONS,
libtorch=[
"3.7",
],
)),
windows=(dimensions.CUDA_VERSIONS, OrderedDict(
wheel=dimensions.STANDARD_PYTHON_VERSIONS,
conda=dimensions.STANDARD_PYTHON_VERSIONS,
libtorch=[
"3.7",
"2.7",
],
)),
)

CONFIG_TREE_DATA_NO_WINDOWS = CONFIG_TREE_DATA.copy()
CONFIG_TREE_DATA_NO_WINDOWS.pop("windows")

# GCC config variants:
#
# All the nightlies (except libtorch with new gcc ABI) are built with devtoolset7,
@@ -82,11 +75,6 @@ LINUX_GCC_CONFIG_VARIANTS = OrderedDict(
],
)

WINDOWS_LIBTORCH_CONFIG_VARIANTS = [
"debug",
"release",
]


class TopLevelNode(ConfigNode):
def __init__(self, node_name, config_tree_data, smoke):
@@ -121,8 +109,6 @@ class PackageFormatConfigNode(ConfigNode):
def get_children(self):
if self.find_prop("os_name") == "linux":
return [LinuxGccConfigNode(self, v) for v in LINUX_GCC_CONFIG_VARIANTS[self.find_prop("package_format")]]
elif self.find_prop("os_name") == "windows" and self.find_prop("package_format") == "libtorch":
return [WindowsLibtorchConfigNode(self, v) for v in WINDOWS_LIBTORCH_CONFIG_VARIANTS]
else:
return [ArchConfigNode(self, v) for v in self.find_prop("cuda_versions")]

@@ -144,16 +130,6 @@ class LinuxGccConfigNode(ConfigNode):
return [ArchConfigNode(self, v) for v in cuda_versions]


class WindowsLibtorchConfigNode(ConfigNode):
def __init__(self, parent, libtorch_config_variant):
super(WindowsLibtorchConfigNode, self).__init__(parent, "LIBTORCH_CONFIG_VARIANT=" + str(libtorch_config_variant))

self.props["libtorch_config_variant"] = libtorch_config_variant

def get_children(self):
return [ArchConfigNode(self, v) for v in self.find_prop("cuda_versions")]


class ArchConfigNode(ConfigNode):
def __init__(self, parent, cu):
super(ArchConfigNode, self).__init__(parent, get_processor_arch_name(cu))
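As an aside, the nested OrderedDict data above fans out into one concrete build configuration per (OS, package format, CUDA version, Python version) combination. A simplified, self-contained sketch of that expansion (this is not the repo's ConfigNode/conf_tree machinery, and the version lists here are trimmed placeholders):

```python
from collections import OrderedDict

# Trimmed placeholder data in the same shape as CONFIG_TREE_DATA above.
CUDA_VERSIONS = [None, "92", "100"]                 # None means a cpu-only build
STANDARD_PYTHON_VERSIONS = ["2.7", "3.6", "3.7"]

TREE = OrderedDict(
    linux=(CUDA_VERSIONS, OrderedDict(
        manywheel=["2.7mu", "3.6m", "3.7m"],
        conda=STANDARD_PYTHON_VERSIONS,
        libtorch=["3.7m"],
    )),
)

def expand(tree):
    """Walk os -> (cuda versions, package formats) -> python versions."""
    for os_name, (cuda_versions, package_formats) in tree.items():
        for package_format, python_versions in package_formats.items():
            for cuda in cuda_versions:
                for python in python_versions:
                    yield (os_name, package_format, cuda or "cpu", python)

print(list(expand(TREE))[:2])
# e.g. [('linux', 'manywheel', 'cpu', '2.7mu'), ('linux', 'manywheel', 'cpu', '3.6m')]
```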
@@ -1,12 +1,15 @@
#!/usr/bin/env python3

from collections import OrderedDict

import cimodel.data.binary_build_data as binary_build_data
import cimodel.lib.conf_tree as conf_tree
import cimodel.lib.miniutils as miniutils
import cimodel.lib.visualization as visualization


class Conf(object):
def __init__(self, os, cuda_version, pydistro, parms, smoke, libtorch_variant, gcc_config_variant, libtorch_config_variant):
def __init__(self, os, cuda_version, pydistro, parms, smoke, libtorch_variant, gcc_config_variant):

self.os = os
self.cuda_version = cuda_version
@@ -15,19 +18,16 @@ class Conf(object):
self.smoke = smoke
self.libtorch_variant = libtorch_variant
self.gcc_config_variant = gcc_config_variant
self.libtorch_config_variant = libtorch_config_variant

def gen_build_env_parms(self):
elems = [self.pydistro] + self.parms + [binary_build_data.get_processor_arch_name(self.cuda_version)]
if self.gcc_config_variant is not None:
elems.append(str(self.gcc_config_variant))
if self.libtorch_config_variant is not None:
elems.append(str(self.libtorch_config_variant))
return elems

def gen_docker_image(self):
if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
return miniutils.quote("pytorch/pytorch-binary-docker-image-ubuntu16.04:latest")
return miniutils.quote("soumith/conda-cuda-cxx11-ubuntu1604:latest")

docker_word_substitution = {
"manywheel": "manylinux",
@@ -36,21 +36,18 @@ class Conf(object):

docker_distro_prefix = miniutils.override(self.pydistro, docker_word_substitution)

# The cpu nightlies are built on the pytorch/manylinux-cuda102 docker image
alt_docker_suffix = self.cuda_version or "102"
# The cpu nightlies are built on the soumith/manylinux-cuda100 docker image
alt_docker_suffix = self.cuda_version or "100"
docker_distro_suffix = "" if self.pydistro == "conda" else alt_docker_suffix
return miniutils.quote("pytorch/" + docker_distro_prefix + "-cuda" + docker_distro_suffix)
return miniutils.quote("soumith/" + docker_distro_prefix + "-cuda" + docker_distro_suffix)

def get_name_prefix(self):
return "smoke" if self.smoke else "binary"

def gen_build_name(self, build_or_test, nightly):
def gen_build_name(self, build_or_test):

parts = [self.get_name_prefix(), self.os] + self.gen_build_env_parms()

if nightly:
parts.append("nightly")

if self.libtorch_variant:
parts.append(self.libtorch_variant)

@@ -60,37 +57,17 @@ class Conf(object):
joined = "_".join(parts)
return joined.replace(".", "_")

def gen_workflow_job(self, phase, upload_phase_dependency=None, nightly=False):
def gen_workflow_job(self, phase, upload_phase_dependency=None):
job_def = OrderedDict()
job_def["name"] = self.gen_build_name(phase, nightly)
job_def["name"] = self.gen_build_name(phase)
job_def["build_environment"] = miniutils.quote(" ".join(self.gen_build_env_parms()))
job_def["requires"] = ["setup"]
if self.smoke:
job_def["requires"].append("update_s3_htmls_for_nightlies")
job_def["requires"].append("update_s3_htmls_for_nightlies_devtoolset7")
job_def["filters"] = {"branches": {"only": "postnightly"}}
else:
filter_branches = ["nightly"]
# we only want to add the release branch filter if we aren't
# uploading
if phase not in ["upload"]:
filter_branches.append(r"/release\/.*/")
job_def["filters"] = {
"branches": {
"only": filter_branches
},
# Will run on tags like v1.5.0-rc1, etc.
"tags": {
# Using a raw string here to avoid having to escape
# anything
"only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
}
}
job_def["filters"] = {"branches": {"only": "nightly"}}
if self.libtorch_variant:
job_def["libtorch_variant"] = miniutils.quote(self.libtorch_variant)
if phase == "test":
if not self.smoke:
job_def["requires"].append(self.gen_build_name("build", nightly))
job_def["requires"].append(self.gen_build_name("build"))
if not (self.smoke and self.os == "macos"):
job_def["docker_image"] = self.gen_docker_image()

@@ -105,7 +82,7 @@ class Conf(object):
job_def["resource_class"] = "gpu.medium"
if phase == "upload":
job_def["context"] = "org-member"
job_def["requires"] = ["setup", self.gen_build_name(upload_phase_dependency, nightly)]
job_def["requires"] = ["setup", self.gen_build_name(upload_phase_dependency)]

os_name = miniutils.override(self.os, {"macos": "mac"})
job_name = "_".join([self.get_name_prefix(), os_name, phase])
@@ -113,18 +90,11 @@ class Conf(object):

def get_root(smoke, name):

if smoke:
return binary_build_data.TopLevelNode(
name,
binary_build_data.CONFIG_TREE_DATA_NO_WINDOWS,
smoke,
)
else:
return binary_build_data.TopLevelNode(
name,
binary_build_data.CONFIG_TREE_DATA,
smoke,
)
return binary_build_data.TopLevelNode(
name,
binary_build_data.CONFIG_TREE_DATA,
smoke,
)


def gen_build_env_list(smoke):
@@ -142,7 +112,6 @@ def gen_build_env_list(smoke):
c.find_prop("smoke"),
c.find_prop("libtorch_variant"),
c.find_prop("gcc_config_variant"),
c.find_prop("libtorch_config_variant"),
)
newlist.append(conf)

@@ -158,7 +127,7 @@ def get_nightly_uploads():
mylist = []
for conf in configs:
phase_dependency = "test" if predicate_exclude_nonlinux_and_libtorch(conf) else "build"
mylist.append(conf.gen_workflow_job("upload", phase_dependency, nightly=True))
mylist.append(conf.gen_workflow_job("upload", phase_dependency))

return mylist

@@ -169,25 +138,32 @@ def get_nightly_tests():

tests = []
for conf_options in filtered_configs:
yaml_item = conf_options.gen_workflow_job("test", nightly=True)
yaml_item = conf_options.gen_workflow_job("test")
tests.append(yaml_item)

return tests


def get_jobs(toplevel_key, smoke):
jobs_list = []
def add_jobs_and_render(jobs_dict, toplevel_key, smoke, cron_schedule):

jobs_list = ["setup"]

configs = gen_build_env_list(smoke)
phase = "build" if toplevel_key == "binarybuilds" else "test"
for build_config in configs:
jobs_list.append(build_config.gen_workflow_job(phase, nightly=True))
jobs_list.append(build_config.gen_workflow_job(phase))

return jobs_list
jobs_dict[toplevel_key] = OrderedDict(
jobs=jobs_list,
)

graph = visualization.generate_graph(get_root(smoke, toplevel_key))
graph.draw(toplevel_key + "-config-dimensions.png", prog="twopi")


def get_binary_build_jobs():
return get_jobs("binarybuilds", False)
def add_binary_build_jobs(jobs_dict):
add_jobs_and_render(jobs_dict, "binarybuilds", False, "5 5 * * *")


def get_binary_smoke_test_jobs():
return get_jobs("binarysmoketests", True)
def add_binary_smoke_test_jobs(jobs_dict):
add_jobs_and_render(jobs_dict, "binarysmoketests", True, "15 16 * * *")
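For orientation, the job entries produced by gen_workflow_job above are plain ordered dicts. The following is only a hand-written illustration of the resulting shape; the name, environment string, and docker image are assumptions, not captured generator output:

```python
from collections import OrderedDict

example_job = OrderedDict(
    # Hypothetical name built by gen_build_name(); the real string depends on the config.
    name="binary_linux_manywheel_2_7mu_cpu_devtoolset7_build",
    build_environment='"manywheel 2.7mu cpu devtoolset7"',
    requires=["setup"],
    filters={
        "branches": {"only": ["nightly", r"/release\/.*/"]},
        # Runs on release-candidate tags such as v1.5.0-rc1.
        "tags": {"only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"},
    },
    docker_image='"pytorch/manylinux-cuda102"',
)
```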
@ -1,12 +1,38 @@
|
||||
from cimodel.lib.conf_tree import ConfigNode, XImportant
|
||||
#!/usr/bin/env python3
|
||||
|
||||
from cimodel.lib.conf_tree import ConfigNode, X, XImportant
|
||||
from cimodel.lib.conf_tree import Ver
|
||||
|
||||
|
||||
CONFIG_TREE_DATA = [
|
||||
(Ver("ubuntu", "14.04"), [
|
||||
(Ver("gcc", "4.8"), [X("py2")]),
|
||||
(Ver("gcc", "4.9"), [X("py2")]),
|
||||
]),
|
||||
(Ver("ubuntu", "16.04"), [
|
||||
([Ver("clang", "7")], [XImportant("onnx_main_py3.6"),
|
||||
XImportant("onnx_ort1_py3.6"),
|
||||
XImportant("onnx_ort2_py3.6")]),
|
||||
(Ver("cuda", "9.0"), [
|
||||
# TODO make explicit that this is a "secret TensorRT build"
|
||||
# (see https://github.com/pytorch/pytorch/pull/17323#discussion_r259446749)
|
||||
# TODO Uh oh, were we supposed to make this one important?!
|
||||
X("py2"),
|
||||
XImportant("cmake"),
|
||||
]),
|
||||
(Ver("cuda", "10.1"), [XImportant("py3.5")]), # TensorRT 6 build
|
||||
(Ver("mkl"), [XImportant("py2")]),
|
||||
(Ver("gcc", "5"), [XImportant("onnx_py2")]),
|
||||
(Ver("clang", "3.8"), [X("py2")]),
|
||||
(Ver("clang", "3.9"), [X("py2")]),
|
||||
(Ver("clang", "7"), [XImportant("py2"), XImportant("onnx_py3.6")]),
|
||||
(Ver("android"), [XImportant("py2")]),
|
||||
]),
|
||||
(Ver("centos", "7"), [
|
||||
(Ver("cuda", "9.0"), [X("py2")]),
|
||||
]),
|
||||
(Ver("macos", "10.13"), [
|
||||
# TODO ios and system aren't related. system qualifies where the python comes
|
||||
# from (use the system python instead of homebrew or anaconda)
|
||||
(Ver("ios"), [X("py2")]),
|
||||
(Ver("system"), [XImportant("py2")]),
|
||||
]),
|
||||
]
|
||||
|
||||
@ -28,22 +54,15 @@ class TreeConfigNode(ConfigNode):
|
||||
return [self.child_constructor()(self, k, v) for (k, v) in self.subtree]
|
||||
|
||||
def is_build_only(self):
|
||||
if str(self.find_prop("language_version")) == "onnx_main_py3.6" or \
|
||||
str(self.find_prop("language_version")) == "onnx_ort1_py3.6" or \
|
||||
str(self.find_prop("language_version")) == "onnx_ort2_py3.6":
|
||||
if str(self.find_prop("language_version")) == "onnx_py3.6":
|
||||
return False
|
||||
return set(str(c) for c in self.find_prop("compiler_version")).intersection({
|
||||
return str(self.find_prop("compiler_version")) in [
|
||||
"gcc4.9",
|
||||
"clang3.8",
|
||||
"clang3.9",
|
||||
"clang7",
|
||||
"android",
|
||||
}) or self.find_prop("distro_version").name == "macos"
|
||||
|
||||
def is_test_only(self):
|
||||
if str(self.find_prop("language_version")) == "onnx_ort1_py3.6" or \
|
||||
str(self.find_prop("language_version")) == "onnx_ort2_py3.6":
|
||||
return True
|
||||
return False
|
||||
] or self.find_prop("distro_version").name == "macos"
|
||||
|
||||
|
||||
class TopLevelNode(TreeConfigNode):
|
||||
@ -77,7 +96,6 @@ class LanguageConfigNode(TreeConfigNode):
|
||||
def init2(self, node_name):
|
||||
self.props["language_version"] = node_name
|
||||
self.props["build_only"] = self.is_build_only()
|
||||
self.props["test_only"] = self.is_test_only()
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
import cimodel.data.dimensions as dimensions
|
||||
@ -12,31 +14,23 @@ from dataclasses import dataclass
|
||||
|
||||
DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/"
|
||||
|
||||
DOCKER_IMAGE_VERSION = "345"
|
||||
DOCKER_IMAGE_VERSION = 324
|
||||
|
||||
|
||||
@dataclass
|
||||
class Conf:
|
||||
language: str
|
||||
distro: Ver
|
||||
# There could be multiple compiler versions configured (e.g. nvcc
|
||||
# for gpu files and host compiler (gcc/clang) for cpu files)
|
||||
compilers: [Ver]
|
||||
compiler: Ver
|
||||
build_only: bool
|
||||
test_only: bool
|
||||
is_important: bool
|
||||
|
||||
@property
|
||||
def compiler_names(self):
|
||||
return [c.name for c in self.compilers]
|
||||
|
||||
# TODO: Eventually we can probably just remove the cudnn7 everywhere.
|
||||
def get_cudnn_insertion(self):
|
||||
|
||||
omit = self.language == "onnx_main_py3.6" \
|
||||
or self.language == "onnx_ort1_py3.6" \
|
||||
or self.language == "onnx_ort2_py3.6" \
|
||||
or set(self.compiler_names).intersection({"android", "mkl", "clang"}) \
|
||||
omit = self.language == "onnx_py2" \
|
||||
or self.language == "onnx_py3.6" \
|
||||
or self.compiler.name in ["android", "mkl", "clang"] \
|
||||
or str(self.distro) in ["ubuntu14.04", "macos10.13"]
|
||||
|
||||
return [] if omit else ["cudnn7"]
|
||||
@ -48,17 +42,10 @@ class Conf:
|
||||
] + self.get_build_name_middle_parts()
|
||||
|
||||
def get_build_name_middle_parts(self):
|
||||
return [str(c) for c in self.compilers] + self.get_cudnn_insertion() + [str(self.distro)]
|
||||
return [str(self.compiler)] + self.get_cudnn_insertion() + [str(self.distro)]
|
||||
|
||||
def construct_phase_name(self, phase):
|
||||
root_parts = self.get_build_name_root_parts()
|
||||
|
||||
build_name_substitutions = {
|
||||
"onnx_ort1_py3.6": "onnx_main_py3.6",
|
||||
"onnx_ort2_py3.6": "onnx_main_py3.6",
|
||||
}
|
||||
if phase == "build":
|
||||
root_parts = [miniutils.override(r, build_name_substitutions) for r in root_parts]
|
||||
return "_".join(root_parts + [phase]).replace(".", "_")
|
||||
|
||||
def get_platform(self):
|
||||
@ -70,10 +57,9 @@ class Conf:
|
||||
def gen_docker_image(self):
|
||||
|
||||
lang_substitutions = {
|
||||
"onnx_main_py3.6": "py3.6",
|
||||
"onnx_ort1_py3.6": "py3.6",
|
||||
"onnx_ort2_py3.6": "py3.6",
|
||||
"cmake": "py3",
|
||||
"onnx_py2": "py2",
|
||||
"onnx_py3.6": "py3.6",
|
||||
"cmake": "py2",
|
||||
}
|
||||
|
||||
lang = miniutils.override(self.language, lang_substitutions)
|
||||
@ -83,10 +69,8 @@ class Conf:
|
||||
def gen_workflow_params(self, phase):
|
||||
parameters = OrderedDict()
|
||||
lang_substitutions = {
|
||||
"onnx_py3": "onnx-py3",
|
||||
"onnx_main_py3.6": "onnx-main-py3.6",
|
||||
"onnx_ort1_py3.6": "onnx-ort1-py3.6",
|
||||
"onnx_ort2_py3.6": "onnx-ort2-py3.6",
|
||||
"onnx_py2": "onnx-py2",
|
||||
"onnx_py3.6": "onnx-py3.6",
|
||||
}
|
||||
|
||||
lang = miniutils.override(self.language, lang_substitutions)
|
||||
@ -98,11 +82,11 @@ class Conf:
|
||||
|
||||
build_env_name = "-".join(parts)
|
||||
parameters["build_environment"] = miniutils.quote(build_env_name)
|
||||
if "ios" in self.compiler_names:
|
||||
if self.compiler.name == "ios":
|
||||
parameters["build_ios"] = miniutils.quote("1")
|
||||
if phase == "test":
|
||||
# TODO cuda should not be considered a compiler
|
||||
if "cuda" in self.compiler_names:
|
||||
if self.compiler.name == "cuda":
|
||||
parameters["use_cuda_docker_runtime"] = miniutils.quote("1")
|
||||
|
||||
if self.distro.name != "macos":
|
||||
@ -110,7 +94,7 @@ class Conf:
|
||||
if self.build_only:
|
||||
parameters["build_only"] = miniutils.quote("1")
|
||||
if phase == "test":
|
||||
resource_class = "large" if "cuda" not in self.compiler_names else "gpu.medium"
|
||||
resource_class = "large" if self.compiler.name != "cuda" else "gpu.medium"
|
||||
parameters["resource_class"] = resource_class
|
||||
|
||||
return parameters
|
||||
@ -127,7 +111,7 @@ class Conf:
|
||||
job_name = "caffe2_" + self.get_platform() + "_build"
|
||||
|
||||
if not self.is_important:
|
||||
job_def["filters"] = {"branches": {"only": ["master", r"/ci-all\/.*/", r"/release\/.*/"]}}
|
||||
job_def["filters"] = {"branches": {"only": ["master", r"/ci-all\/.*/"]}}
|
||||
job_def.update(self.gen_workflow_params(phase))
|
||||
return {job_name : job_def}
|
||||
|
||||
@ -143,12 +127,12 @@ def instantiate_configs():
|
||||
root = get_root()
|
||||
found_configs = conf_tree.dfs(root)
|
||||
for fc in found_configs:
|
||||
|
||||
c = Conf(
|
||||
language=fc.find_prop("language_version"),
|
||||
distro=fc.find_prop("distro_version"),
|
||||
compilers=fc.find_prop("compiler_version"),
|
||||
compiler=fc.find_prop("compiler_version"),
|
||||
build_only=fc.find_prop("build_only"),
|
||||
test_only=fc.find_prop("test_only"),
|
||||
is_important=fc.find_prop("important"),
|
||||
)
|
||||
|
||||
@ -161,13 +145,16 @@ def get_workflow_jobs():
|
||||
|
||||
configs = instantiate_configs()
|
||||
|
||||
# TODO Why don't we build this config?
|
||||
# See https://github.com/pytorch/pytorch/pull/17323#discussion_r259450540
|
||||
filtered_configs = filter(lambda x: not (str(x.distro) == "ubuntu14.04" and str(x.compiler) == "gcc4.9"), configs)
|
||||
|
||||
x = []
|
||||
for conf_options in configs:
|
||||
for conf_options in filtered_configs:
|
||||
|
||||
phases = ["build"]
|
||||
if not conf_options.build_only:
|
||||
phases = dimensions.PHASES
|
||||
if conf_options.test_only:
|
||||
phases = ["test"]
|
||||
|
||||
for phase in phases:
|
||||
x.append(conf_options.gen_workflow_job(phase))
|
||||
|
||||
@@ -1,15 +1,18 @@
#!/usr/bin/env python3


PHASES = ["build", "test"]

CUDA_VERSIONS = [
None, # cpu build
"92",
"100",
"101",
"102",
]

STANDARD_PYTHON_VERSIONS = [
"2.7",
"3.5",
"3.6",
"3.7",
"3.8"
]
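The get_processor_arch_name helper referenced in the binary build hunks above presumably maps these CUDA version strings onto the arch tag used in build names. A hedged guess at its behaviour, shown only to make the naming convention concrete (treat the exact implementation as an assumption):

```python
def get_processor_arch_name(cuda_version):
    # None (a cpu build) becomes "cpu"; "102" becomes "cu102", matching the
    # cpu/cuXY suffixes seen in PyTorch binary package names.
    return "cpu" if not cuda_version else "cu" + cuda_version

assert get_processor_arch_name(None) == "cpu"
assert get_processor_arch_name("102") == "cu102"
```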
@ -1,26 +1,32 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
from cimodel.lib.conf_tree import ConfigNode, X, XImportant
|
||||
|
||||
|
||||
CONFIG_TREE_DATA = [
|
||||
("xenial", [
|
||||
(None, [
|
||||
X("3.5"),
|
||||
XImportant("2.7.9"),
|
||||
X("2.7"),
|
||||
XImportant("3.5"), # Not run on all PRs, but should be included on [test all]
|
||||
X("nightly"),
|
||||
]),
|
||||
("gcc", [
|
||||
("4.8", [X("3.6")]),
|
||||
("5.4", [ # All this subtree rebases to master and then build
|
||||
XImportant("3.6"),
|
||||
("3.6", [
|
||||
("parallel_tbb", [X(True)]),
|
||||
("parallel_native", [X(True)]),
|
||||
("namedtensor", [XImportant(True)]),
|
||||
]),
|
||||
]),
|
||||
# TODO: bring back libtorch test
|
||||
("7", [X("3.6")]),
|
||||
]),
|
||||
("clang", [
|
||||
("5", [
|
||||
XImportant("3.6"), # This is actually the ASAN build
|
||||
("3.6", [
|
||||
("namedtensor", [XImportant(True)]), # ASAN
|
||||
]),
|
||||
]),
|
||||
("7", [
|
||||
("3.6", [
|
||||
@ -37,16 +43,15 @@ CONFIG_TREE_DATA = [
|
||||
# and
|
||||
# https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L153
|
||||
# (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453144)
|
||||
X("3.6"),
|
||||
]),
|
||||
("9.2", [X("3.6")]),
|
||||
("10.1", [X("3.6")]),
|
||||
("10.2", [
|
||||
X("2.7"),
|
||||
XImportant("3.6"),
|
||||
("3.6", [
|
||||
("libtorch", [XImportant(True)])
|
||||
("2.7", [
|
||||
("namedtensor", [XImportant(True)]),
|
||||
]),
|
||||
]),
|
||||
("9.2", [X("3.6")]),
|
||||
("10", [X("3.6")]),
|
||||
("10.1", [X("3.6")]),
|
||||
]),
|
||||
("android", [
|
||||
("r19c", [
|
||||
@ -124,9 +129,7 @@ class ExperimentalFeatureConfigNode(TreeConfigNode):
|
||||
|
||||
next_nodes = {
|
||||
"xla": XlaConfigNode,
|
||||
"parallel_tbb": ParallelTBBConfigNode,
|
||||
"parallel_native": ParallelNativeConfigNode,
|
||||
"libtorch": LibTorchConfigNode,
|
||||
"namedtensor": NamedTensorConfigNode,
|
||||
"important": ImportantConfigNode,
|
||||
"android_abi": AndroidAbiConfigNode,
|
||||
}
|
||||
@ -143,32 +146,13 @@ class XlaConfigNode(TreeConfigNode):
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
class ParallelTBBConfigNode(TreeConfigNode):
|
||||
|
||||
class NamedTensorConfigNode(TreeConfigNode):
|
||||
def modify_label(self, label):
|
||||
return "PARALLELTBB=" + str(label)
|
||||
return "NAMEDTENSOR=" + str(label)
|
||||
|
||||
def init2(self, node_name):
|
||||
self.props["parallel_backend"] = "paralleltbb"
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
class ParallelNativeConfigNode(TreeConfigNode):
|
||||
def modify_label(self, label):
|
||||
return "PARALLELNATIVE=" + str(label)
|
||||
|
||||
def init2(self, node_name):
|
||||
self.props["parallel_backend"] = "parallelnative"
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
class LibTorchConfigNode(TreeConfigNode):
|
||||
def modify_label(self, label):
|
||||
return "BUILD_TEST_LIBTORCH=" + str(label)
|
||||
|
||||
def init2(self, node_name):
|
||||
self.props["is_libtorch"] = node_name
|
||||
self.props["is_namedtensor"] = node_name
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
from cimodel.data.pytorch_build_data import TopLevelNode, CONFIG_TREE_DATA
|
||||
@ -13,7 +15,7 @@ DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/"
|
||||
|
||||
# ARE YOU EDITING THIS NUMBER? MAKE SURE YOU READ THE GUIDANCE AT THE
|
||||
# TOP OF .circleci/config.yml
|
||||
DOCKER_IMAGE_VERSION = "f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
DOCKER_IMAGE_VERSION = 347
|
||||
|
||||
|
||||
@dataclass
|
||||
@ -31,9 +33,8 @@ class Conf:
|
||||
gpu_resource: Optional[str] = None
|
||||
dependent_tests: List = field(default_factory=list)
|
||||
parent_build: Optional['Conf'] = None
|
||||
is_libtorch: bool = False
|
||||
is_namedtensor: bool = False
|
||||
is_important: bool = False
|
||||
parallel_backend: Optional[str] = None
|
||||
|
||||
# TODO: Eliminate the special casing for docker paths
|
||||
# In the short term, we *will* need to support special casing as docker images are merged for caffe2 and pytorch
|
||||
@ -46,10 +47,8 @@ class Conf:
|
||||
leading.append("pytorch")
|
||||
if self.is_xla and not for_docker:
|
||||
leading.append("xla")
|
||||
if self.is_libtorch and not for_docker:
|
||||
leading.append("libtorch")
|
||||
if self.parallel_backend is not None and not for_docker:
|
||||
leading.append(self.parallel_backend)
|
||||
if self.is_namedtensor and not for_docker:
|
||||
leading.append("namedtensor")
|
||||
|
||||
cuda_parms = []
|
||||
if self.cuda_version:
|
||||
@ -114,7 +113,7 @@ class Conf:
|
||||
if not self.is_important:
|
||||
# If you update this, update
|
||||
# caffe2_build_definitions.py too
|
||||
job_def["filters"] = {"branches": {"only": ["master", r"/ci-all\/.*/", r"/release\/.*/"]}}
|
||||
job_def["filters"] = {"branches": {"only": ["master", r"/ci-all\/.*/"]}}
|
||||
job_def.update(self.gen_workflow_params(phase))
|
||||
|
||||
return {job_name : job_def}
|
||||
@ -160,12 +159,7 @@ def gen_dependent_configs(xenial_parent_config):
|
||||
|
||||
configs.append(c)
|
||||
|
||||
return configs
|
||||
|
||||
def gen_docs_configs(xenial_parent_config):
|
||||
configs = []
|
||||
|
||||
for x in ["pytorch_python_doc_push", "pytorch_cpp_doc_push"]:
|
||||
for x in ["pytorch_short_perf_test_gpu", "pytorch_python_doc_push", "pytorch_cpp_doc_push"]:
|
||||
configs.append(HiddenConf(x, parent_build=xenial_parent_config))
|
||||
|
||||
return configs
|
||||
@ -226,13 +220,12 @@ def instantiate_configs():
|
||||
python_version = fc.find_prop("pyver")
|
||||
parms_list[0] = fc.find_prop("abbreviated_pyver")
|
||||
|
||||
if cuda_version in ["9.2", "10", "10.1", "10.2"]:
|
||||
if cuda_version in ["9.2", "10", "10.1"]:
|
||||
# TODO The gcc version is orthogonal to CUDA version?
|
||||
parms_list.append("gcc7")
|
||||
|
||||
is_libtorch = fc.find_prop("is_libtorch") or False
|
||||
is_namedtensor = fc.find_prop("is_namedtensor") or False
|
||||
is_important = fc.find_prop("is_important") or False
|
||||
parallel_backend = fc.find_prop("parallel_backend") or None
|
||||
|
||||
gpu_resource = None
|
||||
if cuda_version and cuda_version != "10":
|
||||
@ -247,33 +240,22 @@ def instantiate_configs():
|
||||
is_xla,
|
||||
restrict_phases,
|
||||
gpu_resource,
|
||||
is_libtorch=is_libtorch,
|
||||
is_namedtensor=is_namedtensor,
|
||||
is_important=is_important,
|
||||
parallel_backend=parallel_backend,
|
||||
)
|
||||
|
||||
# run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
|
||||
# should run on a CPU-only build that runs on all PRs.
|
||||
if distro_name == 'xenial' and fc.find_prop("pyver") == '3.6' \
|
||||
and cuda_version is None \
|
||||
and parallel_backend is None \
|
||||
and compiler_name == 'gcc' \
|
||||
and fc.find_prop('compiler_version') == '5.4':
|
||||
c.dependent_tests = gen_docs_configs(c)
|
||||
|
||||
if cuda_version == "10.1" and python_version == "3.6" and not is_libtorch:
|
||||
if cuda_version == "9" and python_version == "3.6":
|
||||
c.dependent_tests = gen_dependent_configs(c)
|
||||
|
||||
if (compiler_name == "gcc"
|
||||
and compiler_version == "5.4"
|
||||
and not is_libtorch
|
||||
and parallel_backend is None):
|
||||
and not is_namedtensor):
|
||||
bc_breaking_check = Conf(
|
||||
"backward-compatibility-check",
|
||||
[],
|
||||
is_xla=False,
|
||||
restrict_phases=["test"],
|
||||
is_libtorch=False,
|
||||
is_namedtensor=False,
|
||||
is_important=True,
|
||||
parent_build=c,
|
||||
)
|
||||
@ -288,7 +270,7 @@ def get_workflow_jobs():
|
||||
|
||||
config_list = instantiate_configs()
|
||||
|
||||
x = []
|
||||
x = ["setup"]
|
||||
for conf_options in config_list:
|
||||
|
||||
phases = conf_options.restrict_phases or dimensions.PHASES
|
||||
|
||||
@ -1,3 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional, Dict
|
||||
|
||||
|
||||
@ -1,3 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
|
||||
def quote(s):
|
||||
return sandwich('"', s)
|
||||
|
||||
|
||||
@ -1,3 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
This module encapsulates dependencies on pygraphviz
|
||||
"""
|
||||
|
||||
.circleci/config.yml (8140 changed lines): file diff suppressed because it is too large.
@@ -1,19 +0,0 @@
# Docker images for Jenkins

This directory contains everything needed to build the Docker images
that are used in our CI

The Dockerfiles located in subdirectories are parameterized to
conditionally run build stages depending on build arguments passed to
`docker build`. This lets us use only a few Dockerfiles for many
images. The different configurations are identified by a freeform
string that we call a _build environment_. This string is persisted in
each image as the `BUILD_ENVIRONMENT` environment variable.

See `build.sh` for valid build environments (it's the giant switch).

## Contents

* `build.sh` -- dispatch script to launch all builds
* `common` -- scripts used to execute individual Docker build stages
* `ubuntu-cuda` -- Dockerfile for Ubuntu image with CUDA support for nvidia-docker
@@ -1 +0,0 @@
<manifest package="org.pytorch.deps" />
@ -1,68 +0,0 @@
|
||||
buildscript {
|
||||
ext {
|
||||
minSdkVersion = 21
|
||||
targetSdkVersion = 28
|
||||
compileSdkVersion = 28
|
||||
buildToolsVersion = '28.0.3'
|
||||
|
||||
coreVersion = "1.2.0"
|
||||
extJUnitVersion = "1.1.1"
|
||||
runnerVersion = "1.2.0"
|
||||
rulesVersion = "1.2.0"
|
||||
junitVersion = "4.12"
|
||||
}
|
||||
|
||||
repositories {
|
||||
google()
|
||||
mavenLocal()
|
||||
mavenCentral()
|
||||
jcenter()
|
||||
}
|
||||
|
||||
dependencies {
|
||||
classpath 'com.android.tools.build:gradle:3.3.2'
|
||||
classpath "com.jfrog.bintray.gradle:gradle-bintray-plugin:1.8.0"
|
||||
classpath "com.github.dcendents:android-maven-gradle-plugin:2.1"
|
||||
classpath "org.jfrog.buildinfo:build-info-extractor-gradle:4.9.8"
|
||||
}
|
||||
}
|
||||
|
||||
repositories {
|
||||
google()
|
||||
jcenter()
|
||||
}
|
||||
|
||||
apply plugin: 'com.android.library'
|
||||
|
||||
android {
|
||||
compileSdkVersion rootProject.compileSdkVersion
|
||||
buildToolsVersion rootProject.buildToolsVersion
|
||||
|
||||
defaultConfig {
|
||||
minSdkVersion minSdkVersion
|
||||
targetSdkVersion targetSdkVersion
|
||||
}
|
||||
|
||||
sourceSets {
|
||||
main {
|
||||
manifest.srcFile 'AndroidManifest.xml'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dependencies {
|
||||
implementation 'com.android.support:appcompat-v7:28.0.0'
|
||||
implementation 'androidx.appcompat:appcompat:1.0.0'
|
||||
implementation 'com.facebook.fbjni:fbjni-java-only:0.0.3'
|
||||
implementation 'com.google.code.findbugs:jsr305:3.0.1'
|
||||
implementation 'com.facebook.soloader:nativeloader:0.8.0'
|
||||
|
||||
implementation 'junit:junit:' + rootProject.junitVersion
|
||||
implementation 'androidx.test:core:' + rootProject.coreVersion
|
||||
|
||||
implementation 'junit:junit:' + rootProject.junitVersion
|
||||
implementation 'androidx.test:core:' + rootProject.coreVersion
|
||||
implementation 'androidx.test.ext:junit:' + rootProject.extJUnitVersion
|
||||
implementation 'androidx.test:rules:' + rootProject.rulesVersion
|
||||
implementation 'androidx.test:runner:' + rootProject.runnerVersion
|
||||
}
|
||||
@ -1,281 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
image="$1"
|
||||
shift
|
||||
|
||||
if [ -z "${image}" ]; then
|
||||
echo "Usage: $0 IMAGE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# TODO: Generalize
|
||||
OS="ubuntu"
|
||||
DOCKERFILE="${OS}/Dockerfile"
|
||||
if [[ "$image" == *-cuda* ]]; then
|
||||
DOCKERFILE="${OS}-cuda/Dockerfile"
|
||||
fi
|
||||
|
||||
if [[ "$image" == *-trusty* ]]; then
|
||||
UBUNTU_VERSION=14.04
|
||||
elif [[ "$image" == *-xenial* ]]; then
|
||||
UBUNTU_VERSION=16.04
|
||||
elif [[ "$image" == *-artful* ]]; then
|
||||
UBUNTU_VERSION=17.10
|
||||
elif [[ "$image" == *-bionic* ]]; then
|
||||
UBUNTU_VERSION=18.04
|
||||
fi
|
||||
|
||||
TRAVIS_DL_URL_PREFIX="https://s3.amazonaws.com/travis-python-archives/binaries/ubuntu/14.04/x86_64"
|
||||
|
||||
# It's annoying to rename jobs every time you want to rewrite a
|
||||
# configuration, so we hardcode everything here rather than do it
|
||||
# from scratch
|
||||
case "$image" in
|
||||
pytorch-linux-bionic-clang9-thrift-llvmdev)
|
||||
CLANG_VERSION=9
|
||||
THRIFT=yes
|
||||
LLVMDEV=yes
|
||||
PROTOBUF=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py2.7.9)
|
||||
TRAVIS_PYTHON_VERSION=2.7.9
|
||||
GCC_VERSION=7
|
||||
# Do not install PROTOBUF, DB, and VISION as a test
|
||||
;;
|
||||
pytorch-linux-xenial-py2.7)
|
||||
TRAVIS_PYTHON_VERSION=2.7
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3.5)
|
||||
TRAVIS_PYTHON_VERSION=3.5
|
||||
GCC_VERSION=7
|
||||
# Do not install PROTOBUF, DB, and VISION as a test
|
||||
;;
|
||||
pytorch-linux-xenial-py3.8)
|
||||
# TODO: This is a hack, get rid of this as soon as you get rid of the travis downloads
|
||||
TRAVIS_DL_URL_PREFIX="https://s3.amazonaws.com/travis-python-archives/binaries/ubuntu/16.04/x86_64"
|
||||
TRAVIS_PYTHON_VERSION=3.8
|
||||
GCC_VERSION=7
|
||||
# Do not install PROTOBUF, DB, and VISION as a test
|
||||
;;
|
||||
pytorch-linux-xenial-py3.6-gcc4.8)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=4.8
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3.6-gcc5.4)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=5
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3.6-gcc7.2)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
# Do not install PROTOBUF, DB, and VISION as a test
|
||||
;;
|
||||
pytorch-linux-xenial-py3.6-gcc7)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-pynightly)
|
||||
TRAVIS_PYTHON_VERSION=nightly
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda9-cudnn7-py2)
|
||||
CUDA_VERSION=9.0
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=2.7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda9-cudnn7-py3)
|
||||
CUDA_VERSION=9.0
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc7)
|
||||
CUDA_VERSION=9.2
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda10-cudnn7-py3-gcc7)
|
||||
CUDA_VERSION=10.0
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7)
|
||||
CUDA_VERSION=10.1
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7)
|
||||
CUDA_VERSION=10.2
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3-clang5-asan)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=5.0
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3-clang5-android-ndk-r19c)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=5.0
|
||||
LLVMDEV=yes
|
||||
PROTOBUF=yes
|
||||
ANDROID=yes
|
||||
ANDROID_NDK_VERSION=r19c
|
||||
GRADLE_VERSION=4.10.3
|
||||
CMAKE_VERSION=3.7.0
|
||||
NINJA_VERSION=1.9.0
|
||||
;;
|
||||
pytorch-linux-xenial-py3.6-clang7)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
esac
|
||||
|
||||
# Set Jenkins UID and GID if running Jenkins
|
||||
if [ -n "${JENKINS:-}" ]; then
|
||||
JENKINS_UID=$(id -u jenkins)
|
||||
JENKINS_GID=$(id -g jenkins)
|
||||
fi
|
||||
|
||||
tmp_tag="tmp-$(cat /dev/urandom | tr -dc 'a-z' | fold -w 32 | head -n 1)"
|
||||
|
||||
# Build image
|
||||
docker build \
|
||||
--no-cache \
|
||||
--build-arg "TRAVIS_DL_URL_PREFIX=${TRAVIS_DL_URL_PREFIX}" \
|
||||
--build-arg "BUILD_ENVIRONMENT=${image}" \
|
||||
--build-arg "PROTOBUF=${PROTOBUF:-}" \
|
||||
--build-arg "THRIFT=${THRIFT:-}" \
|
||||
--build-arg "LLVMDEV=${LLVMDEV:-}" \
|
||||
--build-arg "DB=${DB:-}" \
|
||||
--build-arg "VISION=${VISION:-}" \
|
||||
--build-arg "EC2=${EC2:-}" \
|
||||
--build-arg "JENKINS=${JENKINS:-}" \
|
||||
--build-arg "JENKINS_UID=${JENKINS_UID:-}" \
|
||||
--build-arg "JENKINS_GID=${JENKINS_GID:-}" \
|
||||
--build-arg "UBUNTU_VERSION=${UBUNTU_VERSION}" \
|
||||
--build-arg "CLANG_VERSION=${CLANG_VERSION}" \
|
||||
--build-arg "ANACONDA_PYTHON_VERSION=${ANACONDA_PYTHON_VERSION}" \
|
||||
--build-arg "TRAVIS_PYTHON_VERSION=${TRAVIS_PYTHON_VERSION}" \
|
||||
--build-arg "GCC_VERSION=${GCC_VERSION}" \
|
||||
--build-arg "CUDA_VERSION=${CUDA_VERSION}" \
|
||||
--build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \
|
||||
--build-arg "ANDROID=${ANDROID}" \
|
||||
--build-arg "ANDROID_NDK=${ANDROID_NDK_VERSION}" \
|
||||
--build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \
|
||||
--build-arg "CMAKE_VERSION=${CMAKE_VERSION:-}" \
|
||||
--build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
|
||||
--build-arg "KATEX=${KATEX:-}" \
|
||||
-f $(dirname ${DOCKERFILE})/Dockerfile \
|
||||
-t "$tmp_tag" \
|
||||
"$@" \
|
||||
.
|
||||
|
||||
function drun() {
|
||||
docker run --rm "$tmp_tag" $*
|
||||
}
|
||||
|
||||
if [[ "$OS" == "ubuntu" ]]; then
|
||||
if !(drun lsb_release -a 2>&1 | grep -qF Ubuntu); then
|
||||
echo "OS=ubuntu, but:"
|
||||
drun lsb_release -a
|
||||
exit 1
|
||||
fi
|
||||
if !(drun lsb_release -a 2>&1 | grep -qF "$UBUNTU_VERSION"); then
|
||||
echo "UBUNTU_VERSION=$UBUNTU_VERSION, but:"
|
||||
drun lsb_release -a
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$TRAVIS_PYTHON_VERSION" ]; then
|
||||
if [[ "$TRAVIS_PYTHON_VERSION" != nightly ]]; then
|
||||
if !(drun python --version 2>&1 | grep -qF "Python $TRAVIS_PYTHON_VERSION"); then
|
||||
echo "TRAVIS_PYTHON_VERSION=$TRAVIS_PYTHON_VERSION, but:"
|
||||
drun python --version
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "Please manually check nightly is OK:"
|
||||
drun python --version
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
if !(drun python --version 2>&1 | grep -qF "Python $ANACONDA_PYTHON_VERSION"); then
|
||||
echo "ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION, but:"
|
||||
drun python --version
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$GCC_VERSION" ]; then
|
||||
if !(drun gcc --version 2>&1 | grep -q " $GCC_VERSION\\W"); then
|
||||
echo "GCC_VERSION=$GCC_VERSION, but:"
|
||||
drun gcc --version
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$CLANG_VERSION" ]; then
|
||||
if !(drun clang --version 2>&1 | grep -qF "clang version $CLANG_VERSION"); then
|
||||
echo "CLANG_VERSION=$CLANG_VERSION, but:"
|
||||
drun clang --version
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$KATEX" ]; then
|
||||
if !(drun katex --version); then
|
||||
echo "KATEX=$KATEX, but:"
|
||||
drun katex --version
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
@ -1,49 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
retry () {
|
||||
$* || (sleep 1 && $*) || (sleep 2 && $*)
|
||||
}
|
||||
|
||||
# If UPSTREAM_BUILD_ID is set (see trigger job), then we can
|
||||
# use it to tag this build with the same ID used to tag all other
|
||||
# base image builds. Also, we can try and pull the previous
|
||||
# image first, to avoid rebuilding layers that haven't changed.
|
||||
|
||||
#until we find a way to reliably reuse previous build, this last_tag is not in use
|
||||
# last_tag="$(( CIRCLE_BUILD_NUM - 1 ))"
|
||||
tag="${CIRCLE_WORKFLOW_ID}"
|
||||
|
||||
|
||||
registry="308535385114.dkr.ecr.us-east-1.amazonaws.com"
|
||||
image="${registry}/pytorch/${IMAGE_NAME}"
|
||||
|
||||
login() {
|
||||
aws ecr get-authorization-token --region us-east-1 --output text --query 'authorizationData[].authorizationToken' |
|
||||
base64 -d |
|
||||
cut -d: -f2 |
|
||||
docker login -u AWS --password-stdin "$1"
|
||||
}
|
||||
|
||||
# Retry on timeouts (can happen on job stampede).
|
||||
retry login "${registry}"
|
||||
|
||||
# Logout on exit
|
||||
trap "docker logout ${registry}" EXIT
|
||||
|
||||
# export EC2=1
|
||||
# export JENKINS=1
|
||||
|
||||
# Try to pull the previous image (perhaps we can reuse some layers)
|
||||
# if [ -n "${last_tag}" ]; then
|
||||
# docker pull "${image}:${last_tag}" || true
|
||||
# fi
|
||||
|
||||
# Build new image
|
||||
./build.sh ${IMAGE_NAME} -t "${image}:${tag}"
|
||||
|
||||
docker push "${image}:${tag}"
|
||||
|
||||
docker save -o "${IMAGE_NAME}:${tag}.tar" "${image}:${tag}"
|
||||
aws s3 cp "${IMAGE_NAME}:${tag}.tar" "s3://ossci-linux-build/pytorch/base/${IMAGE_NAME}:${tag}.tar" --acl public-read
|
||||
@ -1,129 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
[ -n "${ANDROID_NDK}" ]
|
||||
|
||||
apt-get update
|
||||
apt-get install -y --no-install-recommends autotools-dev autoconf unzip
|
||||
apt-get autoclean && apt-get clean
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
pushd /tmp
|
||||
curl -Os --retry 3 https://dl.google.com/android/repository/android-ndk-${ANDROID_NDK}-linux-x86_64.zip
|
||||
popd
|
||||
_ndk_dir=/opt/ndk
|
||||
mkdir -p "$_ndk_dir"
|
||||
unzip -qo /tmp/android*.zip -d "$_ndk_dir"
|
||||
_versioned_dir=$(find "$_ndk_dir/" -mindepth 1 -maxdepth 1 -type d)
|
||||
mv "$_versioned_dir"/* "$_ndk_dir"/
|
||||
rmdir "$_versioned_dir"
|
||||
rm -rf /tmp/*
|
||||
|
||||
# Install OpenJDK
|
||||
# https://hub.docker.com/r/picoded/ubuntu-openjdk-8-jdk/dockerfile/
|
||||
|
||||
sudo apt-get update && \
|
||||
apt-get install -y openjdk-8-jdk && \
|
||||
apt-get install -y ant && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/* && \
|
||||
rm -rf /var/cache/oracle-jdk8-installer;
|
||||
|
||||
# Fix certificate issues, found as of
|
||||
# https://bugs.launchpad.net/ubuntu/+source/ca-certificates-java/+bug/983302
|
||||
|
||||
sudo apt-get update && \
|
||||
apt-get install -y ca-certificates-java && \
|
||||
apt-get clean && \
|
||||
update-ca-certificates -f && \
|
||||
rm -rf /var/lib/apt/lists/* && \
|
||||
rm -rf /var/cache/oracle-jdk8-installer;
|
||||
|
||||
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/
|
||||
|
||||
# Installing android sdk
|
||||
# https://github.com/circleci/circleci-images/blob/staging/android/Dockerfile.m4
|
||||
|
||||
_sdk_version=sdk-tools-linux-3859397.zip
|
||||
_android_home=/opt/android/sdk
|
||||
|
||||
rm -rf $_android_home
|
||||
sudo mkdir -p $_android_home
|
||||
curl --silent --show-error --location --fail --retry 3 --output /tmp/$_sdk_version https://dl.google.com/android/repository/$_sdk_version
|
||||
sudo unzip -q /tmp/$_sdk_version -d $_android_home
|
||||
rm /tmp/$_sdk_version
|
||||
|
||||
sudo chmod -R 777 $_android_home
|
||||
|
||||
export ANDROID_HOME=$_android_home
|
||||
export ADB_INSTALL_TIMEOUT=120
|
||||
|
||||
export PATH="${ANDROID_HOME}/emulator:${ANDROID_HOME}/tools:${ANDROID_HOME}/tools/bin:${ANDROID_HOME}/platform-tools:${PATH}"
|
||||
echo "PATH:${PATH}"
|
||||
alias sdkmanager="$ANDROID_HOME/tools/bin/sdkmanager"
|
||||
|
||||
sudo mkdir ~/.android && sudo echo '### User Sources for Android SDK Manager' > ~/.android/repositories.cfg
|
||||
sudo chmod -R 777 ~/.android
|
||||
|
||||
yes | sdkmanager --licenses
|
||||
yes | sdkmanager --update
|
||||
|
||||
sdkmanager \
|
||||
"tools" \
|
||||
"platform-tools" \
|
||||
"emulator"
|
||||
|
||||
sdkmanager \
|
||||
"build-tools;28.0.3" \
|
||||
"build-tools;29.0.2"
|
||||
|
||||
sdkmanager \
|
||||
"platforms;android-28" \
|
||||
"platforms;android-29"
|
||||
sdkmanager --list
|
||||
|
||||
# Installing Gradle
|
||||
echo "GRADLE_VERSION:${GRADLE_VERSION}"
|
||||
_gradle_home=/opt/gradle
|
||||
sudo rm -rf $gradle_home
|
||||
sudo mkdir -p $_gradle_home
|
||||
|
||||
wget --no-verbose --output-document=/tmp/gradle.zip \
|
||||
"https://services.gradle.org/distributions/gradle-${GRADLE_VERSION}-bin.zip"
|
||||
|
||||
sudo unzip -q /tmp/gradle.zip -d $_gradle_home
|
||||
rm /tmp/gradle.zip
|
||||
|
||||
sudo chmod -R 777 $_gradle_home
|
||||
|
||||
export GRADLE_HOME=$_gradle_home/gradle-$GRADLE_VERSION
|
||||
alias gradle="${GRADLE_HOME}/bin/gradle"
|
||||
|
||||
export PATH="${GRADLE_HOME}/bin/:${PATH}"
|
||||
echo "PATH:${PATH}"
|
||||
|
||||
gradle --version
|
||||
|
||||
mkdir /var/lib/jenkins/gradledeps
|
||||
cp build.gradle /var/lib/jenkins/gradledeps
|
||||
cp AndroidManifest.xml /var/lib/jenkins/gradledeps
|
||||
|
||||
pushd /var/lib/jenkins
|
||||
|
||||
export GRADLE_LOCAL_PROPERTIES=gradledeps/local.properties
|
||||
rm -f $GRADLE_LOCAL_PROPERTIES
|
||||
echo "sdk.dir=/opt/android/sdk" >> $GRADLE_LOCAL_PROPERTIES
|
||||
echo "ndk.dir=/opt/ndk" >> $GRADLE_LOCAL_PROPERTIES
|
||||
|
||||
chown -R jenkins /var/lib/jenkins/gradledeps
|
||||
chgrp -R jenkins /var/lib/jenkins/gradledeps
|
||||
|
||||
sudo -H -u jenkins $GRADLE_HOME/bin/gradle -p /var/lib/jenkins/gradledeps -g /var/lib/jenkins/.gradle --refresh-dependencies --debug --stacktrace assemble
|
||||
|
||||
chown -R jenkins /var/lib/jenkins/.gradle
|
||||
chgrp -R jenkins /var/lib/jenkins/.gradle
|
||||
|
||||
popd
|
||||
|
||||
rm -rf /var/lib/jenkins/.gradle/daemon
|
||||
@ -1,75 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
if [[ "$UBUNTU_VERSION" == "14.04" ]]; then
|
||||
# cmake 2 is too old
|
||||
cmake3=cmake3
|
||||
else
|
||||
cmake3=cmake
|
||||
fi
|
||||
|
||||
if [[ "$UBUNTU_VERSION" == "18.04" ]]; then
|
||||
cmake3="cmake=3.10*"
|
||||
else
|
||||
cmake3="${cmake3}=3.5*"
|
||||
fi
|
||||
|
||||
# Install common dependencies
|
||||
apt-get update
|
||||
# TODO: Some of these may not be necessary
|
||||
# TODO: libiomp also gets installed by conda, aka there's a conflict
|
||||
ccache_deps="asciidoc docbook-xml docbook-xsl xsltproc"
|
||||
numpy_deps="gfortran"
|
||||
apt-get install -y --no-install-recommends \
|
||||
$ccache_deps \
|
||||
$numpy_deps \
|
||||
${cmake3} \
|
||||
apt-transport-https \
|
||||
autoconf \
|
||||
automake \
|
||||
build-essential \
|
||||
ca-certificates \
|
||||
curl \
|
||||
git \
|
||||
libatlas-base-dev \
|
||||
libc6-dbg \
|
||||
libiomp-dev \
|
||||
libyaml-dev \
|
||||
libz-dev \
|
||||
libjpeg-dev \
|
||||
libasound2-dev \
|
||||
libsndfile-dev \
|
||||
python \
|
||||
python-dev \
|
||||
python-setuptools \
|
||||
python-wheel \
|
||||
software-properties-common \
|
||||
sudo \
|
||||
wget \
|
||||
vim
|
||||
|
||||
# Install Valgrind separately since the apt-get version is too old.
|
||||
mkdir valgrind_build && cd valgrind_build
|
||||
if ! wget http://valgrind.org/downloads/valgrind-3.14.0.tar.bz2
|
||||
then
|
||||
wget https://sourceware.org/ftp/valgrind/valgrind-3.14.0.tar.bz2
|
||||
fi
|
||||
tar -xjf valgrind-3.14.0.tar.bz2
|
||||
cd valgrind-3.14.0
|
||||
./configure --prefix=/usr/local
|
||||
make
|
||||
sudo make install
|
||||
cd ../../
|
||||
rm -rf valgrind_build
|
||||
alias valgrind="/usr/local/bin/valgrind"
|
||||
|
||||
# TODO: THIS IS A HACK!!!
|
||||
# distributed nccl(2) tests are a bit busted, see https://github.com/pytorch/pytorch/issues/5877
|
||||
if dpkg -s libnccl-dev; then
|
||||
apt-get remove -y libnccl-dev libnccl2 --allow-change-held-packages
|
||||
fi
|
||||
|
||||
# Cleanup package manager
|
||||
apt-get autoclean && apt-get clean
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
@ -1,35 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
mkdir -p /opt/cache/bin
|
||||
mkdir -p /opt/cache/lib
|
||||
sed -e 's|PATH="\(.*\)"|PATH="/opt/cache/bin:\1"|g' -i /etc/environment
|
||||
export PATH="/opt/cache/bin:$PATH"
|
||||
|
||||
# Setup compiler cache
|
||||
curl --retry 3 https://s3.amazonaws.com/ossci-linux/sccache -o /opt/cache/bin/sccache
|
||||
chmod a+x /opt/cache/bin/sccache
|
||||
|
||||
function write_sccache_stub() {
|
||||
printf "#!/bin/sh\nexec sccache $(which $1) \$*" > "/opt/cache/bin/$1"
|
||||
chmod a+x "/opt/cache/bin/$1"
|
||||
}
|
||||
|
||||
write_sccache_stub cc
|
||||
write_sccache_stub c++
|
||||
write_sccache_stub gcc
|
||||
write_sccache_stub g++
|
||||
write_sccache_stub clang
|
||||
write_sccache_stub clang++
|
||||
|
||||
if [ -n "$CUDA_VERSION" ]; then
|
||||
# TODO: This is a workaround for the fact that PyTorch's FindCUDA
|
||||
# implementation cannot find nvcc if it is setup this way, because it
|
||||
# appears to search for the nvcc in PATH, and use its path to infer
|
||||
# where CUDA is installed. Instead, we install an nvcc symlink outside
|
||||
# of the PATH, and set CUDA_NVCC_EXECUTABLE so that we make use of it.
|
||||
|
||||
printf "#!/bin/sh\nexec sccache $(which nvcc) \"\$@\"" > /opt/cache/lib/nvcc
|
||||
chmod a+x /opt/cache/lib/nvcc
|
||||
fi
|
||||
@ -1,44 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
if [ -n "$CLANG_VERSION" ]; then
|
||||
|
||||
if [[ $CLANG_VERSION == 7 && $UBUNTU_VERSION == 16.04 ]]; then
|
||||
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
|
||||
sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main"
|
||||
elif [[ $CLANG_VERSION == 9 && $UBUNTU_VERSION == 18.04 ]]; then
|
||||
sudo apt-get update
|
||||
# gpg-agent is not available by default on 18.04
|
||||
sudo apt-get install -y --no-install-recommends gpg-agent
|
||||
wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
|
||||
apt-add-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-${CLANG_VERSION} main"
|
||||
fi
|
||||
|
||||
sudo apt-get update
|
||||
apt-get install -y --no-install-recommends clang-"$CLANG_VERSION"
|
||||
apt-get install -y --no-install-recommends llvm-"$CLANG_VERSION"
|
||||
|
||||
# Install dev version of LLVM.
|
||||
if [ -n "$LLVMDEV" ]; then
|
||||
sudo apt-get install -y --no-install-recommends llvm-"$CLANG_VERSION"-dev
|
||||
fi
|
||||
|
||||
# Use update-alternatives to make this version the default
|
||||
# TODO: Decide if overriding gcc as well is a good idea
|
||||
# update-alternatives --install /usr/bin/gcc gcc /usr/bin/clang-"$CLANG_VERSION" 50
|
||||
# update-alternatives --install /usr/bin/g++ g++ /usr/bin/clang++-"$CLANG_VERSION" 50
|
||||
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-"$CLANG_VERSION" 50
|
||||
update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-"$CLANG_VERSION" 50
|
||||
|
||||
# clang's packaging is a little messed up (the runtime libs aren't
|
||||
# added into the linker path), so give it a little help
|
||||
clang_lib=("/usr/lib/llvm-$CLANG_VERSION/lib/clang/"*"/lib/linux")
|
||||
echo "$clang_lib" > /etc/ld.so.conf.d/clang.conf
|
||||
ldconfig
|
||||
|
||||
# Cleanup package manager
|
||||
apt-get autoclean && apt-get clean
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
fi
|
||||
@ -1,16 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
[ -n "$CMAKE_VERSION" ]
|
||||
|
||||
# Turn 3.6.3 into v3.6
|
||||
path=$(echo "${CMAKE_VERSION}" | sed -e 's/\([0-9].[0-9]\+\).*/v\1/')
|
||||
file="cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz"
|
||||
|
||||
# Download and install specific CMake version in /usr/local
|
||||
pushd /tmp
|
||||
curl -Os --retry 3 "https://cmake.org/files/${path}/${file}"
|
||||
tar -C /usr/local --strip-components 1 --no-same-owner -zxf cmake-*.tar.gz
|
||||
rm -f cmake-*.tar.gz
|
||||
popd
|
||||
@@ -1,92 +0,0 @@
#!/bin/bash

set -ex

# Optionally install conda
if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
  BASE_URL="https://repo.anaconda.com/miniconda"

  MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1)

  case "$MAJOR_PYTHON_VERSION" in
    2)
      CONDA_FILE="Miniconda2-latest-Linux-x86_64.sh"
      ;;
    3)
      CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
      ;;
    *)
      echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION"
      exit 1
      ;;
  esac

  mkdir /opt/conda
  chown jenkins:jenkins /opt/conda

  as_jenkins() {
    # NB: unsetting the environment variables works around a conda bug
    # https://github.com/conda/conda/issues/6576
    # NB: Pass on PATH and LD_LIBRARY_PATH to sudo invocation
    # NB: This must be run from a directory that jenkins has access to,
    # works around https://github.com/conda/conda-package-handling/pull/34
    sudo -H -u jenkins env -u SUDO_UID -u SUDO_GID -u SUDO_COMMAND -u SUDO_USER env "PATH=$PATH" "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" $*
  }

  pushd /tmp
  wget -q "${BASE_URL}/${CONDA_FILE}"
  chmod +x "${CONDA_FILE}"
  as_jenkins ./"${CONDA_FILE}" -b -f -p "/opt/conda"
  popd

  # NB: Don't do this, rely on the rpath to get it right
  #echo "/opt/conda/lib" > /etc/ld.so.conf.d/conda-python.conf
  #ldconfig
  sed -e 's|PATH="\(.*\)"|PATH="/opt/conda/bin:\1"|g' -i /etc/environment
  export PATH="/opt/conda/bin:$PATH"

  # Ensure we run conda in a directory that jenkins has write access to
  pushd /opt/conda

  # Track latest conda update
  as_jenkins conda update -n base conda

  # Install correct Python version
  as_jenkins conda install python="$ANACONDA_PYTHON_VERSION"

  conda_install() {
    # Ensure that the install command doesn't upgrade/downgrade Python
    # This should be called as
    #   conda_install pkg1 pkg2 ... [-c channel]
    as_jenkins conda install -q -y python="$ANACONDA_PYTHON_VERSION" $*
  }

  # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
  # DO NOT install cmake here as it would install a version newer than 3.5, but
  # we want to pin to version 3.5.
  conda_install numpy pyyaml mkl mkl-include setuptools cffi typing future six
  if [[ "$CUDA_VERSION" == 9.0* ]]; then
    conda_install magma-cuda90 -c pytorch
  elif [[ "$CUDA_VERSION" == 9.1* ]]; then
    conda_install magma-cuda91 -c pytorch
  elif [[ "$CUDA_VERSION" == 9.2* ]]; then
    conda_install magma-cuda92 -c pytorch
  elif [[ "$CUDA_VERSION" == 10.0* ]]; then
    conda_install magma-cuda100 -c pytorch
  elif [[ "$CUDA_VERSION" == 10.1* ]]; then
    conda_install magma-cuda101 -c pytorch
  fi

  # TODO: This isn't working atm
  conda_install nnpack -c killeent

  # Install some other packages
  # TODO: Why is scipy pinned
  # numba & llvmlite are pinned because of https://github.com/numba/numba/issues/4368
  # scikit-learn is pinned because of
  # https://github.com/scikit-learn/scikit-learn/issues/14485 (affects gcc 5.5
  # only)
  as_jenkins pip install --progress-bar off pytest scipy==1.1.0 scikit-learn==0.20.3 scikit-image librosa>=0.6.2 psutil numba==0.46.0 llvmlite==0.30.0

  popd
fi
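For illustration only (the packages and channel below are assumptions, not taken from the diff), every call to the conda_install helper defined above re-pins the interpreter so a dependency cannot silently change the Python version:

```
# Roughly what a call expands to:
conda_install ninja cffi -c pytorch
#   -> sudo -H -u jenkins env ... conda install -q -y python="$ANACONDA_PYTHON_VERSION" ninja cffi -c pytorch
```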
@@ -1,61 +0,0 @@
#!/bin/bash

set -ex

# This function installs protobuf 2.6
install_protobuf_26() {
  pb_dir="/usr/temp_pb_install_dir"
  mkdir -p $pb_dir

  # On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or
  # else it will fail with
  # g++: error: ./../lib64/crti.o: No such file or directory
  ln -s /usr/lib64 "$pb_dir/lib64"

  curl -LO "https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz"
  tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-2.6.1.tar.gz
  pushd "$pb_dir" && ./configure && make && make check && sudo make install && sudo ldconfig
  popd
  rm -rf $pb_dir
}

install_ubuntu() {
  apt-get update
  apt-get install -y --no-install-recommends \
    libhiredis-dev \
    libleveldb-dev \
    liblmdb-dev \
    libsnappy-dev

  # Cleanup
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

install_centos() {
  # Need EPEL for many packages we depend on.
  # See http://fedoraproject.org/wiki/EPEL
  yum --enablerepo=extras install -y epel-release

  yum install -y \
    hiredis-devel \
    leveldb-devel \
    lmdb-devel \
    snappy-devel

  # Cleanup
  yum clean all
  rm -rf /var/cache/yum
  rm -rf /var/lib/yum/yumdb
  rm -rf /var/lib/yum/history
}

# Install base packages depending on the base OS
if [ -f /etc/lsb-release ]; then
  install_ubuntu
elif [ -f /etc/os-release ]; then
  install_centos
else
  echo "Unable to determine OS..."
  exit 1
fi
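The lsb-release/os-release probe at the end is reused verbatim by several of these scripts. As a standalone sketch you can run to see which branch a given base image would take:

```
if [ -f /etc/lsb-release ]; then
  echo "would run install_ubuntu"
elif [ -f /etc/os-release ]; then
  echo "would run install_centos"
else
  echo "Unable to determine OS..."
fi
```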
@@ -1,19 +0,0 @@
#!/bin/bash

set -ex

if [ -n "$GCC_VERSION" ]; then

  # Need the official toolchain repo to get alternate packages
  add-apt-repository ppa:ubuntu-toolchain-r/test
  apt-get update
  apt-get install -y g++-$GCC_VERSION

  update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50
  update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50

  # Cleanup package manager
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

fi
@@ -1,6 +0,0 @@
#!/bin/bash

set -ex

mkdir -p /usr/local/include
cp jni.h /usr/local/include
@@ -1,20 +0,0 @@
#!/bin/bash

set -ex

if [ -n "$KATEX" ]; then

  curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
  sudo apt-get install -y nodejs

  curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
  echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list

  apt-get update
  apt-get install -y --no-install-recommends yarn
  yarn global add katex --prefix /usr/local

  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

fi
@@ -1,13 +0,0 @@
#!/bin/bash

set -ex

[ -n "$NINJA_VERSION" ]

url="https://github.com/ninja-build/ninja/releases/download/v${NINJA_VERSION}/ninja-linux.zip"

pushd /tmp
wget --no-verbose --output-document=ninja-linux.zip "$url"
unzip ninja-linux.zip -d /usr/local/bin
rm -f ninja-linux.zip
popd
@@ -1,56 +0,0 @@
#!/bin/bash

set -ex

# This function installs protobuf 2.6
install_protobuf_26() {
  pb_dir="/usr/temp_pb_install_dir"
  mkdir -p $pb_dir

  # On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or
  # else it will fail with
  # g++: error: ./../lib64/crti.o: No such file or directory
  ln -s /usr/lib64 "$pb_dir/lib64"

  curl -LO "https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz"
  tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-2.6.1.tar.gz
  pushd "$pb_dir" && ./configure && make && make check && sudo make install && sudo ldconfig
  popd
  rm -rf $pb_dir
}

install_ubuntu() {
  # Ubuntu 14.04 ships with protobuf 2.5, but ONNX needs protobuf >= 2.6
  # so we install that here if on 14.04
  # Ubuntu 14.04 also has cmake 2.8.12 as the default option, so we will
  # install cmake3 here and use cmake3.
  apt-get update
  if [[ "$UBUNTU_VERSION" == 14.04 ]]; then
    apt-get install -y --no-install-recommends cmake3
    install_protobuf_26
  else
    apt-get install -y --no-install-recommends \
      libprotobuf-dev \
      protobuf-compiler
  fi

  # Cleanup
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

install_centos() {
  # Centos7 ships with protobuf 2.5, but ONNX needs protobuf >= 2.6
  # so we always install that here
  install_protobuf_26
}

# Install base packages depending on the base OS
if [ -f /etc/lsb-release ]; then
  install_ubuntu
elif [ -f /etc/os-release ]; then
  install_centos
else
  echo "Unable to determine OS..."
  exit 1
fi
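After either branch above, a quick sanity check (illustrative, not part of the diff) that the intended protobuf ended up on PATH:

```
protoc --version   # expect "libprotoc 2.6.1" on 14.04/CentOS, the distro version otherwise
```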
@@ -1,14 +0,0 @@
apt-get update
apt-get install -y sudo wget libboost-dev libboost-test-dev libboost-program-options-dev libboost-filesystem-dev libboost-thread-dev libevent-dev automake libtool flex bison pkg-config g++ libssl-dev
wget https://www-us.apache.org/dist/thrift/0.12.0/thrift-0.12.0.tar.gz
tar -xvf thrift-0.12.0.tar.gz
cd thrift-0.12.0
for file in ./compiler/cpp/Makefile*; do
  sed -i 's/\-Werror//' $file
done
./bootstrap.sh
./configure --without-php --without-java --without-python --without-nodejs --without-go --without-ruby
sudo make
sudo make install
cd ..
rm thrift-0.12.0.tar.gz
@@ -1,97 +0,0 @@
#!/bin/bash

set -ex

as_jenkins() {
  # NB: Preserve PATH and LD_LIBRARY_PATH changes
  sudo -H -u jenkins env "PATH=$PATH" "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" $*
}

if [ -n "$TRAVIS_PYTHON_VERSION" ]; then

  mkdir -p /opt/python
  chown jenkins:jenkins /opt/python

  # Download Python binary from Travis
  pushd tmp
  as_jenkins wget --quiet ${TRAVIS_DL_URL_PREFIX}/python-$TRAVIS_PYTHON_VERSION.tar.bz2
  # NB: The tarball also comes with /home/travis virtualenv that we
  # don't care about. (Maybe we should, but we've worked around the
  # "how do I install to python" issue by making this entire directory
  # user-writable "lol")
  # NB: Relative ordering of opt/python and flags matters
  as_jenkins tar xjf python-$TRAVIS_PYTHON_VERSION.tar.bz2 --strip-components=2 --directory /opt/python opt/python
  popd

  echo "/opt/python/$TRAVIS_PYTHON_VERSION/lib" > /etc/ld.so.conf.d/travis-python.conf
  ldconfig
  sed -e 's|PATH="\(.*\)"|PATH="/opt/python/'"$TRAVIS_PYTHON_VERSION"'/bin:\1"|g' -i /etc/environment
  export PATH="/opt/python/$TRAVIS_PYTHON_VERSION/bin:$PATH"

  python --version
  pip --version

  # Install pip from source.
  # The python-pip package on Ubuntu Trusty is old
  # and upon install numpy doesn't use the binary
  # distribution, and fails to compile it from source.
  pushd tmp
  as_jenkins curl -L -O https://pypi.python.org/packages/11/b6/abcb525026a4be042b486df43905d6893fb04f05aac21c32c638e939e447/pip-9.0.1.tar.gz
  as_jenkins tar zxf pip-9.0.1.tar.gz
  pushd pip-9.0.1
  as_jenkins python setup.py install
  popd
  rm -rf pip-9.0.1*
  popd

  # Install pip packages
  as_jenkins pip install --upgrade pip

  pip --version

  if [[ "$TRAVIS_PYTHON_VERSION" == nightly ]]; then
    # These two packages have broken Cythonizations uploaded
    # to PyPi, see:
    #
    # - https://github.com/numpy/numpy/issues/10500
    # - https://github.com/yaml/pyyaml/issues/117
    #
    # Furthermore, the released version of Cython does not
    # have these issues fixed.
    #
    # While we are waiting on fixes for these, we build
    # from Git for now. Feel free to delete this conditional
    # branch if things start working again (you may need
    # to do this if these packages regress on Git HEAD.)
    as_jenkins pip install git+https://github.com/cython/cython.git
    as_jenkins pip install git+https://github.com/numpy/numpy.git
    as_jenkins pip install git+https://github.com/yaml/pyyaml.git
  else
    as_jenkins pip install numpy pyyaml
  fi

  as_jenkins pip install \
    future \
    hypothesis \
    protobuf \
    pytest \
    pillow \
    typing

  as_jenkins pip install mkl mkl-devel

  # SciPy does not support Python 3.7 or Python 2.7.9
  if [[ "$TRAVIS_PYTHON_VERSION" != nightly ]] && [[ "$TRAVIS_PYTHON_VERSION" != "2.7.9" ]]; then
    as_jenkins pip install scipy==1.1.0 scikit-image librosa>=0.6.2
  fi

  # Install psutil for dataloader tests
  as_jenkins pip install psutil

  # Install dill for serialization tests
  as_jenkins pip install "dill>=0.3.1"

  # Cleanup package manager
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
fi
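A small follow-up check (illustrative, not in the diff) that the PATH edit and loader configuration above took effect inside the image:

```
which python pip
python -c "import sys; print(sys.version)"
cat /etc/ld.so.conf.d/travis-python.conf
```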
@@ -1,20 +0,0 @@
#!/bin/bash

set -ex

# Mirror jenkins user in container
echo "jenkins:x:1014:1014::/var/lib/jenkins:" >> /etc/passwd
echo "jenkins:x:1014:" >> /etc/group

# Create $HOME
mkdir -p /var/lib/jenkins
chown jenkins:jenkins /var/lib/jenkins
mkdir -p /var/lib/jenkins/.ccache
chown jenkins:jenkins /var/lib/jenkins/.ccache

# Allow writing to /usr/local (for make install)
chown jenkins:jenkins /usr/local

# Allow sudo
# TODO: Maybe we shouldn't
echo 'jenkins ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/jenkins
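A quick sketch (not part of the diff) of how to confirm the mirrored user and the /usr/local permissions behave as intended inside the resulting image:

```
id jenkins                                   # expect uid=1014 gid=1014
sudo -u jenkins touch /usr/local/.write_test && rm /usr/local/.write_test
```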
@@ -1,57 +0,0 @@
#!/bin/bash

set -ex

# This function installs protobuf 2.6
install_protobuf_26() {
  pb_dir="/usr/temp_pb_install_dir"
  mkdir -p $pb_dir

  # On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or
  # else it will fail with
  # g++: error: ./../lib64/crti.o: No such file or directory
  ln -s /usr/lib64 "$pb_dir/lib64"

  curl -LO "https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz"
  tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-2.6.1.tar.gz
  pushd "$pb_dir" && ./configure && make && make check && sudo make install && sudo ldconfig
  popd
  rm -rf $pb_dir
}

install_ubuntu() {
  apt-get update
  apt-get install -y --no-install-recommends \
    libopencv-dev \
    libavcodec-dev

  # Cleanup
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

install_centos() {
  # Need EPEL for many packages we depend on.
  # See http://fedoraproject.org/wiki/EPEL
  yum --enablerepo=extras install -y epel-release

  yum install -y \
    opencv-devel \
    ffmpeg-devel

  # Cleanup
  yum clean all
  rm -rf /var/cache/yum
  rm -rf /var/lib/yum/yumdb
  rm -rf /var/lib/yum/history
}

# Install base packages depending on the base OS
if [ -f /etc/lsb-release ]; then
  install_ubuntu
elif [ -f /etc/os-release ]; then
  install_centos
else
  echo "Unable to determine OS..."
  exit 1
fi
File diff suppressed because it is too large
@@ -1,85 +0,0 @@
ARG UBUNTU_VERSION
ARG CUDA_VERSION
ARG CUDNN_VERSION

FROM nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}

ARG UBUNTU_VERSION
ARG CUDA_VERSION
ARG CUDNN_VERSION

ENV DEBIAN_FRONTEND noninteractive

# Install common dependencies (so that this step can be cached separately)
ARG EC2
ADD ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Install user
ADD ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install katex
ARG KATEX
ADD ./common/install_katex.sh install_katex.sh
RUN bash ./install_katex.sh && rm install_katex.sh

# Install conda
ENV PATH /opt/conda/bin:$PATH
ARG ANACONDA_PYTHON_VERSION
ADD ./common/install_conda.sh install_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh

# Install gcc
ARG GCC_VERSION
ADD ./common/install_gcc.sh install_gcc.sh
RUN bash ./install_gcc.sh && rm install_gcc.sh

# Install non-standard Python versions (via Travis binaries)
ARG TRAVIS_PYTHON_VERSION
ENV PATH /opt/python/$TRAVIS_PYTHON_VERSION/bin:$PATH
ADD ./common/install_travis_python.sh install_travis_python.sh
RUN bash ./install_travis_python.sh && rm install_travis_python.sh

# (optional) Install protobuf for ONNX
ARG PROTOBUF
ADD ./common/install_protobuf.sh install_protobuf.sh
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
RUN rm install_protobuf.sh
ENV INSTALLED_PROTOBUF ${PROTOBUF}

# (optional) Install database packages like LMDB and LevelDB
ARG DB
ADD ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
ADD ./common/install_vision.sh install_vision.sh
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh
ENV INSTALLED_VISION ${VISION}

# Install ccache/sccache (do this last, so we get priority in PATH)
ADD ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
RUN bash ./install_cache.sh && rm install_cache.sh
ENV CUDA_NVCC_EXECUTABLE=/opt/cache/lib/nvcc

# Add jni.h for java host build
ADD ./common/install_jni.sh install_jni.sh
ADD ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh

# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

# AWS specific CUDA build guidance
ENV TORCH_CUDA_ARCH_LIST Maxwell
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"

USER jenkins
CMD ["bash"]
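The ARG/ADD/RUN pattern above is driven entirely by build arguments. A hedged example of how an image of this shape might be built (the tag and all argument values are illustrative, and it assumes you run it from the build context that contains the ./common and ./java files the Dockerfile ADDs):

```
docker build \
  --build-arg UBUNTU_VERSION=16.04 \
  --build-arg CUDA_VERSION=10.0 \
  --build-arg CUDNN_VERSION=7 \
  --build-arg ANACONDA_PYTHON_VERSION=3.6 \
  --build-arg GCC_VERSION=7 \
  --build-arg PROTOBUF=yes \
  -t local/pytorch-ci-cuda100 .
```

Leaving an optional ARG unset simply skips the corresponding install script, which is what the `RUN if [ -n "${...}" ]` guards implement.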
@@ -1,115 +0,0 @@
ARG UBUNTU_VERSION

FROM ubuntu:${UBUNTU_VERSION}

ARG UBUNTU_VERSION

ENV DEBIAN_FRONTEND noninteractive

# Install common dependencies (so that this step can be cached separately)
ARG EC2
ADD ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Install clang
ARG LLVMDEV
ARG CLANG_VERSION
ADD ./common/install_clang.sh install_clang.sh
RUN bash ./install_clang.sh && rm install_clang.sh

# (optional) Install thrift.
ARG THRIFT
ADD ./common/install_thrift.sh install_thrift.sh
RUN if [ -n "${THRIFT}" ]; then bash ./install_thrift.sh; fi
RUN rm install_thrift.sh
ENV INSTALLED_THRIFT ${THRIFT}

# Install user
ADD ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install katex
ARG KATEX
ADD ./common/install_katex.sh install_katex.sh
RUN bash ./install_katex.sh && rm install_katex.sh

# Install conda
ENV PATH /opt/conda/bin:$PATH
ARG ANACONDA_PYTHON_VERSION
ADD ./common/install_conda.sh install_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh

# Install gcc
ARG GCC_VERSION
ADD ./common/install_gcc.sh install_gcc.sh
RUN bash ./install_gcc.sh && rm install_gcc.sh

# Install non-standard Python versions (via Travis binaries)
ARG TRAVIS_PYTHON_VERSION
ARG TRAVIS_DL_URL_PREFIX
ENV PATH /opt/python/$TRAVIS_PYTHON_VERSION/bin:$PATH
ADD ./common/install_travis_python.sh install_travis_python.sh
RUN bash ./install_travis_python.sh && rm install_travis_python.sh

# (optional) Install protobuf for ONNX
ARG PROTOBUF
ADD ./common/install_protobuf.sh install_protobuf.sh
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
RUN rm install_protobuf.sh
ENV INSTALLED_PROTOBUF ${PROTOBUF}

# (optional) Install database packages like LMDB and LevelDB
ARG DB
ADD ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
ADD ./common/install_vision.sh install_vision.sh
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh
ENV INSTALLED_VISION ${VISION}

# (optional) Install Android NDK
ARG ANDROID
ARG ANDROID_NDK
ARG GRADLE_VERSION
ADD ./common/install_android.sh install_android.sh
ADD ./android/AndroidManifest.xml AndroidManifest.xml
ADD ./android/build.gradle build.gradle
RUN if [ -n "${ANDROID}" ]; then bash ./install_android.sh; fi
RUN rm install_android.sh
RUN rm AndroidManifest.xml
RUN rm build.gradle
ENV INSTALLED_ANDROID ${ANDROID}

# (optional) Install non-default CMake version
ARG CMAKE_VERSION
ADD ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh

# (optional) Install non-default Ninja version
ARG NINJA_VERSION
ADD ./common/install_ninja.sh install_ninja.sh
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
RUN rm install_ninja.sh

# Install ccache/sccache (do this last, so we get priority in PATH)
ADD ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
RUN bash ./install_cache.sh && rm install_cache.sh

# Add jni.h for java host build
ADD ./common/install_jni.sh install_jni.sh
ADD ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh

# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

USER jenkins
CMD ["bash"]
@@ -1,13 +0,0 @@
FROM ubuntu:16.04

RUN apt-get update && apt-get install -y python-pip && rm -rf /var/lib/apt/lists/* /var/log/dpkg.log

ADD requirements.txt /requirements.txt

RUN pip install -r /requirements.txt

ADD gc.py /usr/bin/gc.py

ADD docker_hub.py /usr/bin/docker_hub.py

ENTRYPOINT ["/usr/bin/gc.py"]
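Since the entrypoint is gc.py, running this image is equivalent to invoking that script. A hypothetical invocation (the image name and tag value are assumptions) using the flags gc.py defines further down:

```
# Needs AWS credentials for ECR available in the environment.
docker run --rm local/ecr-gc \
  --filter-prefix pytorch \
  --ignore-tags "current-docker-version-tag" \
  --dry-run
```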
@ -1,125 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
import boto3
|
||||
import requests
|
||||
import os
|
||||
|
||||
|
||||
IMAGE_INFO = namedtuple(
|
||||
"IMAGE_INFO", ("repo", "tag", "size", "last_updated_at", "last_updated_by")
|
||||
)
|
||||
|
||||
|
||||
def build_access_token(username, password):
|
||||
r = requests.post(
|
||||
"https://hub.docker.com/v2/users/login/",
|
||||
data={"username": username, "password": password},
|
||||
)
|
||||
r.raise_for_status()
|
||||
token = r.json().get("token")
|
||||
return {"Authorization": "JWT " + token}
|
||||
|
||||
|
||||
def list_repos(user, token):
|
||||
r = requests.get("https://hub.docker.com/v2/repositories/" + user, headers=token)
|
||||
r.raise_for_status()
|
||||
ret = sorted(
|
||||
repo["user"] + "/" + repo["name"] for repo in r.json().get("results", [])
|
||||
)
|
||||
if ret:
|
||||
print("repos found:")
|
||||
print("".join("\n\t" + r for r in ret))
|
||||
return ret
|
||||
|
||||
|
||||
def list_tags(repo, token):
|
||||
r = requests.get(
|
||||
"https://hub.docker.com/v2/repositories/" + repo + "/tags", headers=token
|
||||
)
|
||||
r.raise_for_status()
|
||||
return [
|
||||
IMAGE_INFO(
|
||||
repo=repo,
|
||||
tag=t["name"],
|
||||
size=t["full_size"],
|
||||
last_updated_at=t["last_updated"],
|
||||
last_updated_by=t["last_updater_username"],
|
||||
)
|
||||
for t in r.json().get("results", [])
|
||||
]
|
||||
|
||||
|
||||
def save_to_s3(tags):
|
||||
table_content = ""
|
||||
client = boto3.client("s3")
|
||||
for t in tags:
|
||||
table_content += (
|
||||
"<tr><td>{repo}</td><td>{tag}</td><td>{size}</td>"
|
||||
"<td>{last_updated_at}</td><td>{last_updated_by}</td></tr>"
|
||||
).format(
|
||||
repo=t.repo,
|
||||
tag=t.tag,
|
||||
size=t.size,
|
||||
last_updated_at=t.last_updated_at,
|
||||
last_updated_by=t.last_updated_by,
|
||||
)
|
||||
html_body = """
|
||||
<html>
|
||||
<head>
|
||||
<link rel="stylesheet"
|
||||
href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
|
||||
integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh"
|
||||
crossorigin="anonymous">
|
||||
<link rel="stylesheet" type="text/css"
|
||||
href="https://cdn.datatables.net/1.10.20/css/jquery.dataTables.css">
|
||||
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js">
|
||||
</script>
|
||||
<script type="text/javascript" charset="utf8"
|
||||
src="https://cdn.datatables.net/1.10.20/js/jquery.dataTables.js"></script>
|
||||
<title> docker image info</title>
|
||||
</head>
|
||||
<body>
|
||||
<table class="table table-striped table-hover" id="docker">
|
||||
<caption>Docker images on docker hub</caption>
|
||||
<thead class="thead-dark">
|
||||
<tr>
|
||||
<th scope="col">repo</th>
|
||||
<th scope="col">tag</th>
|
||||
<th scope="col">size</th>
|
||||
<th scope="col">last_updated_at</th>
|
||||
<th scope="col">last_updated_by</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{table_content}
|
||||
</tbody>
|
||||
</table>
|
||||
</body>
|
||||
<script>
|
||||
$(document).ready( function () {{
|
||||
$('#docker').DataTable({{paging: false}});
|
||||
}} );
|
||||
</script>
|
||||
</html>
|
||||
""".format(
|
||||
table_content=table_content
|
||||
)
|
||||
client.put_object(
|
||||
Bucket="docker.pytorch.org",
|
||||
ACL="public-read",
|
||||
Key="docker_hub.html",
|
||||
Body=html_body,
|
||||
ContentType="text/html",
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
username = os.environ.get("DOCKER_HUB_USERNAME")
|
||||
password = os.environ.get("DOCKER_HUB_PASSWORD")
|
||||
token = build_access_token(username, password)
|
||||
tags = []
|
||||
for repo in list_repos("pytorch", token):
|
||||
tags.extend(list_tags(repo, token))
|
||||
save_to_s3(tags)
|
||||
@ -1,202 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import argparse
|
||||
import datetime
|
||||
import boto3
|
||||
import pytz
|
||||
import sys
|
||||
|
||||
|
||||
def save_to_s3(project, data):
|
||||
table_content = ""
|
||||
client = boto3.client("s3")
|
||||
for repo, tag, window, age, pushed in data:
|
||||
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
|
||||
repo=repo, tag=tag, window=window, age=age, pushed=pushed
|
||||
)
|
||||
html_body = """
|
||||
<html>
|
||||
<head>
|
||||
<link rel="stylesheet"
|
||||
href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
|
||||
integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh"
|
||||
crossorigin="anonymous">
|
||||
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.20/css/jquery.dataTables.css">
|
||||
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
|
||||
<script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/1.10.20/js/jquery.dataTables.js"></script>
|
||||
<title>{project} nightly and permanent docker image info</title>
|
||||
</head>
|
||||
<body>
|
||||
<table class="table table-striped table-hover" id="docker">
|
||||
<thead class="thead-dark">
|
||||
<tr>
|
||||
<th scope="col">repo</th>
|
||||
<th scope="col">tag</th>
|
||||
<th scope="col">keep window</th>
|
||||
<th scope="col">age</th>
|
||||
<th scope="col">pushed at</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{table_content}
|
||||
</tbody>
|
||||
</table>
|
||||
</body>
|
||||
<script>
|
||||
$(document).ready( function () {{
|
||||
$('#docker').DataTable({{paging: false}});
|
||||
}} );
|
||||
</script>
|
||||
</html>
|
||||
""".format(
|
||||
project=project, table_content=table_content
|
||||
)
|
||||
|
||||
# for pytorch, file can be found at
|
||||
# http://ossci-docker.s3-website.us-east-1.amazonaws.com/pytorch.html
|
||||
# and later one we can config docker.pytorch.org to point to the location
|
||||
|
||||
client.put_object(
|
||||
Bucket="docker.pytorch.org",
|
||||
ACL="public-read",
|
||||
Key="{project}.html".format(project=project),
|
||||
Body=html_body,
|
||||
ContentType="text/html",
|
||||
)
|
||||
|
||||
|
||||
def repos(client):
|
||||
paginator = client.get_paginator("describe_repositories")
|
||||
pages = paginator.paginate(registryId="308535385114")
|
||||
for page in pages:
|
||||
for repo in page["repositories"]:
|
||||
yield repo
|
||||
|
||||
|
||||
def images(client, repository):
|
||||
paginator = client.get_paginator("describe_images")
|
||||
pages = paginator.paginate(
|
||||
registryId="308535385114", repositoryName=repository["repositoryName"]
|
||||
)
|
||||
for page in pages:
|
||||
for image in page["imageDetails"]:
|
||||
yield image
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(description="Delete old Docker tags from registry")
|
||||
parser.add_argument(
|
||||
"--dry-run", action="store_true", help="Dry run; print tags that would be deleted"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--keep-stable-days",
|
||||
type=int,
|
||||
default=14,
|
||||
help="Days of stable Docker tags to keep (non per-build images)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--keep-unstable-days",
|
||||
type=int,
|
||||
default=1,
|
||||
help="Days of unstable Docker tags to keep (per-build images)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--filter-prefix",
|
||||
type=str,
|
||||
default="",
|
||||
help="Only run cleanup for repositories with this prefix",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--ignore-tags",
|
||||
type=str,
|
||||
default="",
|
||||
help="Never cleanup these tags (comma separated)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if not args.ignore_tags or not args.filter_prefix:
|
||||
print(
|
||||
"""
|
||||
Missing required arguments --ignore-tags and --filter-prefix
|
||||
|
||||
You must specify --ignore-tags and --filter-prefix to avoid accidentally
|
||||
pruning a stable Docker tag which is being actively used. This will
|
||||
make you VERY SAD. So pay attention.
|
||||
|
||||
First, which filter-prefix do you want? The list of valid prefixes
|
||||
is in jobs/private.groovy under the 'docker-registry-cleanup' job.
|
||||
You probably want either pytorch or caffe2.
|
||||
|
||||
Second, which ignore-tags do you want? It should be whatever the most
|
||||
up-to-date DockerVersion for the repository in question is. Follow
|
||||
the imports of jobs/pytorch.groovy to find them.
|
||||
"""
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
client = boto3.client("ecr", region_name="us-east-1")
|
||||
stable_window = datetime.timedelta(days=args.keep_stable_days)
|
||||
unstable_window = datetime.timedelta(days=args.keep_unstable_days)
|
||||
now = datetime.datetime.now(pytz.UTC)
|
||||
ignore_tags = args.ignore_tags.split(",")
|
||||
|
||||
|
||||
def chunks(chunkable, n):
|
||||
""" Yield successive n-sized chunks from l.
|
||||
"""
|
||||
for i in range(0, len(chunkable), n):
|
||||
yield chunkable[i : i + n]
|
||||
|
||||
|
||||
stable_window_tags = []
|
||||
for repo in repos(client):
|
||||
repositoryName = repo["repositoryName"]
|
||||
if not repositoryName.startswith(args.filter_prefix):
|
||||
continue
|
||||
|
||||
# Keep list of image digests to delete for this repository
|
||||
digest_to_delete = []
|
||||
print(repositoryName)
|
||||
|
||||
for image in images(client, repo):
|
||||
tags = image.get("imageTags")
|
||||
if not isinstance(tags, (list,)) or len(tags) == 0:
|
||||
continue
|
||||
|
||||
tag = tags[0]
|
||||
created = image["imagePushedAt"]
|
||||
age = now - created
|
||||
# new images built on CircleCI use the workflow ID as the tag, which has 4 "-"
|
||||
if tag.isdigit() or tag.count("-") == 4 or tag in ignore_tags:
|
||||
window = stable_window
|
||||
if tag in ignore_tags:
|
||||
stable_window_tags.append((repositoryName, tag, "", age, created))
|
||||
elif age < window:
|
||||
stable_window_tags.append((repositoryName, tag, window, age, created))
|
||||
else:
|
||||
window = unstable_window
|
||||
|
||||
if tag in ignore_tags:
|
||||
print("Ignoring tag {} (age: {})".format(tag, age))
|
||||
continue
|
||||
if age < window:
|
||||
print("Not deleting manifest for tag {} (age: {})".format(tag, age))
|
||||
continue
|
||||
|
||||
if args.dry_run:
|
||||
print("(dry run) Deleting manifest for tag {} (age: {})".format(tag, age))
|
||||
else:
|
||||
print("Deleting manifest for tag {} (age: {})".format(tag, age))
|
||||
digest_to_delete.append(image["imageDigest"])
|
||||
|
||||
# Issue batch delete for all images to delete for this repository
|
||||
# Note that as of 2018-07-25, the maximum number of images you can
|
||||
# delete in a single batch is 100, so chunk our list into batches of
|
||||
# 100
|
||||
for c in chunks(digest_to_delete, 100):
|
||||
client.batch_delete_image(
|
||||
registryId="308535385114",
|
||||
repositoryName=repositoryName,
|
||||
imageIds=[{"imageDigest": digest} for digest in c],
|
||||
)
|
||||
|
||||
save_to_s3(args.filter_prefix, stable_window_tags)
|
||||
@ -1,3 +0,0 @@
|
||||
boto3
|
||||
pytz
|
||||
requests
|
||||
@ -88,31 +88,24 @@ YAML_SOURCES = [
|
||||
File("job-specs-custom.yml"),
|
||||
File("binary_update_htmls.yml"),
|
||||
File("binary-build-tests.yml"),
|
||||
File("docker_jobs.yml"),
|
||||
File("workflows.yml"),
|
||||
|
||||
File("workflows-setup-job.yml"),
|
||||
File("windows-build-test.yml"),
|
||||
Listgen(pytorch_build_definitions.get_workflow_jobs, 3),
|
||||
File("workflows-pytorch-macos-builds.yml"),
|
||||
File("workflows-pytorch-android-gradle-build.yml"),
|
||||
File("workflows-pytorch-ios-builds.yml"),
|
||||
File("workflows-pytorch-mobile-builds.yml"),
|
||||
File("workflows-pytorch-ge-config-tests.yml"),
|
||||
Listgen(caffe2_build_definitions.get_workflow_jobs, 3),
|
||||
File("workflows-binary-builds-smoke-subset.yml"),
|
||||
Listgen(binary_build_definitions.get_binary_smoke_test_jobs, 3),
|
||||
Listgen(binary_build_definitions.get_binary_build_jobs, 3),
|
||||
Header("Daily smoke test trigger"),
|
||||
Treegen(binary_build_definitions.add_binary_smoke_test_jobs, 1),
|
||||
Header("Daily binary build trigger"),
|
||||
Treegen(binary_build_definitions.add_binary_build_jobs, 1),
|
||||
File("workflows-nightly-ios-binary-builds.yml"),
|
||||
File("workflows-nightly-android-binary-builds.yml"),
|
||||
|
||||
Header("Nightly tests"),
|
||||
Listgen(binary_build_definitions.get_nightly_tests, 3),
|
||||
File("workflows-nightly-uploads-header.yml"),
|
||||
Listgen(binary_build_definitions.get_nightly_uploads, 3),
|
||||
File("workflows-s3-html.yml"),
|
||||
File("workflows-docker-builder.yml"),
|
||||
File("workflows-ecr-gc.yml"),
|
||||
]
|
||||
|
||||
|
||||
|
||||
@ -1,20 +1,9 @@
|
||||
#!/bin/bash
|
||||
set -eux -o pipefail
|
||||
|
||||
retry () {
|
||||
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
|
||||
}
|
||||
|
||||
|
||||
# This step runs on multiple executors with different envfile locations
|
||||
if [[ "$(uname)" == Darwin ]]; then
|
||||
# macos executor (builds and tests)
|
||||
workdir="/Users/distiller/project"
|
||||
elif [[ "$OSTYPE" == "msys" ]]; then
|
||||
# windows executor (builds and tests)
|
||||
rm -rf /c/w
|
||||
ln -s "/c/Users/circleci/project" /c/w
|
||||
workdir="/c/w"
|
||||
elif [[ -d "/home/circleci/project" ]]; then
|
||||
# machine executor (binary tests)
|
||||
workdir="/home/circleci/project"
|
||||
@ -24,17 +13,11 @@ else
|
||||
fi
|
||||
|
||||
# It is very important that this stays in sync with binary_populate_env.sh
|
||||
if [[ "$OSTYPE" == "msys" ]]; then
|
||||
# We need to make the paths as short as possible on Windows
|
||||
export PYTORCH_ROOT="$workdir/p"
|
||||
export BUILDER_ROOT="$workdir/b"
|
||||
else
|
||||
export PYTORCH_ROOT="$workdir/pytorch"
|
||||
export BUILDER_ROOT="$workdir/builder"
|
||||
fi
|
||||
export PYTORCH_ROOT="$workdir/pytorch"
|
||||
export BUILDER_ROOT="$workdir/builder"
|
||||
|
||||
# Clone the Pytorch branch
|
||||
retry git clone https://github.com/pytorch/pytorch.git "$PYTORCH_ROOT"
|
||||
git clone https://github.com/pytorch/pytorch.git "$PYTORCH_ROOT"
|
||||
pushd "$PYTORCH_ROOT"
|
||||
if [[ -n "${CIRCLE_PR_NUMBER:-}" ]]; then
|
||||
# "smoke" binary build on PRs
|
||||
@ -50,13 +33,13 @@ else
|
||||
echo "Can't tell what to checkout"
|
||||
exit 1
|
||||
fi
|
||||
retry git submodule update --init --recursive
|
||||
git submodule update --init --recursive --quiet
|
||||
echo "Using Pytorch from "
|
||||
git --no-pager log --max-count 1
|
||||
popd
|
||||
|
||||
# Clone the Builder master repo
|
||||
retry git clone -q https://github.com/pytorch/builder.git "$BUILDER_ROOT"
|
||||
git clone -q https://github.com/pytorch/builder.git "$BUILDER_ROOT"
|
||||
pushd "$BUILDER_ROOT"
|
||||
echo "Using builder from "
|
||||
git --no-pager log --max-count 1
|
||||
|
||||
@ -31,9 +31,9 @@ fi
|
||||
|
||||
conda_sh="$workdir/install_miniconda.sh"
|
||||
if [[ "$(uname)" == Darwin ]]; then
|
||||
curl --retry 3 -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
retry curl -o "$conda_sh" https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
else
|
||||
curl --retry 3 -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
|
||||
retry curl -o "$conda_sh" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
|
||||
fi
|
||||
chmod +x "$conda_sh"
|
||||
"$conda_sh" -b -p "$MINICONDA_ROOT"
|
||||
|
||||
@ -1,28 +1,24 @@
|
||||
#!/bin/bash
|
||||
set -ex -o pipefail
|
||||
set -eux -o pipefail
|
||||
|
||||
echo ""
|
||||
echo "DIR: $(pwd)"
|
||||
echo "PWD: ${PWD}"
|
||||
WORKSPACE=/Users/distiller/workspace
|
||||
PROJ_ROOT=/Users/distiller/project
|
||||
export TCLLIBPATH="/usr/local/lib"
|
||||
|
||||
export TCLLIBPATH="/usr/local/lib"
|
||||
# Install conda
|
||||
curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x ~/conda.sh
|
||||
/bin/bash ~/conda.sh -b -p ~/anaconda
|
||||
curl -o ~/Downloads/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x ~/Downloads/conda.sh
|
||||
/bin/bash ~/Downloads/conda.sh -b -p ~/anaconda
|
||||
export PATH="~/anaconda/bin:${PATH}"
|
||||
source ~/anaconda/bin/activate
|
||||
|
||||
# Install dependencies
|
||||
conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing requests --yes
|
||||
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
|
||||
|
||||
# sync submodules
|
||||
cd ${PROJ_ROOT}
|
||||
git submodule sync
|
||||
git submodule update --init --recursive
|
||||
|
||||
# run build script
|
||||
chmod a+x ${PROJ_ROOT}/scripts/build_ios.sh
|
||||
echo "########################################################"
|
||||
@ -30,13 +26,13 @@ cat ${PROJ_ROOT}/scripts/build_ios.sh
|
||||
echo "########################################################"
|
||||
echo "IOS_ARCH: ${IOS_ARCH}"
|
||||
echo "IOS_PLATFORM: ${IOS_PLATFORM}"
|
||||
export BUILD_PYTORCH_MOBILE=1
|
||||
export IOS_ARCH=${IOS_ARCH}
|
||||
export IOS_PLATFORM=${IOS_PLATFORM}
|
||||
unbuffer ${PROJ_ROOT}/scripts/build_ios.sh 2>&1 | ts
|
||||
|
||||
#store the binary
|
||||
cd ${WORKSPACE}
|
||||
DEST_DIR=${WORKSPACE}/ios
|
||||
mkdir -p ${DEST_DIR}
|
||||
cp -R ${PROJ_ROOT}/build_ios/install ${DEST_DIR}
|
||||
mv ${DEST_DIR}/install ${DEST_DIR}/${IOS_ARCH}
|
||||
mv ${DEST_DIR}/install ${DEST_DIR}/${IOS_ARCH}
|
||||
@ -1,29 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -ex -o pipefail
|
||||
|
||||
echo ""
|
||||
echo "DIR: $(pwd)"
|
||||
PROJ_ROOT=/Users/distiller/project
|
||||
cd ${PROJ_ROOT}/ios/TestApp
|
||||
# install fastlane
|
||||
sudo gem install bundler && bundle install
|
||||
# install certificates
|
||||
echo "${IOS_CERT_KEY}" >> cert.txt
|
||||
base64 --decode cert.txt -o Certificates.p12
|
||||
rm cert.txt
|
||||
bundle exec fastlane install_cert
|
||||
# install the provisioning profile
|
||||
PROFILE=TestApp_CI.mobileprovision
|
||||
PROVISIONING_PROFILES=~/Library/MobileDevice/Provisioning\ Profiles
|
||||
mkdir -pv "${PROVISIONING_PROFILES}"
|
||||
cd "${PROVISIONING_PROFILES}"
|
||||
echo "${IOS_SIGN_KEY}" >> cert.txt
|
||||
base64 --decode cert.txt -o ${PROFILE}
|
||||
rm cert.txt
|
||||
# run the ruby build script
|
||||
if ! [ -x "$(command -v xcodebuild)" ]; then
|
||||
echo 'Error: xcodebuild is not installed.'
|
||||
exit 1
|
||||
fi
|
||||
PROFILE=TestApp_CI
|
||||
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM} -c ${PROFILE} -t ${IOS_DEV_TEAM_ID}
|
||||
@ -1,8 +1,8 @@
|
||||
#!/bin/bash
|
||||
set -ex -o pipefail
|
||||
set -eux -o pipefail
|
||||
|
||||
echo ""
|
||||
echo "DIR: $(pwd)"
|
||||
echo "PWD: $(pwd)"
|
||||
WORKSPACE=/Users/distiller/workspace
|
||||
PROJ_ROOT=/Users/distiller/project
|
||||
ARTIFACTS_DIR=${WORKSPACE}/ios
|
||||
@ -14,13 +14,11 @@ mkdir -p ${ZIP_DIR}/src
|
||||
cp -R ${ARTIFACTS_DIR}/arm64/include ${ZIP_DIR}/install/
|
||||
# build a FAT binary
|
||||
cd ${ZIP_DIR}/install/lib
|
||||
target_libs=(libc10.a libclog.a libcpuinfo.a libeigen_blas.a libpytorch_qnnpack.a libtorch_cpu.a libtorch.a libXNNPACK.a)
|
||||
target_libs=(libc10.a libclog.a libcpuinfo.a libeigen_blas.a libpytorch_qnnpack.a libtorch.a)
|
||||
for lib in ${target_libs[*]}
|
||||
do
|
||||
if [ -f "${ARTIFACTS_DIR}/x86_64/lib/${lib}" ] && [ -f "${ARTIFACTS_DIR}/arm64/lib/${lib}" ]; then
|
||||
libs=("${ARTIFACTS_DIR}/x86_64/lib/${lib}" "${ARTIFACTS_DIR}/arm64/lib/${lib}")
|
||||
lipo -create "${libs[@]}" -o ${ZIP_DIR}/install/lib/${lib}
|
||||
fi
|
||||
libs=(${ARTIFACTS_DIR}/x86_64/lib/${lib} ${ARTIFACTS_DIR}/arm64/lib/${lib})
|
||||
lipo -create "${libs[@]}" -o ${ZIP_DIR}/install/lib/${lib}
|
||||
done
|
||||
# for nnpack, we only support arm64 build
|
||||
cp ${ARTIFACTS_DIR}/arm64/lib/libnnpack.a ./
|
||||
|
||||
@ -13,13 +13,7 @@ elif [[ "$DESIRED_PYTHON" == 2.7mu ]]; then
|
||||
export PATH="/opt/python/cp27-cp27mu/bin:\$PATH"
|
||||
elif [[ "$PACKAGE_TYPE" != libtorch ]]; then
|
||||
python_nodot="\$(echo $DESIRED_PYTHON | tr -d m.u)"
|
||||
python_path="/opt/python/cp\$python_nodot-cp\${python_nodot}"
|
||||
# Prior to Python 3.8 paths were suffixed with an 'm'
|
||||
if [[ -d "\${python_path}/bin" ]]; then
|
||||
export PATH="\${python_path}/bin:\$PATH"
|
||||
elif [[ -d "\${python_path}m/bin" ]]; then
|
||||
export PATH="\${python_path}m/bin:\$PATH"
|
||||
fi
|
||||
export PATH="/opt/python/cp\$python_nodot-cp\${python_nodot}m/bin:\$PATH"
|
||||
fi
|
||||
|
||||
# Install the package
|
||||
@ -32,11 +26,11 @@ pkg="/final_pkgs/\$(ls /final_pkgs)"
|
||||
if [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
conda install -y "\$pkg" --offline
|
||||
if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
|
||||
retry conda install -y cpuonly -c pytorch
|
||||
conda install -y cpuonly -c pytorch
|
||||
fi
|
||||
retry conda install -yq future numpy protobuf six
|
||||
if [[ "$DESIRED_CUDA" != 'cpu' ]]; then
|
||||
# DESIRED_CUDA is in format cu90 or cu102
|
||||
# DESIRED_CUDA is in format cu90 or cu100
|
||||
if [[ "${#DESIRED_CUDA}" == 4 ]]; then
|
||||
cu_ver="${DESIRED_CUDA:2:1}.${DESIRED_CUDA:3}"
|
||||
else
|
||||
|
||||
@ -5,6 +5,15 @@ set -eu -o pipefail
|
||||
set +x
|
||||
declare -x "AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}"
|
||||
declare -x "AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
|
||||
cat >/home/circleci/project/login_to_anaconda.sh <<EOL
|
||||
set +x
|
||||
echo "Trying to login to Anaconda"
|
||||
yes | anaconda login \
|
||||
--username "$PYTORCH_BINARY_PJH5_CONDA_USERNAME" \
|
||||
--password "$PYTORCH_BINARY_PJH5_CONDA_PASSWORD"
|
||||
set -x
|
||||
EOL
|
||||
chmod +x /home/circleci/project/login_to_anaconda.sh
|
||||
|
||||
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
|
||||
# DO NOT TURN -x ON BEFORE THIS LINE
|
||||
@ -12,18 +21,12 @@ declare -x "AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
|
||||
set -eux -o pipefail
|
||||
export PATH="$MINICONDA_ROOT/bin:$PATH"
|
||||
|
||||
# This gets set in binary_populate_env.sh, but lets have a sane default just in case
|
||||
PIP_UPLOAD_FOLDER=${PIP_UPLOAD_FOLDER:-nightly}
|
||||
# TODO: Combine CONDA_UPLOAD_CHANNEL and PIP_UPLOAD_FOLDER into one variable
|
||||
# The only difference is the trailing slash
|
||||
# Strip trailing slashes if there
|
||||
CONDA_UPLOAD_CHANNEL=$(echo "${PIP_UPLOAD_FOLDER}" | sed 's:/*$::')
|
||||
|
||||
# Upload the package to the final location
|
||||
pushd /home/circleci/project/final_pkgs
|
||||
if [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
retry conda install -yq anaconda-client
|
||||
anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload "$(ls)" -u "pytorch-${CONDA_UPLOAD_CHANNEL}" --label main --no-progress --force
|
||||
retry timeout 30 /home/circleci/project/login_to_anaconda.sh
|
||||
anaconda upload "$(ls)" -u pytorch-nightly --label main --no-progress --force
|
||||
elif [[ "$PACKAGE_TYPE" == libtorch ]]; then
|
||||
retry pip install -q awscli
|
||||
s3_dir="s3://pytorch/libtorch/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"
|
||||
|
||||
@ -5,30 +5,26 @@ source "/Users/distiller/project/env"
|
||||
export "PATH=$workdir/miniconda/bin:$PATH"
|
||||
pkg="$workdir/final_pkgs/$(ls $workdir/final_pkgs)"
|
||||
|
||||
# Don't test libtorch
|
||||
# TODO we should test libtorch
|
||||
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Create a new test env
|
||||
# TODO cut all this out into a separate test job and have an entirely different
|
||||
# miniconda
|
||||
if [[ "$PACKAGE_TYPE" != libtorch ]]; then
|
||||
source deactivate || true
|
||||
conda create -qyn test python="$DESIRED_PYTHON"
|
||||
source activate test >/dev/null
|
||||
fi
|
||||
source deactivate || true
|
||||
conda create -qyn test python="$DESIRED_PYTHON"
|
||||
source activate test >/dev/null
|
||||
|
||||
# Install the package
|
||||
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
|
||||
pkg="$(ls $workdir/final_pkgs/*-latest.zip)"
|
||||
unzip "$pkg" -d /tmp
|
||||
cd /tmp/libtorch
|
||||
elif [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
if [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
conda install -y "$pkg" --offline
|
||||
else
|
||||
pip install "$pkg" --no-index --no-dependencies -v
|
||||
fi
|
||||
|
||||
# Test
|
||||
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
|
||||
$workdir/builder/check_binary.sh
|
||||
else
|
||||
pushd "$workdir/pytorch"
|
||||
$workdir/builder/run_tests.sh "$PACKAGE_TYPE" "$DESIRED_PYTHON" "$DESIRED_CUDA"
|
||||
fi
|
||||
pushd "$workdir/pytorch"
|
||||
$workdir/builder/run_tests.sh "$PACKAGE_TYPE" "$DESIRED_PYTHON" "$DESIRED_CUDA"
|
||||
|
||||
@ -4,6 +4,15 @@ set -eu -o pipefail
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}"
|
||||
export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
|
||||
cat >/Users/distiller/project/login_to_anaconda.sh <<EOL
|
||||
set +x
|
||||
echo "Trying to login to Anaconda"
|
||||
yes | anaconda login \
|
||||
--username "$PYTORCH_BINARY_PJH5_CONDA_USERNAME" \
|
||||
--password "$PYTORCH_BINARY_PJH5_CONDA_PASSWORD"
|
||||
set -x
|
||||
EOL
|
||||
chmod +x /Users/distiller/project/login_to_anaconda.sh
|
||||
|
||||
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
|
||||
# DO NOT TURN -x ON BEFORE THIS LINE
|
||||
@ -13,17 +22,11 @@ set -eux -o pipefail
|
||||
source "/Users/distiller/project/env"
|
||||
export "PATH=$workdir/miniconda/bin:$PATH"
|
||||
|
||||
# This gets set in binary_populate_env.sh, but lets have a sane default just in case
|
||||
PIP_UPLOAD_FOLDER=${PIP_UPLOAD_FOLDER:-nightly}
|
||||
# TODO: Combine CONDA_UPLOAD_CHANNEL and PIP_UPLOAD_FOLDER into one variable
|
||||
# The only difference is the trailing slash
|
||||
# Strip trailing slashes if there
|
||||
CONDA_UPLOAD_CHANNEL=$(echo "${PIP_UPLOAD_FOLDER}" | sed 's:/*$::')
|
||||
|
||||
pushd "$workdir/final_pkgs"
|
||||
if [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
retry conda install -yq anaconda-client
|
||||
retry anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload "$(ls)" -u "pytorch-${CONDA_UPLOAD_CHANNEL}" --label main --no-progress --force
|
||||
retry /Users/distiller/project/login_to_anaconda.sh
|
||||
retry anaconda upload "$(ls)" -u pytorch-nightly --label main --no-progress --force
|
||||
elif [[ "$PACKAGE_TYPE" == libtorch ]]; then
|
||||
retry pip install -q awscli
|
||||
s3_dir="s3://pytorch/libtorch/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"
|
||||
|
||||
@ -2,31 +2,11 @@
|
||||
set -eux -o pipefail
|
||||
export TZ=UTC
|
||||
|
||||
tagged_version() {
|
||||
# Grabs version from either the env variable CIRCLE_TAG
|
||||
# or the pytorch git described version
|
||||
if [[ "$OSTYPE" == "msys" ]]; then
|
||||
GIT_DESCRIBE="git --git-dir ${workdir}/p/.git describe"
|
||||
else
|
||||
GIT_DESCRIBE="git --git-dir ${workdir}/pytorch/.git describe"
|
||||
fi
|
||||
if [[ -n "${CIRCLE_TAG:-}" ]]; then
|
||||
echo "${CIRCLE_TAG}"
|
||||
elif ${GIT_DESCRIBE} --exact --tags >/dev/null; then
|
||||
${GIT_DESCRIBE} --tags
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# We need to write an envfile to persist these variables to following
|
||||
# steps, but the location of the envfile depends on the circleci executor
|
||||
if [[ "$(uname)" == Darwin ]]; then
|
||||
# macos executor (builds and tests)
|
||||
workdir="/Users/distiller/project"
|
||||
elif [[ "$OSTYPE" == "msys" ]]; then
|
||||
# windows executor (builds and tests)
|
||||
workdir="/c/w"
|
||||
elif [[ -d "/home/circleci/project" ]]; then
|
||||
# machine executor (binary tests)
|
||||
workdir="/home/circleci/project"
|
||||
@ -43,15 +23,7 @@ configs=($BUILD_ENVIRONMENT)
|
||||
export PACKAGE_TYPE="${configs[0]}"
|
||||
export DESIRED_PYTHON="${configs[1]}"
|
||||
export DESIRED_CUDA="${configs[2]}"
|
||||
if [[ "${BUILD_FOR_SYSTEM:-}" == "windows" ]]; then
|
||||
export DESIRED_DEVTOOLSET=""
|
||||
export LIBTORCH_CONFIG="${configs[3]:-}"
|
||||
if [[ "$LIBTORCH_CONFIG" == 'debug' ]]; then
|
||||
export DEBUG=1
|
||||
fi
|
||||
else
|
||||
export DESIRED_DEVTOOLSET="${configs[3]:-}"
|
||||
fi
|
||||
export DESIRED_DEVTOOLSET="${configs[3]:-}"
|
||||
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
|
||||
export BUILD_PYTHONLESS=1
|
||||
fi
|
||||
@ -60,59 +32,36 @@ fi
|
||||
export DOCKER_IMAGE=${DOCKER_IMAGE:-}
|
||||
if [[ -z "$DOCKER_IMAGE" ]]; then
|
||||
if [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
export DOCKER_IMAGE="pytorch/conda-cuda"
|
||||
export DOCKER_IMAGE="soumith/conda-cuda"
|
||||
elif [[ "$DESIRED_CUDA" == cpu ]]; then
|
||||
export DOCKER_IMAGE="pytorch/manylinux-cuda100"
|
||||
export DOCKER_IMAGE="soumith/manylinux-cuda100"
|
||||
else
|
||||
export DOCKER_IMAGE="pytorch/manylinux-cuda${DESIRED_CUDA:2}"
|
||||
export DOCKER_IMAGE="soumith/manylinux-cuda${DESIRED_CUDA:2}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Default to nightly, since that's where this normally uploads to
|
||||
PIP_UPLOAD_FOLDER='nightly/'
|
||||
# Upload to parallel folder for devtoolsets
|
||||
# All nightlies used to be devtoolset3, then devtoolset7 was added as a build
|
||||
# option, so the upload was redirected to nightly/devtoolset7 to avoid
|
||||
# conflicts with other binaries (there shouldn't be any conflicts). Now we are
|
||||
# making devtoolset7 the default.
|
||||
if [[ "$DESIRED_DEVTOOLSET" == 'devtoolset7' || "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* || "$(uname)" == 'Darwin' ]]; then
|
||||
export PIP_UPLOAD_FOLDER='nightly/'
|
||||
else
|
||||
# On linux machines, this shouldn't actually be called anymore. This is just
|
||||
# here for extra safety.
|
||||
export PIP_UPLOAD_FOLDER='nightly/devtoolset3/'
|
||||
fi
|
||||
|
||||
# We put this here so that OVERRIDE_PACKAGE_VERSION below can read from it
|
||||
export DATE="$(date -u +%Y%m%d)"
|
||||
#TODO: We should be pulling semver version from the base version.txt
|
||||
BASE_BUILD_VERSION="1.5.0.dev$DATE"
|
||||
# Change BASE_BUILD_VERSION to git tag when on a git tag
|
||||
# Use 'git -C' to make doubly sure we're in the correct directory for checking
|
||||
# the git tag
|
||||
if tagged_version >/dev/null; then
|
||||
# Switch upload folder to 'test/' if we are on a tag
|
||||
PIP_UPLOAD_FOLDER='test/'
|
||||
# Grab git tag, remove prefixed v and remove everything after -
|
||||
# Used to clean up tags that are for release candidates like v1.5.0-rc1
|
||||
# Turns tag v1.5.0-rc1 -> v1.5.0
|
||||
BASE_BUILD_VERSION="$(tagged_version | sed -e 's/^v//' -e 's/-.*$//')"
|
||||
fi
|
||||
if [[ "$(uname)" == 'Darwin' ]] || [[ "$DESIRED_CUDA" == "cu102" ]] || [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
export PYTORCH_BUILD_VERSION="${BASE_BUILD_VERSION}"
|
||||
if [[ "$(uname)" == 'Darwin' ]] || [[ "$DESIRED_CUDA" == "cu100" ]] || [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
export PYTORCH_BUILD_VERSION="1.3.0.dev$DATE"
|
||||
else
|
||||
export PYTORCH_BUILD_VERSION="${BASE_BUILD_VERSION}+$DESIRED_CUDA"
|
||||
export PYTORCH_BUILD_VERSION="1.3.0.dev$DATE+$DESIRED_CUDA"
|
||||
fi
|
||||
export PYTORCH_BUILD_NUMBER=1
|
||||
|
||||
|
||||
JAVA_HOME=
|
||||
BUILD_JNI=OFF
|
||||
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
|
||||
POSSIBLE_JAVA_HOMES=()
|
||||
POSSIBLE_JAVA_HOMES+=(/usr/local)
|
||||
POSSIBLE_JAVA_HOMES+=(/usr/lib/jvm/java-8-openjdk-amd64)
|
||||
POSSIBLE_JAVA_HOMES+=(/Library/Java/JavaVirtualMachines/*.jdk/Contents/Home)
|
||||
for JH in "${POSSIBLE_JAVA_HOMES[@]}" ; do
|
||||
if [[ -e "$JH/include/jni.h" ]] ; then
|
||||
echo "Found jni.h under $JH"
|
||||
JAVA_HOME="$JH"
|
||||
BUILD_JNI=ON
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
echo "Did not find jni.h"
|
||||
fi
|
||||
fi
|
||||
|
||||
cat >>"$envfile" <<EOL
|
||||
# =================== The following code will be executed inside Docker container ===================
|
||||
export TZ=UTC
|
||||
@ -124,13 +73,9 @@ export DESIRED_CUDA="$DESIRED_CUDA"
|
||||
export LIBTORCH_VARIANT="${LIBTORCH_VARIANT:-}"
|
||||
export BUILD_PYTHONLESS="${BUILD_PYTHONLESS:-}"
|
||||
export DESIRED_DEVTOOLSET="$DESIRED_DEVTOOLSET"
|
||||
if [[ "${BUILD_FOR_SYSTEM:-}" == "windows" ]]; then
|
||||
export LIBTORCH_CONFIG="${LIBTORCH_CONFIG:-}"
|
||||
export DEBUG="${DEBUG:-}"
|
||||
fi
|
||||
|
||||
export DATE="$DATE"
|
||||
export NIGHTLIES_DATE_PREAMBLE=1.5.0.dev
|
||||
export NIGHTLIES_DATE_PREAMBLE=1.3.0.dev
|
||||
export PYTORCH_BUILD_VERSION="$PYTORCH_BUILD_VERSION"
|
||||
export PYTORCH_BUILD_NUMBER="$PYTORCH_BUILD_NUMBER"
|
||||
export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION"
|
||||
@ -140,20 +85,13 @@ export TORCH_PACKAGE_NAME='torch'
|
||||
export TORCH_CONDA_BUILD_FOLDER='pytorch-nightly'
|
||||
|
||||
export USE_FBGEMM=1
|
||||
export JAVA_HOME=$JAVA_HOME
|
||||
export BUILD_JNI=$BUILD_JNI
|
||||
export PIP_UPLOAD_FOLDER="$PIP_UPLOAD_FOLDER"
|
||||
export DOCKER_IMAGE="$DOCKER_IMAGE"
|
||||
|
||||
export workdir="$workdir"
|
||||
export MAC_PACKAGE_WORK_DIR="$workdir"
|
||||
if [[ "$OSTYPE" == "msys" ]]; then
|
||||
export PYTORCH_ROOT="$workdir/p"
|
||||
export BUILDER_ROOT="$workdir/b"
|
||||
else
|
||||
export PYTORCH_ROOT="$workdir/pytorch"
|
||||
export BUILDER_ROOT="$workdir/builder"
|
||||
fi
|
||||
export PYTORCH_ROOT="$workdir/pytorch"
|
||||
export BUILDER_ROOT="$workdir/builder"
|
||||
export MINICONDA_ROOT="$workdir/miniconda"
|
||||
export PYTORCH_FINAL_PACKAGE_DIR="$workdir/final_pkgs"
|
||||
|
||||
|
||||
@ -16,12 +16,31 @@ set -eux -o pipefail
# Expect actual code to be written to this file
chmod +x /home/circleci/project/ci_test_script.sh

VOLUME_MOUNTS="-v /home/circleci/project/:/circleci_stuff -v /home/circleci/project/final_pkgs:/final_pkgs -v ${PYTORCH_ROOT}:/pytorch -v ${BUILDER_ROOT}:/builder"
# Run the docker
if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --runtime=nvidia ${VOLUME_MOUNTS} -t -d "${DOCKER_IMAGE}")
export id=$(docker run --runtime=nvidia -t -d "${DOCKER_IMAGE}")
else
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined ${VOLUME_MOUNTS} -t -d "${DOCKER_IMAGE}")
export id=$(docker run -t -d "${DOCKER_IMAGE}")
fi

# Copy the envfile and script with all the code to run into the docker.
docker cp /home/circleci/project/. "$id:/circleci_stuff"

# Copy built packages into the docker to test. This should only exist on the
# binary test jobs. The package should've been created from a binary build job,
# which persisted the package to a CircleCI workspace, which this job then
# copies into a GPU enabled docker for testing
if [[ -d "/home/circleci/project/final_pkgs" ]]; then
docker cp /home/circleci/project/final_pkgs "$id:/final_pkgs"
fi

# Copy the needed repos into the docker. These do not exist in the smoke test
# jobs, since the smoke test jobs do not need the PyTorch source code.
if [[ -d "$PYTORCH_ROOT" ]]; then
docker cp "$PYTORCH_ROOT" "$id:/pytorch"
fi
if [[ -d "$BUILDER_ROOT" ]]; then
docker cp "$BUILDER_ROOT" "$id:/builder"
fi

# Execute the test script that was populated by an earlier section

@ -1,33 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -eux -o pipefail
|
||||
|
||||
source "/c/w/env"
|
||||
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"
|
||||
|
||||
export CUDA_VERSION="${DESIRED_CUDA/cu/}"
|
||||
export VC_YEAR=2017
|
||||
export USE_SCCACHE=1
|
||||
export SCCACHE_BUCKET=ossci-compiler-cache-windows
|
||||
export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT"
|
||||
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}
|
||||
set -x
|
||||
|
||||
if [[ "$CIRCLECI" == 'true' && -d "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019" ]]; then
|
||||
rm -rf "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019"
|
||||
fi
|
||||
|
||||
echo "Free space on filesystem before build:"
|
||||
df -h
|
||||
|
||||
pushd "$BUILDER_ROOT"
|
||||
if [[ "$PACKAGE_TYPE" == 'conda' ]]; then
|
||||
./windows/internal/build_conda.bat
|
||||
elif [[ "$PACKAGE_TYPE" == 'wheel' || "$PACKAGE_TYPE" == 'libtorch' ]]; then
|
||||
./windows/internal/build_wheels.bat
|
||||
fi
|
||||
|
||||
echo "Free space on filesystem after build:"
|
||||
df -h
|
||||
@ -1,37 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -eu -o pipefail
|
||||
set +x
|
||||
declare -x "AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}"
|
||||
declare -x "AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
|
||||
|
||||
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
|
||||
# DO NOT TURN -x ON BEFORE THIS LINE
|
||||
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
|
||||
set -eux -o pipefail
|
||||
|
||||
source "/env"
|
||||
|
||||
# This gets set in binary_populate_env.sh, but lets have a sane default just in case
|
||||
PIP_UPLOAD_FOLDER=${PIP_UPLOAD_FOLDER:-nightly/}
|
||||
# TODO: Combine CONDA_UPLOAD_CHANNEL and PIP_UPLOAD_FOLDER into one variable
|
||||
# The only difference is the trailing slash
|
||||
# Strip trailing slashes if there
|
||||
CONDA_UPLOAD_CHANNEL=$(echo "${PIP_UPLOAD_FOLDER}" | sed 's:/*$::')
|
||||
|
||||
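A quick illustration of the trailing-slash stripping (example value only):

```
echo "nightly/" | sed 's:/*$::'   # prints: nightly
```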
pushd /root/workspace/final_pkgs
# Upload the package to the final location
if [[ "$PACKAGE_TYPE" == conda ]]; then
retry conda install -yq anaconda-client
anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload "$(ls)" -u "pytorch-${CONDA_UPLOAD_CHANNEL}" --label main --no-progress --force
elif [[ "$PACKAGE_TYPE" == libtorch ]]; then
retry conda install -c conda-forge -yq awscli
s3_dir="s3://pytorch/libtorch/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"
for pkg in $(ls); do
retry aws s3 cp "$pkg" "$s3_dir" --acl public-read
done
else
retry conda install -c conda-forge -yq awscli
s3_dir="s3://pytorch/whl/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"
retry aws s3 cp "$(ls)" "$s3_dir" --acl public-read
fi

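Putting those pieces together, a hypothetical nightly cu100 run would compute destinations like the following (illustrative values only):

```
PIP_UPLOAD_FOLDER='nightly/'; DESIRED_CUDA='cu100'
echo "s3://pytorch/whl/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"       # wheel destination
echo "s3://pytorch/libtorch/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"  # libtorch destination
echo "pytorch-$(echo "${PIP_UPLOAD_FOLDER}" | sed 's:/*$::')"      # anaconda channel, e.g. pytorch-nightly
```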
@ -4,8 +4,6 @@ set -eux -o pipefail
export ANDROID_NDK_HOME=/opt/ndk
export ANDROID_HOME=/opt/android/sdk

# Must be in sync with GRADLE_VERSION in docker image for android
# https://github.com/pietern/pytorch-dockerfiles/blob/master/build.sh#L155
export GRADLE_VERSION=4.10.3
export GRADLE_HOME=/opt/gradle/gradle-$GRADLE_VERSION
export GRADLE_PATH=$GRADLE_HOME/bin/gradle
@ -47,39 +45,15 @@ fi
env
echo "BUILD_ENVIRONMENT:$BUILD_ENVIRONMENT"

GRADLE_PARAMS="-p android assembleRelease --debug --stacktrace"
if [[ "${BUILD_ENVIRONMENT}" == *-gradle-build-only-x86_32* ]]; then
GRADLE_PARAMS+=" -PABI_FILTERS=x86"
fi

if [ -n "${GRADLE_OFFLINE:-}" ]; then
GRADLE_PARAMS+=" --offline"
fi

# touch gradle cache files to prevent expiration
while IFS= read -r -d '' file
do
touch "$file" || true
done < <(find /var/lib/jenkins/.gradle -type f -print0)

env

export GRADLE_LOCAL_PROPERTIES=~/workspace/android/local.properties
rm -f $GRADLE_LOCAL_PROPERTIES
echo "sdk.dir=/opt/android/sdk" >> $GRADLE_LOCAL_PROPERTIES
echo "ndk.dir=/opt/ndk" >> $GRADLE_LOCAL_PROPERTIES
echo "cmake.dir=/usr/local" >> $GRADLE_LOCAL_PROPERTIES

$GRADLE_PATH $GRADLE_PARAMS

find . -type f -name "*.a" -exec ls -lh {} \;

while IFS= read -r -d '' file
do
echo
echo "$file"
ls -lah "$file"
zipinfo -l "$file"
done < <(find . -type f -name '*.aar' -print0)
if [[ "${BUILD_ENVIRONMENT}" == *-gradle-build-only-x86_32* ]]; then
$GRADLE_PATH -PABI_FILTERS=x86 -p ~/workspace/android/ assembleRelease
else
$GRADLE_PATH -p ~/workspace/android/ assembleRelease
fi

find . -type f -name *aar -print | xargs tar cfvz ~/workspace/android/artifacts.tgz

@ -53,10 +53,11 @@ sudo apt-get -y install doxygen
# Generate ATen files
pushd "${pt_checkout}"
pip install -r requirements.txt
time python aten/src/ATen/gen.py \
time GEN_TO_SOURCE=1 python aten/src/ATen/gen.py \
-s aten/src/ATen \
-d build/aten/src/ATen \
aten/src/ATen/Declarations.cwrap \
aten/src/THNN/generic/THNN.h \
aten/src/THCUNN/generic/THCUNN.h \
aten/src/ATen/nn.yaml \
aten/src/ATen/native/native_functions.yaml
@ -72,10 +73,10 @@ time python tools/setup_helpers/generate_code.py \

# Build the docs
pushd docs/cpp
pip install breathe==4.13.0 bs4 lxml six
pip install breathe==4.11.1 bs4 lxml six
pip install --no-cache-dir -e "git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme"
pip install exhale>=0.2.1
pip install sphinx==2.4.4
pip install sphinx==1.8.5
# Uncomment once it is fixed
# pip install -r requirements.txt
time make VERBOSE=1 html -j

@ -90,12 +90,6 @@ else
find "$install_path" -name "*.html" -print0 | xargs -0 perl -pi -w -e "s@master\s+\((\d\.\d\.[A-Fa-f0-9]+\+[A-Fa-f0-9]+)\s+\)@<a href='http://pytorch.org/docs/versions.html'>$version \▼</a>@g"
fi

# Prevent Google from indexing $install_path/_modules. This folder contains
# generated source files.
# NB: the following only works on gnu sed. The sed shipped with mac os is different.
# One can `brew install gnu-sed` on a mac and then use "gsed" instead of "sed".
find "$install_path/_modules" -name "*.html" -print0 | xargs -0 sed -i '/<head>/a \ \ <meta name="robots" content="noindex">'

git add "$install_path" || true
git status
git config user.email "soumith+bot@pytorch.org"

@ -2,7 +2,7 @@
set -ex -o pipefail

# Set up NVIDIA docker repo
curl -s -L --retry 3 https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
echo "deb https://nvidia.github.io/libnvidia-container/ubuntu16.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list
echo "deb https://nvidia.github.io/nvidia-container-runtime/ubuntu16.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list
echo "deb https://nvidia.github.io/nvidia-docker/ubuntu16.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list
@ -45,7 +45,7 @@ retry () {
retry sudo pip -q install awscli==1.16.35

if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
DRIVER_FN="NVIDIA-Linux-x86_64-440.59.run"
DRIVER_FN="NVIDIA-Linux-x86_64-430.40.run"
wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
nvidia-smi

@ -2,7 +2,7 @@
set -eux -o pipefail

# Set up CircleCI GPG keys for apt, if needed
curl --retry 3 -s -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -
curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -

# Stop background apt updates. Hypothetically, the kill should not
# be necessary, because stop is supposed to send a kill signal to

132  .circleci/scripts/should_run_job.py  Normal file
@ -0,0 +1,132 @@
|
||||
import re
|
||||
import sys
|
||||
|
||||
# Modify this variable if you want to change the set of default jobs
|
||||
# which are run on all pull requests.
|
||||
#
|
||||
# WARNING: Actually, this is a lie; we're currently also controlling
|
||||
# the set of jobs to run via the Workflows filters in CircleCI config.
|
||||
|
||||
default_set = set([
|
||||
# PyTorch CPU
|
||||
# Selected oldest Python 2 version to ensure Python 2 coverage
|
||||
'pytorch-linux-xenial-py2.7.9',
|
||||
# PyTorch CUDA
|
||||
'pytorch-linux-xenial-cuda9-cudnn7-py3',
|
||||
# PyTorch ASAN
|
||||
'pytorch-linux-xenial-py3-clang5-asan',
|
||||
# PyTorch DEBUG
|
||||
'pytorch-linux-xenial-py3.6-gcc5.4',
|
||||
|
||||
# Caffe2 CPU
|
||||
'caffe2-py2-mkl-ubuntu16.04',
|
||||
# Caffe2 CUDA
|
||||
'caffe2-py3.5-cuda10.1-cudnn7-ubuntu16.04',
|
||||
# Caffe2 ONNX
|
||||
'caffe2-onnx-py2-gcc5-ubuntu16.04',
|
||||
'caffe2-onnx-py3.6-clang7-ubuntu16.04',
|
||||
# Caffe2 Clang
|
||||
'caffe2-py2-clang7-ubuntu16.04',
|
||||
# Caffe2 CMake
|
||||
'caffe2-cmake-cuda9.0-cudnn7-ubuntu16.04',
|
||||
|
||||
# Binaries
|
||||
'manywheel 2.7mu cpu devtoolset7',
|
||||
'libtorch 2.7m cpu devtoolset7',
|
||||
'libtorch 2.7m cpu gcc5.4_cxx11-abi',
|
||||
'libtorch-ios-10.2.1-nightly-x86_64-build',
|
||||
'libtorch-ios-10.2.1-nightly-arm64-build',
|
||||
'libtorch-ios-10.2.1-nightly-binary-build-upload',
|
||||
|
||||
# Caffe2 Android
|
||||
'caffe2-py2-android-ubuntu16.04',
|
||||
# Caffe2 OSX
|
||||
'caffe2-py2-system-macos10.13',
|
||||
# PyTorch OSX
|
||||
'pytorch-macos-10.13-py3',
|
||||
'pytorch-macos-10.13-cuda9.2-cudnn7-py3',
|
||||
# PyTorch Android
|
||||
'pytorch-linux-xenial-py3-clang5-android-ndk-r19c-x86_32-build',
|
||||
# PyTorch Android gradle
|
||||
'pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build-only-x86_32',
|
||||
# Pytorch iOS builds
|
||||
'pytorch-ios-10.2.1-x86_64_build',
|
||||
'pytorch-ios-10.2.1-arm64_build',
|
||||
|
||||
# Pytorch backward compatibility check
|
||||
'pytorch-linux-backward-compatibility-check-test',
|
||||
|
||||
# XLA
|
||||
'pytorch-xla-linux-xenial-py3.6-clang7',
|
||||
|
||||
# Named tensor
|
||||
"pytorch-namedtensor-linux-xenial-py3.6-gcc5.4",
|
||||
"pytorch-namedtensor-linux-xenial-py3-clang5-asan",
|
||||
"pytorch-namedtensor-linux-xenial-cuda9-cudnn7-py2",
|
||||
|
||||
# Other checks
|
||||
'pytorch-short-perf-test-gpu',
|
||||
'pytorch-python-doc-push',
|
||||
'pytorch-cpp-doc-push',
|
||||
])
|
||||
|
||||
# Collection of jobs that are *temporarily* excluded from running on PRs.
|
||||
# Use this if there is a long-running job breakage that we can't fix with a
|
||||
# single revert.
|
||||
skip_override = {
|
||||
# example entry:
|
||||
# 'pytorch-cpp-doc-push': "https://github.com/pytorch/pytorch/issues/<related issue>"
|
||||
}
|
||||
|
||||
# Takes in commit message to analyze via stdin
|
||||
#
|
||||
# This script will query Git and attempt to determine if we should
|
||||
# run the current CI job under question
|
||||
#
|
||||
# NB: Try to avoid hard-coding names here, so there's less place to update when jobs
|
||||
# are updated/renamed
|
||||
#
|
||||
# Semantics in the presence of multiple tags:
|
||||
# - Let D be the set of default builds
|
||||
# - Let S be the set of explicitly specified builds
|
||||
# - Let O be the set of temporarily skipped builds
|
||||
# - Run S \/ (D - O)
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('build_environment')
|
||||
args = parser.parse_args()
|
||||
|
||||
commit_msg = sys.stdin.read()
|
||||
|
||||
# Matches anything that looks like [foo ci] or [ci foo] or [foo test]
|
||||
# or [test foo]
|
||||
RE_MARKER = re.compile(r'\[(?:([^ \[\]]+) )?(?:ci|test)(?: ([^ \[\]]+))?\]')
|
||||
|
||||
markers = RE_MARKER.finditer(commit_msg)
|
||||
|
||||
for m in markers:
|
||||
if m.group(1) and m.group(2):
|
||||
print("Unrecognized marker: {}".format(m.group(0)))
|
||||
continue
|
||||
spec = m.group(1) or m.group(2)
|
||||
if spec is None:
|
||||
print("Unrecognized marker: {}".format(m.group(0)))
|
||||
continue
|
||||
if spec in args.build_environment or spec == 'all':
|
||||
print("Accepting {} due to commit marker {}".format(args.build_environment, m.group(0)))
|
||||
sys.exit(0)
|
||||
|
||||
skip_override_set = set(skip_override.keys())
|
||||
should_run_set = default_set - skip_override_set
|
||||
for spec in should_run_set:
|
||||
if spec in args.build_environment:
|
||||
print("Accepting {} as part of default set".format(args.build_environment))
|
||||
sys.exit(0)
|
||||
|
||||
print("Rejecting {}".format(args.build_environment))
|
||||
for spec, issue in skip_override.items():
|
||||
if spec in args.build_environment:
|
||||
print("This job is temporarily excluded from running on PRs. Reason: {}".format(issue))
|
||||
break
|
||||
sys.exit(1)
|
||||
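A sketch of how this script is meant to be driven (the BUILD_ENVIRONMENT value is just an example); exit code 0 means "run the job", non-zero means "skip it":

```
git log --format='%B' -n 1 HEAD \
  | python .circleci/scripts/should_run_job.py "pytorch-linux-xenial-py3.6-gcc5.4-build" \
  && echo "run this job" || echo "skip this job"
```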
29  .circleci/scripts/should_run_job.sh  Executable file
@ -0,0 +1,29 @@
#!/usr/bin/env bash
set -exu -o pipefail

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# Check if we should actually run
echo "BUILD_ENVIRONMENT: ${BUILD_ENVIRONMENT:-}"
echo "CIRCLE_PULL_REQUEST: ${CIRCLE_PULL_REQUEST:-}"
if [ -z "${BUILD_ENVIRONMENT:-}" ]; then
echo "Cannot run should_run_job.sh if BUILD_ENVIRONMENT is not defined!"
echo "CircleCI scripts are probably misconfigured."
exit 1
fi
if ! [ -e "$SCRIPT_DIR/COMMIT_MSG" ]; then
echo "Cannot run should_run_job.sh if you don't have COMMIT_MSG"
echo "written out. Are you perhaps running the wrong copy of this script?"
echo "You should be running the copy in ~/workspace; SCRIPT_DIR=$SCRIPT_DIR"
exit 1
fi
if [ -n "${CIRCLE_PULL_REQUEST:-}" ]; then
if [[ $CIRCLE_BRANCH != "ci-all/"* ]] && [[ $CIRCLE_BRANCH != "nightly" ]] && [[ $CIRCLE_BRANCH != "postnightly" ]] ; then
# Don't swallow "script doesn't exist" errors
[ -e "$SCRIPT_DIR/should_run_job.py" ]
if ! python "$SCRIPT_DIR/should_run_job.py" "${BUILD_ENVIRONMENT:-}" < "$SCRIPT_DIR/COMMIT_MSG" ; then
circleci step halt
exit
fi
fi
fi
@ -1,87 +0,0 @@
|
||||
import glob
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
def get_size(file_dir):
|
||||
try:
|
||||
# we should only expect one file, if no, something is wrong
|
||||
file_name = glob.glob(os.path.join(file_dir, "*"))[0]
|
||||
return os.stat(file_name).st_size
|
||||
except:
|
||||
logging.exception(f"error getting file from: {file_dir}")
|
||||
return 0
|
||||
|
||||
|
||||
def build_message(size):
|
||||
pkg_type, py_ver, cu_ver, *_ = os.environ.get("BUILD_ENVIRONMENT", "").split() + [
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
]
|
||||
os_name = os.uname()[0].lower()
|
||||
if os_name == "darwin":
|
||||
os_name = "macos"
|
||||
return {
|
||||
"normal": {
|
||||
"os": os_name,
|
||||
"pkg_type": pkg_type,
|
||||
"py_ver": py_ver,
|
||||
"cu_ver": cu_ver,
|
||||
"pr": os.environ.get("CIRCLE_PR_NUMBER"),
|
||||
"build_num": os.environ.get("CIRCLE_BUILD_NUM"),
|
||||
"sha1": os.environ.get("CIRCLE_SHA1"),
|
||||
"branch": os.environ.get("CIRCLE_BRANCH"),
|
||||
},
|
||||
"int": {
|
||||
"time": int(time.time()),
|
||||
"size": size,
|
||||
"commit_time": int(os.environ.get("COMMIT_TIME", "0")),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def send_message(message):
|
||||
access_token = os.environ.get("SCRIBE_GRAPHQL_ACCESS_TOKEN")
|
||||
if not access_token:
|
||||
raise ValueError("Can't find access token from environment variable")
|
||||
url = "https://graph.facebook.com/scribe_logs"
|
||||
r = requests.post(
|
||||
url,
|
||||
data={
|
||||
"access_token": access_token,
|
||||
"logs": json.dumps(
|
||||
[
|
||||
{
|
||||
"category": "perfpipe_pytorch_binary_size",
|
||||
"message": json.dumps(message),
|
||||
"line_escape": False,
|
||||
}
|
||||
]
|
||||
),
|
||||
},
|
||||
)
|
||||
print(r.text)
|
||||
r.raise_for_status()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
file_dir = os.environ.get(
|
||||
"PYTORCH_FINAL_PACKAGE_DIR", "/home/circleci/project/final_pkgs"
|
||||
)
|
||||
if len(sys.argv) == 2:
|
||||
file_dir = sys.argv[1]
|
||||
print("checking dir: " + file_dir)
|
||||
size = get_size(file_dir)
|
||||
if size != 0:
|
||||
try:
|
||||
send_message(build_message(size))
|
||||
except:
|
||||
logging.exception("can't send message")
|
||||
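A minimal sketch of a manual invocation (the directory argument is optional and overrides PYTORCH_FINAL_PACKAGE_DIR; SCRIBE_GRAPHQL_ACCESS_TOKEN must be set for the upload to succeed, the value below is a placeholder):

```
export SCRIBE_GRAPHQL_ACCESS_TOKEN="<token>"   # placeholder, not a real token
python3 .circleci/scripts/upload_binary_size_to_scuba.py /home/circleci/project/final_pkgs
```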
@ -1,25 +0,0 @@
|
||||
$VS_DOWNLOAD_LINK = "https://aka.ms/vs/15/release/vs_buildtools.exe"
|
||||
$VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStudio.Workload.VCTools",
|
||||
"--add Microsoft.VisualStudio.Component.VC.Tools.14.11",
|
||||
"--add Microsoft.Component.MSBuild",
|
||||
"--add Microsoft.VisualStudio.Component.Roslyn.Compiler",
|
||||
"--add Microsoft.VisualStudio.Component.TextTemplating",
|
||||
"--add Microsoft.VisualStudio.Component.VC.CoreIde",
|
||||
"--add Microsoft.VisualStudio.Component.VC.Redist.14.Latest",
|
||||
"--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core",
|
||||
"--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
|
||||
"--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Win81")
|
||||
|
||||
curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
echo "Download of the VS 2017 installer failed"
|
||||
exit 1
|
||||
}
|
||||
|
||||
$process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru
|
||||
Remove-Item -Path vs_installer.exe -Force
|
||||
$exitCode = $process.ExitCode
|
||||
if (($exitCode -ne 0) -and ($exitCode -ne 3010)) {
|
||||
echo "VS 2017 installer exited with code $exitCode, which should be one of [0, 3010]."
|
||||
exit 1
|
||||
}
|
||||
@ -1,43 +1,43 @@
#!/usr/bin/env python3
import cimodel.data.caffe2_build_definitions as caffe2_build_definitions

import urllib.request
import re

import cimodel.data.pytorch_build_definitions as pytorch_build_definitions
from yaml import load
import cimodel.data.caffe2_build_definitions as caffe2_build_definitions

try:
    from yaml import CLoader as Loader
except ImportError:
    from yaml import Loader
RE_VERSION = re.compile(r'allDeployedVersions = "([0-9,]+)"')

URL_TEMPLATE = (
    "https://raw.githubusercontent.com/pytorch/ossci-job-dsl/"
    "master/src/main/groovy/ossci/{}/DockerVersion.groovy"
)

def load_config(filename=".circleci/config.yml"):
    with open(filename, "r") as fh:
        return load("".join(fh.readlines()), Loader)


def load_tags_for_projects(workflow_config):
    return {
        v["ecr_gc_job"]["project"]: v["ecr_gc_job"]["tags_to_keep"]
        for v in workflow_config["workflows"]["ecr_gc"]["jobs"]
        if isinstance(v, dict) and "ecr_gc_job" in v
    }


def check_version(job, tags, expected_version):
    valid_versions = tags[job].split(",")
    if expected_version not in valid_versions:
        raise RuntimeError(
            "We configured {} to use Docker version {}; but this "
            "version is not configured in job ecr_gc_job_for_{}. Non-deployed versions will be "
            "garbage collected two weeks after they are created. DO NOT LAND "
            "THIS TO MASTER without also updating ossci-job-dsl with this version."
            "\n\nDeployed versions: {}".format(job, expected_version, job, tags[job])
        )

def check_version(job, expected_version):
    url = URL_TEMPLATE.format(job)
    with urllib.request.urlopen(url) as f:
        contents = f.read().decode('utf-8')
    m = RE_VERSION.search(contents)
    if not m:
        raise RuntimeError(
            "Unbelievable! I could not find the variable allDeployedVersions in "
            "{}; did the organization of ossci-job-dsl change?\n\nFull contents:\n{}"
            .format(url, contents)
        )
    valid_versions = [int(v) for v in m.group(1).split(',')]
    if expected_version not in valid_versions:
        raise RuntimeError(
            "We configured {} to use Docker version {}; but this "
            "version is not deployed in {}. Non-deployed versions will be "
            "garbage collected two weeks after they are created. DO NOT LAND "
            "THIS TO MASTER without also updating ossci-job-dsl with this version."
            "\n\nDeployed versions: {}"
            .format(job, expected_version, url, m.group(1))
        )

def validate_docker_version():
    tags = load_tags_for_projects(load_config())
    check_version("pytorch", tags, pytorch_build_definitions.DOCKER_IMAGE_VERSION)
    check_version("caffe2", tags, caffe2_build_definitions.DOCKER_IMAGE_VERSION)
    check_version('pytorch', pytorch_build_definitions.DOCKER_IMAGE_VERSION)
    check_version('caffe2', caffe2_build_definitions.DOCKER_IMAGE_VERSION)


if __name__ == "__main__":

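The new check boils down to fetching DockerVersion.groovy from ossci-job-dsl and looking for the configured version in allDeployedVersions; a rough bash equivalent of that lookup, for manual debugging (a sketch only, not part of the CI scripts):

```
url="https://raw.githubusercontent.com/pytorch/ossci-job-dsl/master/src/main/groovy/ossci/pytorch/DockerVersion.groovy"
curl -s "$url" | grep -o 'allDeployedVersions = "[0-9,]*"'
# DOCKER_IMAGE_VERSION from cimodel/data/pytorch_build_definitions.py must appear in that list
```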
@ -52,12 +52,3 @@ binary_mac_params: &binary_mac_params
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: << parameters.build_environment >>
|
||||
|
||||
binary_windows_params: &binary_windows_params
|
||||
parameters:
|
||||
build_environment:
|
||||
type: string
|
||||
default: ""
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: << parameters.build_environment >>
|
||||
BUILD_FOR_SYSTEM: windows
|
||||
|
||||
|
||||
@ -12,3 +12,9 @@
|
||||
# resource_class: gpu.medium
|
||||
# <<: *binary_linux_test
|
||||
#
|
||||
# binary_linux_libtorch_2.7m_cu100_test:
|
||||
# environment:
|
||||
# BUILD_ENVIRONMENT: "libtorch 2.7m cu100"
|
||||
# resource_class: gpu.medium
|
||||
# <<: *binary_linux_test
|
||||
|
||||
|
||||
@ -2,7 +2,7 @@
|
||||
<<: *binary_linux_build_params
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
@ -19,8 +19,8 @@
|
||||
elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then
|
||||
retry apt-get update
|
||||
retry apt-get -y install expect moreutils
|
||||
retry conda install -y -c eumetsat expect
|
||||
retry conda install -y cmake
|
||||
conda install -y -c eumetsat expect
|
||||
conda install -y cmake
|
||||
fi
|
||||
- run:
|
||||
name: Update compiler to devtoolset7
|
||||
@ -41,16 +41,6 @@
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
source "/pytorch/.circleci/scripts/binary_linux_build.sh"
|
||||
- run:
|
||||
name: save binary size
|
||||
no_output_timeout: "5m"
|
||||
command: |
|
||||
source /env
|
||||
cd /pytorch && export COMMIT_TIME=$(git log --max-count=1 --format=%ct || echo 0)
|
||||
pip3 install requests && \
|
||||
SCRIBE_GRAPHQL_ACCESS_TOKEN=${SCRIBE_GRAPHQL_ACCESS_TOKEN} \
|
||||
python3 /pytorch/.circleci/scripts/upload_binary_size_to_scuba.py || exit 0
|
||||
|
||||
- persist_to_workspace:
|
||||
root: /
|
||||
paths: final_pkgs
|
||||
@ -66,7 +56,7 @@
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
# TODO: We shouldn't attach the workspace multiple times
|
||||
- attach_workspace:
|
||||
at: /home/circleci/project
|
||||
@ -89,7 +79,7 @@
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- attach_workspace:
|
||||
@ -140,7 +130,7 @@
|
||||
smoke_mac_test:
|
||||
<<: *binary_linux_test_upload_params
|
||||
macos:
|
||||
xcode: "9.4.1"
|
||||
xcode: "9.0"
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: ~/workspace
|
||||
@ -168,10 +158,10 @@
|
||||
binary_mac_build:
|
||||
<<: *binary_mac_params
|
||||
macos:
|
||||
xcode: "9.4.1"
|
||||
xcode: "9.0"
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
@ -209,10 +199,10 @@
|
||||
binary_mac_upload: &binary_mac_upload
|
||||
<<: *binary_mac_params
|
||||
macos:
|
||||
xcode: "9.4.1"
|
||||
xcode: "9.0"
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
@ -233,39 +223,33 @@
|
||||
binary_ios_build:
|
||||
<<: *pytorch_ios_params
|
||||
macos:
|
||||
xcode: "11.2.1"
|
||||
xcode: "10.2.1"
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: ~/workspace
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- checkout
|
||||
- run_brew_for_ios_build
|
||||
- run:
|
||||
name: Build
|
||||
context: org-member
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
script="/Users/distiller/project/.circleci/scripts/binary_ios_build.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
- run:
|
||||
name: Test
|
||||
no_output_timeout: "30m"
|
||||
command: |
|
||||
script="/Users/distiller/project/.circleci/scripts/binary_ios_test.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
- persist_to_workspace:
|
||||
root: /Users/distiller/workspace/
|
||||
paths: ios
|
||||
|
||||
binary_ios_upload:
|
||||
|
||||
binary_ios_upload:
|
||||
<<: *pytorch_ios_params
|
||||
macos:
|
||||
xcode: "11.2.1"
|
||||
xcode: "10.2.1"
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: ~/workspace
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- checkout
|
||||
- run_brew_for_ios_build
|
||||
- run:
|
||||
@ -275,46 +259,3 @@
|
||||
script="/Users/distiller/project/.circleci/scripts/binary_ios_upload.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
|
||||
binary_windows_build:
|
||||
<<: *binary_windows_params
|
||||
executor:
|
||||
name: windows-cpu-with-nvidia-cuda
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -eux -o pipefail
|
||||
script="/c/w/p/.circleci/scripts/binary_windows_build.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
- persist_to_workspace:
|
||||
root: "C:/w"
|
||||
paths: final_pkgs
|
||||
|
||||
binary_windows_upload:
|
||||
<<: *binary_windows_params
|
||||
docker:
|
||||
- image: continuumio/miniconda
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- run:
|
||||
name: Upload
|
||||
no_output_timeout: "10m"
|
||||
command: |
|
||||
set -eux -o pipefail
|
||||
script="/pytorch/.circleci/scripts/binary_windows_upload.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
|
||||
@ -4,7 +4,7 @@
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- setup_linux_system_environment
|
||||
- checkout
|
||||
- setup_ci_environment
|
||||
@ -41,7 +41,7 @@
|
||||
|
||||
echo "DOCKER_IMAGE: "${DOCKER_IMAGE}
|
||||
time docker pull ${DOCKER_IMAGE} >/dev/null
|
||||
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${DOCKER_IMAGE})
|
||||
export id=$(docker run -t -d -w /var/lib/jenkins ${DOCKER_IMAGE})
|
||||
docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace
|
||||
|
||||
export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./ci_build_script.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
@ -64,7 +64,7 @@
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
@ -112,9 +112,9 @@
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
|
||||
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
else
|
||||
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
fi
|
||||
docker cp /home/circleci/project/. "$id:/var/lib/jenkins/workspace"
|
||||
|
||||
@ -124,10 +124,10 @@
|
||||
caffe2_macos_build:
|
||||
<<: *caffe2_params
|
||||
macos:
|
||||
xcode: "9.4.1"
|
||||
xcode: "9.0"
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- checkout
|
||||
- run_brew_for_macos_build
|
||||
- run:
|
||||
@ -151,7 +151,7 @@
|
||||
# Install Anaconda if we need to
|
||||
if [ -n "${CAFFE2_USE_ANACONDA}" ]; then
|
||||
rm -rf ${TMPDIR}/anaconda
|
||||
curl --retry 3 -o ${TMPDIR}/conda.sh https://repo.anaconda.com/miniconda/Miniconda${ANACONDA_VERSION}-latest-MacOSX-x86_64.sh
|
||||
curl -o ${TMPDIR}/conda.sh https://repo.continuum.io/miniconda/Miniconda${ANACONDA_VERSION}-latest-MacOSX-x86_64.sh
|
||||
chmod +x ${TMPDIR}/conda.sh
|
||||
/bin/bash ${TMPDIR}/conda.sh -b -p ${TMPDIR}/anaconda
|
||||
rm -f ${TMPDIR}/conda.sh
|
||||
@ -162,7 +162,7 @@
|
||||
pip -q install numpy
|
||||
|
||||
# Install sccache
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
|
||||
sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2
|
||||
|
||||
|
||||
@ -3,12 +3,16 @@ commands:
|
||||
# attaches the workspace at ~/workspace; this workspace is generated
|
||||
# by the setup job. Note that ~/workspace is not the default working
|
||||
# directory (that's ~/project).
|
||||
attach_scripts:
|
||||
description: "Attach the scripts that power everything else"
|
||||
should_run_job:
|
||||
description: "Test if the job should run or not"
|
||||
steps:
|
||||
- attach_workspace:
|
||||
name: Attaching workspace
|
||||
at: ~/workspace
|
||||
- run:
|
||||
name: Should run job
|
||||
no_output_timeout: "2m"
|
||||
command: ~/workspace/.circleci/scripts/should_run_job.sh
|
||||
|
||||
# This system setup script is meant to run before the CI-related scripts, e.g.,
|
||||
# installing Git client, checking out code, setting up CI env, and
|
||||
|
||||
@ -1,84 +0,0 @@
|
||||
docker_build_job:
|
||||
parameters:
|
||||
image_name:
|
||||
type: string
|
||||
default: ""
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
resource_class: large
|
||||
environment:
|
||||
IMAGE_NAME: << parameters.image_name >>
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: build_docker_image_<< parameters.image_name >>
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
set -x
|
||||
cd .circleci/docker && ./build_docker.sh
|
||||
docker_for_ecr_gc_build_job:
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: build_docker_image_for_ecr_gc
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
cd .circleci/ecr_gc_docker
|
||||
docker build . -t 308535385114.dkr.ecr.us-east-1.amazonaws.com/gc/ecr
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
eval $(aws ecr get-login --no-include-email --region us-east-1)
|
||||
set -x
|
||||
docker push 308535385114.dkr.ecr.us-east-1.amazonaws.com/gc/ecr
|
||||
ecr_gc_job:
|
||||
parameters:
|
||||
project:
|
||||
type: string
|
||||
default: "pytorch"
|
||||
tags_to_keep: # comma separate values
|
||||
type: string
|
||||
environment:
|
||||
PROJECT: << parameters.project >>
|
||||
IMAGE_TAG: << parameters.tags_to_keep >>
|
||||
docker:
|
||||
- image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/gc/ecr
|
||||
aws_auth:
|
||||
aws_access_key_id: ${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
aws_secret_access_key: ${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
|
||||
steps:
|
||||
- run:
|
||||
name: garbage collecting for ecr images
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
set -x
|
||||
/usr/bin/gc.py --filter-prefix ${PROJECT} --ignore-tags ${IMAGE_TAG}
|
||||
|
||||
docker_hub_index_job:
|
||||
docker:
|
||||
- image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/gc/ecr
|
||||
aws_auth:
|
||||
aws_access_key_id: ${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
aws_secret_access_key: ${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
|
||||
steps:
|
||||
- run:
|
||||
name: garbage collecting for ecr images
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
export DOCKER_HUB_USERNAME=${CIRCLECI_DOCKER_HUB_USERNAME}
|
||||
export DOCKER_HUB_PASSWORD=${CIRCLECI_DOCKER_HUB_PASSWORD}
|
||||
set -x
|
||||
/usr/bin/docker_hub.py
|
||||
@ -1,9 +1,15 @@
|
||||
# WARNING: DO NOT EDIT THIS FILE DIRECTLY!!!
|
||||
# See the README.md in this directory.
|
||||
|
||||
# IMPORTANT: To update Docker image version, please follow
|
||||
# the instructions at
|
||||
# https://github.com/pytorch/pytorch/wiki/Docker-image-build-on-CircleCI
|
||||
# IMPORTANT: To update Docker image version, please first update
|
||||
# https://github.com/pytorch/ossci-job-dsl/blob/master/src/main/groovy/ossci/pytorch/DockerVersion.groovy and
|
||||
# https://github.com/pytorch/ossci-job-dsl/blob/master/src/main/groovy/ossci/caffe2/DockerVersion.groovy,
|
||||
# and then update DOCKER_IMAGE_VERSION at the top of the following files:
|
||||
# * cimodel/data/pytorch_build_definitions.py
|
||||
# * cimodel/data/caffe2_build_definitions.py
|
||||
# And the inline copies of the variable in
|
||||
# * verbatim-sources/job-specs-custom.yml
|
||||
# (grep for DOCKER_IMAGE)
|
||||
|
||||
version: 2.1
|
||||
|
||||
@ -13,17 +19,3 @@ docker_config_defaults: &docker_config_defaults
|
||||
# This IAM user only allows read-write access to ECR
|
||||
aws_access_key_id: ${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V4}
|
||||
aws_secret_access_key: ${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V4}
|
||||
|
||||
executors:
|
||||
windows-with-nvidia-gpu:
|
||||
machine:
|
||||
resource_class: windows.gpu.nvidia.medium
|
||||
image: windows-server-2019-nvidia:stable
|
||||
shell: bash.exe
|
||||
|
||||
windows-cpu-with-nvidia-cuda:
|
||||
machine:
|
||||
# we will change to CPU host when it's ready
|
||||
resource_class: windows.xlarge
|
||||
image: windows-server-2019-vs2019:stable
|
||||
shell: bash.exe
|
||||
|
||||
@ -1,14 +1,49 @@
|
||||
pytorch_short_perf_test_gpu:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-short-perf-test-gpu
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:347"
|
||||
PYTHON_VERSION: "3.6"
|
||||
USE_CUDA_DOCKER_RUNTIME: "1"
|
||||
resource_class: gpu.medium
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- should_run_job
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
name: Perf Test
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1}
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
|
||||
docker cp $id:/var/lib/jenkins/workspace/env /home/circleci/project/env
|
||||
# This IAM user allows write access to S3 bucket for perf test numbers
|
||||
set +x
|
||||
echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_PERF_TEST_S3_BUCKET_V4}" >> /home/circleci/project/env
|
||||
echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_PERF_TEST_S3_BUCKET_V4}" >> /home/circleci/project/env
|
||||
set -x
|
||||
docker cp /home/circleci/project/env $id:/var/lib/jenkins/workspace/env
|
||||
|
||||
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/short-perf-test-gpu.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
pytorch_python_doc_push:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-python-doc-push
|
||||
# TODO: stop hardcoding this
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:347"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
@ -19,18 +54,18 @@
|
||||
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1}
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
|
||||
# master branch docs push
|
||||
if [[ "${CIRCLE_BRANCH}" == "master" ]]; then
|
||||
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/python_doc_push_script.sh docs/master master site") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
|
||||
# stable release docs push. Due to some circleci limitations, we keep
|
||||
# an eternal PR open for merging v1.2.0 -> master for this job.
|
||||
# XXX: The following code is only run on the v1.2.0 branch, which might
|
||||
# an eternal PR open for merging v1.3.0 -> master for this job.
|
||||
# XXX: The following code is only run on the v1.3.0 branch, which might
|
||||
# not be exactly the same as what you see here.
|
||||
elif [[ "${CIRCLE_BRANCH}" == "v1.2.0" ]]; then
|
||||
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/python_doc_push_script.sh docs/stable 1.2.0 site dry_run") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
elif [[ "${CIRCLE_BRANCH}" == "v1.3.0" ]]; then
|
||||
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GITHUB_PYTORCHBOT_TOKEN=${GITHUB_PYTORCHBOT_TOKEN}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/python_doc_push_script.sh docs/stable 1.3.0 site-v1.3.0 dry_run") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
|
||||
# For open PRs: Do a dry_run of the docs build, don't push build
|
||||
else
|
||||
@ -47,13 +82,13 @@
|
||||
pytorch_cpp_doc_push:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-cpp-doc-push
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:347"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
@ -64,7 +99,7 @@
|
||||
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1}
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
|
||||
# master branch docs push
|
||||
if [[ "${CIRCLE_BRANCH}" == "master" ]]; then
|
||||
@ -93,10 +128,10 @@
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-build
|
||||
macos:
|
||||
xcode: "9.4.1"
|
||||
xcode: "9.0"
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- checkout
|
||||
- run_brew_for_macos_build
|
||||
- run:
|
||||
@ -107,7 +142,7 @@
|
||||
export IN_CIRCLECI=1
|
||||
|
||||
# Install sccache
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
|
||||
sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2
|
||||
|
||||
@ -133,11 +168,11 @@
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-test
|
||||
macos:
|
||||
xcode: "9.4.1"
|
||||
xcode: "9.0"
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
# This workspace also carries binaries from the build job
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- run_brew_for_macos_build
|
||||
- run:
|
||||
name: Test
|
||||
@ -151,19 +186,65 @@
|
||||
|
||||
chmod a+x .jenkins/pytorch/macos-test.sh
|
||||
unbuffer .jenkins/pytorch/macos-test.sh 2>&1 | ts
|
||||
- store_test_results:
|
||||
path: test/test-reports
|
||||
|
||||
pytorch_macos_10_13_cuda9_2_cudnn7_py3_build:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-macos-10.13-cuda9.2-cudnn7-py3-build
|
||||
macos:
|
||||
xcode: "9.0"
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- should_run_job
|
||||
- checkout
|
||||
- run_brew_for_macos_build
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
|
||||
export IN_CIRCLECI=1
|
||||
|
||||
# Install CUDA 9.2
|
||||
sudo rm -rf ~/cuda_9.2.64_mac_installer.app || true
|
||||
curl https://s3.amazonaws.com/ossci-macos/cuda_9.2.64_mac_installer.zip -o ~/cuda_9.2.64_mac_installer.zip
|
||||
unzip ~/cuda_9.2.64_mac_installer.zip -d ~/
|
||||
sudo ~/cuda_9.2.64_mac_installer.app/Contents/MacOS/CUDAMacOSXInstaller --accept-eula --no-window
|
||||
sudo cp /usr/local/cuda/lib/libcuda.dylib /Developer/NVIDIA/CUDA-9.2/lib/libcuda.dylib
|
||||
sudo rm -rf /usr/local/cuda || true
|
||||
|
||||
# Install cuDNN 7.1 for CUDA 9.2
|
||||
curl https://s3.amazonaws.com/ossci-macos/cudnn-9.2-osx-x64-v7.1.tgz -o ~/cudnn-9.2-osx-x64-v7.1.tgz
|
||||
rm -rf ~/cudnn-9.2-osx-x64-v7.1 && mkdir ~/cudnn-9.2-osx-x64-v7.1
|
||||
tar -xzvf ~/cudnn-9.2-osx-x64-v7.1.tgz -C ~/cudnn-9.2-osx-x64-v7.1
|
||||
sudo cp ~/cudnn-9.2-osx-x64-v7.1/cuda/include/cudnn.h /Developer/NVIDIA/CUDA-9.2/include/
|
||||
sudo cp ~/cudnn-9.2-osx-x64-v7.1/cuda/lib/libcudnn* /Developer/NVIDIA/CUDA-9.2/lib/
|
||||
sudo chmod a+r /Developer/NVIDIA/CUDA-9.2/include/cudnn.h /Developer/NVIDIA/CUDA-9.2/lib/libcudnn*
|
||||
|
||||
# Install sccache
|
||||
sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2
|
||||
# This IAM user allows write access to S3 bucket for sccache
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}
|
||||
set -x
|
||||
|
||||
git submodule sync && git submodule update -q --init --recursive
|
||||
chmod a+x .jenkins/pytorch/macos-build.sh
|
||||
unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts
|
||||
|
||||
pytorch_android_gradle_build:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:347"
|
||||
PYTHON_VERSION: "3.6"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- setup_linux_system_environment
|
||||
- checkout
|
||||
- setup_ci_environment
|
||||
@ -187,14 +268,14 @@
|
||||
|
||||
# x86_32
|
||||
time docker pull ${docker_image_libtorch_android_x86_32} >/dev/null
|
||||
export id_x86_32=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_32})
|
||||
export id_x86_32=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_32})
|
||||
|
||||
export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace") | docker exec -u jenkins -i "$id_x86_32" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
# arm-v7a
|
||||
time docker pull ${docker_image_libtorch_android_arm_v7a} >/dev/null
|
||||
export id_arm_v7a=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_arm_v7a})
|
||||
export id_arm_v7a=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_arm_v7a})
|
||||
|
||||
export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace") | docker exec -u jenkins -i "$id_arm_v7a" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
@ -204,7 +285,7 @@
|
||||
|
||||
# x86_64
|
||||
time docker pull ${docker_image_libtorch_android_x86_64} >/dev/null
|
||||
export id_x86_64=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_64})
|
||||
export id_x86_64=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_64})
|
||||
|
||||
export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace") | docker exec -u jenkins -i "$id_x86_64" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
@ -214,7 +295,7 @@
|
||||
|
||||
# arm-v8a
|
||||
time docker pull ${docker_image_libtorch_android_arm_v8a} >/dev/null
|
||||
export id_arm_v8a=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_arm_v8a})
|
||||
export id_arm_v8a=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_arm_v8a})
|
||||
|
||||
export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace") | docker exec -u jenkins -i "$id_arm_v8a" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
@ -227,7 +308,7 @@
|
||||
docker cp ~/workspace/build_android_install_arm_v8a $id_x86_32:/var/lib/jenkins/workspace/build_android_install_arm_v8a
|
||||
|
||||
# run gradle buildRelease
|
||||
export COMMAND='((echo "source ./workspace/env" && echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GRADLE_OFFLINE=1" && echo "sudo chown -R jenkins workspace && cd workspace && ./.circleci/scripts/build_android_gradle.sh") | docker exec -u jenkins -i "$id_x86_32" bash) 2>&1'
|
||||
export COMMAND='((echo "source ./workspace/env" && echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "sudo chown -R jenkins workspace && cd workspace && ./.circleci/scripts/build_android_gradle.sh") | docker exec -u jenkins -i "$id_x86_32" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
mkdir -p ~/workspace/build_android_artifacts
|
||||
@ -243,13 +324,13 @@
|
||||
pytorch_android_publish_snapshot:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-publish-snapshot
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:347"
|
||||
PYTHON_VERSION: "3.6"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- setup_linux_system_environment
|
||||
- checkout
|
||||
- setup_ci_environment
|
||||
@ -267,7 +348,7 @@
|
||||
|
||||
# x86_32
|
||||
time docker pull ${docker_image_libtorch_android_x86_32_gradle} >/dev/null
|
||||
export id_x86_32=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_32_gradle})
|
||||
export id_x86_32=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_32_gradle})
|
||||
|
||||
export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace" && echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export SONATYPE_NEXUS_USERNAME=${SONATYPE_NEXUS_USERNAME}" && echo "export SONATYPE_NEXUS_PASSWORD=${SONATYPE_NEXUS_PASSWORD}" && echo "export ANDROID_SIGN_KEY=${ANDROID_SIGN_KEY}" && echo "export ANDROID_SIGN_PASS=${ANDROID_SIGN_PASS}" && echo "sudo chown -R jenkins workspace && cd workspace && ./.circleci/scripts/publish_android_snapshot.sh") | docker exec -u jenkins -i "$id_x86_32" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
@ -279,13 +360,13 @@
|
||||
pytorch_android_gradle_build-x86_32:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build-only-x86_32
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:347"
|
||||
PYTHON_VERSION: "3.6"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- run:
|
||||
name: filter out not PR runs
|
||||
no_output_timeout: "5m"
|
||||
@ -307,9 +388,9 @@
|
||||
|
||||
# x86
|
||||
time docker pull ${docker_image_libtorch_android_x86_32} >/dev/null
|
||||
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_32})
|
||||
export id=$(docker run -t -d -w /var/lib/jenkins ${docker_image_libtorch_android_x86_32})
|
||||
|
||||
export COMMAND='((echo "source ./workspace/env" && echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export GRADLE_OFFLINE=1" && echo "sudo chown -R jenkins workspace && cd workspace && ./.circleci/scripts/build_android_gradle.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
export COMMAND='((echo "source ./workspace/env" && echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "sudo chown -R jenkins workspace && cd workspace && ./.circleci/scripts/build_android_gradle.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
mkdir -p ~/workspace/build_android_x86_32_artifacts
|
||||
@ -325,34 +406,12 @@
|
||||
pytorch_ios_build:
|
||||
<<: *pytorch_ios_params
|
||||
macos:
|
||||
xcode: "11.2.1"
|
||||
xcode: "10.2.1"
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- checkout
|
||||
- run_brew_for_ios_build
|
||||
- run:
|
||||
name: Run Fastlane
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
PROJ_ROOT=/Users/distiller/project
|
||||
cd ${PROJ_ROOT}/ios/TestApp
|
||||
# install fastlane
|
||||
sudo gem install bundler && bundle install
|
||||
# install certificates
|
||||
echo ${IOS_CERT_KEY} >> cert.txt
|
||||
base64 --decode cert.txt -o Certificates.p12
|
||||
rm cert.txt
|
||||
bundle exec fastlane install_cert
|
||||
# install the provisioning profile
|
||||
PROFILE=TestApp_CI.mobileprovision
|
||||
PROVISIONING_PROFILES=~/Library/MobileDevice/Provisioning\ Profiles
|
||||
mkdir -pv "${PROVISIONING_PROFILES}"
|
||||
cd "${PROVISIONING_PROFILES}"
|
||||
echo ${IOS_SIGN_KEY} >> cert.txt
|
||||
base64 --decode cert.txt -o ${PROFILE}
|
||||
rm cert.txt
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "1h"
|
||||
@ -364,79 +423,24 @@
|
||||
export TCLLIBPATH="/usr/local/lib"
|
||||
|
||||
# Install conda
|
||||
curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x ~/conda.sh
|
||||
/bin/bash ~/conda.sh -b -p ~/anaconda
|
||||
curl -o ~/Downloads/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x ~/Downloads/conda.sh
|
||||
/bin/bash ~/Downloads/conda.sh -b -p ~/anaconda
|
||||
export PATH="~/anaconda/bin:${PATH}"
|
||||
source ~/anaconda/bin/activate
|
||||
|
||||
# Install dependencies
|
||||
retry () {
|
||||
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
|
||||
}
|
||||
|
||||
retry conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing requests --yes
|
||||
|
||||
conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing requests --yes
|
||||
# sync submodules
|
||||
cd ${PROJ_ROOT}
|
||||
git submodule sync
|
||||
git submodule update --init --recursive
|
||||
|
||||
# export
|
||||
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
|
||||
|
||||
# run build script
|
||||
chmod a+x ${PROJ_ROOT}/scripts/build_ios.sh
|
||||
echo "IOS_ARCH: ${IOS_ARCH}"
|
||||
echo "IOS_PLATFORM: ${IOS_PLATFORM}"
|
||||
|
||||
#check the custom build flag
|
||||
echo "SELECTED_OP_LIST: ${SELECTED_OP_LIST}"
|
||||
if [ -n "${SELECTED_OP_LIST}" ]; then
|
||||
export SELECTED_OP_LIST="${PROJ_ROOT}/ios/TestApp/custom_build/${SELECTED_OP_LIST}"
|
||||
fi
|
||||
export BUILD_PYTORCH_MOBILE=1
|
||||
export IOS_ARCH=${IOS_ARCH}
|
||||
export IOS_PLATFORM=${IOS_PLATFORM}
|
||||
unbuffer ${PROJ_ROOT}/scripts/build_ios.sh 2>&1 | ts
|
||||
- run:
|
||||
name: Run Build Test
|
||||
no_output_timeout: "30m"
|
||||
command: |
|
||||
set -e
|
||||
PROJ_ROOT=/Users/distiller/project
|
||||
PROFILE=TestApp_CI
|
||||
# run the ruby build script
|
||||
if ! [ -x "$(command -v xcodebuild)" ]; then
|
||||
echo 'Error: xcodebuild is not installed.'
|
||||
exit 1
|
||||
fi
|
||||
echo ${IOS_DEV_TEAM_ID}
|
||||
if [ ${IOS_PLATFORM} != "SIMULATOR" ]; then
|
||||
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM} -c ${PROFILE} -t ${IOS_DEV_TEAM_ID}
|
||||
else
|
||||
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM}
|
||||
fi
|
||||
if ! [ "$?" -eq "0" ]; then
|
||||
echo 'xcodebuild failed!'
|
||||
exit 1
|
||||
fi
|
||||
- run:
|
||||
name: Run Simulator Tests
|
||||
no_output_timeout: "2h"
|
||||
command: |
|
||||
set -e
|
||||
if [ ${IOS_PLATFORM} != "SIMULATOR" ]; then
|
||||
echo "not SIMULATOR build, skip it."
|
||||
exit 0
|
||||
fi
|
||||
WORKSPACE=/Users/distiller/workspace
|
||||
PROJ_ROOT=/Users/distiller/project
|
||||
source ~/anaconda/bin/activate
|
||||
pip install torch torchvision --progress-bar off
|
||||
#run unit test
|
||||
cd ${PROJ_ROOT}/ios/TestApp/benchmark
|
||||
python trace_model.py
|
||||
ruby setup.rb
|
||||
cd ${PROJ_ROOT}/ios/TestApp
|
||||
instruments -s -devices
|
||||
fastlane scan
|
||||
|
||||
@ -4,6 +4,10 @@
|
||||
- image: circleci/python:3.7.3
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Ensure config is up to date
|
||||
command: ./ensure-consistency.py
|
||||
working_directory: .circleci
|
||||
- run:
|
||||
name: Save commit message
|
||||
command: git log --format='%B' -n 1 HEAD > .circleci/scripts/COMMIT_MSG
|
||||
|
||||
@ -12,14 +12,10 @@ pytorch_params: &pytorch_params
|
||||
use_cuda_docker_runtime:
|
||||
type: string
|
||||
default: ""
|
||||
build_only:
|
||||
type: string
|
||||
default: ""
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: << parameters.build_environment >>
|
||||
DOCKER_IMAGE: << parameters.docker_image >>
|
||||
USE_CUDA_DOCKER_RUNTIME: << parameters.use_cuda_docker_runtime >>
|
||||
BUILD_ONLY: << parameters.build_only >>
|
||||
resource_class: << parameters.resource_class >>
|
||||
|
||||
pytorch_ios_params: &pytorch_ios_params
|
||||
@ -33,46 +29,11 @@ pytorch_ios_params: &pytorch_ios_params
|
||||
ios_platform:
|
||||
type: string
|
||||
default: ""
|
||||
op_list:
|
||||
type: string
|
||||
default: ""
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: << parameters.build_environment >>
|
||||
IOS_ARCH: << parameters.ios_arch >>
|
||||
IOS_PLATFORM: << parameters.ios_platform >>
|
||||
SELECTED_OP_LIST: << parameters.op_list >>
|
||||
|
||||
pytorch_windows_params: &pytorch_windows_params
|
||||
parameters:
|
||||
test_name:
|
||||
type: string
|
||||
default: ""
|
||||
cuda_version:
|
||||
type: string
|
||||
default: "10"
|
||||
python_version:
|
||||
type: string
|
||||
default: "3.6"
|
||||
vc_version:
|
||||
type: string
|
||||
default: "14.11"
|
||||
vc_year:
|
||||
type: string
|
||||
default: "2017"
|
||||
vc_product:
|
||||
type: string
|
||||
default: "BuildTools"
|
||||
use_cuda:
|
||||
type: string
|
||||
default: ""
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: "pytorch-win-ws2019-cuda10-cudnn7-py3"
|
||||
SCCACHE_BUCKET: "ossci-compiler-cache"
|
||||
CUDA_VERSION: <<parameters.cuda_version>>
|
||||
PYTHON_VERSION: <<parameters.python_version>>
|
||||
VC_VERSION: <<parameters.vc_version>>
|
||||
VC_YEAR: <<parameters.vc_year>>
|
||||
VC_PRODUCT: <<parameters.vc_product>>
|
||||
USE_CUDA: <<parameters.use_cuda>>
|
||||
TORCH_CUDA_ARCH_LIST: "7.5"
|
||||
JOB_BASE_NAME: <<parameters.test_name>>
|
||||
|
||||
|
||||
|
||||
|
||||
@ -5,7 +5,7 @@ jobs:
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- setup_linux_system_environment
|
||||
- checkout
|
||||
- setup_ci_environment
|
||||
@ -17,56 +17,52 @@ jobs:
|
||||
# Pull Docker image and run build
|
||||
echo "DOCKER_IMAGE: "${DOCKER_IMAGE}
|
||||
time docker pull ${DOCKER_IMAGE} >/dev/null
|
||||
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${DOCKER_IMAGE})
|
||||
export id=$(docker run -t -d -w /var/lib/jenkins ${DOCKER_IMAGE})
|
||||
|
||||
# TODO We may want to move the rebase logic to a separate step after checkout
|
||||
# Rebase to release/1.5 only if in xenial_py3_6_gcc5_4 case
|
||||
if [[ "${CIRCLE_BRANCH}" != "release/1.5" && "${BUILD_ENVIRONMENT}" == *"gcc5"* ]]; then
|
||||
echo "Merge release/1.5 branch into $CIRCLE_BRANCH before build in environment $BUILD_ENVIRONMENT"
|
||||
# Rebase to v1.3.0 only if in xenial_py3_6_gcc5_4 case
|
||||
if [[ "${CIRCLE_BRANCH}" != "v1.3.0" && "${BUILD_ENVIRONMENT}" == *"gcc5"* ]]; then
|
||||
echo "Merge v1.3.0 branch into $CIRCLE_BRANCH before build in environment $BUILD_ENVIRONMENT"
|
||||
set -x
|
||||
git config --global user.email "circleci.ossci@gmail.com"
|
||||
git config --global user.name "CircleCI"
|
||||
git config remote.origin.url https://github.com/pytorch/pytorch.git
|
||||
git config --add remote.origin.fetch +refs/heads/release/1.5:refs/remotes/origin/release/1.5
|
||||
git fetch --tags --progress https://github.com/pytorch/pytorch.git +refs/heads/release/1.5:refs/remotes/origin/release/1.5 --depth=100 --quiet
|
||||
export GIT_MERGE_TARGET=`git log -n 1 --pretty=format:"%H" origin/release/1.5`
|
||||
git config --add remote.origin.fetch +refs/heads/v1.3.0:refs/remotes/origin/v1.3.0
|
||||
git fetch --tags --progress https://github.com/pytorch/pytorch.git +refs/heads/v1.3.0:refs/remotes/origin/v1.3.0 --depth=50 --quiet
|
||||
export GIT_MERGE_TARGET=`git log -n 1 --pretty=format:"%H" origin/v1.3.0`
|
||||
echo "GIT_MERGE_TARGET: " ${GIT_MERGE_TARGET}
|
||||
export GIT_COMMIT=${CIRCLE_SHA1}
|
||||
echo "GIT_COMMIT: " ${GIT_COMMIT}
|
||||
git checkout -f ${GIT_COMMIT}
|
||||
git reset --hard ${GIT_COMMIT}
|
||||
git merge --allow-unrelated-histories --no-edit --no-ff ${GIT_MERGE_TARGET}
|
||||
git merge --no-edit --no-ff ${GIT_MERGE_TARGET}
|
||||
set +x
|
||||
else
|
||||
echo "Do NOT merge release/1.5 branch into $CIRCLE_BRANCH in environment $BUILD_ENVIRONMENT"
|
||||
echo "Do NOT merge v1.3.0 branch into $CIRCLE_BRANCH in environment $BUILD_ENVIRONMENT"
|
||||
fi
|
||||
|
||||
git submodule sync && git submodule update -q --init --recursive
|
||||
|
||||
docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace
|
||||
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
export PARALLEL_FLAGS="export ATEN_THREADING=TBB USE_TBB=1 "
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
export PARALLEL_FLAGS="export ATEN_THREADING=NATIVE "
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"namedtensor"* ]]; then
|
||||
NAMED_FLAG="export BUILD_NAMEDTENSOR=1"
|
||||
fi
|
||||
echo "Parallel backend flags: "${PARALLEL_FLAGS}
|
||||
|
||||
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo '"$PARALLEL_FLAGS"' && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
|
||||
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo '"$NAMED_FLAG"' && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
# Push intermediate Docker image for next phase to use
|
||||
if [ -z "${BUILD_ONLY}" ]; then
|
||||
# Note [Special build images]
|
||||
# The xla build uses the same docker image as
|
||||
# The namedtensor and xla builds use the same docker image as
|
||||
# pytorch-linux-trusty-py3.6-gcc5.4-build. In the push step, we have to
|
||||
# distinguish between them so the test can pick up the correct image.
|
||||
output_image=${DOCKER_IMAGE}-${CIRCLE_SHA1}
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"xla"* ]]; then
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"namedtensor"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-namedtensor
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"xla"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-xla
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"libtorch"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-libtorch
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-x86_64"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-x86_64
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-arm-v7a"* ]]; then
|
||||
@ -88,7 +84,7 @@ jobs:
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- attach_scripts
|
||||
- should_run_job
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
@ -98,152 +94,24 @@ jobs:
|
||||
set -e
|
||||
# See Note [Special build images]
|
||||
output_image=${DOCKER_IMAGE}-${CIRCLE_SHA1}
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"xla"* ]]; then
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"namedtensor"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-namedtensor
|
||||
export NAMED_FLAG="export BUILD_NAMEDTENSOR=1 && export TEST_NAMEDTENSOR=1"
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"xla"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-xla
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"libtorch"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-libtorch
|
||||
else
|
||||
export COMMIT_DOCKER_IMAGE=$output_image
|
||||
fi
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
export PARALLEL_FLAGS="export ATEN_THREADING=TBB USE_TBB=1 "
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
export PARALLEL_FLAGS="export ATEN_THREADING=NATIVE "
|
||||
fi
|
||||
echo "Parallel backend flags: "${PARALLEL_FLAGS}
|
||||
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
|
||||
if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
|
||||
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
else
|
||||
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
fi
|
||||
|
||||
retrieve_test_reports() {
|
||||
echo "retrieving test reports"
|
||||
docker cp $id:/var/lib/jenkins/workspace/test/test-reports ./ || echo 'No test reports found!'
|
||||
}
|
||||
trap "retrieve_test_reports" ERR
|
||||
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"multigpu"* ]]; then
|
||||
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${PARALLEL_FLAGS}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/multigpu-test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/multigpu-test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
else
|
||||
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "export CIRCLE_PULL_REQUEST=${CIRCLE_PULL_REQUEST}" && echo "${PARALLEL_FLAGS}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
fi
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
retrieve_test_reports
|
||||
- store_test_results:
|
||||
path: test-reports
|
||||
|
||||
pytorch_windows_build:
|
||||
<<: *pytorch_windows_params
|
||||
parameters:
|
||||
test_name:
|
||||
type: string
|
||||
default: ""
|
||||
cuda_version:
|
||||
type: string
|
||||
default: "10"
|
||||
python_version:
|
||||
type: string
|
||||
default: "3.6"
|
||||
vc_version:
|
||||
type: string
|
||||
default: "14.11"
|
||||
vc_year:
|
||||
type: string
|
||||
default: "2017"
|
||||
vc_product:
|
||||
type: string
|
||||
default: "BuildTools"
|
||||
use_cuda:
|
||||
type: string
|
||||
default: ""
|
||||
executor: windows-cpu-with-nvidia-cuda
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Install VS2017
|
||||
command: |
|
||||
if [[ "${VC_YEAR}" == "2017" ]]; then
|
||||
powershell .circleci/scripts/vs_install.ps1
|
||||
fi
|
||||
- run:
|
||||
name: Install Cuda
|
||||
no_output_timeout: 30m
|
||||
command: |
|
||||
curl --retry 3 -kLO https://ossci-windows.s3.amazonaws.com/cuda_10.1.243_426.00_win10.exe
|
||||
mkdir cuda_install_logs
|
||||
./cuda_10.1.243_426.00_win10.exe -s -loglevel:6 -log:"$(pwd -W)/cuda_install_logs"
|
||||
cat cuda_install_logs/LOG.setup.exe.log
|
||||
if ! ls "/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1/bin/nvcc.exe"
|
||||
then
|
||||
echo "CUDA installation failed"
|
||||
exit 1
|
||||
fi
|
||||
rm -rf ./cuda_install_logs
|
||||
rm -f ./cuda_10.1.243_426.00_win10.exe
|
||||
- run:
|
||||
name: Install Cudnn
|
||||
command : |
|
||||
cd c:/
|
||||
curl --retry 3 -O https://ossci-windows.s3.amazonaws.com/cudnn-10.1-windows10-x64-v7.6.4.38.zip
|
||||
7z x cudnn-10.1-windows10-x64-v7.6.4.38.zip -ocudnn
|
||||
cp -r cudnn/cuda/* "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1/"
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "90m"
|
||||
command: |
|
||||
set -e
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_WIN_BUILD_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1}
|
||||
set -x
|
||||
.jenkins/pytorch/win-build.sh
|
||||
pytorch_windows_test:
|
||||
<<: *pytorch_windows_params
|
||||
parameters:
|
||||
test_name:
|
||||
type: string
|
||||
default: ""
|
||||
cuda_version:
|
||||
type: string
|
||||
default: "10"
|
||||
python_version:
|
||||
type: string
|
||||
default: "3.6"
|
||||
vc_version:
|
||||
type: string
|
||||
default: "14.11"
|
||||
vc_year:
|
||||
type: string
|
||||
default: "2017"
|
||||
vc_product:
|
||||
type: string
|
||||
default: "BuildTools"
|
||||
use_cuda:
|
||||
type: string
|
||||
default: ""
|
||||
executor: windows-with-nvidia-gpu
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Install VS2017
|
||||
command: |
|
||||
if [[ "${VC_YEAR}" == "2017" ]]; then
|
||||
powershell .circleci/scripts/vs_install.ps1
|
||||
fi
|
||||
- run:
|
||||
name: Test
|
||||
no_output_timeout: "30m"
|
||||
command: |
|
||||
set -e
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_WIN_BUILD_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1}
|
||||
set -x
|
||||
.jenkins/pytorch/win-test.sh
|
||||
|
||||
@ -1,140 +0,0 @@
|
||||
# Warning: indentation here matters!
|
||||
|
||||
- pytorch_windows_build:
|
||||
name: pytorch_windows_vs2017_14.11_py36_cuda10.1_build
|
||||
cuda_version: "10"
|
||||
python_version: "3.6"
|
||||
vc_version: "14.11"
|
||||
vc_year: "2017"
|
||||
vc_product: "BuildTools"
|
||||
use_cuda: "1"
|
||||
requires:
|
||||
- setup
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
- pytorch_windows_test:
|
||||
name: pytorch_windows_vs2017_14.11_py36_cuda10.1_test1
|
||||
test_name: pytorch-windows-test1
|
||||
cuda_version: "10"
|
||||
python_version: "3.6"
|
||||
vc_version: "14.11"
|
||||
vc_year: "2017"
|
||||
vc_product: "BuildTools"
|
||||
use_cuda: "1"
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_windows_vs2017_14.11_py36_cuda10.1_build
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
- pytorch_windows_test:
|
||||
name: pytorch_windows_vs2017_14.11_py36_cuda10.1_test2
|
||||
test_name: pytorch-windows-test2
|
||||
cuda_version: "10"
|
||||
python_version: "3.6"
|
||||
vc_version: "14.11"
|
||||
vc_year: "2017"
|
||||
vc_product: "BuildTools"
|
||||
use_cuda: "1"
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_windows_vs2017_14.11_py36_cuda10.1_build
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
- pytorch_windows_build:
|
||||
name: pytorch_windows_vs2017_14.16_py36_cuda10.1_build
|
||||
cuda_version: "10"
|
||||
python_version: "3.6"
|
||||
vc_version: "14.16"
|
||||
vc_year: "2017"
|
||||
vc_product: "BuildTools"
|
||||
use_cuda: "1"
|
||||
requires:
|
||||
- setup
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
- pytorch_windows_test:
|
||||
name: pytorch_windows_vs2017_14.16_py36_cuda10.1_test1
|
||||
test_name: pytorch-windows-test1
|
||||
cuda_version: "10"
|
||||
python_version: "3.6"
|
||||
vc_version: "14.16"
|
||||
vc_year: "2017"
|
||||
vc_product: "BuildTools"
|
||||
use_cuda: "1"
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_windows_vs2017_14.16_py36_cuda10.1_build
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
- pytorch_windows_test:
|
||||
name: pytorch_windows_vs2017_14.16_py36_cuda10.1_test2
|
||||
test_name: pytorch-windows-test2
|
||||
cuda_version: "10"
|
||||
python_version: "3.6"
|
||||
vc_version: "14.16"
|
||||
vc_year: "2017"
|
||||
vc_product: "BuildTools"
|
||||
use_cuda: "1"
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_windows_vs2017_14.16_py36_cuda10.1_build
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
- pytorch_windows_build:
|
||||
name: pytorch_windows_vs2019_py36_cuda10.1_build
|
||||
cuda_version: "10"
|
||||
python_version: "3.6"
|
||||
vc_version: ""
|
||||
vc_year: "2019"
|
||||
vc_product: "Community"
|
||||
use_cuda: "1"
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_windows_test:
|
||||
name: pytorch_windows_vs2019_py36_cuda10.1_test1
|
||||
test_name: pytorch-windows-test1
|
||||
cuda_version: "10"
|
||||
python_version: "3.6"
|
||||
vc_version: ""
|
||||
vc_year: "2019"
|
||||
vc_product: "Community"
|
||||
use_cuda: "1"
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_windows_vs2019_py36_cuda10.1_build
|
||||
- pytorch_windows_test:
|
||||
name: pytorch_windows_vs2019_py36_cuda10.1_test2
|
||||
test_name: pytorch-windows-test2
|
||||
cuda_version: "10"
|
||||
python_version: "3.6"
|
||||
vc_version: ""
|
||||
vc_year: "2019"
|
||||
vc_product: "Community"
|
||||
use_cuda: "1"
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_windows_vs2019_py36_cuda10.1_build
|
||||
@ -1,5 +1,3 @@
|
||||
# TODO: Refactor circleci/cimodel/data/binary_build_data.py to generate this file
|
||||
# instead of doing one offs here
|
||||
# Binary builds (subset, to smoke test that they'll work)
|
||||
#
|
||||
# NB: If you modify this file, you need to also modify
|
||||
@ -8,114 +6,96 @@
|
||||
# at https://github.com/ezyang/pytorch-ci-hud/blob/master/src/BuildHistoryDisplay.js
|
||||
|
||||
- binary_linux_build:
|
||||
name: binary_linux_manywheel_3_7m_cu102_devtoolset7_build
|
||||
build_environment: "manywheel 3.7m cu102 devtoolset7"
|
||||
name: binary_linux_manywheel_2_7mu_cpu_devtoolset7_build
|
||||
build_environment: "manywheel 2.7mu cpu devtoolset7"
|
||||
requires:
|
||||
- setup
|
||||
docker_image: "pytorch/manylinux-cuda102"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
docker_image: "soumith/manylinux-cuda100"
|
||||
- binary_linux_build:
|
||||
name: binary_linux_manywheel_3_7m_cu100_devtoolset7_build
|
||||
build_environment: "manywheel 3.7m cu100 devtoolset7"
|
||||
requires:
|
||||
- setup
|
||||
docker_image: "soumith/manylinux-cuda100"
|
||||
- binary_linux_build:
|
||||
name: binary_linux_conda_2_7_cpu_devtoolset7_build
|
||||
build_environment: "conda 2.7 cpu devtoolset7"
|
||||
requires:
|
||||
- setup
|
||||
docker_image: "soumith/conda-cuda"
|
||||
# This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710
|
||||
# - binary_linux_conda_3_6_cu90_devtoolset7_build
|
||||
# TODO rename to remove python version for libtorch
|
||||
- binary_linux_build:
|
||||
name: binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build
|
||||
build_environment: "libtorch 3.7m cpu devtoolset7"
|
||||
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_build
|
||||
build_environment: "libtorch 2.7m cpu devtoolset7"
|
||||
requires:
|
||||
- setup
|
||||
libtorch_variant: "shared-with-deps"
|
||||
docker_image: "pytorch/manylinux-cuda102"
|
||||
docker_image: "soumith/manylinux-cuda100"
|
||||
- binary_linux_build:
|
||||
name: binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build
|
||||
build_environment: "libtorch 3.7m cpu gcc5.4_cxx11-abi"
|
||||
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build
|
||||
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
|
||||
requires:
|
||||
- setup
|
||||
libtorch_variant: "shared-with-deps"
|
||||
docker_image: "pytorch/pytorch-binary-docker-image-ubuntu16.04:latest"
|
||||
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
|
||||
# TODO we should test a libtorch cuda build, but they take too long
|
||||
# - binary_linux_libtorch_2_7m_cu90_devtoolset7_static-without-deps_build
|
||||
- binary_mac_build:
|
||||
name: binary_macos_wheel_3_7_cpu_build
|
||||
build_environment: "wheel 3.7 cpu"
|
||||
name: binary_macos_wheel_3_6_cpu_build
|
||||
build_environment: "wheel 3.6 cpu"
|
||||
requires:
|
||||
- setup
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
# This job has an average run time of 3 hours o.O
|
||||
# Now only running this on master to reduce overhead
|
||||
# TODO rename to remove python version for libtorch
|
||||
- binary_mac_build:
|
||||
name: binary_macos_libtorch_3_7_cpu_build
|
||||
build_environment: "libtorch 3.7 cpu"
|
||||
name: binary_macos_conda_2_7_cpu_build
|
||||
build_environment: "conda 2.7 cpu"
|
||||
requires:
|
||||
- setup
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
- binary_windows_build:
|
||||
name: binary_windows_libtorch_3_7_cpu_debug_build
|
||||
build_environment: "libtorch 3.7 cpu debug"
|
||||
- binary_mac_build:
|
||||
name: binary_macos_libtorch_2_7_cpu_build
|
||||
build_environment: "libtorch 2.7 cpu"
|
||||
requires:
|
||||
- setup
|
||||
- binary_windows_build:
|
||||
name: binary_windows_libtorch_3_7_cpu_release_build
|
||||
build_environment: "libtorch 3.7 cpu release"
|
||||
requires:
|
||||
- setup
|
||||
- binary_windows_build:
|
||||
name: binary_windows_wheel_3_7_cu102_build
|
||||
build_environment: "wheel 3.7 cu102"
|
||||
requires:
|
||||
- setup
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
|
||||
- binary_linux_test:
|
||||
name: binary_linux_manywheel_3_7m_cu102_devtoolset7_test
|
||||
build_environment: "manywheel 3.7m cu102 devtoolset7"
|
||||
name: binary_linux_manywheel_2_7mu_cpu_devtoolset7_test
|
||||
build_environment: "manywheel 2.7mu cpu devtoolset7"
|
||||
requires:
|
||||
- setup
|
||||
- binary_linux_manywheel_3_7m_cu102_devtoolset7_build
|
||||
docker_image: "pytorch/manylinux-cuda102"
|
||||
- binary_linux_manywheel_2_7mu_cpu_devtoolset7_build
|
||||
docker_image: "soumith/manylinux-cuda100"
|
||||
- binary_linux_test:
|
||||
name: binary_linux_manywheel_3_7m_cu100_devtoolset7_test
|
||||
build_environment: "manywheel 3.7m cu100 devtoolset7"
|
||||
requires:
|
||||
- setup
|
||||
- binary_linux_manywheel_3_7m_cu100_devtoolset7_build
|
||||
docker_image: "soumith/manylinux-cuda100"
|
||||
use_cuda_docker_runtime: "1"
|
||||
resource_class: gpu.medium
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
- binary_linux_test:
|
||||
name: binary_linux_conda_2_7_cpu_devtoolset7_test
|
||||
build_environment: "conda 2.7 cpu devtoolset7"
|
||||
requires:
|
||||
- setup
|
||||
- binary_linux_conda_2_7_cpu_devtoolset7_build
|
||||
docker_image: "soumith/conda-cuda"
|
||||
# This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710
|
||||
# - binary_linux_conda_3_6_cu90_devtoolset7_test:
|
||||
# TODO rename to remove python version for libtorch
|
||||
- binary_linux_test:
|
||||
name: binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_test
|
||||
build_environment: "libtorch 3.7m cpu devtoolset7"
|
||||
name: binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_test
|
||||
build_environment: "libtorch 2.7m cpu devtoolset7"
|
||||
requires:
|
||||
- setup
|
||||
- binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build
|
||||
- binary_linux_libtorch_2_7m_cpu_devtoolset7_shared-with-deps_build
|
||||
libtorch_variant: "shared-with-deps"
|
||||
docker_image: "pytorch/manylinux-cuda102"
|
||||
docker_image: "soumith/manylinux-cuda100"
|
||||
- binary_linux_test:
|
||||
name: binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test
|
||||
build_environment: "libtorch 3.7m cpu gcc5.4_cxx11-abi"
|
||||
name: binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test
|
||||
build_environment: "libtorch 2.7m cpu gcc5.4_cxx11-abi"
|
||||
requires:
|
||||
- setup
|
||||
- binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build
|
||||
- binary_linux_libtorch_2_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build
|
||||
libtorch_variant: "shared-with-deps"
|
||||
docker_image: "pytorch/pytorch-binary-docker-image-ubuntu16.04:latest"
|
||||
docker_image: "yf225/pytorch-binary-docker-image-ubuntu16.04:latest"
|
||||
|
||||
|
||||
@ -1,58 +0,0 @@
|
||||
docker_build:
|
||||
triggers:
|
||||
- schedule:
|
||||
cron: "0 15 * * 0"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
jobs:
|
||||
- docker_for_ecr_gc_build_job
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-bionic-clang9-thrift-llvmdev"
|
||||
image_name: "pytorch-linux-bionic-clang9-thrift-llvmdev"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-cuda10-cudnn7-py3-gcc7"
|
||||
image_name: "pytorch-linux-xenial-cuda10-cudnn7-py3-gcc7"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7"
|
||||
image_name: "pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
|
||||
image_name: "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-cuda9-cudnn7-py3"
|
||||
image_name: "pytorch-linux-xenial-cuda9-cudnn7-py3"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc7"
|
||||
image_name: "pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc7"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c"
|
||||
image_name: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-py3-clang5-asan"
|
||||
image_name: "pytorch-linux-xenial-py3-clang5-asan"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-py3.5"
|
||||
image_name: "pytorch-linux-xenial-py3.5"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-py3.8"
|
||||
image_name: "pytorch-linux-xenial-py3.8"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-py3.6-clang7"
|
||||
image_name: "pytorch-linux-xenial-py3.6-clang7"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-py3.6-gcc4.8"
|
||||
image_name: "pytorch-linux-xenial-py3.6-gcc4.8"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-py3.6-gcc5.4"
|
||||
image_name: "pytorch-linux-xenial-py3.6-gcc5.4"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-py3.6-gcc7.2"
|
||||
image_name: "pytorch-linux-xenial-py3.6-gcc7.2"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-py3.6-gcc7"
|
||||
image_name: "pytorch-linux-xenial-py3.6-gcc7"
|
||||
- docker_build_job:
|
||||
name: "pytorch-linux-xenial-pynightly"
|
||||
image_name: "pytorch-linux-xenial-pynightly"
|
||||
@ -1,28 +0,0 @@
|
||||
ecr_gc:
|
||||
triggers:
|
||||
- schedule:
|
||||
cron: "45 * * * *"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
jobs:
|
||||
- ecr_gc_job:
|
||||
name: ecr_gc_job_for_pytorch
|
||||
project: pytorch
|
||||
tags_to_keep: "271,262,256,278,282,291,300,323,327,347,389,401,402,403,405,a8006f9a-272d-4478-b137-d121c6f05c83,6e7b11da-a919-49e5-b2ba-da66e3d4bb0a,f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
- ecr_gc_job:
|
||||
name: ecr_gc_job_for_caffe2
|
||||
project: caffe2
|
||||
tags_to_keep: "348,345,336,325,324,315,306,301,287,283,276,273,266,253,248,238,230,213"
|
||||
- ecr_gc_job:
|
||||
name: ecr_gc_job_for_translate
|
||||
project: translate
|
||||
tags_to_keep: "8"
|
||||
- ecr_gc_job:
|
||||
name: ecr_gc_job_for_tensorcomp
|
||||
project: tensorcomp
|
||||
tags_to_keep: "34"
|
||||
- docker_hub_index_job
|
||||
|
||||
|
||||
@ -3,7 +3,7 @@
|
||||
build_environment: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-x86_32"
|
||||
requires:
|
||||
- setup
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:347"
|
||||
filters:
|
||||
branches:
|
||||
only: nightly
|
||||
@ -12,7 +12,7 @@
|
||||
build_environment: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-x86_64"
|
||||
requires:
|
||||
- setup
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:347"
|
||||
filters:
|
||||
branches:
|
||||
only: nightly
|
||||
@ -21,7 +21,7 @@
|
||||
build_environment: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-arm-v7a"
|
||||
requires:
|
||||
- setup
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:347"
|
||||
filters:
|
||||
branches:
|
||||
only: nightly
|
||||
@ -30,7 +30,7 @@
|
||||
build_environment: "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-arm-v8a"
|
||||
requires:
|
||||
- setup
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:347"
|
||||
filters:
|
||||
branches:
|
||||
only: nightly
|
||||
|
||||
@ -1,8 +1,7 @@
|
||||
# Pytorch iOS binary builds
|
||||
- binary_ios_build:
|
||||
name: pytorch_ios_11_2_1_nightly_x86_64_build
|
||||
build_environment: "libtorch-ios-11.2.1-nightly-x86_64-build"
|
||||
context: org-member
|
||||
name: pytorch_ios_10_2_1_nightly_x86_64_build
|
||||
build_environment: "libtorch-ios-10.2.1-nightly-x86_64-build"
|
||||
ios_platform: "SIMULATOR"
|
||||
ios_arch: "x86_64"
|
||||
requires:
|
||||
@ -11,9 +10,8 @@
|
||||
branches:
|
||||
only: nightly
|
||||
- binary_ios_build:
|
||||
name: pytorch_ios_11_2_1_nightly_arm64_build
|
||||
build_environment: "libtorch-ios-11.2.1-nightly-arm64-build"
|
||||
context: org-member
|
||||
name: pytorch_ios_10_2_1_nightly_arm64_build
|
||||
build_environment: "libtorch-ios-10.2.1-nightly-arm64-build"
|
||||
ios_arch: "arm64"
|
||||
ios_platform: "OS"
|
||||
requires:
|
||||
@ -22,12 +20,12 @@
|
||||
branches:
|
||||
only: nightly
|
||||
- binary_ios_upload:
|
||||
build_environment: "libtorch-ios-11.2.1-nightly-binary-build-upload"
|
||||
build_environment: "libtorch-ios-10.2.1-nightly-binary-build-upload"
|
||||
context: org-member
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_ios_11_2_1_nightly_x86_64_build
|
||||
- pytorch_ios_11_2_1_nightly_arm64_build
|
||||
- pytorch_ios_10_2_1_nightly_x86_64_build
|
||||
- pytorch_ios_10_2_1_nightly_arm64_build
|
||||
filters:
|
||||
branches:
|
||||
only: nightly
|
||||
|
||||
@ -4,5 +4,8 @@
|
||||
#- binary_linux_libtorch_2.7m_cu90_test:
|
||||
# requires:
|
||||
# - binary_linux_libtorch_2.7m_cu90_build
|
||||
#- binary_linux_libtorch_2.7m_cu100_test:
|
||||
# requires:
|
||||
# - binary_linux_libtorch_2.7m_cu100_build
|
||||
|
||||
# Nightly uploads
|
||||
|
||||
@ -1,22 +1,10 @@
|
||||
- pytorch_android_gradle_build-x86_32:
|
||||
name: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build-x86_32
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
requires:
|
||||
- pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build
|
||||
|
||||
- pytorch_android_gradle_build:
|
||||
name: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
requires:
|
||||
- pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build
|
||||
- pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build
|
||||
|
||||
@ -1,16 +0,0 @@
|
||||
- pytorch_linux_test:
|
||||
name: pytorch_linux_xenial_py3_6_gcc5_4_ge_config_legacy_test
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_linux_xenial_py3_6_gcc5_4_build
|
||||
build_environment: "pytorch-linux-xenial-py3.6-gcc5.4-ge_config_legacy-test"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
resource_class: large
|
||||
- pytorch_linux_test:
|
||||
name: pytorch_linux_xenial_py3_6_gcc5_4_ge_config_profiling_test
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_linux_xenial_py3_6_gcc5_4_build
|
||||
build_environment: "pytorch-linux-xenial-py3.6-gcc5.4-ge_config_profiling-test"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
resource_class: large
|
||||
@ -1,26 +1,13 @@
|
||||
# Pytorch iOS PR builds
|
||||
- pytorch_ios_build:
|
||||
name: pytorch_ios_11_2_1_x86_64_build
|
||||
build_environment: "pytorch-ios-11.2.1-x86_64_build"
|
||||
ios_arch: "x86_64"
|
||||
name: pytorch_ios_10_2_1_x86_64_build
|
||||
build_environment: "pytorch-ios-10.2.1-x86_64_build"
|
||||
ios_platform: "SIMULATOR"
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_ios_build:
|
||||
name: pytorch_ios_11_2_1_arm64_build
|
||||
context: org-member
|
||||
build_environment: "pytorch-ios-11.2.1-arm64_build"
|
||||
name: pytorch_ios_10_2_1_arm64_build
|
||||
build_environment: "pytorch-ios-10.2.1-arm64_build"
|
||||
ios_arch: "arm64"
|
||||
ios_platform: "OS"
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_ios_build:
|
||||
name: pytorch_ios_11_2_1_arm64_custom_build
|
||||
context: org-member
|
||||
build_environment: "pytorch-ios-11.2.1-arm64_custom_build"
|
||||
ios_arch: "arm64"
|
||||
ios_platform: "OS"
|
||||
op_list: "mobilenetv2.yaml"
|
||||
requires:
|
||||
- setup
|
||||
|
||||
|
||||
@ -8,3 +8,6 @@
|
||||
requires:
|
||||
- setup
|
||||
- pytorch_macos_10_13_py3_build
|
||||
- pytorch_macos_10_13_cuda9_2_cudnn7_py3_build:
|
||||
requires:
|
||||
- setup
|
||||
|
||||
@ -1,38 +0,0 @@
|
||||
# PyTorch Mobile PR builds (use linux host toolchain + mobile build options)
|
||||
- pytorch_linux_build:
|
||||
name: pytorch_linux_xenial_py3_clang5_mobile_build
|
||||
requires:
|
||||
- setup
|
||||
build_environment: "pytorch-linux-xenial-py3-clang5-mobile-build"
|
||||
build_only: "1"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
- pytorch_linux_build:
|
||||
name: pytorch_linux_xenial_py3_clang5_mobile_custom_build_static
|
||||
requires:
|
||||
- setup
|
||||
build_environment: "pytorch-linux-xenial-py3-clang5-mobile-custom-build-static"
|
||||
build_only: "1"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
- pytorch_linux_build:
|
||||
name: pytorch_linux_xenial_py3_clang5_mobile_custom_build_dynamic
|
||||
requires:
|
||||
- setup
|
||||
build_environment: "pytorch-linux-xenial-py3-clang5-mobile-custom-build-dynamic"
|
||||
build_only: "1"
|
||||
# Use LLVM-DEV toolchain in android-ndk-r19c docker image
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
- pytorch_linux_build:
|
||||
name: pytorch_linux_xenial_py3_clang5_mobile_code_analysis
|
||||
requires:
|
||||
- setup
|
||||
# Most of this CI is already covered by "mobile-custom-build-dynamic" job
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /ci-all\/.*/
|
||||
- /release\/.*/
|
||||
build_environment: "pytorch-linux-xenial-py3-clang5-mobile-code-analysis"
|
||||
build_only: "1"
|
||||
# Use LLVM-DEV toolchain in android-ndk-r19c docker image
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c:f990c76a-a798-42bb-852f-5be5006f8026"
|
||||
@ -1,3 +1,16 @@
|
||||
|
||||
# Scheduled to run 4 hours after the binary jobs start
|
||||
# These jobs need to run after all the binary jobs run, regardless of if the
|
||||
# jobs failed or not. There's no way to do this in CircleCI right now, so we
|
||||
# just schedule this to run after all the binary jobs should've finished.
|
||||
# These jobs are all idempotent and very lightweight; they just upload html
|
||||
# files that track what binaries are available and what their sizes are.
|
||||
update_s3_htmls:
|
||||
jobs:
|
||||
- setup:
|
||||
filters:
|
||||
branches:
|
||||
only: postnightly
|
||||
- update_s3_htmls_for_nightlies:
|
||||
context: org-member
|
||||
requires:
|
||||
|
||||
@ -1,8 +0,0 @@
|
||||
- setup:
|
||||
# Run this job on everything since it is
|
||||
# the dependency for everything.
|
||||
filters:
|
||||
tags:
|
||||
only: /.*/
|
||||
branches:
|
||||
only: /.*/
|
||||
56 .clang-tidy
@ -1,32 +1,34 @@
---
# NOTE there must be no spaces before the '-', so put the comma last.
Checks: '-*,
bugprone-*,
-bugprone-forward-declaration-namespace,
-bugprone-macro-parentheses,
-bugprone-lambda-function-name,
cppcoreguidelines-*,
-cppcoreguidelines-interfaces-global-init,
-cppcoreguidelines-owning-memory,
-cppcoreguidelines-pro-bounds-array-to-pointer-decay,
-cppcoreguidelines-pro-bounds-constant-array-index,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-cppcoreguidelines-pro-type-cstyle-cast,
-cppcoreguidelines-pro-type-reinterpret-cast,
-cppcoreguidelines-pro-type-static-cast-downcast,
-cppcoreguidelines-pro-type-union-access,
-cppcoreguidelines-pro-type-vararg,
-cppcoreguidelines-special-member-functions,
hicpp-exception-baseclass,
hicpp-avoid-goto,
modernize-*,
-modernize-return-braced-init-list,
-modernize-use-auto,
-modernize-use-default-member-init,
-modernize-use-using,
performance-*,
-performance-noexcept-move-constructor,
# NOTE there must be no spaces before the '-', so put the comma first.
Checks: '
-*
,bugprone-*
,-bugprone-forward-declaration-namespace
,-bugprone-macro-parentheses
,-bugprone-lambda-function-name
,cppcoreguidelines-*
,-cppcoreguidelines-interfaces-global-init
,-cppcoreguidelines-owning-memory
,-cppcoreguidelines-pro-bounds-array-to-pointer-decay
,-cppcoreguidelines-pro-bounds-constant-array-index
,-cppcoreguidelines-pro-bounds-pointer-arithmetic
,-cppcoreguidelines-pro-type-cstyle-cast
,-cppcoreguidelines-pro-type-reinterpret-cast
,-cppcoreguidelines-pro-type-static-cast-downcast
,-cppcoreguidelines-pro-type-union-access
,-cppcoreguidelines-pro-type-vararg
,-cppcoreguidelines-special-member-functions
,hicpp-exception-baseclass
,hicpp-avoid-goto
,modernize-*
,-modernize-return-braced-init-list
,-modernize-use-auto
,-modernize-use-default-member-init
,-modernize-use-using
,performance-*
,-performance-noexcept-move-constructor
'
WarningsAsErrors: '*'
HeaderFilterRegex: 'torch/csrc/.*'
AnalyzeTemporaryDtors: false
CheckOptions:

7 .flake8
@ -5,9 +5,10 @@ max-line-length = 120
# E501 is not flexible enough, we're using B950 instead
ignore =
E203,E305,E402,E501,E721,E741,F403,F405,F821,F841,F999,W503,W504,C408,E302,W291,E303,
# EXE001 is skipped for now because some files use shebang to determine Python version.
EXE001,
# these ignores are from flake8-bugbear; please fix!
B007,B008,
# these ignores are from flake8-comprehensions; please fix!
C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415
per-file-ignores = __init__.py: F401
exclude = docs/src,venv,third_party,caffe2,scripts,docs/caffe2,torch/lib/include,torch/lib/tmp_install,build,torch/include,*.pyi,.git
C400,C401,C402,C403,C404,C405,C407,C411,
exclude = docs/src,venv,third_party,caffe2,scripts,docs/caffe2,tools/amd_build/pyHIPIFY,torch/lib/include,torch/lib/tmp_install,build,torch/include,*.pyi

150 .github/workflows/lint.yml (vendored)
@ -7,68 +7,34 @@ on:
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
quick-checks:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.x
|
||||
architecture: x64
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v1
|
||||
- name: Ensure consistent CircleCI YAML config
|
||||
run: |
|
||||
pip install -r requirements.txt
|
||||
cd .circleci && ./ensure-consistency.py
|
||||
- name: Ensure Docker version is correctly deployed
|
||||
run: |
|
||||
pip install pyyaml
|
||||
.circleci/validate-docker-version.py
|
||||
- name: Shellcheck Jenkins scripts
|
||||
run: |
|
||||
sudo apt-get install -y shellcheck
|
||||
.jenkins/run-shellcheck.sh
|
||||
- name: Ensure no tabs
|
||||
run: |
|
||||
(! git grep -I -l $'\t' -- . ':(exclude)*.svg' ':(exclude)**Makefile' ':(exclude)**/contrib/**' ':(exclude)third_party' ':(exclude).gitattributes' ':(exclude).gitmodules' || (echo "The above files have tabs; please convert them to spaces"; false))
|
||||
- name: Ensure C++ source files are not executable
|
||||
run: |
|
||||
(! find . \( -path ./third_party -o -path ./.git -o -path ./torch/bin -o -path ./build \) -prune -o -type f -executable -regextype posix-egrep -not -regex '.+(\.(bash|sh|py|so)|git-pre-commit)$' -print | grep . || (echo 'The above files have executable permission; please remove their executable permission by using `chmod -x`'; false))
|
||||
- name: MyPy typecheck
|
||||
run: |
|
||||
pip install mypy mypy-extensions
|
||||
mypy @mypy-files.txt
|
||||
- name: C++ docs check
|
||||
run: |
|
||||
sudo apt-get install -y doxygen && pip install -r requirements.txt
|
||||
cd docs/cpp/source && ./check-doxygen.sh
|
||||
|
||||
flake8-py3:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.x
|
||||
python-version: 3.7.4
|
||||
architecture: x64
|
||||
- name: Fetch PyTorch
|
||||
uses: actions/checkout@v1
|
||||
uses: actions/checkout@master
|
||||
- name: Checkout PR tip
|
||||
run: |
|
||||
set -eux
|
||||
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
|
||||
# We are on a PR, so actions/checkout leaves us on a merge commit.
|
||||
if [ -z "${GITHUB_HEAD_REF}" ]; then
|
||||
# We are on master, just set the SHA from our current location
|
||||
echo ::set-output name=commit_sha::${GITHUB_SHA}
|
||||
else
|
||||
# We are on a PR, so actions/checkout leaves us on merge commit.
|
||||
# Check out the actual tip of the branch.
|
||||
git checkout ${{ github.event.pull_request.head.sha }}
|
||||
PR_TIP=$(git rev-parse HEAD^2)
|
||||
git checkout ${PR_TIP}
|
||||
echo ::set-output name=commit_sha::${PR_TIP}
|
||||
fi
|
||||
echo ::set-output name=commit_sha::$(git rev-parse HEAD)
|
||||
id: get_pr_tip
|
||||
- name: Run flake8
|
||||
run: |
|
||||
set -eux
|
||||
pip install flake8==3.7.9 flake8-mypy flake8-bugbear flake8-comprehensions flake8-executable flake8-pyi mccabe pycodestyle==2.5.0 pyflakes==2.1.1
|
||||
flake8 --version
|
||||
pip install flake8
|
||||
flake8 --exit-zero > ${GITHUB_WORKSPACE}/flake8-output.txt
|
||||
cat ${GITHUB_WORKSPACE}/flake8-output.txt
|
||||
- name: Add annotations
|
||||
@ -80,97 +46,3 @@ jobs:
|
||||
regex: '^(?<filename>.*?):(?<lineNumber>\d+):(?<columnNumber>\d+): (?<errorCode>\w\d+) (?<errorDesc>.*)'
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
clang-tidy:
|
||||
if: github.event_name == 'pull_request'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.x
|
||||
architecture: x64
|
||||
- name: Checkout PyTorch
|
||||
uses: actions/checkout@v1
|
||||
- name: Checkout PR tip
|
||||
run: |
|
||||
set -eux
|
||||
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
|
||||
# We are on a PR, so actions/checkout leaves us on a merge commit.
|
||||
# Check out the actual tip of the branch.
|
||||
git checkout ${{ github.event.pull_request.head.sha }}
|
||||
fi
|
||||
echo ::set-output name=commit_sha::$(git rev-parse HEAD)
|
||||
id: get_pr_tip
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
set -eux
|
||||
# Install CUDA
|
||||
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin
|
||||
sudo mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600
|
||||
sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub
|
||||
sudo add-apt-repository "deb http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/ /"
|
||||
sudo apt-get update
|
||||
sudo apt-get --no-install-recommends -y install cuda
|
||||
# Install dependencies
|
||||
pip install pyyaml
|
||||
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
|
||||
sudo apt-add-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main"
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y clang-tidy-8
|
||||
sudo update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-8 1000
|
||||
- name: Run clang-tidy
|
||||
run: |
|
||||
set -eux
|
||||
git remote add upstream https://github.com/pytorch/pytorch
|
||||
git fetch upstream "$GITHUB_BASE_REF"
|
||||
BASE_SHA=${{ github.event.pull_request.base.sha }}
|
||||
HEAD_SHA=${{ github.event.pull_request.head.sha }}
|
||||
MERGE_BASE=$(git merge-base $BASE_SHA $HEAD_SHA)
|
||||
|
||||
if [[ ! -d build ]]; then
|
||||
git submodule update --init --recursive
|
||||
|
||||
export USE_NCCL=0
|
||||
# We really only need compile_commands.json, so no need to build!
|
||||
time python setup.py --cmake-only build
|
||||
|
||||
# Generate ATen files.
|
||||
time python aten/src/ATen/gen.py \
|
||||
-s aten/src/ATen \
|
||||
-d build/aten/src/ATen \
|
||||
aten/src/ATen/Declarations.cwrap \
|
||||
aten/src/THCUNN/generic/THCUNN.h \
|
||||
aten/src/ATen/nn.yaml \
|
||||
aten/src/ATen/native/native_functions.yaml
|
||||
|
||||
# Generate PyTorch files.
|
||||
time python tools/setup_helpers/generate_code.py \
|
||||
--declarations-path build/aten/src/ATen/Declarations.yaml \
|
||||
--nn-path aten/src
|
||||
fi
|
||||
|
||||
# Run Clang-Tidy
|
||||
# The negative filters below are to exclude files that include onnx_pb.h or
|
||||
# caffe2_pb.h, otherwise we'd have to build protos as part of this CI job.
|
||||
python tools/clang_tidy.py \
|
||||
--verbose \
|
||||
--paths torch/csrc/ \
|
||||
--diff "$MERGE_BASE" \
|
||||
-g"-torch/csrc/jit/export.cpp" \
|
||||
-g"-torch/csrc/jit/import.cpp" \
|
||||
-g"-torch/csrc/jit/netdef_converter.cpp" \
|
||||
-g"-torch/csrc/cuda/nccl.*" \
|
||||
-g"-torch/csrc/cuda/python_nccl.cpp" \
|
||||
"$@" > ${GITHUB_WORKSPACE}/clang-tidy-output.txt
|
||||
|
||||
cat ${GITHUB_WORKSPACE}/clang-tidy-output.txt
|
||||
- name: Add annotations
|
||||
uses: suo/add-annotations-github-action@master
|
||||
with:
|
||||
check_name: 'clang-tidy'
|
||||
linter_output_path: 'clang-tidy-output.txt'
|
||||
commit_sha: ${{ steps.get_pr_tip.outputs.commit_sha }}
|
||||
regex: '^(?<filename>.*?):(?<lineNumber>\d+):(?<columnNumber>\d+): (?<errorDesc>.*?) \[(?<errorCode>.*)\]'
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
16 .gitignore (vendored)
@ -42,7 +42,6 @@ dropout_model.pt
|
||||
test/generated_type_hints_smoketest.py
|
||||
test/htmlcov
|
||||
test/cpp_extensions/install/
|
||||
test/test-reports/
|
||||
third_party/build/
|
||||
tools/shared/_utils_internal.py
|
||||
torch.egg-info/
|
||||
@ -57,6 +56,11 @@ torch/csrc/jit/generated/*
|
||||
torch/csrc/jit/fuser/config.h
|
||||
torch/csrc/nn/THCUNN.cpp
|
||||
torch/csrc/nn/THCUNN.cwrap
|
||||
torch/csrc/nn/THNN_generic.cpp
|
||||
torch/csrc/nn/THNN_generic.cwrap
|
||||
torch/csrc/nn/THNN_generic.h
|
||||
torch/csrc/nn/THNN.cpp
|
||||
torch/csrc/nn/THNN.cwrap
|
||||
torch/bin/
|
||||
torch/cmake/
|
||||
torch/lib/*.a*
|
||||
@ -245,13 +249,3 @@ GSYMS
|
||||
GPATH
|
||||
tags
|
||||
TAGS
|
||||
|
||||
|
||||
# ccls file
|
||||
.ccls-cache/
|
||||
|
||||
# clang-format storage location used by apply_clang_format.py
|
||||
.clang-format-bin
|
||||
|
||||
# clangd background index
|
||||
.clangd/
|
||||
|
||||
12 .gitmodules (vendored)
@ -111,14 +111,10 @@
|
||||
path = third_party/foxi
|
||||
url = https://github.com/houseroad/foxi.git
|
||||
[submodule "third_party/tbb"]
|
||||
path = third_party/tbb
|
||||
url = https://github.com/01org/tbb
|
||||
branch = tbb_2018
|
||||
path = third_party/tbb
|
||||
url = https://github.com/01org/tbb
|
||||
branch = tbb_2018
|
||||
[submodule "android/libs/fbjni"]
|
||||
ignore = dirty
|
||||
path = android/libs/fbjni
|
||||
url = https://github.com/facebookincubator/fbjni.git
|
||||
[submodule "third_party/XNNPACK"]
|
||||
ignore = dirty
|
||||
path = third_party/XNNPACK
|
||||
url = https://github.com/google/XNNPACK.git
|
||||
url = https://github.com/IvanKobzarev/fbjni.git
|
||||
|
||||
@ -4,6 +4,14 @@ set -ex
|
||||
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
|
||||
# TODO: Migrate all centos jobs to use proper devtoolset
|
||||
if [[ "$BUILD_ENVIRONMENT" == *py2-cuda9.0-cudnn7-centos7* ]]; then
|
||||
# There is a bug in pango packge on Centos7 that causes undefined
|
||||
# symbols, upgrading glib2 to >=2.56.1 solves the issue. See
|
||||
# https://bugs.centos.org/view.php?id=15495
|
||||
sudo yum install -y -q glib2-2.56.1
|
||||
fi
|
||||
|
||||
# CMAKE_ARGS are only passed to 'cmake' and the -Dfoo=bar does not work with
|
||||
# setup.py, so we build a list of foo=bars and then either convert it to
|
||||
# -Dfoo=bars or export them before running setup.py
|
||||
@ -104,7 +112,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *-android* ]]; then
|
||||
build_args+=("BUILD_TEST=ON")
|
||||
build_args+=("USE_OBSERVERS=ON")
|
||||
build_args+=("USE_ZSTD=ON")
|
||||
BUILD_CAFFE2_MOBILE=1 "${ROOT_DIR}/scripts/build_android.sh" $(build_to_cmake ${build_args[@]}) "$@"
|
||||
"${ROOT_DIR}/scripts/build_android.sh" $(build_to_cmake ${build_args[@]}) "$@"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
@ -130,7 +138,7 @@ if [[ $BUILD_ENVIRONMENT == *py2-cuda9.0-cudnn7-ubuntu16.04* ]]; then
|
||||
# removing http:// duplicate in favor of nvidia-ml.list
|
||||
# which is https:// version of the same repo
|
||||
sudo rm -f /etc/apt/sources.list.d/nvidia-machine-learning.list
|
||||
curl --retry 3 -o ./nvinfer-runtime-trt-repo-ubuntu1604-5.0.2-ga-cuda9.0_1-1_amd64.deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvinfer-runtime-trt-repo-ubuntu1604-5.0.2-ga-cuda9.0_1-1_amd64.deb
|
||||
curl -o ./nvinfer-runtime-trt-repo-ubuntu1604-5.0.2-ga-cuda9.0_1-1_amd64.deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvinfer-runtime-trt-repo-ubuntu1604-5.0.2-ga-cuda9.0_1-1_amd64.deb
|
||||
sudo dpkg -i ./nvinfer-runtime-trt-repo-ubuntu1604-5.0.2-ga-cuda9.0_1-1_amd64.deb
|
||||
sudo apt-key add /var/nvinfer-runtime-trt-repo-5.0.2-ga-cuda9.0/7fa2af80.pub
|
||||
sudo apt-get -qq update
|
||||
@ -161,6 +169,7 @@ if [[ $BUILD_ENVIRONMENT == *cuda* ]]; then
|
||||
export PATH="/usr/local/cuda/bin:$PATH"
|
||||
fi
|
||||
if [[ $BUILD_ENVIRONMENT == *rocm* ]]; then
|
||||
build_args+=("USE_ROCM=ON")
|
||||
# This is needed to enable ImageInput operator in resnet50_trainer
|
||||
build_args+=("USE_OPENCV=ON")
|
||||
# This is needed to read datasets from https://download.caffe2.ai/databases/resnet_trainer.zip
|
||||
@ -175,12 +184,18 @@ if [[ $BUILD_ENVIRONMENT == *rocm* ]]; then
|
||||
${PYTHON} "${ROOT_DIR}/tools/amd_build/build_amd.py"
|
||||
fi
|
||||
|
||||
# building bundled nccl in this config triggers a bug in nvlink. For
|
||||
# more, see https://github.com/pytorch/pytorch/issues/14486
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *-cuda8*-cudnn7* ]]; then
|
||||
build_args+=("USE_SYSTEM_NCCL=ON")
|
||||
fi
|
||||
|
||||
# Try to include Redis support for Linux builds
|
||||
if [ "$(uname)" == "Linux" ]; then
|
||||
build_args+=("USE_REDIS=ON")
|
||||
fi
|
||||
|
||||
# Use a specialized onnx namespace in CI to catch hardcoded onnx namespace
|
||||
# Use a speciallized onnx namespace in CI to catch hardcoded onnx namespace
|
||||
build_args+=("ONNX_NAMESPACE=ONNX_NAMESPACE_FOR_C2_CI")
|
||||
|
||||
###############################################################################
|
||||
|
||||
@ -6,69 +6,6 @@ TEST_DIR="$ROOT_DIR/caffe2_tests"
gtest_reports_dir="${TEST_DIR}/cpp"
pytest_reports_dir="${TEST_DIR}/python"

# This is needed to work around ROCm using old docker images until
# the transition to new images is complete.
# TODO: Remove once ROCm CI is using new images.
if [[ $BUILD_ENVIRONMENT == py3.6-devtoolset7-rocmrpm-centos* ]]; then
  # This file is sourced multiple times, only install conda the first time.
  # We must install conda where we have write access.
  CONDA_DIR="$ROOT_DIR/conda"
  if [[ ! -d $CONDA_DIR ]]; then
    ANACONDA_PYTHON_VERSION=3.6
    BASE_URL="https://repo.anaconda.com/miniconda"
    CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
    mkdir $CONDA_DIR
    pushd /tmp
    wget -q "${BASE_URL}/${CONDA_FILE}"
    chmod +x "${CONDA_FILE}"
    ./"${CONDA_FILE}" -b -f -p "$CONDA_DIR"
    popd
    export PATH="$CONDA_DIR/bin:$PATH"
    # Ensure we run conda in a directory that jenkins has write access to
    pushd $CONDA_DIR
    # Track latest conda update
    conda update -n base conda
    # Install correct Python version
    conda install python="$ANACONDA_PYTHON_VERSION"

    conda_install() {
      # Ensure that the install command don't upgrade/downgrade Python
      # This should be called as
      # conda_install pkg1 pkg2 ... [-c channel]
      conda install -q -y python="$ANACONDA_PYTHON_VERSION" $*
    }

    # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
    conda_install numpy pyyaml mkl mkl-include setuptools cffi typing future six

    # TODO: This isn't working atm
    conda_install nnpack -c killeent

    # Install some other packages

    # Need networkx 2.0 because bellmand_ford was moved in 2.1 . Scikit-image by
    # defaults installs the most recent networkx version, so we install this lower
    # version explicitly before scikit-image pulls it in as a dependency
    pip install networkx==2.0

    # TODO: Why is scipy pinned
    # numba & llvmlite is pinned because of https://github.com/numba/numba/issues/4368
    # scikit-learn is pinned because of
    # https://github.com/scikit-learn/scikit-learn/issues/14485 (affects gcc 5.5
    # only)
    pip install --progress-bar off pytest scipy==1.1.0 scikit-learn==0.20.3 scikit-image librosa>=0.6.2 psutil numba==0.46.0 llvmlite==0.30.0

    # click - onnx
    # hypothesis - tests
    # jupyter - for tutorials
    pip install --progress-bar off click hypothesis jupyter protobuf tabulate virtualenv mock typing-extensions

    popd
  else
    export PATH="$CONDA_DIR/bin:$PATH"
  fi
fi

# Figure out which Python to use
PYTHON="$(which python)"
if [[ "${BUILD_ENVIRONMENT}" =~ py((2|3)\.?[0-9]?\.?[0-9]?) ]]; then

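The hunk ends mid-branch, so the diff does not show how the captured version is consumed. A purely illustrative continuation, assuming the usual `BASH_REMATCH` idiom (this is not the file's actual code):

```
if [[ "${BUILD_ENVIRONMENT}" =~ py((2|3)\.?[0-9]?\.?[0-9]?) ]]; then
  # Illustrative assumption: map the captured version (e.g. "3.6") to a
  # concrete interpreter, keeping the plain "python" fallback otherwise.
  if command -v "python${BASH_REMATCH[1]}" >/dev/null 2>&1; then
    PYTHON="$(command -v "python${BASH_REMATCH[1]}")"
  fi
fi
```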
@ -40,9 +40,6 @@ for test in $(find "$cpp_test_dir" -executable -type f); do
        LD_LIBRARY_PATH="$ld_library_path" "$test"
      fi
      ;;
    */*_benchmark)
      LD_LIBRARY_PATH="$ld_library_path" "$test" --benchmark_color=false
      ;;
    *)
      # Currently, we use a mixture of gtest (caffe2) and Catch2 (ATen). While
      # planning to migrate to gtest as the common PyTorch c++ test suite, we
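Only fragments of this dispatch loop survive in the hunk; the overall shape is a `find` over built test binaries with a `case` on the file name. A self-contained sketch of that pattern (the directory variables here are assumptions; only `gtest_reports_dir` comes from the script above):

```
# Sketch of the find-plus-case dispatch, not the script's exact code.
cpp_test_dir="$INSTALL_PREFIX/test"     # assumed location of built C++ tests
ld_library_path="$INSTALL_PREFIX/lib"   # assumed location of shared libraries
mkdir -p "$gtest_reports_dir"
for test in $(find "$cpp_test_dir" -executable -type f); do
  case "$test" in
    */*_benchmark)
      # Benchmark binaries take Google Benchmark flags.
      LD_LIBRARY_PATH="$ld_library_path" "$test" --benchmark_color=false
      ;;
    *)
      # Assume a gtest binary and collect its XML report.
      LD_LIBRARY_PATH="$ld_library_path" "$test" \
        --gtest_output=xml:"$gtest_reports_dir/$(basename "$test").xml"
      ;;
  esac
done
```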
@ -67,7 +64,7 @@ if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then
  exit 0
fi

# if [[ "$BUILD_ENVIRONMENT" == *ubuntu14.04* ]]; then
if [[ "$BUILD_ENVIRONMENT" == *ubuntu14.04* ]]; then
  # Hotfix, use hypothesis 3.44.6 on Ubuntu 14.04
  # See comments on
  # https://github.com/HypothesisWorks/hypothesis-python/commit/eadd62e467d6cee6216e71b391951ec25b4f5830
@ -77,15 +74,15 @@ fi
  sudo pip -q install attrs==18.1.0 -f https://s3.amazonaws.com/ossci-linux/wheels/attrs-18.1.0-py2.py3-none-any.whl
  sudo pip -q install coverage==4.5.1 -f https://s3.amazonaws.com/ossci-linux/wheels/coverage-4.5.1-cp36-cp36m-macosx_10_12_x86_64.whl
  sudo pip -q install hypothesis==3.44.6 -f https://s3.amazonaws.com/ossci-linux/wheels/hypothesis-3.44.6-py3-none-any.whl
# else
# pip install --user --no-cache-dir hypothesis==3.59.0
# fi
else
  pip install --user --no-cache-dir hypothesis==3.59.0
fi

# Collect additional tests to run (outside caffe2/python)
EXTRA_TESTS=()

# CUDA builds always include NCCL support
if [[ "$BUILD_ENVIRONMENT" == *-cuda* ]] || [[ "$BUILD_ENVIRONMENT" == *-rocm* ]]; then
if [[ "$BUILD_ENVIRONMENT" == *-cuda* ]]; then
  EXTRA_TESTS+=("$caffe2_pypath/contrib/nccl")
fi

@ -96,10 +93,6 @@ if [[ $BUILD_ENVIRONMENT == *-rocm* ]]; then
  # On ROCm, RCCL (distributed) development isn't complete.
  # https://github.com/ROCmSoftwarePlatform/rccl
  rocm_ignore_test+=("--ignore $caffe2_pypath/python/data_parallel_model_test.py")

  # This test has been flaky in ROCm CI (but note the tests are
  # cpu-only so should be unrelated to ROCm)
  rocm_ignore_test+=("--ignore $caffe2_pypath/python/operator_test/blobs_queue_db_test.py")
fi

# NB: Warnings are disabled because they make it harder to see what
@ -107,13 +100,8 @@ fi
echo "Running Python tests.."
if [[ "$BUILD_ENVIRONMENT" == *py3* ]]; then
  # locale setting is required by click package with py3
  for loc in "en_US.utf8" "C.UTF-8"; do
    if locale -a | grep "$loc" >/dev/null 2>&1; then
      export LC_ALL="$loc"
      export LANG="$loc"
      break;
    fi
  done
  export LC_ALL=C.UTF-8
  export LANG=C.UTF-8
fi

pip install --user pytest-sugar
@ -127,7 +115,6 @@ pip install --user pytest-sugar
  --ignore "$caffe2_pypath/python/operator_test/matmul_op_test.py" \
  --ignore "$caffe2_pypath/python/operator_test/pack_ops_test.py" \
  --ignore "$caffe2_pypath/python/mkl/mkl_sbn_speed_test.py" \
  --ignore "$caffe2_pypath/python/trt/test_pt_onnx_trt.py" \
  ${rocm_ignore_test[@]} \
  "$caffe2_pypath/python" \
  "${EXTRA_TESTS[@]}"
@ -136,7 +123,7 @@ pip install --user pytest-sugar
# torchvision tests #
#####################
if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
  pip install -q --user git+https://github.com/pytorch/vision.git@v0.5.0
  pip install -q --user git+https://github.com/pytorch/vision.git
  pip install -q --user ninja
  # JIT C++ extensions require ninja, so put it into PATH.
  export PATH="/var/lib/jenkins/.local/bin:$PATH"
@ -144,7 +131,8 @@ if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
  # default pip version is too old(9.0.2), unable to support tag `manylinux2010`.
  # Fix the pip error: Couldn't find a version that satisfies the requirement
  sudo pip install --upgrade pip
  pip install -q --user -i https://test.pypi.org/simple/ ort-nightly==1.3.0.dev202005123
  pip install -q --user -i https://test.pypi.org/simple/ ort-nightly==0.5.0.dev905
  fi
  "$ROOT_DIR/scripts/onnx/test.sh"
fi


@ -1,25 +0,0 @@
#!/usr/bin/env bash
# DO NOT ADD 'set -x' not to reveal CircleCI secret context environment variables
set -eu -o pipefail

# This script builds and runs code analyzer tool to generate aten op dependency
# graph for custom mobile build.

COMPACT_JOB_NAME="${BUILD_ENVIRONMENT}"

source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

echo "Clang version:"
clang --version

export LLVM_DIR="$(llvm-config-5.0 --prefix)"
echo "LLVM_DIR: ${LLVM_DIR}"

# Run the following 2 steps together because they share the same (reusable) time
# consuming process to build LibTorch into LLVM assembly.

# 1. Run code analysis test first to fail fast
time ANALYZE_TEST=1 CHECK_RESULT=1 tools/code_analyzer/build.sh

# 2. Run code analysis on mobile LibTorch
time ANALYZE_TORCH=1 tools/code_analyzer/build.sh -closure=false
@ -1,26 +0,0 @@
#!/usr/bin/env bash
# DO NOT ADD 'set -x' not to reveal CircleCI secret context environment variables
set -eu -o pipefail

# This script uses linux host toolchain + mobile build options in order to
# build & test mobile libtorch without having to setup Android/iOS
# toolchain/simulator.

COMPACT_JOB_NAME="${BUILD_ENVIRONMENT}"

source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

# Install torch & torchvision - used to download & trace test model.
pip install torch torchvision --progress-bar off

# Run end-to-end process of building mobile library, linking into the predictor
# binary, and running forward pass with a real model.
if [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-static* ]]; then
  TEST_CUSTOM_BUILD_STATIC=1 test/mobile/custom_build/build.sh
elif [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-dynamic* ]]; then
  export LLVM_DIR="$(llvm-config-5.0 --prefix)"
  echo "LLVM_DIR: ${LLVM_DIR}"
  TEST_CUSTOM_BUILD_DYNAMIC=1 test/mobile/custom_build/build.sh
else
  TEST_DEFAULT_BUILD=1 test/mobile/custom_build/build.sh
fi
Some files were not shown because too many files have changed in this diff.