U/kostmo/gen circle conf (#17189)

Summary:
Diagram preview:
![binarysmoketests-config-dimensions](https://user-images.githubusercontent.com/261693/53040977-a0f88d00-3437-11e9-9190-796cc243e0f9.png)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/17189

Differential Revision: D14141362

Pulled By: kostmo

fbshipit-source-id: 0625a1234d0307c6be79f17e756ddb1cc445b374
This commit is contained in:
Karl Ostmo
2019-02-19 15:33:58 -08:00
committed by Facebook Github Bot
parent f827f9f77a
commit 09c9af9451
32 changed files with 1666 additions and 2564 deletions

2
.circleci/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
*.svg
*.png

View File

@ -0,0 +1,187 @@
from collections import OrderedDict
import conf_tree
import miniutils
import make_build_configs
class Conf:
    """One binary-build (or smoke-test) job configuration.

    An instance knows how to derive its CircleCI job name, its docker
    image, and its YAML job-spec subtree from its dimension values.
    """

    def __init__(self, os, cuda_version, pydistro, parms, smoke=False, libtorch_variant=None):
        self.os = os                    # "linux" or "macos" (see CONFIG_TREE_DATA)
        self.cuda_version = cuda_version  # e.g. "80", "90"; None means cpu-only
        self.pydistro = pydistro        # "manywheel", "conda", "wheel" or "libtorch"
        self.parms = parms              # extra name pieces, e.g. the python version
        self.smoke = smoke              # True for smoke-test jobs, False for binary builds
        self.libtorch_variant = libtorch_variant  # e.g. "shared-with-deps", or None

    def genBuildEnvParms(self):
        """Pieces of the BUILD_ENVIRONMENT string: distro, parms, processor arch."""
        return [self.pydistro] + self.parms + [make_build_configs.get_processor_arch_name(self.cuda_version)]

    def genDockerImage(self):
        """Quoted docker image name for this configuration."""
        # manywheel and libtorch builds share the manylinux docker images
        docker_word_substitution = {
            "manywheel": "manylinux",
            "libtorch": "manylinux",
        }
        docker_distro_prefix = miniutils.override(self.pydistro, docker_word_substitution)
        # cpu builds (cuda_version is None) fall back to the cuda80 image name
        alt_docker_suffix = self.cuda_version or "80"
        # conda images carry no cuda suffix at all
        docker_distro_suffix = "" if self.pydistro == "conda" else alt_docker_suffix
        return miniutils.quote("soumith/" + docker_distro_prefix + "-cuda" + docker_distro_suffix)

    def getNamePrefix(self):
        """Job-name prefix distinguishing smoke tests from binary builds."""
        return "smoke" if self.smoke else "binary"

    def genBuildName(self, build_or_test):
        """CircleCI job name for the given phase ("build", "test" or "upload")."""
        parts = [self.getNamePrefix(), self.os] + self.genBuildEnvParms()
        if self.smoke:
            # smoke jobs omit the phase suffix; libtorch smoke jobs are
            # distinguished by their linking variant instead
            if self.libtorch_variant:
                parts.append(self.libtorch_variant)
        else:
            parts.append(build_or_test)
        return "_".join(parts)

    def genYamlTree(self, build_or_test):
        """Build the YAML job-spec dict for the given phase."""
        env_dict = OrderedDict({
            "BUILD_ENVIRONMENT": miniutils.quote(" ".join(self.genBuildEnvParms())),
        })
        if self.libtorch_variant:
            env_dict["LIBTORCH_VARIANT"] = miniutils.quote(self.libtorch_variant)
        os_word_substitution = {
            "macos": "mac",
        }
        os_name = miniutils.override(self.os, os_word_substitution)
        d = {
            "environment": env_dict,
            # YAML merge reference into e.g. *binary_linux_build defaults
            "<<": "*" + "_".join([self.getNamePrefix(), os_name, build_or_test]),
        }
        if build_or_test == "test":
            tuples = []
            if self.cuda_version:
                tuples.append(("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))
            # macos smoke tests run without docker
            if not (self.smoke and self.os == "macos"):
                tuples.append(("DOCKER_IMAGE", self.genDockerImage()))
            if self.smoke:
                # TODO: Fix this discrepancy upstream
                # (smoke tests emit these env vars in the opposite order)
                tuples.reverse()
            for (k, v) in tuples:
                env_dict[k] = v
        else:
            # build/upload phases on linux declare the docker executor inline
            if self.os == "linux" and build_or_test != "upload":
                d["docker"] = [{"image": self.genDockerImage()}]
        if build_or_test == "test":
            if self.cuda_version:
                d["resource_class"] = "gpu.medium"
        return d
def gen_build_env_list(smoke):
    """Expand the configuration tree into a flat list of Conf objects.

    Returns the list plus the pygraphviz graph produced by the traversal.
    """
    root = make_build_configs.TopLevelNode(
        "Builds",
        make_build_configs.CONFIG_TREE_DATA,
        smoke,
    )
    leaves, dot_graph = conf_tree.dfs(root)
    confs = [
        Conf(
            leaf.find_prop("os_name"),
            leaf.find_prop("cu"),
            leaf.find_prop("package_format"),
            [leaf.find_prop("pyver")],
            leaf.find_prop("smoke"),
            leaf.find_prop("libtorch_variant"),
        )
        for leaf in leaves
    ]
    return confs, dot_graph
def add_build_entries(jobs_dict, phase, smoke):
    """Insert one job spec per configuration into jobs_dict for the given phase."""
    configs, _ = gen_build_env_list(smoke)
    for cfg in configs:
        jobs_dict[cfg.genBuildName(phase)] = cfg.genYamlTree(phase)
def add_binary_build_specs(jobs_dict):
    """Job specs for the nightly binary "build" phase."""
    add_build_entries(jobs_dict, "build", smoke=False)
def add_binary_build_uploads(jobs_dict):
    """Job specs for the nightly binary "upload" phase."""
    add_build_entries(jobs_dict, "upload", smoke=False)
def add_smoke_test_specs(jobs_dict):
    """Job specs for the smoke-test "test" phase."""
    add_build_entries(jobs_dict, "test", smoke=True)
def add_binary_build_tests(jobs_dict):
    """Test-phase job specs for the binary builds that actually have tests."""

    def is_testable(cfg):
        # only linux builds are tested, and libtorch binaries only as smoke tests
        return cfg.os == "linux" and (cfg.smoke or cfg.pydistro != "libtorch")

    configs, _ = gen_build_env_list(False)
    for cfg in configs:
        if is_testable(cfg):
            jobs_dict[cfg.genBuildName("test")] = cfg.genYamlTree("test")
def gen_schedule_tree(cron_timing):
    """CircleCI workflow "triggers" subtree: a master-only cron schedule."""
    schedule_entry = {
        "schedule": {
            "cron": miniutils.quote(cron_timing),
            "filters": {
                "branches": {
                    "only": ["master"],
                },
            },
        },
    }
    return [schedule_entry]
def add_jobs_and_render(jobs_dict, toplevel_key, smoke, cron_schedule):
    """Register a scheduled workflow under toplevel_key and render its graph.

    Side effect: writes "<toplevel_key>-config-dimensions.png" to disk.
    """
    configs, graph = gen_build_env_list(smoke)
    build_names = [cfg.genBuildName("build") for cfg in configs]
    jobs_dict[toplevel_key] = OrderedDict(
        triggers=gen_schedule_tree(cron_schedule),
        jobs=build_names,
    )
    graph.draw(toplevel_key + "-config-dimensions.png", prog="twopi")
def add_binary_build_jobs(jobs_dict):
    """Scheduled workflow for the nightly binary builds."""
    add_jobs_and_render(jobs_dict, "binarybuilds", smoke=False, cron_schedule="5 5 * * *")
def add_binary_smoke_test_jobs(jobs_dict):
    """Scheduled workflow for the binary smoke tests."""
    add_jobs_and_render(jobs_dict, "binarysmoketests", smoke=True, cron_schedule="15 16 * * *")

View File

@ -0,0 +1,195 @@
from collections import OrderedDict
import miniutils
DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/"
DEFAULT_DOCKER_VERSION = 282
class DockerHide:
    """Hides element for construction of docker path.

    A build-name piece wrapped in DockerHide still appears in the
    BUILD_ENVIRONMENT / job name but is filtered out when the docker
    image path is assembled (see genDockerImagePath).
    """

    def __init__(self, val):
        self.val = val  # the wrapped name piece, e.g. "xla" or "multigpu"

    def __str__(self):
        return self.val

    def __repr__(self):
        # Make lists of name pieces readable when debugging config generation
        return "DockerHide(%r)" % self.val
class Conf:
    """One pytorch linux CI job configuration.

    An instance derives its CircleCI job name, ECR docker image path,
    and YAML job-spec subtree from its constructor arguments.
    """

    def __init__(self,
                 distro,
                 parms,
                 pyver=None,
                 cuda_version=None,
                 is_xla=False,
                 restrict_phases=None,
                 cuda_docker_phases=None,
                 gpu_resource=None,
                 docker_version_override=None):
        self.distro = distro              # e.g. "trusty", "xenial"
        self.pyver = pyver                # PYTHON_VERSION env value, e.g. "3.6"
        self.parms = parms                # extra name pieces, e.g. ["py3", "gcc7"]
        self.cuda_version = cuda_version  # e.g. "9.2"; None for cpu-only
        self.is_xla = is_xla              # XLA builds add a docker-hidden "xla" piece
        self.restrict_phases = restrict_phases  # subset of ["build", "test"], or None for both
        # FIXME does the build phase ever need CUDA runtime?
        self.cuda_docker_phases = cuda_docker_phases or []  # phases needing the nvidia runtime
        self.gpu_resource = gpu_resource  # CircleCI gpu resource-class suffix, e.g. "medium"
        # FIXME is this different docker version intentional?
        self.docker_version_override = docker_version_override

    def getParms(self):
        """All build-name pieces; DockerHide-wrapped ones are skipped in docker paths."""
        leading = ["pytorch"]
        if self.is_xla:
            leading.append(DockerHide("xla"))
        cuda_parms = []
        if self.cuda_version:
            cuda_parms.extend(["cuda" + self.cuda_version, "cudnn7"])
        return leading + ["linux", self.distro] + cuda_parms + self.parms

    # TODO: Eliminate this special casing in docker paths
    def genDockerImagePath(self, build_or_test):
        """Quoted ECR docker image path for this configuration."""
        build_env_pieces = self.getParms()
        build_env_pieces = list(filter(lambda x: type(x) is not DockerHide, build_env_pieces))
        # NOTE: dropped an unused "build_job_name_pieces" computation here
        base_build_env_name = "-".join(build_env_pieces)
        docker_version = self.docker_version_override or DEFAULT_DOCKER_VERSION
        return miniutils.quote(DOCKER_IMAGE_PATH_BASE + base_build_env_name + ":" + str(docker_version))

    def getBuildJobNamePieces(self, build_or_test):
        """Name pieces plus the phase suffix."""
        return self.getParms() + [build_or_test]

    def genBuildName(self, build_or_test):
        # Dots aren't valid in CircleCI job names, so "9.2" becomes "9_2"
        return ("_".join(map(str, self.getBuildJobNamePieces(build_or_test)))).replace(".", "_")

    def genYamlTree(self, build_or_test):
        """Build the YAML job-spec dict for the given phase ("build" or "test")."""
        build_job_name_pieces = self.getBuildJobNamePieces(build_or_test)
        build_env_name = "-".join(map(str, build_job_name_pieces))
        env_dict = {
            "BUILD_ENVIRONMENT": build_env_name,
            "DOCKER_IMAGE": self.genDockerImagePath(build_or_test),
        }
        if self.pyver:
            env_dict["PYTHON_VERSION"] = miniutils.quote(self.pyver)
        if build_or_test in self.cuda_docker_phases:
            env_dict["USE_CUDA_DOCKER_RUNTIME"] = miniutils.quote("1")
        d = {
            "environment": env_dict,
            # YAML merge reference into *pytorch_linux_<phase>_defaults
            "<<": "*" + "_".join(["pytorch", "linux", build_or_test, "defaults"]),
        }
        if build_or_test == "test":
            resource_class = "large"
            if self.gpu_resource:
                resource_class = "gpu." + self.gpu_resource
                if self.gpu_resource == "large":
                    # the large gpu class runs the multi-gpu test suite
                    env_dict["MULTI_GPU"] = miniutils.quote("1")
            d["resource_class"] = resource_class
        return d
# The full matrix of pytorch linux CI configurations.
# Entries without restrict_phases get both a "build" and a "test" job.
BUILD_ENV_LIST = [
    Conf("trusty", ["py2.7.9"]),
    Conf("trusty", ["py2.7"]),
    Conf("trusty", ["py3.5"]),
    # NOTE(review): duplicate py3.5 entry — looks unintentional, verify upstream
    Conf("trusty", ["py3.5"]),
    Conf("trusty", ["py3.6", "gcc4.8"]),
    Conf("trusty", ["py3.6", "gcc5.4"]),
    # XLA build pins an older docker image (278 vs DEFAULT_DOCKER_VERSION)
    Conf("trusty", ["py3.6", "gcc5.4"], is_xla=True, docker_version_override=278),
    Conf("trusty", ["py3.6", "gcc7"]),
    Conf("trusty", ["pynightly"]),
    Conf("xenial", ["py3", "clang5", "asan"], pyver="3.6"),
    Conf("xenial",
         ["py3"],
         pyver="3.6",
         cuda_version="8",
         gpu_resource="medium",
         cuda_docker_phases=["test"]),
    # test-only job; the "large" gpu resource also sets MULTI_GPU=1
    Conf("xenial",
         ["py3", DockerHide("multigpu")],
         pyver="3.6",
         cuda_version="8",
         restrict_phases=["test"],
         cuda_docker_phases=["build", "test"],
         gpu_resource="large"),
    Conf("xenial",
         ["py3", DockerHide("NO_AVX2")],
         pyver="3.6",
         cuda_version="8",
         restrict_phases=["test"],
         cuda_docker_phases=["build", "test"],
         gpu_resource="medium"),
    Conf("xenial",
         ["py3", DockerHide("NO_AVX"), DockerHide("NO_AVX2")],
         pyver="3.6",
         cuda_version="8",
         restrict_phases=["test"],
         cuda_docker_phases=["build", "test"],
         gpu_resource="medium"),
    Conf("xenial",
         ["py2"],
         pyver="2.7",
         cuda_version="9",
         cuda_docker_phases=["test"],
         gpu_resource="medium"),
    Conf("xenial",
         ["py3"],
         pyver="3.6",
         cuda_version="9",
         gpu_resource="medium",
         cuda_docker_phases=["test"]),
    Conf("xenial",
         ["py3", "gcc7"],
         pyver="3.6",
         cuda_version="9.2",
         gpu_resource="medium",
         cuda_docker_phases=["test"]),
    # cuda 10 is currently build-only (no test phase)
    Conf("xenial",
         ["py3", "gcc7"],
         pyver="3.6",
         cuda_version="10",
         restrict_phases=["build"]),
]
def add_build_env_defs(jobs_dict):
    """Populate jobs_dict with "version" and "jobs" entries for all configurations."""
    specs = OrderedDict()
    for cfg in BUILD_ENV_LIST:
        # each configuration runs build+test unless restricted to specific phases
        phases = cfg.restrict_phases or ["build", "test"]
        for phase in phases:
            specs[cfg.genBuildName(phase)] = cfg.genYamlTree(phase)
    jobs_dict["version"] = 2
    jobs_dict["jobs"] = specs

98
.circleci/conf_tree.py Normal file
View File

@ -0,0 +1,98 @@
import colorsys
import sys
from pygraphviz import AGraph
class ConfigNode:
    """A node in the configuration tree.

    Each node carries a props dict; lookups fall back to ancestors, so
    properties set high in the tree are inherited by every leaf below.
    """

    def __init__(self, parent, node_name):
        self.parent = parent
        self.node_name = node_name
        self.props = {}

    def get_label(self):
        # FIXME this shouldn't be necessary
        return self.node_name if self.node_name else "<None>"

    def get_children(self):
        # Leaf by default; subclasses override to expand the tree.
        return []

    def get_parents(self):
        if not self.parent:
            return []
        return self.parent.get_parents() + [self.parent.get_label()]

    def get_depth(self):
        return len(self.get_parents())

    def get_node_key(self):
        # Unique key: the full path from the root, "%"-separated
        return "%".join(self.get_parents() + [self.get_label()])

    def find_prop(self, propname, searched=None):
        """
        Checks if its own dictionary has
        the property, otherwise asks parent node.
        """
        searched = [] if searched is None else searched
        searched.append(self.node_name)
        if propname in self.props:
            return self.props[propname]
        if self.parent:
            return self.parent.find_prop(propname, searched)
        # raise Exception('Property "%s" does not exist anywhere in the tree! Searched: %s' % (propname, searched))
        return None
def rgb2hex(rgb_tuple):
    """Convert an (r, g, b) tuple of floats in [0, 1] to an HTML "#rrggbb" color."""
    def to_hex(channel):
        # scale the [0, 1] float to 0-255 and format as zero-padded hex
        return "%02x" % int(channel * 255)
    # (dropped a redundant list() wrapper: map iterates the tuple directly)
    return "#" + "".join(map(to_hex, rgb_tuple))
def dfs(toplevel_config_node):
    """Depth-first traversal of the configuration tree.

    Returns a pair (config_list, dot): the leaf nodes in traversal order
    (each leaf is one complete configuration) and a pygraphviz AGraph of
    the whole tree for visualization.
    """
    dot = AGraph()
    config_list = []
    MAX_DEPTH = 7  # FIXME traverse once beforehand to find max depth
    def dfs_recurse(node):
        this_node_key = node.get_node_key()
        depth = node.get_depth()
        # Each tree level gets a distinct hue so the rendered graph shows depth
        rgb_tuple = colorsys.hsv_to_rgb(depth / float(MAX_DEPTH), 0.5, 1)
        hex_color = rgb2hex(rgb_tuple)
        dot.add_node(
            this_node_key,
            label=node.get_label(),
            style="filled",
            color="black",
            # fillcolor=hex_color + ":orange",
            fillcolor=hex_color,
        )
        node_children = node.get_children()
        if node_children:
            for child in node_children:
                child_node_key = child.get_node_key()
                dot.add_edge((this_node_key, child_node_key))
                dfs_recurse(child)
        else:
            # only leaves represent fully-specified configurations
            config_list.append(node)
    dfs_recurse(toplevel_config_node)
    return config_list, dot

View File

@ -10,244 +10,74 @@ import os
import sys import sys
from collections import OrderedDict from collections import OrderedDict
import build_env_definitions
DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/" import binary_build_definitions
import miniyaml
class DockerHide: class File:
"""Hides element for construction of docker path""" def __init__(self, filename):
def __init__(self, val): self.filename = filename
self.val = val
def __str__(self): def write(self, output_filehandle):
return self.val with open(os.path.join("verbatim-sources", self.filename)) as fh:
output_filehandle.write(fh.read())
class Conf: class Treegen:
def __init__(self, def __init__(self, function, depth):
distro, self.function = function
parms, self.depth = depth
pyver=None,
use_cuda=False,
is_xla=False,
restrict_phases=None,
cuda_docker_phases=None,
gpu_resource=None,
docker_version_override=None):
self.distro = distro def write(self, output_filehandle):
self.pyver = pyver build_dict = OrderedDict()
self.parms = parms self.function(build_dict)
miniyaml.render(output_filehandle, None, build_dict, self.depth)
self.use_cuda = use_cuda
self.is_xla = is_xla
self.restrict_phases = restrict_phases
# FIXME does the build phase ever need CUDA runtime?
self.cuda_docker_phases = cuda_docker_phases or []
self.gpu_resource = gpu_resource
# FIXME is this different docker version intentional?
self.docker_version_override = docker_version_override
def getParms(self):
leading = ["pytorch"]
if self.is_xla:
leading.append(DockerHide("xla"))
return leading + ["linux", self.distro] + self.parms
# TODO: Eliminate this special casing in docker paths
def genDockerImagePath(self, build_or_test):
build_env_pieces = self.getParms()
build_env_pieces = list(filter(lambda x: type(x) is not DockerHide, build_env_pieces))
build_job_name_pieces = build_env_pieces + [build_or_test]
base_build_env_name = "-".join(build_env_pieces)
docker_version = 282
if self.docker_version_override is not None:
docker_version = self.docker_version_override
return DOCKER_IMAGE_PATH_BASE + base_build_env_name + ":" + str(docker_version)
BUILD_ENV_LIST = [ YAML_SOURCES = [
Conf("trusty", ["py2.7.9"]), File("header-section.yml"),
Conf("trusty", ["py2.7"]), File("linux-build-defaults.yml"),
Conf("trusty", ["py3.5"]), File("macos-build-defaults.yml"),
Conf("trusty", ["py3.5"]), File("nightly-binary-build-defaults.yml"),
Conf("trusty", ["py3.6", "gcc4.8"]), File("linux-binary-build-defaults.yml"),
Conf("trusty", ["py3.6", "gcc5.4"]), File("macos-binary-build-defaults.yml"),
Conf("trusty", ["py3.6", "gcc5.4"], is_xla=True, docker_version_override=278), File("nightly-build-smoke-tests-defaults.yml"),
Conf("trusty", ["py3.6", "gcc7"]), File("job-specs-header.yml"),
Conf("trusty", ["pynightly"]), Treegen(build_env_definitions.add_build_env_defs, 0),
Conf("xenial", ["py3", "clang5", "asan"], pyver="3.6"), File("job-specs-custom.yml"),
Conf("xenial", ["cuda8", "cudnn7", "py3"], pyver="3.6", use_cuda=True, gpu_resource="medium", File("job-specs-caffe2-builds.yml"),
cuda_docker_phases=["test"]), File("job-specs-html-update.yml"),
Conf("xenial", ["cuda8", "cudnn7", "py3", DockerHide("multigpu")], pyver="3.6", use_cuda=True, File("binary-build-specs-header.yml"),
restrict_phases=["test"], cuda_docker_phases=["build", "test"], gpu_resource="large"), Treegen(binary_build_definitions.add_binary_build_specs, 1),
Conf("xenial", ["cuda8", "cudnn7", "py3", DockerHide("NO_AVX2")], pyver="3.6", use_cuda=True, File("binary-build-tests-header.yml"),
restrict_phases=["test"], cuda_docker_phases=["build", "test"], gpu_resource="medium"), Treegen(binary_build_definitions.add_binary_build_tests, 1),
Conf("xenial", ["cuda8", "cudnn7", "py3", DockerHide("NO_AVX"), DockerHide("NO_AVX2")], pyver="3.6", use_cuda=True, File("binary-build-tests.yml"),
restrict_phases=["test"], cuda_docker_phases=["build", "test"], gpu_resource="medium"), File("binary-build-uploads-header.yml"),
Conf("xenial", ["cuda9", "cudnn7", "py2"], pyver="2.7", use_cuda=True, cuda_docker_phases=["test"], Treegen(binary_build_definitions.add_binary_build_uploads, 1),
gpu_resource="medium"), File("smoke-test-specs-header.yml"),
Conf("xenial", ["cuda9", "cudnn7", "py3"], pyver="3.6", use_cuda=True, gpu_resource="medium", Treegen(binary_build_definitions.add_smoke_test_specs, 1),
cuda_docker_phases=["test"]), File("workflows.yml"),
Conf("xenial", ["cuda9.2", "cudnn7", "py3", "gcc7"], pyver="3.6", use_cuda=True, gpu_resource="medium", File("workflows-pytorch-linux-builds.yml"),
cuda_docker_phases=["test"]), File("workflows-pytorch-macos-builds.yml"),
Conf("xenial", ["cuda10", "cudnn7", "py3", "gcc7"], pyver="3.6", use_cuda=True, restrict_phases=["build"]), File("workflows-caffe2-builds.yml"),
File("workflows-caffe2-macos-builds.yml"),
File("workflows-binary-builds-smoke-subset.yml"),
File("workflows-binary-smoke-header.yml"),
Treegen(binary_build_definitions.add_binary_smoke_test_jobs, 1),
File("workflows-binary-build-header.yml"),
Treegen(binary_build_definitions.add_binary_build_jobs, 1),
File("workflows-nightly-tests-header.yml"),
File("workflows-nightly-tests.yml"),
File("workflows-nightly-uploads-header.yml"),
File("workflows-nightly-uploads.yml"),
File("workflows-s3-html.yml"),
] ]
def is_dict_like(data):
return type(data) is dict or type(data) is OrderedDict
FORCED_QUOTED_VALUE_KEYS = {
"DOCKER_IMAGE",
"PYTHON_VERSION",
"USE_CUDA_DOCKER_RUNTIME",
"MULTI_GPU",
}
def render_yaml(key, data, fh, depth=0):
"""
PyYaml does not allow precise control over the quoting
behavior, especially for merge references.
Therefore, we use this custom YAML renderer.
"""
indentation = " " * depth
if is_dict_like(data):
tuples = list(data.items())
if type(data) is not OrderedDict:
tuples.sort(key=lambda x: (x[0] == "<<", x[0]))
for k, v in tuples:
fh.write(indentation + k + ":")
whitespace = "\n" if is_dict_like(v) else " "
fh.write(whitespace)
render_yaml(k, v, fh, depth + 1)
if depth == 2:
fh.write("\n")
else:
if type(data) is str:
maybe_quoted = data
if key in FORCED_QUOTED_VALUE_KEYS:
maybe_quoted = '"' + data + '"'
fh.write(maybe_quoted)
else:
fh.write(str(data))
fh.write("\n")
def generate_config_dict():
jobs_dict = OrderedDict()
for conf_options in BUILD_ENV_LIST:
build_env_pieces = conf_options.getParms()
def append_environment_dict(build_or_test):
build_job_name_pieces = build_env_pieces + [build_or_test]
base_build_env_name = "-".join(map(str, build_env_pieces))
build_env_name = "-".join(map(str, build_job_name_pieces))
env_dict = {
"BUILD_ENVIRONMENT": build_env_name,
"DOCKER_IMAGE": conf_options.genDockerImagePath(build_or_test),
}
if conf_options.pyver:
env_dict["PYTHON_VERSION"] = conf_options.pyver
if build_or_test in conf_options.cuda_docker_phases:
env_dict["USE_CUDA_DOCKER_RUNTIME"] = "1"
d = {
"environment": env_dict,
"<<": "*" + "_".join(["pytorch", "linux", build_or_test, "defaults"]),
}
if build_or_test == "test":
resource_class = "large"
if conf_options.gpu_resource:
resource_class = "gpu." + conf_options.gpu_resource
if conf_options.gpu_resource == "large":
env_dict["MULTI_GPU"] = "1"
d["resource_class"] = resource_class
job_name = ("_".join(map(str, build_job_name_pieces))).replace(".", "_")
jobs_dict[job_name] = d
phases = ["build", "test"]
if conf_options.restrict_phases:
phases = conf_options.restrict_phases
for phase in phases:
append_environment_dict(phase)
data = OrderedDict([
("version", 2),
("jobs", jobs_dict),
])
return data
VERBATIM_SOURCE_FILES = [
"header-section.yml",
"linux-build-defaults.yml",
"macos-build-defaults.yml",
"nightly-binary-build-defaults.yml",
"linux-binary-build-defaults.yml",
"macos-binary-build-defaults.yml",
"nightly-build-smoke-tests-defaults.yml",
]
YAML_GENERATOR_FUNCTIONS = [
generate_config_dict,
]
def comment_divider(output_filehandle):
for _i in range(2):
output_filehandle.write("#" * 78)
output_filehandle.write("\n")
def stitch_sources(output_filehandle): def stitch_sources(output_filehandle):
for f in YAML_SOURCES:
for f in VERBATIM_SOURCE_FILES: f.write(output_filehandle)
with open(os.path.join("verbatim-sources", f)) as fh:
output_filehandle.write(fh.read())
comment_divider(output_filehandle)
output_filehandle.write("# Job specifications job specs\n")
comment_divider(output_filehandle)
for f in YAML_GENERATOR_FUNCTIONS:
render_yaml(None, f(), output_filehandle)
with open("verbatim-sources/remaining-sections.yml") as fh:
output_filehandle.write(fh.read())
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -0,0 +1,134 @@
from collections import OrderedDict
from conf_tree import ConfigNode
# libtorch linking variants (expanded by LinkingVariantConfigNode)
LINKING_DIMENSIONS = [
    "shared",
    "static",
]

# libtorch dependency-inclusion variants (expanded by DependencyInclusionConfigNode)
DEPS_INCLUSION_DIMENSIONS = [
    "with-deps",
    "without-deps",
]
def get_processor_arch_name(cuda_version):
    """Map a CUDA version string (or None/empty) to an arch piece: "cpu" or e.g. "cu90"."""
    if cuda_version:
        return "cu" + cuda_version
    return "cpu"
# CUDA versions binaries are built against; None means a cpu-only build
CUDA_VERSIONS = [
    None,  # cpu build
    "80",
    "90",
    "100",
]

STANDARD_PYTHON_VERSIONS = [
    "2.7",
    "3.5",
    "3.6",
    "3.7",
]

# Root data for the config tree:
#   os -> (cuda versions, package format -> python versions)
CONFIG_TREE_DATA = OrderedDict(
    linux=(CUDA_VERSIONS, OrderedDict(
        manywheel=[
            "2.7m",
            "2.7mu",
            "3.5m",
            "3.6m",
            "3.7m",
        ],
        conda=STANDARD_PYTHON_VERSIONS,
        libtorch=[
            "2.7m",
        ]
    )),
    # macos builds are cpu-only
    macos=([None], OrderedDict(
        wheel=STANDARD_PYTHON_VERSIONS,
        conda=STANDARD_PYTHON_VERSIONS,
        libtorch=[
            "2.7",
        ],
    )),
)
class TopLevelNode(ConfigNode):
    """Root of the binary-build config tree; holds the global "smoke" flag."""

    def __init__(self, node_name, config_tree_data, smoke):
        super().__init__(None, node_name)
        self.config_tree_data = config_tree_data
        self.props["smoke"] = smoke

    def get_children(self):
        children = []
        for os_name, (cuda_versions, py_tree) in self.config_tree_data.items():
            children.append(OSConfigNode(self, os_name, cuda_versions, py_tree))
        return children
class OSConfigNode(ConfigNode):
    """Per-OS level ("linux"/"macos"); records the applicable cuda versions."""

    def __init__(self, parent, os_name, cuda_versions, py_tree):
        super().__init__(parent, os_name)
        self.py_tree = py_tree
        self.props["os_name"] = os_name
        self.props["cuda_versions"] = cuda_versions

    def get_children(self):
        return [
            PackageFormatConfigNode(self, fmt, pyvers)
            for fmt, pyvers in self.py_tree.items()
        ]
class PackageFormatConfigNode(ConfigNode):
    """Per-package-format level ("manywheel"/"conda"/"wheel"/"libtorch")."""

    def __init__(self, parent, package_format, python_versions):
        super().__init__(parent, package_format)
        self.props["python_versions"] = python_versions
        self.props["package_format"] = package_format

    def get_children(self):
        # cuda versions are inherited from the OS level
        cuda_versions = self.find_prop("cuda_versions")
        return [ArchConfigNode(self, cu) for cu in cuda_versions]
class ArchConfigNode(ConfigNode):
    """Per-processor-arch level ("cpu", "cu80", ...)."""

    def __init__(self, parent, cu):
        super().__init__(parent, get_processor_arch_name(cu))
        self.props["cu"] = cu

    def get_children(self):
        pyvers = self.find_prop("python_versions")
        return [PyVersionConfigNode(self, pv) for pv in pyvers]
class PyVersionConfigNode(ConfigNode):
    """Per-python-version level; may fan out into libtorch linking variants."""

    def __init__(self, parent, pyver):
        super().__init__(parent, pyver)
        self.props["pyver"] = pyver

    def get_children(self):
        # Only linux libtorch smoke tests expand further, into linking variants
        is_libtorch_linux_smoke = (
            self.find_prop("smoke")
            and self.find_prop("package_format") == "libtorch"
            and self.find_prop("os_name") == "linux"
        )
        if not is_libtorch_linux_smoke:
            return []
        return [LinkingVariantConfigNode(self, lv) for lv in LINKING_DIMENSIONS]
class LinkingVariantConfigNode(ConfigNode):
    """Linking-variant level ("shared"/"static") for libtorch smoke tests."""

    def __init__(self, parent, linking_variant):
        super().__init__(parent, linking_variant)

    def get_children(self):
        return [
            DependencyInclusionConfigNode(self, dv)
            for dv in DEPS_INCLUSION_DIMENSIONS
        ]
class DependencyInclusionConfigNode(ConfigNode):
    """Leaf level; combines the linking and deps labels into "libtorch_variant"."""

    def __init__(self, parent, deps_variant):
        super().__init__(parent, deps_variant)
        # e.g. "shared" + "with-deps" -> "shared-with-deps"
        variant_pieces = [self.parent.get_label(), self.get_label()]
        self.props["libtorch_variant"] = "-".join(variant_pieces)

6
.circleci/miniutils.py Normal file
View File

@ -0,0 +1,6 @@
def quote(s):
    """Wrap s in double quotes (for YAML values that must stay quoted)."""
    return '"{}"'.format(s)
def override(word, substitutions):
    """Return the replacement for word from substitutions, or word itself."""
    if word in substitutions:
        return substitutions[word]
    return word

61
.circleci/miniyaml.py Normal file
View File

@ -0,0 +1,61 @@
from collections import OrderedDict
LIST_MARKER = "- "
INDENTATION_WIDTH = 2
def is_dict(data):
    """True for dict-like mappings the renderer recurses into."""
    # isinstance covers dict, OrderedDict and any other dict subclass in one check
    return isinstance(data, dict)
def is_collection(data):
    """True for the container types the renderer recurses into (dicts and lists)."""
    if is_dict(data):
        return True
    return type(data) is list
# TODO can eventually drop this custom sorting
def sortkey(x):
    """Sort key for (key, value) pairs: "environment" first, "<<" merge key last,
    everything else alphabetical in between."""
    key_name = x[0]
    is_merge_key = key_name == "<<"
    is_not_environment = key_name != "environment"
    return (is_merge_key, is_not_environment, key_name)
def render(fh, key, data, depth, is_list_member=False):
    """
    PyYaml does not allow precise control over the quoting
    behavior, especially for merge references.
    Therefore, we use this custom YAML renderer.

    :param fh: output filehandle, written to incrementally
    :param key: the dict key this value was stored under (currently unused here)
    :param data: dict / list / scalar value to render
    :param depth: current indentation depth (in INDENTATION_WIDTH units)
    :param is_list_member: True when this value is an element of a list
    """
    indentation = " " * INDENTATION_WIDTH * depth
    if is_dict(data):
        tuples = list(data.items())
        if type(data) is not OrderedDict:
            # OrderedDicts keep their insertion order; plain dicts get the
            # custom "environment first, merge-key last" ordering
            tuples.sort(key=sortkey)
        for i, (k, v) in enumerate(tuples):
            # If this dict is itself a list member, the first key gets prefixed with a list marker
            list_marker_prefix = LIST_MARKER if is_list_member and not i else ""
            trailing_whitespace = "\n" if is_collection(v) else " "
            fh.write(indentation + list_marker_prefix + k + ":" + trailing_whitespace)
            render(fh, k, v, depth + 1 + int(is_list_member))
            # TODO Could eventually drop this cosmetic convention
            # (a blank line after each depth-2 subtree)
            if depth == 2:
                fh.write("\n")
    elif type(data) is list:
        for v in data:
            render(fh, None, v, depth, True)
    else:
        # Scalar leaf: a single line, with a list marker when inside a list
        list_member_prefix = indentation + LIST_MARKER if is_list_member else ""
        fh.write(list_member_prefix + str(data) + "\n")

View File

@ -0,0 +1,3 @@
##############################################################################
# Binary build specs individual job specifications
##############################################################################

View File

@ -0,0 +1,4 @@
# Binary build tests
# These are the smoke tests run right after the build, before the upload. If
# these fail, the upload doesn't happen
#############################################################################

View File

@ -0,0 +1,25 @@
# There is currently no testing for libtorch TODO
# binary_linux_libtorch_2.7m_cpu_test:
# environment:
# BUILD_ENVIRONMENT: "libtorch 2.7m cpu"
# resource_class: gpu.medium
# <<: *binary_linux_test
#
# binary_linux_libtorch_2.7m_cu80_test:
# environment:
# BUILD_ENVIRONMENT: "libtorch 2.7m cu80"
# resource_class: gpu.medium
# <<: *binary_linux_test
#
# binary_linux_libtorch_2.7m_cu90_test:
# environment:
# BUILD_ENVIRONMENT: "libtorch 2.7m cu90"
# resource_class: gpu.medium
# <<: *binary_linux_test
#
# binary_linux_libtorch_2.7m_cu100_test:
# environment:
# BUILD_ENVIRONMENT: "libtorch 2.7m cu100"
# resource_class: gpu.medium
# <<: *binary_linux_test

View File

@ -0,0 +1,2 @@
# Binary build uploads
#############################################################################

View File

@ -0,0 +1,159 @@
caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build:
environment:
BUILD_ENVIRONMENT: "caffe2-py2-cuda9.0-cudnn7-ubuntu16.04-build"
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:248"
<<: *caffe2_linux_build_defaults
caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_test:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:248"
USE_CUDA_DOCKER_RUNTIME: "1"
BUILD_ENVIRONMENT: "caffe2-py2-cuda9.0-cudnn7-ubuntu16.04-test"
resource_class: gpu.medium
<<: *caffe2_linux_test_defaults
caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:248"
BUILD_ENVIRONMENT: "caffe2-cmake-cuda9.0-cudnn7-ubuntu16.04-build"
<<: *caffe2_linux_build_defaults
caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_test:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:248"
USE_CUDA_DOCKER_RUNTIME: "1"
BUILD_ENVIRONMENT: "caffe2-cmake-cuda9.0-cudnn7-ubuntu16.04-test"
resource_class: gpu.medium
<<: *caffe2_linux_test_defaults
caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_build:
environment:
BUILD_ENVIRONMENT: "caffe2-py2-cuda9.1-cudnn7-ubuntu16.04-build"
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.1-cudnn7-ubuntu16.04:248"
<<: *caffe2_linux_build_defaults
caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_test:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.1-cudnn7-ubuntu16.04:248"
USE_CUDA_DOCKER_RUNTIME: "1"
BUILD_ENVIRONMENT: "caffe2-py2-cuda9.1-cudnn7-ubuntu16.04-test"
resource_class: gpu.medium
<<: *caffe2_linux_test_defaults
caffe2_py2_mkl_ubuntu16_04_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-mkl-ubuntu16.04:248"
BUILD_ENVIRONMENT: "caffe2-py2-mkl-ubuntu16.04-build"
<<: *caffe2_linux_build_defaults
caffe2_py2_mkl_ubuntu16_04_test:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-mkl-ubuntu16.04:248"
BUILD_ENVIRONMENT: "caffe2-py2-mkl-ubuntu16.04-test"
resource_class: large
<<: *caffe2_linux_test_defaults
caffe2_py2_gcc4_8_ubuntu14_04_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:248"
BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-build"
<<: *caffe2_linux_build_defaults
caffe2_py2_gcc4_8_ubuntu14_04_test:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:248"
BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-test"
resource_class: large
<<: *caffe2_linux_test_defaults
caffe2_onnx_py2_gcc5_ubuntu16_04_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc5-ubuntu16.04:248"
BUILD_ENVIRONMENT: "caffe2-onnx-py2-gcc5-ubuntu16.04-build"
<<: *caffe2_linux_build_defaults
caffe2_onnx_py2_gcc5_ubuntu16_04_test:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc5-ubuntu16.04:248"
BUILD_ENVIRONMENT: "caffe2-onnx-py2-gcc5-ubuntu16.04-test"
resource_class: large
<<: *caffe2_linux_test_defaults
caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:248"
BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-build"
<<: *caffe2_linux_build_defaults
caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:248"
USE_CUDA_DOCKER_RUNTIME: "1"
BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-test"
resource_class: gpu.medium
<<: *caffe2_linux_test_defaults
caffe2_py2_gcc4_9_ubuntu14_04_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.9-ubuntu14.04:248"
BUILD_ENVIRONMENT: "caffe2-py2-gcc4.9-ubuntu14.04-build"
BUILD_ONLY: "1"
<<: *caffe2_linux_build_defaults
caffe2_py2_clang3_8_ubuntu16_04_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-clang3.8-ubuntu16.04:248"
BUILD_ENVIRONMENT: "caffe2-py2-clang3.8-ubuntu16.04-build"
BUILD_ONLY: "1"
<<: *caffe2_linux_build_defaults
caffe2_py2_clang3_9_ubuntu16_04_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-clang3.9-ubuntu16.04:248"
BUILD_ENVIRONMENT: "caffe2-py2-clang3.9-ubuntu16.04-build"
BUILD_ONLY: "1"
<<: *caffe2_linux_build_defaults
caffe2_py2_clang7_ubuntu16_04_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-clang7-ubuntu16.04:248"
BUILD_ENVIRONMENT: "caffe2-py2-clang7-ubuntu16.04-build"
BUILD_ONLY: "1"
<<: *caffe2_linux_build_defaults
caffe2_py2_android_ubuntu16_04_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-android-ubuntu16.04:248"
BUILD_ENVIRONMENT: "caffe2-py2-android-ubuntu16.04-build"
BUILD_ONLY: "1"
<<: *caffe2_linux_build_defaults
caffe2_py2_cuda9_0_cudnn7_centos7_build:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-centos7:248"
BUILD_ENVIRONMENT: "caffe2-py2-cuda9.0-cudnn7-centos7-build"
<<: *caffe2_linux_build_defaults
caffe2_py2_cuda9_0_cudnn7_centos7_test:
environment:
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-centos7:248"
USE_CUDA_DOCKER_RUNTIME: "1"
BUILD_ENVIRONMENT: "caffe2-py2-cuda9.0-cudnn7-centos7-test"
resource_class: gpu.medium
<<: *caffe2_linux_test_defaults
caffe2_py2_ios_macos10_13_build:
environment:
BUILD_ENVIRONMENT: caffe2-py2-ios-macos10.13-build
BUILD_IOS: "1"
PYTHON_INSTALLATION: "system"
PYTHON_VERSION: "2"
<<: *caffe2_macos_build_defaults
caffe2_py2_system_macos10_13_build:
environment:
BUILD_ENVIRONMENT: caffe2-py2-system-macos10.13-build
PYTHON_INSTALLATION: "system"
PYTHON_VERSION: "2"
<<: *caffe2_macos_build_defaults

View File

@ -0,0 +1,193 @@
pytorch_short_perf_test_gpu:
environment:
BUILD_ENVIRONMENT: pytorch-short-perf-test-gpu
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda8-cudnn7-py3:282"
PYTHON_VERSION: "3.6"
USE_CUDA_DOCKER_RUNTIME: "1"
resource_class: gpu.medium
machine:
image: default
steps:
- run:
<<: *setup_linux_system_environment
- run:
<<: *setup_ci_environment
- run:
name: Perf Test
no_output_timeout: "1h"
command: |
set -e
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1}
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
docker cp $id:/var/lib/jenkins/workspace/env /home/circleci/project/env
# This IAM user allows write access to S3 bucket for perf test numbers
echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_PERF_TEST_S3_BUCKET_V3}" >> /home/circleci/project/env
echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_PERF_TEST_S3_BUCKET_V3}" >> /home/circleci/project/env
docker cp /home/circleci/project/env $id:/var/lib/jenkins/workspace/env
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/short-perf-test-gpu.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
pytorch_doc_push:
environment:
BUILD_ENVIRONMENT: pytorch-doc-push
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda8-cudnn7-py3:282"
resource_class: large
machine:
image: default
steps:
- run:
<<: *setup_linux_system_environment
- run:
<<: *setup_ci_environment
- run:
<<: *install_doc_push_script
- run:
name: Doc Build and Push
no_output_timeout: "1h"
command: |
set -e
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1}
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
docker cp /home/circleci/project/doc_push_script.sh $id:/var/lib/jenkins/workspace/doc_push_script.sh
# master branch docs push
if [[ "${CIRCLE_BRANCH}" == "master" ]]; then
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./doc_push_script.sh docs/master master") | docker exec -u jenkins -i "$id" bash) 2>&1'
# stable release docs push. Due to some circleci limitations, we keep
# an eternal PR open (#16502) for merging v1.0.1 -> master for this job.
# XXX: The following code is only run on the v1.0.1 branch, which might
# not be exactly the same as what you see here.
elif [[ "${CIRCLE_BRANCH}" == "v1.0.1" ]]; then
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./doc_push_script.sh docs/stable 1.0.1") | docker exec -u jenkins -i "$id" bash) 2>&1'
# For open PRs: Do a dry_run of the docs build, don't push build
else
export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./doc_push_script.sh docs/master master dry_run") | docker exec -u jenkins -i "$id" bash) 2>&1'
fi
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
# Save the docs build so we can debug any problems
export DEBUG_COMMIT_DOCKER_IMAGE=${COMMIT_DOCKER_IMAGE}-debug
docker commit "$id" ${DEBUG_COMMIT_DOCKER_IMAGE}
docker push ${DEBUG_COMMIT_DOCKER_IMAGE}
pytorch_macos_10_13_py3_build:
macos:
xcode: "9.0"
steps:
- checkout
- run:
<<: *macos_brew_update
- run:
name: Build
environment:
BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-build
no_output_timeout: "1h"
command: |
set -e
export IN_CIRCLECI=1
# Install sccache
sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
sudo chmod +x /usr/local/bin/sccache
export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2
# This IAM user allows write access to S3 bucket for sccache
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V3}
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V3}
chmod a+x .jenkins/pytorch/macos-build.sh
unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts
mkdir -p /Users/distiller/pytorch-ci-env/workspace
# copy with -a to preserve relative structure (e.g., symlinks), and be recursive
cp -a /Users/distiller/project/. /Users/distiller/pytorch-ci-env/workspace
- persist_to_workspace:
root: /Users/distiller/pytorch-ci-env
paths:
- "*"
pytorch_macos_10_13_py3_test:
macos:
xcode: "9.0"
steps:
- run:
name: Prepare workspace
command: |
sudo mkdir -p /Users/distiller/pytorch-ci-env
sudo chmod -R 777 /Users/distiller/pytorch-ci-env
- attach_workspace:
at: /Users/distiller/pytorch-ci-env
- run:
<<: *macos_brew_update
- run:
name: Test
environment:
BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-test
no_output_timeout: "1h"
command: |
set -e
export IN_CIRCLECI=1
# copy with -a to preserve relative structure (e.g., symlinks), and be recursive
cp -a /Users/distiller/pytorch-ci-env/workspace/. /Users/distiller/project
chmod a+x .jenkins/pytorch/macos-test.sh
unbuffer .jenkins/pytorch/macos-test.sh 2>&1 | ts
pytorch_macos_10_13_cuda9_2_cudnn7_py3_build:
macos:
xcode: "9.0"
steps:
- checkout
- run:
<<: *macos_brew_update
- run:
name: Build
environment:
BUILD_ENVIRONMENT: pytorch-macos-10.13-cuda9.2-cudnn7-py3-build
no_output_timeout: "1h"
command: |
set -e
export IN_CIRCLECI=1
# Install CUDA 9.2
sudo rm -rf ~/cuda_9.2.64_mac_installer.app || true
curl https://s3.amazonaws.com/ossci-macos/cuda_9.2.64_mac_installer.zip -o ~/cuda_9.2.64_mac_installer.zip
unzip ~/cuda_9.2.64_mac_installer.zip -d ~/
sudo ~/cuda_9.2.64_mac_installer.app/Contents/MacOS/CUDAMacOSXInstaller --accept-eula --no-window
sudo cp /usr/local/cuda/lib/libcuda.dylib /Developer/NVIDIA/CUDA-9.2/lib/libcuda.dylib
sudo rm -rf /usr/local/cuda || true
# Install cuDNN 7.1 for CUDA 9.2
curl https://s3.amazonaws.com/ossci-macos/cudnn-9.2-osx-x64-v7.1.tgz -o ~/cudnn-9.2-osx-x64-v7.1.tgz
rm -rf ~/cudnn-9.2-osx-x64-v7.1 && mkdir ~/cudnn-9.2-osx-x64-v7.1
tar -xzvf ~/cudnn-9.2-osx-x64-v7.1.tgz -C ~/cudnn-9.2-osx-x64-v7.1
sudo cp ~/cudnn-9.2-osx-x64-v7.1/cuda/include/cudnn.h /Developer/NVIDIA/CUDA-9.2/include/
sudo cp ~/cudnn-9.2-osx-x64-v7.1/cuda/lib/libcudnn* /Developer/NVIDIA/CUDA-9.2/lib/
sudo chmod a+r /Developer/NVIDIA/CUDA-9.2/include/cudnn.h /Developer/NVIDIA/CUDA-9.2/lib/libcudnn*
# Install sccache
sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
sudo chmod +x /usr/local/bin/sccache
export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2
# This IAM user allows write access to S3 bucket for sccache
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V3}
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V3}
git submodule sync && git submodule update -q --init
chmod a+x .jenkins/pytorch/macos-build.sh
unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts

View File

@ -0,0 +1,5 @@
##############################################################################
##############################################################################
# Job specifications
##############################################################################
##############################################################################

View File

@ -0,0 +1,24 @@
# update_s3_htmls job
update_s3_htmls:
machine:
image: default
steps:
- run:
<<: *setup_linux_system_environment
- run:
<<: *binary_populate_env
- run:
<<: *binary_checkout
- run:
name: Update s3 htmls
no_output_timeout: "1h"
command: |
echo "declare -x \"AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}\"" >> /home/circleci/project/env
echo "declare -x \"AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}\"" >> /home/circleci/project/env
source /home/circleci/project/env
set -ex
retry pip install awscli==1.6
"$BUILDER_ROOT/cron/update_s3_htmls.sh"

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,4 @@
##############################################################################
# Smoke test job specifications
##############################################################################

View File

@ -0,0 +1,4 @@
##############################################################################
# Daily binary build trigger
##############################################################################

View File

@ -0,0 +1,26 @@
# Binary builds (subset, to smoke test that they'll work)
- binary_linux_manywheel_2.7mu_cpu_build
- binary_linux_manywheel_3.7m_cu100_build
- binary_linux_conda_2.7_cpu_build
# This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710
# - binary_linux_conda_3.6_cu90_build
- binary_linux_libtorch_2.7m_cu80_build
- binary_macos_wheel_3.6_cpu_build
- binary_macos_conda_2.7_cpu_build
- binary_macos_libtorch_2.7_cpu_build
- binary_linux_manywheel_2.7mu_cpu_test:
requires:
- binary_linux_manywheel_2.7mu_cpu_build
- binary_linux_manywheel_3.7m_cu100_test:
requires:
- binary_linux_manywheel_3.7m_cu100_build
- binary_linux_conda_2.7_cpu_test:
requires:
- binary_linux_conda_2.7_cpu_build
# This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710
# - binary_linux_conda_3.6_cu90_test:
# requires:
# - binary_linux_conda_3.6_cu90_build

View File

@ -0,0 +1,3 @@
##############################################################################
# Daily smoke test trigger
##############################################################################

View File

@ -0,0 +1,37 @@
# Caffe2 builds
- caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_build
- caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_test:
requires:
- caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_build
- caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build
- caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_test:
requires:
- caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build
- caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_build
- caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_test:
requires:
- caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_build
- caffe2_py2_mkl_ubuntu16_04_build
- caffe2_py2_mkl_ubuntu16_04_test:
requires:
- caffe2_py2_mkl_ubuntu16_04_build
- caffe2_py2_gcc4_8_ubuntu14_04_build
- caffe2_py2_gcc4_8_ubuntu14_04_test:
requires:
- caffe2_py2_gcc4_8_ubuntu14_04_build
- caffe2_onnx_py2_gcc5_ubuntu16_04_build
- caffe2_onnx_py2_gcc5_ubuntu16_04_test:
requires:
- caffe2_onnx_py2_gcc5_ubuntu16_04_build
- caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build
- caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test:
requires:
- caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build
- caffe2_py2_clang3_8_ubuntu16_04_build
- caffe2_py2_clang3_9_ubuntu16_04_build
- caffe2_py2_clang7_ubuntu16_04_build
- caffe2_py2_android_ubuntu16_04_build
- caffe2_py2_cuda9_0_cudnn7_centos7_build
- caffe2_py2_cuda9_0_cudnn7_centos7_test:
requires:
- caffe2_py2_cuda9_0_cudnn7_centos7_build

View File

@ -0,0 +1,4 @@
# Caffe2 MacOS builds
- caffe2_py2_ios_macos10_13_build
- caffe2_py2_system_macos10_13_build

View File

@ -0,0 +1 @@
# Nightly tests

View File

@ -0,0 +1,108 @@
- binary_linux_manywheel_2.7m_cpu_test:
requires:
- binary_linux_manywheel_2.7m_cpu_build
- binary_linux_manywheel_2.7mu_cpu_test:
requires:
- binary_linux_manywheel_2.7mu_cpu_build
- binary_linux_manywheel_3.5m_cpu_test:
requires:
- binary_linux_manywheel_3.5m_cpu_build
- binary_linux_manywheel_3.6m_cpu_test:
requires:
- binary_linux_manywheel_3.6m_cpu_build
- binary_linux_manywheel_3.7m_cpu_test:
requires:
- binary_linux_manywheel_3.7m_cpu_build
- binary_linux_manywheel_2.7m_cu80_test:
requires:
- binary_linux_manywheel_2.7m_cu80_build
- binary_linux_manywheel_2.7mu_cu80_test:
requires:
- binary_linux_manywheel_2.7mu_cu80_build
- binary_linux_manywheel_3.5m_cu80_test:
requires:
- binary_linux_manywheel_3.5m_cu80_build
- binary_linux_manywheel_3.6m_cu80_test:
requires:
- binary_linux_manywheel_3.6m_cu80_build
- binary_linux_manywheel_3.7m_cu80_test:
requires:
- binary_linux_manywheel_3.7m_cu80_build
- binary_linux_manywheel_2.7m_cu90_test:
requires:
- binary_linux_manywheel_2.7m_cu90_build
- binary_linux_manywheel_2.7mu_cu90_test:
requires:
- binary_linux_manywheel_2.7mu_cu90_build
- binary_linux_manywheel_3.5m_cu90_test:
requires:
- binary_linux_manywheel_3.5m_cu90_build
- binary_linux_manywheel_3.6m_cu90_test:
requires:
- binary_linux_manywheel_3.6m_cu90_build
- binary_linux_manywheel_3.7m_cu90_test:
requires:
- binary_linux_manywheel_3.7m_cu90_build
- binary_linux_manywheel_2.7m_cu100_test:
requires:
- binary_linux_manywheel_2.7m_cu100_build
- binary_linux_manywheel_2.7mu_cu100_test:
requires:
- binary_linux_manywheel_2.7mu_cu100_build
- binary_linux_manywheel_3.5m_cu100_test:
requires:
- binary_linux_manywheel_3.5m_cu100_build
- binary_linux_manywheel_3.6m_cu100_test:
requires:
- binary_linux_manywheel_3.6m_cu100_build
- binary_linux_manywheel_3.7m_cu100_test:
requires:
- binary_linux_manywheel_3.7m_cu100_build
- binary_linux_conda_2.7_cpu_test:
requires:
- binary_linux_conda_2.7_cpu_build
- binary_linux_conda_3.5_cpu_test:
requires:
- binary_linux_conda_3.5_cpu_build
- binary_linux_conda_3.6_cpu_test:
requires:
- binary_linux_conda_3.6_cpu_build
- binary_linux_conda_3.7_cpu_test:
requires:
- binary_linux_conda_3.7_cpu_build
- binary_linux_conda_2.7_cu80_test:
requires:
- binary_linux_conda_2.7_cu80_build
- binary_linux_conda_3.5_cu80_test:
requires:
- binary_linux_conda_3.5_cu80_build
- binary_linux_conda_3.6_cu80_test:
requires:
- binary_linux_conda_3.6_cu80_build
- binary_linux_conda_3.7_cu80_test:
requires:
- binary_linux_conda_3.7_cu80_build
- binary_linux_conda_2.7_cu90_test:
requires:
- binary_linux_conda_2.7_cu90_build
- binary_linux_conda_3.5_cu90_test:
requires:
- binary_linux_conda_3.5_cu90_build
- binary_linux_conda_3.6_cu90_test:
requires:
- binary_linux_conda_3.6_cu90_build
- binary_linux_conda_3.7_cu90_test:
requires:
- binary_linux_conda_3.7_cu90_build
- binary_linux_conda_2.7_cu100_test:
requires:
- binary_linux_conda_2.7_cu100_build
- binary_linux_conda_3.5_cu100_test:
requires:
- binary_linux_conda_3.5_cu100_build
- binary_linux_conda_3.6_cu100_test:
requires:
- binary_linux_conda_3.6_cu100_build
- binary_linux_conda_3.7_cu100_test:
requires:
- binary_linux_conda_3.7_cu100_build

View File

@ -0,0 +1,14 @@
#- binary_linux_libtorch_2.7m_cpu_test:
# requires:
# - binary_linux_libtorch_2.7m_cpu_build
#- binary_linux_libtorch_2.7m_cu80_test:
# requires:
# - binary_linux_libtorch_2.7m_cu80_build
#- binary_linux_libtorch_2.7m_cu90_test:
# requires:
# - binary_linux_libtorch_2.7m_cu90_build
#- binary_linux_libtorch_2.7m_cu100_test:
# requires:
# - binary_linux_libtorch_2.7m_cu100_build
# Nightly uploads

View File

@ -0,0 +1,197 @@
- binary_linux_manywheel_2.7m_cpu_upload:
context: org-member
requires:
- binary_linux_manywheel_2.7m_cpu_test
- binary_linux_manywheel_2.7mu_cpu_upload:
context: org-member
requires:
- binary_linux_manywheel_2.7mu_cpu_test
- binary_linux_manywheel_3.5m_cpu_upload:
context: org-member
requires:
- binary_linux_manywheel_3.5m_cpu_test
- binary_linux_manywheel_3.6m_cpu_upload:
context: org-member
requires:
- binary_linux_manywheel_3.6m_cpu_test
- binary_linux_manywheel_3.7m_cpu_upload:
context: org-member
requires:
- binary_linux_manywheel_3.7m_cpu_test
- binary_linux_manywheel_2.7m_cu80_upload:
context: org-member
requires:
- binary_linux_manywheel_2.7m_cu80_test
- binary_linux_manywheel_2.7mu_cu80_upload:
context: org-member
requires:
- binary_linux_manywheel_2.7mu_cu80_test
- binary_linux_manywheel_3.5m_cu80_upload:
context: org-member
requires:
- binary_linux_manywheel_3.5m_cu80_test
- binary_linux_manywheel_3.6m_cu80_upload:
context: org-member
requires:
- binary_linux_manywheel_3.6m_cu80_test
- binary_linux_manywheel_3.7m_cu80_upload:
context: org-member
requires:
- binary_linux_manywheel_3.7m_cu80_test
- binary_linux_manywheel_2.7m_cu90_upload:
context: org-member
requires:
- binary_linux_manywheel_2.7m_cu90_test
- binary_linux_manywheel_2.7mu_cu90_upload:
context: org-member
requires:
- binary_linux_manywheel_2.7mu_cu90_test
- binary_linux_manywheel_3.5m_cu90_upload:
context: org-member
requires:
- binary_linux_manywheel_3.5m_cu90_test
- binary_linux_manywheel_3.6m_cu90_upload:
context: org-member
requires:
- binary_linux_manywheel_3.6m_cu90_test
- binary_linux_manywheel_3.7m_cu90_upload:
context: org-member
requires:
- binary_linux_manywheel_3.7m_cu90_test
- binary_linux_manywheel_2.7m_cu100_upload:
context: org-member
requires:
- binary_linux_manywheel_2.7m_cu100_test
- binary_linux_manywheel_2.7mu_cu100_upload:
context: org-member
requires:
- binary_linux_manywheel_2.7mu_cu100_test
- binary_linux_manywheel_3.5m_cu100_upload:
context: org-member
requires:
- binary_linux_manywheel_3.5m_cu100_test
- binary_linux_manywheel_3.6m_cu100_upload:
context: org-member
requires:
- binary_linux_manywheel_3.6m_cu100_test
- binary_linux_manywheel_3.7m_cu100_upload:
context: org-member
requires:
- binary_linux_manywheel_3.7m_cu100_test
- binary_linux_conda_2.7_cpu_upload:
context: org-member
requires:
- binary_linux_conda_2.7_cpu_test
- binary_linux_conda_3.5_cpu_upload:
context: org-member
requires:
- binary_linux_conda_3.5_cpu_test
- binary_linux_conda_3.6_cpu_upload:
context: org-member
requires:
- binary_linux_conda_3.6_cpu_test
- binary_linux_conda_3.7_cpu_upload:
context: org-member
requires:
- binary_linux_conda_3.7_cpu_test
- binary_linux_conda_2.7_cu80_upload:
context: org-member
requires:
- binary_linux_conda_2.7_cu80_test
- binary_linux_conda_3.5_cu80_upload:
context: org-member
requires:
- binary_linux_conda_3.5_cu80_test
- binary_linux_conda_3.6_cu80_upload:
context: org-member
requires:
- binary_linux_conda_3.6_cu80_test
- binary_linux_conda_3.7_cu80_upload:
context: org-member
requires:
- binary_linux_conda_3.7_cu80_test
- binary_linux_conda_2.7_cu90_upload:
context: org-member
requires:
- binary_linux_conda_2.7_cu90_test
- binary_linux_conda_3.5_cu90_upload:
context: org-member
requires:
- binary_linux_conda_3.5_cu90_test
- binary_linux_conda_3.6_cu90_upload:
context: org-member
requires:
- binary_linux_conda_3.6_cu90_test
- binary_linux_conda_3.7_cu90_upload:
context: org-member
requires:
- binary_linux_conda_3.7_cu90_test
- binary_linux_conda_2.7_cu100_upload:
context: org-member
requires:
- binary_linux_conda_2.7_cu100_test
- binary_linux_conda_3.5_cu100_upload:
context: org-member
requires:
- binary_linux_conda_3.5_cu100_test
- binary_linux_conda_3.6_cu100_upload:
context: org-member
requires:
- binary_linux_conda_3.6_cu100_test
- binary_linux_conda_3.7_cu100_upload:
context: org-member
requires:
- binary_linux_conda_3.7_cu100_test
- binary_linux_libtorch_2.7m_cpu_upload:
context: org-member
requires:
- binary_linux_libtorch_2.7m_cpu_build
- binary_linux_libtorch_2.7m_cu80_upload:
context: org-member
requires:
- binary_linux_libtorch_2.7m_cu80_build
- binary_linux_libtorch_2.7m_cu90_upload:
context: org-member
requires:
- binary_linux_libtorch_2.7m_cu90_build
- binary_linux_libtorch_2.7m_cu100_upload:
context: org-member
requires:
- binary_linux_libtorch_2.7m_cu100_build
- binary_macos_wheel_2.7_cpu_upload:
context: org-member
requires:
- binary_macos_wheel_2.7_cpu_build
- binary_macos_wheel_3.5_cpu_upload:
context: org-member
requires:
- binary_macos_wheel_3.5_cpu_build
- binary_macos_wheel_3.6_cpu_upload:
context: org-member
requires:
- binary_macos_wheel_3.6_cpu_build
- binary_macos_wheel_3.7_cpu_upload:
context: org-member
requires:
- binary_macos_wheel_3.7_cpu_build
- binary_macos_conda_2.7_cpu_upload:
context: org-member
requires:
- binary_macos_conda_2.7_cpu_build
- binary_macos_conda_3.5_cpu_upload:
context: org-member
requires:
- binary_macos_conda_3.5_cpu_build
- binary_macos_conda_3.6_cpu_upload:
context: org-member
requires:
- binary_macos_conda_3.6_cpu_build
- binary_macos_conda_3.7_cpu_upload:
context: org-member
requires:
- binary_macos_conda_3.7_cpu_build
- binary_macos_libtorch_2.7_cpu_upload:
context: org-member
requires:
- binary_macos_libtorch_2.7_cpu_build

View File

@ -0,0 +1,78 @@
# Pytorch linux builds
- pytorch_linux_trusty_py2_7_9_build
- pytorch_linux_trusty_py2_7_9_test:
requires:
- pytorch_linux_trusty_py2_7_9_build
- pytorch_linux_trusty_py2_7_build
- pytorch_linux_trusty_py2_7_test:
requires:
- pytorch_linux_trusty_py2_7_build
- pytorch_linux_trusty_py3_5_build
- pytorch_linux_trusty_py3_5_test:
requires:
- pytorch_linux_trusty_py3_5_build
- pytorch_linux_trusty_py3_6_gcc4_8_build
- pytorch_linux_trusty_py3_6_gcc4_8_test:
requires:
- pytorch_linux_trusty_py3_6_gcc4_8_build
- pytorch_linux_trusty_py3_6_gcc5_4_build
- pytorch_linux_trusty_py3_6_gcc5_4_test:
requires:
- pytorch_linux_trusty_py3_6_gcc5_4_build
- pytorch_linux_trusty_py3_6_gcc7_build
- pytorch_linux_trusty_py3_6_gcc7_test:
requires:
- pytorch_linux_trusty_py3_6_gcc7_build
- pytorch_xla_linux_trusty_py3_6_gcc5_4_build:
filters:
branches:
only:
- master
- pytorch_xla_linux_trusty_py3_6_gcc5_4_test:
filters:
branches:
only:
- master
requires:
- pytorch_xla_linux_trusty_py3_6_gcc5_4_build
- pytorch_linux_trusty_pynightly_build
- pytorch_linux_trusty_pynightly_test:
requires:
- pytorch_linux_trusty_pynightly_build
- pytorch_linux_xenial_py3_clang5_asan_build
- pytorch_linux_xenial_py3_clang5_asan_test:
requires:
- pytorch_linux_xenial_py3_clang5_asan_build
- pytorch_linux_xenial_cuda8_cudnn7_py3_build
- pytorch_linux_xenial_cuda8_cudnn7_py3_test:
requires:
- pytorch_linux_xenial_cuda8_cudnn7_py3_build
- pytorch_linux_xenial_cuda8_cudnn7_py3_multigpu_test:
requires:
- pytorch_linux_xenial_cuda8_cudnn7_py3_build
- pytorch_linux_xenial_cuda8_cudnn7_py3_NO_AVX2_test:
requires:
- pytorch_linux_xenial_cuda8_cudnn7_py3_build
- pytorch_linux_xenial_cuda8_cudnn7_py3_NO_AVX_NO_AVX2_test:
requires:
- pytorch_linux_xenial_cuda8_cudnn7_py3_build
- pytorch_short_perf_test_gpu:
requires:
- pytorch_linux_xenial_cuda8_cudnn7_py3_build
- pytorch_doc_push:
requires:
- pytorch_linux_xenial_cuda8_cudnn7_py3_build
- pytorch_linux_xenial_cuda9_cudnn7_py2_build
- pytorch_linux_xenial_cuda9_cudnn7_py2_test:
requires:
- pytorch_linux_xenial_cuda9_cudnn7_py2_build
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
- pytorch_linux_xenial_cuda9_cudnn7_py3_test:
requires:
- pytorch_linux_xenial_cuda9_cudnn7_py3_build
- pytorch_linux_xenial_cuda9_2_cudnn7_py3_gcc7_build
- pytorch_linux_xenial_cuda9_2_cudnn7_py3_gcc7_test:
requires:
- pytorch_linux_xenial_cuda9_2_cudnn7_py3_gcc7_build
- pytorch_linux_xenial_cuda10_cudnn7_py3_gcc7_build

View File

@ -0,0 +1,7 @@
# Pytorch MacOS builds
- pytorch_macos_10_13_py3_build
- pytorch_macos_10_13_py3_test:
requires:
- pytorch_macos_10_13_py3_build
- pytorch_macos_10_13_cuda9_2_cudnn7_py3_build

View File

@ -0,0 +1,12 @@
# Scheduled to run 4 hours after the binary jobs start
update_s3_htmls:
triggers:
- schedule:
cron: "0 9 * * *"
filters:
branches:
only:
- master
jobs:
- update_s3_htmls:
context: org-member

View File

@ -0,0 +1,12 @@
##############################################################################
##############################################################################
# Workflows
##############################################################################
##############################################################################
# PR jobs
workflows:
version: 2
build:
jobs:

View File

@ -15,6 +15,11 @@ matrix:
- name: "Ensure consistent CircleCI YAML"
  python: "3.6"
  dist: xenial
install:
- sudo add-apt-repository universe
- sudo apt update
- sudo apt install graphviz
- pip3 install pygraphviz
  script: cd .circleci && ./ensure-consistency.sh
- name: "Python 2.7 Lint"
  python: "2.7"