Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-31 20:27:50 +08:00)

Compare commits: v1.12.1-rc ... v0.3.0 (81 commits)
| SHA1 |
|---|
| af3964a872 |
| 1645546aa9 |
| 350fad8a22 |
| 565d183042 |
| 2ebda372f6 |
| 28b846c486 |
| 9622eaa6fa |
| db8154df32 |
| b6eeea343d |
| 1fe9991554 |
| 00118024f3 |
| 87edf5a349 |
| 20972878cc |
| 0d1128d25c |
| 81dc60493d |
| b18df1cedf |
| 3976d77509 |
| 09c83673bf |
| 5b9a8f918e |
| f20fb2c1a1 |
| 4e00120117 |
| 2b3f35daea |
| c580437342 |
| 455e788fe6 |
| c980fb359b |
| bae45bb106 |
| 34557d80f4 |
| 1e77879b2a |
| ff52d424b2 |
| 4b7aa13b30 |
| e1f2d0916e |
| 4b5b7e53f6 |
| db66fa9436 |
| 392c89ab6a |
| cddf501fc5 |
| d0907d2c34 |
| 448a85a8e0 |
| ea3138fd09 |
| b89c96fe58 |
| 088f47bb89 |
| ddb3804f87 |
| a896311d06 |
| 937b634b5d |
| 004dfdc7cc |
| f8aa5e2ed7 |
| 8a49309f81 |
| 14de24d89c |
| c7cccc250e |
| 1f694e9a6e |
| 1108bced80 |
| c36d452224 |
| 11955b86d2 |
| 9a6788202b |
| d58bad4073 |
| f95e252984 |
| b49f0f8154 |
| 269c25267b |
| fde471ee2a |
| eb24d2ff6e |
| f768068c3b |
| c456451915 |
| f282d1dc7c |
| 2a3cae0f3e |
| 3d9630abc2 |
| da7a5147db |
| 5df8e582cd |
| 5dff261598 |
| aa0c8920af |
| a3b658bf3b |
| 94e89f3911 |
| f0956ad9ec |
| 452ea78f43 |
| 3d5d66868e |
| cf373e25e2 |
| 91d764c781 |
| 524235bb71 |
| e035fa028b |
| 58a928c3b9 |
| 4f1eefa8ad |
| 4251c151e3 |
| c0931a3a4d |
							
								
								
									
.bazelrc (deleted, 27 lines)
							| @ -1,27 +0,0 @@ | |||||||
| build --cxxopt=--std=c++14 |  | ||||||
| build --copt=-I. |  | ||||||
| # Bazel does not support including its cc_library targets as system |  | ||||||
| # headers. We work around this for generated code |  | ||||||
| # (e.g. c10/macros/cmake_macros.h) by making the generated directory a |  | ||||||
| # system include path. |  | ||||||
| build --copt=-isystem --copt bazel-out/k8-fastbuild/bin |  | ||||||
| build --copt=-isystem --copt bazel-out/darwin-fastbuild/bin |  | ||||||
| build --experimental_ui_max_stdouterr_bytes=2048576 |  | ||||||
|  |  | ||||||
| # Configuration to disable tty features for environments like CI |  | ||||||
| build:no-tty --curses no |  | ||||||
| build:no-tty --progress_report_interval 10 |  | ||||||
| build:no-tty --show_progress_rate_limit 10 |  | ||||||
|  |  | ||||||
| # Configuration to build with GPU support |  | ||||||
| build:gpu --define=cuda=true |  | ||||||
| # define a separate build folder for faster switching between configs |  | ||||||
| build:gpu --platform_suffix=-gpu |  | ||||||
| # See the note on the config-less build for details about why we are |  | ||||||
| # doing this. We must also do it for the "-gpu" platform suffix. |  | ||||||
| build --copt=-isystem --copt=bazel-out/k8-fastbuild-gpu/bin |  | ||||||
| # rules_cuda configuration |  | ||||||
| build:gpu --@rules_cuda//cuda:enable_cuda |  | ||||||
| build:gpu --@rules_cuda//cuda:cuda_targets=sm_52 |  | ||||||
| build:gpu --@rules_cuda//cuda:compiler=nvcc |  | ||||||
| build:gpu --repo_env=CUDA_PATH=/usr/local/cuda |  | ||||||
| @ -1 +0,0 @@ | |||||||
| 4.2.1 |  | ||||||
| @ -1,15 +0,0 @@ | |||||||
| [buildfile] |  | ||||||
| name = BUILD.buck |  | ||||||
|  |  | ||||||
| [repositories] |  | ||||||
|   bazel_skylib = third_party/bazel-skylib/ |  | ||||||
|  |  | ||||||
| [download] |  | ||||||
|   in_build = true |  | ||||||
|  |  | ||||||
| [cxx] |  | ||||||
|   cxxflags = -std=c++17 |  | ||||||
|   should_remap_host_platform = true |  | ||||||
|  |  | ||||||
| [project] |  | ||||||
|   default_flavors_mode=all |  | ||||||
							
								
								
									
.circleci/.gitignore (vendored, deleted, 2 lines)
							| @ -1,2 +0,0 @@ | |||||||
| *.svg |  | ||||||
| *.png |  | ||||||
| @ -1,171 +0,0 @@ | |||||||
| """ |  | ||||||
| This module models the tree of configuration variants |  | ||||||
| for "smoketest" builds. |  | ||||||
|  |  | ||||||
| Each subclass of ConfigNode represents a layer of the configuration hierarchy. |  | ||||||
| These tree nodes encapsulate the logic for whether a branch of the hierarchy |  | ||||||
| should be "pruned". |  | ||||||
| """ |  | ||||||
|  |  | ||||||
| from collections import OrderedDict |  | ||||||
|  |  | ||||||
| from cimodel.lib.conf_tree import ConfigNode |  | ||||||
| import cimodel.data.dimensions as dimensions |  | ||||||
|  |  | ||||||
|  |  | ||||||
| LINKING_DIMENSIONS = [ |  | ||||||
|     "shared", |  | ||||||
|     "static", |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| DEPS_INCLUSION_DIMENSIONS = [ |  | ||||||
|     "with-deps", |  | ||||||
|     "without-deps", |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_processor_arch_name(gpu_version): |  | ||||||
|     return "cpu" if not gpu_version else ( |  | ||||||
|         "cu" + gpu_version.strip("cuda") if gpu_version.startswith("cuda") else gpu_version |  | ||||||
|     ) |  | ||||||
|  |  | ||||||
| CONFIG_TREE_DATA = OrderedDict( |  | ||||||
| ) |  | ||||||
|  |  | ||||||
| # GCC config variants: |  | ||||||
| # |  | ||||||
| # All the nightlies (except libtorch with new gcc ABI) are built with devtoolset7, |  | ||||||
| # which can only build with old gcc ABI. It is better than devtoolset3 |  | ||||||
| # because it understands avx512, which is needed for good fbgemm performance. |  | ||||||
| # |  | ||||||
| # Libtorch with new gcc ABI is built with gcc 5.4 on Ubuntu 16.04. |  | ||||||
| LINUX_GCC_CONFIG_VARIANTS = OrderedDict( |  | ||||||
|     manywheel=['devtoolset7'], |  | ||||||
|     conda=['devtoolset7'], |  | ||||||
|     libtorch=[ |  | ||||||
|         "devtoolset7", |  | ||||||
|         "gcc5.4_cxx11-abi", |  | ||||||
|     ], |  | ||||||
| ) |  | ||||||
|  |  | ||||||
| WINDOWS_LIBTORCH_CONFIG_VARIANTS = [ |  | ||||||
|     "debug", |  | ||||||
|     "release", |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class TopLevelNode(ConfigNode): |  | ||||||
|     def __init__(self, node_name, config_tree_data, smoke): |  | ||||||
|         super(TopLevelNode, self).__init__(None, node_name) |  | ||||||
|  |  | ||||||
|         self.config_tree_data = config_tree_data |  | ||||||
|         self.props["smoke"] = smoke |  | ||||||
|  |  | ||||||
|     def get_children(self): |  | ||||||
|         return [OSConfigNode(self, x, c, p) for (x, (c, p)) in self.config_tree_data.items()] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class OSConfigNode(ConfigNode): |  | ||||||
|     def __init__(self, parent, os_name, gpu_versions, py_tree): |  | ||||||
|         super(OSConfigNode, self).__init__(parent, os_name) |  | ||||||
|  |  | ||||||
|         self.py_tree = py_tree |  | ||||||
|         self.props["os_name"] = os_name |  | ||||||
|         self.props["gpu_versions"] = gpu_versions |  | ||||||
|  |  | ||||||
|     def get_children(self): |  | ||||||
|         return [PackageFormatConfigNode(self, k, v) for k, v in self.py_tree.items()] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class PackageFormatConfigNode(ConfigNode): |  | ||||||
|     def __init__(self, parent, package_format, python_versions): |  | ||||||
|         super(PackageFormatConfigNode, self).__init__(parent, package_format) |  | ||||||
|  |  | ||||||
|         self.props["python_versions"] = python_versions |  | ||||||
|         self.props["package_format"] = package_format |  | ||||||
|  |  | ||||||
|  |  | ||||||
|     def get_children(self): |  | ||||||
|         if self.find_prop("os_name") == "linux": |  | ||||||
|             return [LinuxGccConfigNode(self, v) for v in LINUX_GCC_CONFIG_VARIANTS[self.find_prop("package_format")]] |  | ||||||
|         elif self.find_prop("os_name") == "windows" and self.find_prop("package_format") == "libtorch": |  | ||||||
|             return [WindowsLibtorchConfigNode(self, v) for v in WINDOWS_LIBTORCH_CONFIG_VARIANTS] |  | ||||||
|         else: |  | ||||||
|             return [ArchConfigNode(self, v) for v in self.find_prop("gpu_versions")] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class LinuxGccConfigNode(ConfigNode): |  | ||||||
|     def __init__(self, parent, gcc_config_variant): |  | ||||||
|         super(LinuxGccConfigNode, self).__init__(parent, "GCC_CONFIG_VARIANT=" + str(gcc_config_variant)) |  | ||||||
|  |  | ||||||
|         self.props["gcc_config_variant"] = gcc_config_variant |  | ||||||
|  |  | ||||||
|     def get_children(self): |  | ||||||
|         gpu_versions = self.find_prop("gpu_versions") |  | ||||||
|  |  | ||||||
|         # XXX devtoolset7 on CUDA 9.0 is temporarily disabled |  | ||||||
|         # see https://github.com/pytorch/pytorch/issues/20066 |  | ||||||
|         if self.find_prop("gcc_config_variant") == 'devtoolset7': |  | ||||||
|             gpu_versions = filter(lambda x: x != "cuda_90", gpu_versions) |  | ||||||
|  |  | ||||||
|         # XXX disabling conda rocm build since docker images are not there |  | ||||||
|         if self.find_prop("package_format") == 'conda': |  | ||||||
|             gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions) |  | ||||||
|  |  | ||||||
| # XXX libtorch rocm build is temporarily disabled |  | ||||||
|         if self.find_prop("package_format") == 'libtorch': |  | ||||||
|             gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions) |  | ||||||
|  |  | ||||||
|         return [ArchConfigNode(self, v) for v in gpu_versions] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class WindowsLibtorchConfigNode(ConfigNode): |  | ||||||
|     def __init__(self, parent, libtorch_config_variant): |  | ||||||
|         super(WindowsLibtorchConfigNode, self).__init__(parent, "LIBTORCH_CONFIG_VARIANT=" + str(libtorch_config_variant)) |  | ||||||
|  |  | ||||||
|         self.props["libtorch_config_variant"] = libtorch_config_variant |  | ||||||
|  |  | ||||||
|     def get_children(self): |  | ||||||
|         return [ArchConfigNode(self, v) for v in self.find_prop("gpu_versions")] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ArchConfigNode(ConfigNode): |  | ||||||
|     def __init__(self, parent, gpu): |  | ||||||
|         super(ArchConfigNode, self).__init__(parent, get_processor_arch_name(gpu)) |  | ||||||
|  |  | ||||||
|         self.props["gpu"] = gpu |  | ||||||
|  |  | ||||||
|     def get_children(self): |  | ||||||
|         return [PyVersionConfigNode(self, v) for v in self.find_prop("python_versions")] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class PyVersionConfigNode(ConfigNode): |  | ||||||
|     def __init__(self, parent, pyver): |  | ||||||
|         super(PyVersionConfigNode, self).__init__(parent, pyver) |  | ||||||
|  |  | ||||||
|         self.props["pyver"] = pyver |  | ||||||
|  |  | ||||||
|     def get_children(self): |  | ||||||
|         package_format = self.find_prop("package_format") |  | ||||||
|         os_name = self.find_prop("os_name") |  | ||||||
|  |  | ||||||
|         has_libtorch_variants = package_format == "libtorch" and os_name == "linux" |  | ||||||
|         linking_variants = LINKING_DIMENSIONS if has_libtorch_variants else [] |  | ||||||
|  |  | ||||||
|         return [LinkingVariantConfigNode(self, v) for v in linking_variants] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class LinkingVariantConfigNode(ConfigNode): |  | ||||||
|     def __init__(self, parent, linking_variant): |  | ||||||
|         super(LinkingVariantConfigNode, self).__init__(parent, linking_variant) |  | ||||||
|  |  | ||||||
|     def get_children(self): |  | ||||||
|         return [DependencyInclusionConfigNode(self, v) for v in DEPS_INCLUSION_DIMENSIONS] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DependencyInclusionConfigNode(ConfigNode): |  | ||||||
|     def __init__(self, parent, deps_variant): |  | ||||||
|         super(DependencyInclusionConfigNode, self).__init__(parent, deps_variant) |  | ||||||
|  |  | ||||||
|         self.props["libtorch_variant"] = "-".join([self.parent.get_label(), self.get_label()]) |  | ||||||
| @ -1,243 +0,0 @@ | |||||||
| from collections import OrderedDict |  | ||||||
|  |  | ||||||
| import cimodel.data.simple.util.branch_filters as branch_filters |  | ||||||
| import cimodel.data.binary_build_data as binary_build_data |  | ||||||
| import cimodel.lib.conf_tree as conf_tree |  | ||||||
| import cimodel.lib.miniutils as miniutils |  | ||||||
|  |  | ||||||
| class Conf(object): |  | ||||||
|     def __init__(self, os, gpu_version, pydistro, parms, smoke, libtorch_variant, gcc_config_variant, libtorch_config_variant): |  | ||||||
|  |  | ||||||
|         self.os = os |  | ||||||
|         self.gpu_version = gpu_version |  | ||||||
|         self.pydistro = pydistro |  | ||||||
|         self.parms = parms |  | ||||||
|         self.smoke = smoke |  | ||||||
|         self.libtorch_variant = libtorch_variant |  | ||||||
|         self.gcc_config_variant = gcc_config_variant |  | ||||||
|         self.libtorch_config_variant = libtorch_config_variant |  | ||||||
|  |  | ||||||
|     def gen_build_env_parms(self): |  | ||||||
|         elems = [self.pydistro] + self.parms + [binary_build_data.get_processor_arch_name(self.gpu_version)] |  | ||||||
|         if self.gcc_config_variant is not None: |  | ||||||
|             elems.append(str(self.gcc_config_variant)) |  | ||||||
|         if self.libtorch_config_variant is not None: |  | ||||||
|             elems.append(str(self.libtorch_config_variant)) |  | ||||||
|         return elems |  | ||||||
|  |  | ||||||
|     def gen_docker_image(self): |  | ||||||
|         if self.gcc_config_variant == 'gcc5.4_cxx11-abi': |  | ||||||
|             if self.gpu_version is None: |  | ||||||
|                 return miniutils.quote("pytorch/libtorch-cxx11-builder:cpu") |  | ||||||
|             else: |  | ||||||
|                 return miniutils.quote( |  | ||||||
|                     f"pytorch/libtorch-cxx11-builder:{self.gpu_version}" |  | ||||||
|                 ) |  | ||||||
|         if self.pydistro == "conda": |  | ||||||
|             if self.gpu_version is None: |  | ||||||
|                 return miniutils.quote("pytorch/conda-builder:cpu") |  | ||||||
|             else: |  | ||||||
|                 return miniutils.quote( |  | ||||||
|                     f"pytorch/conda-builder:{self.gpu_version}" |  | ||||||
|                 ) |  | ||||||
|  |  | ||||||
|         docker_word_substitution = { |  | ||||||
|             "manywheel": "manylinux", |  | ||||||
|             "libtorch": "manylinux", |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|         docker_distro_prefix = miniutils.override(self.pydistro, docker_word_substitution) |  | ||||||
|  |  | ||||||
|         # The cpu nightlies are built on the pytorch/manylinux-cuda102 docker image |  | ||||||
|         # TODO cuda images should consolidate into tag-base images similar to rocm |  | ||||||
|         alt_docker_suffix = "cuda102" if not self.gpu_version else ( |  | ||||||
|             "rocm:" + self.gpu_version.strip("rocm") if self.gpu_version.startswith("rocm") else self.gpu_version) |  | ||||||
|         docker_distro_suffix = alt_docker_suffix if self.pydistro != "conda" else ( |  | ||||||
|             "cuda" if alt_docker_suffix.startswith("cuda") else "rocm") |  | ||||||
|         return miniutils.quote("pytorch/" + docker_distro_prefix + "-" + docker_distro_suffix) |  | ||||||
|  |  | ||||||
|     def get_name_prefix(self): |  | ||||||
|         return "smoke" if self.smoke else "binary" |  | ||||||
|  |  | ||||||
|     def gen_build_name(self, build_or_test, nightly): |  | ||||||
|  |  | ||||||
|         parts = [self.get_name_prefix(), self.os] + self.gen_build_env_parms() |  | ||||||
|  |  | ||||||
|         if nightly: |  | ||||||
|             parts.append("nightly") |  | ||||||
|  |  | ||||||
|         if self.libtorch_variant: |  | ||||||
|             parts.append(self.libtorch_variant) |  | ||||||
|  |  | ||||||
|         if not self.smoke: |  | ||||||
|             parts.append(build_or_test) |  | ||||||
|  |  | ||||||
|         joined = "_".join(parts) |  | ||||||
|         return joined.replace(".", "_") |  | ||||||
|  |  | ||||||
|     def gen_workflow_job(self, phase, upload_phase_dependency=None, nightly=False): |  | ||||||
|         job_def = OrderedDict() |  | ||||||
|         job_def["name"] = self.gen_build_name(phase, nightly) |  | ||||||
|         job_def["build_environment"] = miniutils.quote(" ".join(self.gen_build_env_parms())) |  | ||||||
|         if self.smoke: |  | ||||||
|             job_def["requires"] = [ |  | ||||||
|                 "update_s3_htmls", |  | ||||||
|             ] |  | ||||||
|             job_def["filters"] = branch_filters.gen_filter_dict( |  | ||||||
|                 branches_list=["postnightly"], |  | ||||||
|             ) |  | ||||||
|         else: |  | ||||||
|             filter_branch = r"/.*/" |  | ||||||
|             job_def["filters"] = branch_filters.gen_filter_dict( |  | ||||||
|                 branches_list=[filter_branch], |  | ||||||
|                 tags_list=[branch_filters.RC_PATTERN], |  | ||||||
|             ) |  | ||||||
|         if self.libtorch_variant: |  | ||||||
|             job_def["libtorch_variant"] = miniutils.quote(self.libtorch_variant) |  | ||||||
|         if phase == "test": |  | ||||||
|             if not self.smoke: |  | ||||||
|                 job_def["requires"] = [self.gen_build_name("build", nightly)] |  | ||||||
|             if not (self.smoke and self.os == "macos") and self.os != "windows": |  | ||||||
|                 job_def["docker_image"] = self.gen_docker_image() |  | ||||||
|  |  | ||||||
|             # fix this. only works on cuda not rocm |  | ||||||
|             if self.os != "windows" and self.gpu_version: |  | ||||||
|                 job_def["use_cuda_docker_runtime"] = miniutils.quote("1") |  | ||||||
|         else: |  | ||||||
|             if self.os == "linux" and phase != "upload": |  | ||||||
|                 job_def["docker_image"] = self.gen_docker_image() |  | ||||||
|  |  | ||||||
|         if phase == "test": |  | ||||||
|             if self.gpu_version: |  | ||||||
|                 if self.os == "windows": |  | ||||||
|                     job_def["executor"] = "windows-with-nvidia-gpu" |  | ||||||
|                 else: |  | ||||||
|                     job_def["resource_class"] = "gpu.medium" |  | ||||||
|  |  | ||||||
|         os_name = miniutils.override(self.os, {"macos": "mac"}) |  | ||||||
|         job_name = "_".join([self.get_name_prefix(), os_name, phase]) |  | ||||||
|         return {job_name : job_def} |  | ||||||
|  |  | ||||||
|     def gen_upload_job(self, phase, requires_dependency): |  | ||||||
|         """Generate binary_upload job for configuration |  | ||||||
|  |  | ||||||
|         Output looks similar to: |  | ||||||
|  |  | ||||||
|       - binary_upload: |  | ||||||
|           name: binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_upload |  | ||||||
|           context: org-member |  | ||||||
|           requires: binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_test |  | ||||||
|           filters: |  | ||||||
|             branches: |  | ||||||
|               only: |  | ||||||
|                 - nightly |  | ||||||
|             tags: |  | ||||||
|               only: /v[0-9]+(\\.[0-9]+)*-rc[0-9]+/ |  | ||||||
|           package_type: manywheel |  | ||||||
|           upload_subfolder: cu113 |  | ||||||
|         """ |  | ||||||
|         return { |  | ||||||
|             "binary_upload": OrderedDict({ |  | ||||||
|                 "name": self.gen_build_name(phase, nightly=True), |  | ||||||
|                 "context": "org-member", |  | ||||||
|                 "requires": [self.gen_build_name( |  | ||||||
|                     requires_dependency, |  | ||||||
|                     nightly=True |  | ||||||
|                 )], |  | ||||||
|                 "filters": branch_filters.gen_filter_dict( |  | ||||||
|                     branches_list=["nightly"], |  | ||||||
|                     tags_list=[branch_filters.RC_PATTERN], |  | ||||||
|                 ), |  | ||||||
|                 "package_type": self.pydistro, |  | ||||||
|                 "upload_subfolder": binary_build_data.get_processor_arch_name( |  | ||||||
|                     self.gpu_version, |  | ||||||
|                 ), |  | ||||||
|             }) |  | ||||||
|         } |  | ||||||
|  |  | ||||||
| def get_root(smoke, name): |  | ||||||
|  |  | ||||||
|     return binary_build_data.TopLevelNode( |  | ||||||
|         name, |  | ||||||
|         binary_build_data.CONFIG_TREE_DATA, |  | ||||||
|         smoke, |  | ||||||
|     ) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def gen_build_env_list(smoke): |  | ||||||
|  |  | ||||||
|     root = get_root(smoke, "N/A") |  | ||||||
|     config_list = conf_tree.dfs(root) |  | ||||||
|  |  | ||||||
|     newlist = [] |  | ||||||
|     for c in config_list: |  | ||||||
|         conf = Conf( |  | ||||||
|             c.find_prop("os_name"), |  | ||||||
|             c.find_prop("gpu"), |  | ||||||
|             c.find_prop("package_format"), |  | ||||||
|             [c.find_prop("pyver")], |  | ||||||
|             c.find_prop("smoke") and not (c.find_prop("os_name") == "macos_arm64"),  # don't test arm64 |  | ||||||
|             c.find_prop("libtorch_variant"), |  | ||||||
|             c.find_prop("gcc_config_variant"), |  | ||||||
|             c.find_prop("libtorch_config_variant"), |  | ||||||
|         ) |  | ||||||
|         newlist.append(conf) |  | ||||||
|  |  | ||||||
|     return newlist |  | ||||||
|  |  | ||||||
| def predicate_exclude_macos(config): |  | ||||||
|     return config.os == "linux" or config.os == "windows" |  | ||||||
|  |  | ||||||
| def get_nightly_uploads(): |  | ||||||
|     configs = gen_build_env_list(False) |  | ||||||
|     mylist = [] |  | ||||||
|     for conf in configs: |  | ||||||
|         phase_dependency = "test" if predicate_exclude_macos(conf) else "build" |  | ||||||
|         mylist.append(conf.gen_upload_job("upload", phase_dependency)) |  | ||||||
|  |  | ||||||
|     return mylist |  | ||||||
|  |  | ||||||
| def get_post_upload_jobs(): |  | ||||||
|     return [ |  | ||||||
|         { |  | ||||||
|             "update_s3_htmls": { |  | ||||||
|                 "name": "update_s3_htmls", |  | ||||||
|                 "context": "org-member", |  | ||||||
|                 "filters": branch_filters.gen_filter_dict( |  | ||||||
|                     branches_list=["postnightly"], |  | ||||||
|                 ), |  | ||||||
|             }, |  | ||||||
|         }, |  | ||||||
|     ] |  | ||||||
|  |  | ||||||
| def get_nightly_tests(): |  | ||||||
|  |  | ||||||
|     configs = gen_build_env_list(False) |  | ||||||
|     filtered_configs = filter(predicate_exclude_macos, configs) |  | ||||||
|  |  | ||||||
|     tests = [] |  | ||||||
|     for conf_options in filtered_configs: |  | ||||||
|         yaml_item = conf_options.gen_workflow_job("test", nightly=True) |  | ||||||
|         tests.append(yaml_item) |  | ||||||
|  |  | ||||||
|     return tests |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_jobs(toplevel_key, smoke): |  | ||||||
|     jobs_list = [] |  | ||||||
|     configs = gen_build_env_list(smoke) |  | ||||||
|     phase = "build" if toplevel_key == "binarybuilds" else "test" |  | ||||||
|     for build_config in configs: |  | ||||||
|         # don't test for macos_arm64 as it's cross compiled |  | ||||||
|         if phase != "test" or build_config.os != "macos_arm64": |  | ||||||
|             jobs_list.append(build_config.gen_workflow_job(phase, nightly=True)) |  | ||||||
|  |  | ||||||
|     return jobs_list |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_binary_build_jobs(): |  | ||||||
|     return get_jobs("binarybuilds", False) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_binary_smoke_test_jobs(): |  | ||||||
|     return get_jobs("binarysmoketests", True) |  | ||||||
| @ -1,23 +0,0 @@ | |||||||
| PHASES = ["build", "test"] |  | ||||||
|  |  | ||||||
| CUDA_VERSIONS = [ |  | ||||||
|     "102", |  | ||||||
|     "113", |  | ||||||
|     "116", |  | ||||||
| ] |  | ||||||
|  |  | ||||||
| ROCM_VERSIONS = [ |  | ||||||
|     "4.3.1", |  | ||||||
|     "4.5.2", |  | ||||||
| ] |  | ||||||
|  |  | ||||||
| ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS] |  | ||||||
|  |  | ||||||
| GPU_VERSIONS = [None] + ["cuda" + v for v in CUDA_VERSIONS] + ROCM_VERSION_LABELS |  | ||||||
|  |  | ||||||
| STANDARD_PYTHON_VERSIONS = [ |  | ||||||
|     "3.7", |  | ||||||
|     "3.8", |  | ||||||
|     "3.9", |  | ||||||
|     "3.10" |  | ||||||
| ] |  | ||||||
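For reference, the lists above expand to the following GPU version labels (a trivial sketch evaluating the same expressions):

```python
CUDA_VERSIONS = ["102", "113", "116"]
ROCM_VERSIONS = ["4.3.1", "4.5.2"]
ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]

GPU_VERSIONS = [None] + ["cuda" + v for v in CUDA_VERSIONS] + ROCM_VERSION_LABELS
print(GPU_VERSIONS)
# [None, 'cuda102', 'cuda113', 'cuda116', 'rocm4.3.1', 'rocm4.5.2']
```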
| @ -1,280 +0,0 @@ | |||||||
| from cimodel.lib.conf_tree import ConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| CONFIG_TREE_DATA = [ |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_major_pyver(dotted_version): |  | ||||||
|     parts = dotted_version.split(".") |  | ||||||
|     return "py" + parts[0] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class TreeConfigNode(ConfigNode): |  | ||||||
|     def __init__(self, parent, node_name, subtree): |  | ||||||
|         super(TreeConfigNode, self).__init__(parent, self.modify_label(node_name)) |  | ||||||
|         self.subtree = subtree |  | ||||||
|         self.init2(node_name) |  | ||||||
|  |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return label |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         pass |  | ||||||
|  |  | ||||||
|     def get_children(self): |  | ||||||
|         return [self.child_constructor()(self, k, v) for (k, v) in self.subtree] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class TopLevelNode(TreeConfigNode): |  | ||||||
|     def __init__(self, node_name, subtree): |  | ||||||
|         super(TopLevelNode, self).__init__(None, node_name, subtree) |  | ||||||
|  |  | ||||||
|     # noinspection PyMethodMayBeStatic |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return DistroConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class DistroConfigNode(TreeConfigNode): |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["distro_name"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         distro = self.find_prop("distro_name") |  | ||||||
|  |  | ||||||
|         next_nodes = { |  | ||||||
|             "xenial": XenialCompilerConfigNode, |  | ||||||
|             "bionic": BionicCompilerConfigNode, |  | ||||||
|         } |  | ||||||
|         return next_nodes[distro] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class PyVerConfigNode(TreeConfigNode): |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["pyver"] = node_name |  | ||||||
|         self.props["abbreviated_pyver"] = get_major_pyver(node_name) |  | ||||||
|         if node_name == "3.9": |  | ||||||
|             self.props["abbreviated_pyver"] = "py3.9" |  | ||||||
|  |  | ||||||
|     # noinspection PyMethodMayBeStatic |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ExperimentalFeatureConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ExperimentalFeatureConfigNode(TreeConfigNode): |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["experimental_feature"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         experimental_feature = self.find_prop("experimental_feature") |  | ||||||
|  |  | ||||||
|         next_nodes = { |  | ||||||
|             "asan": AsanConfigNode, |  | ||||||
|             "xla": XlaConfigNode, |  | ||||||
|             "mps": MPSConfigNode, |  | ||||||
|             "vulkan": VulkanConfigNode, |  | ||||||
|             "parallel_tbb": ParallelTBBConfigNode, |  | ||||||
|             "crossref": CrossRefConfigNode, |  | ||||||
|             "parallel_native": ParallelNativeConfigNode, |  | ||||||
|             "onnx": ONNXConfigNode, |  | ||||||
|             "libtorch": LibTorchConfigNode, |  | ||||||
|             "important": ImportantConfigNode, |  | ||||||
|             "build_only": BuildOnlyConfigNode, |  | ||||||
|             "shard_test": ShardTestConfigNode, |  | ||||||
|             "cuda_gcc_override": CudaGccOverrideConfigNode, |  | ||||||
|             "pure_torch": PureTorchConfigNode, |  | ||||||
|             "slow_gradcheck": SlowGradcheckConfigNode, |  | ||||||
|         } |  | ||||||
|         return next_nodes[experimental_feature] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class SlowGradcheckConfigNode(TreeConfigNode): |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["is_slow_gradcheck"] = True |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ExperimentalFeatureConfigNode |  | ||||||
|  |  | ||||||
| class PureTorchConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return "PURE_TORCH=" + str(label) |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["is_pure_torch"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ImportantConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class XlaConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return "XLA=" + str(label) |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["is_xla"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ImportantConfigNode |  | ||||||
|  |  | ||||||
| class MPSConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return "MPS=" + str(label) |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["is_mps"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ImportantConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class AsanConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return "Asan=" + str(label) |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["is_asan"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ExperimentalFeatureConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ONNXConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return "Onnx=" + str(label) |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["is_onnx"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ImportantConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class VulkanConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return "Vulkan=" + str(label) |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["is_vulkan"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ImportantConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ParallelTBBConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return "PARALLELTBB=" + str(label) |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["parallel_backend"] = "paralleltbb" |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ImportantConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CrossRefConfigNode(TreeConfigNode): |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["is_crossref"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ImportantConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ParallelNativeConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return "PARALLELNATIVE=" + str(label) |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["parallel_backend"] = "parallelnative" |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ImportantConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class LibTorchConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return "BUILD_TEST_LIBTORCH=" + str(label) |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["is_libtorch"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ExperimentalFeatureConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CudaGccOverrideConfigNode(TreeConfigNode): |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["cuda_gcc_override"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ExperimentalFeatureConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class BuildOnlyConfigNode(TreeConfigNode): |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["build_only"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ExperimentalFeatureConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ShardTestConfigNode(TreeConfigNode): |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["shard_test"] = node_name |  | ||||||
|  |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return ImportantConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ImportantConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return "IMPORTANT=" + str(label) |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["is_important"] = node_name |  | ||||||
|  |  | ||||||
|     def get_children(self): |  | ||||||
|         return [] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class XenialCompilerConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return label or "<unspecified>" |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["compiler_name"] = node_name |  | ||||||
|  |  | ||||||
|     # noinspection PyMethodMayBeStatic |  | ||||||
|     def child_constructor(self): |  | ||||||
|  |  | ||||||
|         return XenialCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class BionicCompilerConfigNode(TreeConfigNode): |  | ||||||
|     def modify_label(self, label): |  | ||||||
|         return label or "<unspecified>" |  | ||||||
|  |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["compiler_name"] = node_name |  | ||||||
|  |  | ||||||
|     # noinspection PyMethodMayBeStatic |  | ||||||
|     def child_constructor(self): |  | ||||||
|  |  | ||||||
|         return BionicCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class XenialCompilerVersionConfigNode(TreeConfigNode): |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["compiler_version"] = node_name |  | ||||||
|  |  | ||||||
|     # noinspection PyMethodMayBeStatic |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return PyVerConfigNode |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class BionicCompilerVersionConfigNode(TreeConfigNode): |  | ||||||
|     def init2(self, node_name): |  | ||||||
|         self.props["compiler_version"] = node_name |  | ||||||
|  |  | ||||||
|     # noinspection PyMethodMayBeStatic |  | ||||||
|     def child_constructor(self): |  | ||||||
|         return PyVerConfigNode |  | ||||||
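`CONFIG_TREE_DATA` is empty at this revision, so the expected data shape is not obvious from the file alone. Judging from `TreeConfigNode.get_children` (which walks `(key, subtree)` pairs) and the `child_constructor` chain distro → compiler → compiler version → Python version → experimental feature, an entry would plausibly look like the sketch below; the concrete values are illustrative, not recovered from the deleted data.

```python
# Hypothetical shape of one CONFIG_TREE_DATA entry: nested (key, subtree) pairs,
# consumed level by level by DistroConfigNode, BionicCompilerConfigNode,
# BionicCompilerVersionConfigNode, PyVerConfigNode and ExperimentalFeatureConfigNode.
EXAMPLE_CONFIG_TREE_DATA = [
    ("bionic", [                      # DistroConfigNode
        ("gcc", [                     # BionicCompilerConfigNode
            ("9", [                   # BionicCompilerVersionConfigNode
                ("3.7", [             # PyVerConfigNode
                    ("important", [   # ExperimentalFeatureConfigNode
                        (True, []),   # ImportantConfigNode (leaf: get_children() == [])
                    ]),
                ]),
            ]),
        ]),
    ]),
]

def get_major_pyver(dotted_version):
    # Copied from the hunk: "3.7" -> "py3" (PyVerConfigNode special-cases "3.9").
    parts = dotted_version.split(".")
    return "py" + parts[0]

assert get_major_pyver("3.7") == "py3"
```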
| @ -1,380 +0,0 @@ | |||||||
| from collections import OrderedDict |  | ||||||
| from dataclasses import dataclass, field |  | ||||||
| from typing import List, Optional |  | ||||||
|  |  | ||||||
| import cimodel.data.dimensions as dimensions |  | ||||||
| import cimodel.lib.conf_tree as conf_tree |  | ||||||
| import cimodel.lib.miniutils as miniutils |  | ||||||
| from cimodel.data.pytorch_build_data import CONFIG_TREE_DATA, TopLevelNode |  | ||||||
| from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN |  | ||||||
| from cimodel.data.simple.util.docker_constants import gen_docker_image |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @dataclass |  | ||||||
| class Conf: |  | ||||||
|     distro: str |  | ||||||
|     parms: List[str] |  | ||||||
|     parms_list_ignored_for_docker_image: Optional[List[str]] = None |  | ||||||
|     pyver: Optional[str] = None |  | ||||||
|     cuda_version: Optional[str] = None |  | ||||||
|     rocm_version: Optional[str] = None |  | ||||||
|     # TODO expand this to cover all the USE_* that we want to test for |  | ||||||
|     #  tensorrt, leveldb, lmdb, redis, opencv, mkldnn, ideep, etc. |  | ||||||
|     # (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453608) |  | ||||||
|     is_xla: bool = False |  | ||||||
|     is_vulkan: bool = False |  | ||||||
|     is_pure_torch: bool = False |  | ||||||
|     restrict_phases: Optional[List[str]] = None |  | ||||||
|     gpu_resource: Optional[str] = None |  | ||||||
|     dependent_tests: List = field(default_factory=list) |  | ||||||
|     parent_build: Optional["Conf"] = None |  | ||||||
|     is_libtorch: bool = False |  | ||||||
|     is_important: bool = False |  | ||||||
|     parallel_backend: Optional[str] = None |  | ||||||
|     build_only: bool = False |  | ||||||
|  |  | ||||||
|     @staticmethod |  | ||||||
|     def is_test_phase(phase): |  | ||||||
|         return "test" in phase |  | ||||||
|  |  | ||||||
|     # TODO: Eliminate the special casing for docker paths |  | ||||||
|     # In the short term, we *will* need to support special casing as docker images are merged for caffe2 and pytorch |  | ||||||
|     def get_parms(self, for_docker): |  | ||||||
|         leading = [] |  | ||||||
|         # We just don't run non-important jobs on pull requests; |  | ||||||
|         # previously we also named them in a way to make it obvious |  | ||||||
|         # if self.is_important and not for_docker: |  | ||||||
|         #    leading.append("AAA") |  | ||||||
|         leading.append("pytorch") |  | ||||||
|         if self.is_xla and not for_docker: |  | ||||||
|             leading.append("xla") |  | ||||||
|         if self.is_vulkan and not for_docker: |  | ||||||
|             leading.append("vulkan") |  | ||||||
|         if self.is_libtorch and not for_docker: |  | ||||||
|             leading.append("libtorch") |  | ||||||
|         if self.is_pure_torch and not for_docker: |  | ||||||
|             leading.append("pure_torch") |  | ||||||
|         if self.parallel_backend is not None and not for_docker: |  | ||||||
|             leading.append(self.parallel_backend) |  | ||||||
|  |  | ||||||
|         cuda_parms = [] |  | ||||||
|         if self.cuda_version: |  | ||||||
|             cudnn = "cudnn8" if self.cuda_version.startswith("11.") else "cudnn7" |  | ||||||
|             cuda_parms.extend(["cuda" + self.cuda_version, cudnn]) |  | ||||||
|         if self.rocm_version: |  | ||||||
|             cuda_parms.extend([f"rocm{self.rocm_version}"]) |  | ||||||
|         result = leading + ["linux", self.distro] + cuda_parms + self.parms |  | ||||||
|         if not for_docker and self.parms_list_ignored_for_docker_image is not None: |  | ||||||
|             result = result + self.parms_list_ignored_for_docker_image |  | ||||||
|         return result |  | ||||||
|  |  | ||||||
|     def gen_docker_image_path(self): |  | ||||||
|         parms_source = self.parent_build or self |  | ||||||
|         base_build_env_name = "-".join(parms_source.get_parms(True)) |  | ||||||
|         image_name, _ = gen_docker_image(base_build_env_name) |  | ||||||
|         return miniutils.quote(image_name) |  | ||||||
|  |  | ||||||
|     def gen_docker_image_requires(self): |  | ||||||
|         parms_source = self.parent_build or self |  | ||||||
|         base_build_env_name = "-".join(parms_source.get_parms(True)) |  | ||||||
|         _, requires = gen_docker_image(base_build_env_name) |  | ||||||
|         return miniutils.quote(requires) |  | ||||||
|  |  | ||||||
|     def get_build_job_name_pieces(self, build_or_test): |  | ||||||
|         return self.get_parms(False) + [build_or_test] |  | ||||||
|  |  | ||||||
|     def gen_build_name(self, build_or_test): |  | ||||||
|         return ( |  | ||||||
|             ("_".join(map(str, self.get_build_job_name_pieces(build_or_test)))) |  | ||||||
|             .replace(".", "_") |  | ||||||
|             .replace("-", "_") |  | ||||||
|         ) |  | ||||||
|  |  | ||||||
|     def get_dependents(self): |  | ||||||
|         return self.dependent_tests or [] |  | ||||||
|  |  | ||||||
|     def gen_workflow_params(self, phase): |  | ||||||
|         parameters = OrderedDict() |  | ||||||
|         build_job_name_pieces = self.get_build_job_name_pieces(phase) |  | ||||||
|  |  | ||||||
|         build_env_name = "-".join(map(str, build_job_name_pieces)) |  | ||||||
|         parameters["build_environment"] = miniutils.quote(build_env_name) |  | ||||||
|         parameters["docker_image"] = self.gen_docker_image_path() |  | ||||||
|         if Conf.is_test_phase(phase) and self.gpu_resource: |  | ||||||
|             parameters["use_cuda_docker_runtime"] = miniutils.quote("1") |  | ||||||
|         if Conf.is_test_phase(phase): |  | ||||||
|             resource_class = "large" |  | ||||||
|             if self.gpu_resource: |  | ||||||
|                 resource_class = "gpu." + self.gpu_resource |  | ||||||
|             if self.rocm_version is not None: |  | ||||||
|                 resource_class = "pytorch/amd-gpu" |  | ||||||
|             parameters["resource_class"] = resource_class |  | ||||||
|         if phase == "build" and self.rocm_version is not None: |  | ||||||
|             parameters["resource_class"] = "xlarge" |  | ||||||
|         if hasattr(self, 'filters'): |  | ||||||
|             parameters['filters'] = self.filters |  | ||||||
|         if self.build_only: |  | ||||||
|             parameters['build_only'] = miniutils.quote(str(int(True))) |  | ||||||
|         return parameters |  | ||||||
|  |  | ||||||
|     def gen_workflow_job(self, phase): |  | ||||||
|         job_def = OrderedDict() |  | ||||||
|         job_def["name"] = self.gen_build_name(phase) |  | ||||||
|  |  | ||||||
|         if Conf.is_test_phase(phase): |  | ||||||
|  |  | ||||||
|             # TODO When merging the caffe2 and pytorch jobs, it might be convenient for a while to make a |  | ||||||
|             #  caffe2 test job dependent on a pytorch build job. This way we could quickly dedup the repeated |  | ||||||
|             #  build of pytorch in the caffe2 build job, and just run the caffe2 tests off of a completed |  | ||||||
|             #  pytorch build job (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259452641) |  | ||||||
|  |  | ||||||
|             dependency_build = self.parent_build or self |  | ||||||
|             job_def["requires"] = [dependency_build.gen_build_name("build")] |  | ||||||
|             job_name = "pytorch_linux_test" |  | ||||||
|         else: |  | ||||||
|             job_name = "pytorch_linux_build" |  | ||||||
|             job_def["requires"] = [self.gen_docker_image_requires()] |  | ||||||
|  |  | ||||||
|         if not self.is_important: |  | ||||||
|             job_def["filters"] = gen_filter_dict() |  | ||||||
|         job_def.update(self.gen_workflow_params(phase)) |  | ||||||
|  |  | ||||||
|         return {job_name: job_def} |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # TODO This is a hack to special case some configs just for the workflow list |  | ||||||
| class HiddenConf(object): |  | ||||||
|     def __init__(self, name, parent_build=None, filters=None): |  | ||||||
|         self.name = name |  | ||||||
|         self.parent_build = parent_build |  | ||||||
|         self.filters = filters |  | ||||||
|  |  | ||||||
|     def gen_workflow_job(self, phase): |  | ||||||
|         return { |  | ||||||
|             self.gen_build_name(phase): { |  | ||||||
|                 "requires": [self.parent_build.gen_build_name("build")], |  | ||||||
|                 "filters": self.filters, |  | ||||||
|             } |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|     def gen_build_name(self, _): |  | ||||||
|         return self.name |  | ||||||
|  |  | ||||||
| class DocPushConf(object): |  | ||||||
|     def __init__(self, name, parent_build=None, branch="master"): |  | ||||||
|         self.name = name |  | ||||||
|         self.parent_build = parent_build |  | ||||||
|         self.branch = branch |  | ||||||
|  |  | ||||||
|     def gen_workflow_job(self, phase): |  | ||||||
|         return { |  | ||||||
|             "pytorch_doc_push": { |  | ||||||
|                 "name": self.name, |  | ||||||
|                 "branch": self.branch, |  | ||||||
|                 "requires": [self.parent_build], |  | ||||||
|                 "context": "org-member", |  | ||||||
|                 "filters": gen_filter_dict(branches_list=["nightly"], |  | ||||||
|                                            tags_list=RC_PATTERN) |  | ||||||
|             } |  | ||||||
|         } |  | ||||||
|  |  | ||||||
| def gen_docs_configs(xenial_parent_config): |  | ||||||
|     configs = [] |  | ||||||
|  |  | ||||||
|     configs.append( |  | ||||||
|         HiddenConf( |  | ||||||
|             "pytorch_python_doc_build", |  | ||||||
|             parent_build=xenial_parent_config, |  | ||||||
|             filters=gen_filter_dict(branches_list=["master", "main", "nightly"], |  | ||||||
|                                     tags_list=RC_PATTERN), |  | ||||||
|         ) |  | ||||||
|     ) |  | ||||||
|     configs.append( |  | ||||||
|         DocPushConf( |  | ||||||
|             "pytorch_python_doc_push", |  | ||||||
|             parent_build="pytorch_python_doc_build", |  | ||||||
|             branch="site", |  | ||||||
|         ) |  | ||||||
|     ) |  | ||||||
|  |  | ||||||
|     configs.append( |  | ||||||
|         HiddenConf( |  | ||||||
|             "pytorch_cpp_doc_build", |  | ||||||
|             parent_build=xenial_parent_config, |  | ||||||
|             filters=gen_filter_dict(branches_list=["master", "main", "nightly"], |  | ||||||
|                                     tags_list=RC_PATTERN), |  | ||||||
|         ) |  | ||||||
|     ) |  | ||||||
|     configs.append( |  | ||||||
|         DocPushConf( |  | ||||||
|             "pytorch_cpp_doc_push", |  | ||||||
|             parent_build="pytorch_cpp_doc_build", |  | ||||||
|             branch="master", |  | ||||||
|         ) |  | ||||||
|     ) |  | ||||||
|     return configs |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_root(): |  | ||||||
|     return TopLevelNode("PyTorch Builds", CONFIG_TREE_DATA) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def gen_tree(): |  | ||||||
|     root = get_root() |  | ||||||
|     configs_list = conf_tree.dfs(root) |  | ||||||
|     return configs_list |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def instantiate_configs(only_slow_gradcheck): |  | ||||||
|  |  | ||||||
|     config_list = [] |  | ||||||
|  |  | ||||||
|     root = get_root() |  | ||||||
|     found_configs = conf_tree.dfs(root) |  | ||||||
|     for fc in found_configs: |  | ||||||
|  |  | ||||||
|         restrict_phases = None |  | ||||||
|         distro_name = fc.find_prop("distro_name") |  | ||||||
|         compiler_name = fc.find_prop("compiler_name") |  | ||||||
|         compiler_version = fc.find_prop("compiler_version") |  | ||||||
|         is_xla = fc.find_prop("is_xla") or False |  | ||||||
|         is_asan = fc.find_prop("is_asan") or False |  | ||||||
|         is_crossref = fc.find_prop("is_crossref") or False |  | ||||||
|         is_onnx = fc.find_prop("is_onnx") or False |  | ||||||
|         is_pure_torch = fc.find_prop("is_pure_torch") or False |  | ||||||
|         is_vulkan = fc.find_prop("is_vulkan") or False |  | ||||||
|         is_slow_gradcheck = fc.find_prop("is_slow_gradcheck") or False |  | ||||||
|         parms_list_ignored_for_docker_image = [] |  | ||||||
|  |  | ||||||
|         if only_slow_gradcheck ^ is_slow_gradcheck: |  | ||||||
|             continue |  | ||||||
|  |  | ||||||
|         python_version = None |  | ||||||
|         if compiler_name == "cuda" or compiler_name == "android": |  | ||||||
|             python_version = fc.find_prop("pyver") |  | ||||||
|             parms_list = [fc.find_prop("abbreviated_pyver")] |  | ||||||
|         else: |  | ||||||
|             parms_list = ["py" + fc.find_prop("pyver")] |  | ||||||
|  |  | ||||||
|         cuda_version = None |  | ||||||
|         rocm_version = None |  | ||||||
|         if compiler_name == "cuda": |  | ||||||
|             cuda_version = fc.find_prop("compiler_version") |  | ||||||
|  |  | ||||||
|         elif compiler_name == "rocm": |  | ||||||
|             rocm_version = fc.find_prop("compiler_version") |  | ||||||
|             restrict_phases = ["build", "test1", "test2", "caffe2_test"] |  | ||||||
|  |  | ||||||
|         elif compiler_name == "android": |  | ||||||
|             android_ndk_version = fc.find_prop("compiler_version") |  | ||||||
|             # TODO: do we need clang to compile host binaries like protoc? |  | ||||||
|             parms_list.append("clang5") |  | ||||||
|             parms_list.append("android-ndk-" + android_ndk_version) |  | ||||||
|             android_abi = fc.find_prop("android_abi") |  | ||||||
|             parms_list_ignored_for_docker_image.append(android_abi) |  | ||||||
|             restrict_phases = ["build"] |  | ||||||
|  |  | ||||||
|         elif compiler_name: |  | ||||||
|             gcc_version = compiler_name + (fc.find_prop("compiler_version") or "") |  | ||||||
|             parms_list.append(gcc_version) |  | ||||||
|  |  | ||||||
|         if is_asan: |  | ||||||
|             parms_list.append("asan") |  | ||||||
|             python_version = fc.find_prop("pyver") |  | ||||||
|             parms_list[0] = fc.find_prop("abbreviated_pyver") |  | ||||||
|  |  | ||||||
|         if is_crossref: |  | ||||||
|             parms_list_ignored_for_docker_image.append("crossref") |  | ||||||
|  |  | ||||||
|         if is_onnx: |  | ||||||
|             parms_list.append("onnx") |  | ||||||
|             python_version = fc.find_prop("pyver") |  | ||||||
|             parms_list[0] = fc.find_prop("abbreviated_pyver") |  | ||||||
|             restrict_phases = ["build", "ort_test1", "ort_test2"] |  | ||||||
|  |  | ||||||
|         if cuda_version: |  | ||||||
|             cuda_gcc_version = fc.find_prop("cuda_gcc_override") or "gcc7" |  | ||||||
|             parms_list.append(cuda_gcc_version) |  | ||||||
|  |  | ||||||
|         is_libtorch = fc.find_prop("is_libtorch") or False |  | ||||||
|         is_important = fc.find_prop("is_important") or False |  | ||||||
|         parallel_backend = fc.find_prop("parallel_backend") or None |  | ||||||
|         build_only = fc.find_prop("build_only") or False |  | ||||||
|         shard_test = fc.find_prop("shard_test") or False |  | ||||||
|         # TODO: fix pure_torch python test packaging issue. |  | ||||||
|         if shard_test: |  | ||||||
|             restrict_phases = ["build"] if restrict_phases is None else restrict_phases |  | ||||||
|             restrict_phases.extend(["test1", "test2"]) |  | ||||||
|         if build_only or is_pure_torch: |  | ||||||
|             restrict_phases = ["build"] |  | ||||||
|  |  | ||||||
|         if is_slow_gradcheck: |  | ||||||
|             parms_list_ignored_for_docker_image.append("old") |  | ||||||
|             parms_list_ignored_for_docker_image.append("gradcheck") |  | ||||||
|  |  | ||||||
|         gpu_resource = None |  | ||||||
|         if cuda_version and cuda_version != "10": |  | ||||||
|             gpu_resource = "medium" |  | ||||||
|  |  | ||||||
|         c = Conf( |  | ||||||
|             distro_name, |  | ||||||
|             parms_list, |  | ||||||
|             parms_list_ignored_for_docker_image, |  | ||||||
|             python_version, |  | ||||||
|             cuda_version, |  | ||||||
|             rocm_version, |  | ||||||
|             is_xla, |  | ||||||
|             is_vulkan, |  | ||||||
|             is_pure_torch, |  | ||||||
|             restrict_phases, |  | ||||||
|             gpu_resource, |  | ||||||
|             is_libtorch=is_libtorch, |  | ||||||
|             is_important=is_important, |  | ||||||
|             parallel_backend=parallel_backend, |  | ||||||
|             build_only=build_only, |  | ||||||
|         ) |  | ||||||
|  |  | ||||||
|         # run docs builds on "pytorch-linux-xenial-py3.7-gcc5.4". Docs builds |  | ||||||
|         # should run on a CPU-only build that runs on all PRs. |  | ||||||
|         # XXX should this be updated to a more modern build? |  | ||||||
|         if ( |  | ||||||
|             distro_name == "xenial" |  | ||||||
|             and fc.find_prop("pyver") == "3.7" |  | ||||||
|             and cuda_version is None |  | ||||||
|             and parallel_backend is None |  | ||||||
|             and not is_vulkan |  | ||||||
|             and not is_pure_torch |  | ||||||
|             and compiler_name == "gcc" |  | ||||||
|             and fc.find_prop("compiler_version") == "5.4" |  | ||||||
|         ): |  | ||||||
|             c.filters = gen_filter_dict(branches_list=r"/.*/", |  | ||||||
|                                         tags_list=RC_PATTERN) |  | ||||||
|             c.dependent_tests = gen_docs_configs(c) |  | ||||||
|  |  | ||||||
|         config_list.append(c) |  | ||||||
|  |  | ||||||
|     return config_list |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_workflow_jobs(only_slow_gradcheck=False): |  | ||||||
|  |  | ||||||
|     config_list = instantiate_configs(only_slow_gradcheck) |  | ||||||
|  |  | ||||||
|     x = [] |  | ||||||
|     for conf_options in config_list: |  | ||||||
|  |  | ||||||
|         phases = conf_options.restrict_phases or dimensions.PHASES |  | ||||||
|  |  | ||||||
|         for phase in phases: |  | ||||||
|  |  | ||||||
|             # TODO why does this not have a test? |  | ||||||
|             if Conf.is_test_phase(phase) and conf_options.cuda_version == "10": |  | ||||||
|                 continue |  | ||||||
|  |  | ||||||
|             x.append(conf_options.gen_workflow_job(phase)) |  | ||||||
|  |  | ||||||
|         # TODO convert to recursion |  | ||||||
|         for conf in conf_options.get_dependents(): |  | ||||||
|             x.append(conf.gen_workflow_job("test")) |  | ||||||
|  |  | ||||||
|     return x |  | ||||||
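The build-environment and job names produced by `Conf.get_parms`/`gen_build_name` can be sketched standalone (re-implemented without the cimodel imports; the xenial/py3.7/gcc5.4 values mirror the docs-build special case in `instantiate_configs`, everything else is illustrative):

```python
def get_parms(distro, parms, cuda_version=None, rocm_version=None,
              ignored_for_docker=None, for_docker=False):
    # Simplified mirror of Conf.get_parms: "pytorch" prefix, OS/distro,
    # CUDA+cuDNN or ROCm tags, then the remaining parameters.
    leading = ["pytorch"]
    cuda_parms = []
    if cuda_version:
        cudnn = "cudnn8" if cuda_version.startswith("11.") else "cudnn7"
        cuda_parms.extend(["cuda" + cuda_version, cudnn])
    if rocm_version:
        cuda_parms.append(f"rocm{rocm_version}")
    result = leading + ["linux", distro] + cuda_parms + parms
    if not for_docker and ignored_for_docker:
        result = result + ignored_for_docker
    return result

def gen_build_name(parms, phase):
    # Job names join with "_" and normalize dots and dashes, as in the dataclass.
    return "_".join(parms + [phase]).replace(".", "_").replace("-", "_")

parms = get_parms("xenial", ["py3.7", "gcc5.4"])
print("-".join(parms))                 # pytorch-linux-xenial-py3.7-gcc5.4 (docker-style name)
print(gen_build_name(parms, "build"))  # pytorch_linux_xenial_py3_7_gcc5_4_build
```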
| @ -1,28 +0,0 @@ | |||||||
| from collections import OrderedDict |  | ||||||
|  |  | ||||||
| from cimodel.data.simple.util.branch_filters import gen_filter_dict |  | ||||||
| from cimodel.lib.miniutils import quote |  | ||||||
|  |  | ||||||
|  |  | ||||||
| CHANNELS_TO_PRUNE = ["pytorch-nightly", "pytorch-test"] |  | ||||||
| PACKAGES_TO_PRUNE = "pytorch torchvision torchaudio torchtext ignite torchcsprng" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def gen_workflow_job(channel: str): |  | ||||||
|     return OrderedDict( |  | ||||||
|         { |  | ||||||
|             "anaconda_prune": OrderedDict( |  | ||||||
|                 { |  | ||||||
|                     "name": f"anaconda-prune-{channel}", |  | ||||||
|                     "context": quote("org-member"), |  | ||||||
|                     "packages": quote(PACKAGES_TO_PRUNE), |  | ||||||
|                     "channel": channel, |  | ||||||
|                     "filters": gen_filter_dict(branches_list=["postnightly"]), |  | ||||||
|                 } |  | ||||||
|             ) |  | ||||||
|         } |  | ||||||
|     ) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_workflow_jobs(): |  | ||||||
|     return [gen_workflow_job(channel) for channel in CHANNELS_TO_PRUNE] |  | ||||||
| @ -1,39 +0,0 @@ | |||||||
| from collections import OrderedDict |  | ||||||
|  |  | ||||||
| from cimodel.lib.miniutils import quote |  | ||||||
| from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # NOTE: All hardcoded docker image builds have been migrated to GHA |  | ||||||
| IMAGE_NAMES = [ |  | ||||||
| ] |  | ||||||
|  |  | ||||||
| # This entry should be an element from the list above |  | ||||||
| # This should contain the image matching the "slow_gradcheck" entry in |  | ||||||
| # pytorch_build_data.py |  | ||||||
| SLOW_GRADCHECK_IMAGE_NAME = "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7" |  | ||||||
|  |  | ||||||
| def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False): |  | ||||||
|     """Generates a list of docker image build definitions""" |  | ||||||
|     ret = [] |  | ||||||
|     for image_name in images: |  | ||||||
|         if image_name.startswith('docker-'):
|             # str.lstrip() strips a set of characters, not a prefix, so slice the prefix off instead
|             image_name = image_name[len('docker-'):]
|         if only_slow_gradcheck and image_name != SLOW_GRADCHECK_IMAGE_NAME:
|             continue |  | ||||||
|  |  | ||||||
|         parameters = OrderedDict({ |  | ||||||
|             "name": quote(f"docker-{image_name}"), |  | ||||||
|             "image_name": quote(image_name), |  | ||||||
|         }) |  | ||||||
|         if image_name == "pytorch-linux-xenial-py3.7-gcc5.4": |  | ||||||
|             # pushing documentation on tags requires CircleCI to also |  | ||||||
|             # build all the dependencies on tags, including this docker image |  | ||||||
|             parameters['filters'] = gen_filter_dict(branches_list=r"/.*/", |  | ||||||
|                                                     tags_list=RC_PATTERN) |  | ||||||
|         ret.append(OrderedDict( |  | ||||||
|             { |  | ||||||
|                 "docker_build_job": parameters |  | ||||||
|             } |  | ||||||
|         )) |  | ||||||
|     return ret |  | ||||||
| @ -1,88 +0,0 @@ | |||||||
| from cimodel.data.simple.util.versions import MultiPartVersion |  | ||||||
| import cimodel.lib.miniutils as miniutils |  | ||||||
|  |  | ||||||
| XCODE_VERSION = MultiPartVersion([12, 5, 1]) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class ArchVariant: |  | ||||||
|     def __init__(self, name, custom_build_name=""): |  | ||||||
|         self.name = name |  | ||||||
|         self.custom_build_name = custom_build_name |  | ||||||
|  |  | ||||||
|     def render(self): |  | ||||||
|         extra_parts = [self.custom_build_name] if len(self.custom_build_name) > 0 else [] |  | ||||||
|         return "_".join([self.name] + extra_parts) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_platform(arch_variant_name): |  | ||||||
|     return "SIMULATOR" if arch_variant_name == "x86_64" else "OS" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class IOSJob: |  | ||||||
|     def __init__(self, xcode_version, arch_variant, is_org_member_context=True, extra_props=None): |  | ||||||
|         self.xcode_version = xcode_version |  | ||||||
|         self.arch_variant = arch_variant |  | ||||||
|         self.is_org_member_context = is_org_member_context |  | ||||||
|         self.extra_props = extra_props |  | ||||||
|  |  | ||||||
|     def gen_name_parts(self, with_version_dots): |  | ||||||
|  |  | ||||||
|         version_parts = self.xcode_version.render_dots_or_parts(with_version_dots) |  | ||||||
|         build_variant_suffix = "_".join([self.arch_variant.render(), "build"]) |  | ||||||
|  |  | ||||||
|         return [ |  | ||||||
|             "pytorch", |  | ||||||
|             "ios", |  | ||||||
|         ] + version_parts + [ |  | ||||||
|             build_variant_suffix, |  | ||||||
|         ] |  | ||||||
|  |  | ||||||
|     def gen_job_name(self): |  | ||||||
|         return "_".join(self.gen_name_parts(False)) |  | ||||||
|  |  | ||||||
|     def gen_tree(self): |  | ||||||
|  |  | ||||||
|         platform_name = get_platform(self.arch_variant.name) |  | ||||||
|  |  | ||||||
|         props_dict = { |  | ||||||
|             "build_environment": "-".join(self.gen_name_parts(True)), |  | ||||||
|             "ios_arch": self.arch_variant.name, |  | ||||||
|             "ios_platform": platform_name, |  | ||||||
|             "name": self.gen_job_name(), |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|         if self.is_org_member_context: |  | ||||||
|             props_dict["context"] = "org-member" |  | ||||||
|  |  | ||||||
|         if self.extra_props: |  | ||||||
|             props_dict.update(self.extra_props) |  | ||||||
|  |  | ||||||
|         return [{"pytorch_ios_build": props_dict}] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| WORKFLOW_DATA = [ |  | ||||||
|     IOSJob(XCODE_VERSION, ArchVariant("x86_64"), is_org_member_context=False, extra_props={ |  | ||||||
|         "lite_interpreter": miniutils.quote(str(int(True)))}), |  | ||||||
|     IOSJob(XCODE_VERSION, ArchVariant("x86_64", "full_jit"), is_org_member_context=False, extra_props={ |  | ||||||
|         "lite_interpreter": miniutils.quote(str(int(False)))}), |  | ||||||
|     IOSJob(XCODE_VERSION, ArchVariant("arm64"), extra_props={ |  | ||||||
|         "lite_interpreter": miniutils.quote(str(int(True)))}), |  | ||||||
|     IOSJob(XCODE_VERSION, ArchVariant("arm64", "metal"), extra_props={ |  | ||||||
|         "use_metal": miniutils.quote(str(int(True))), |  | ||||||
|         "lite_interpreter": miniutils.quote(str(int(True)))}), |  | ||||||
|     IOSJob(XCODE_VERSION, ArchVariant("arm64", "full_jit"), extra_props={ |  | ||||||
|         "lite_interpreter": miniutils.quote(str(int(False)))}), |  | ||||||
|     IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom"), extra_props={ |  | ||||||
|         "op_list": "mobilenetv2.yaml", |  | ||||||
|         "lite_interpreter": miniutils.quote(str(int(True)))}), |  | ||||||
|     IOSJob(XCODE_VERSION, ArchVariant("x86_64", "coreml"), is_org_member_context=False, extra_props={ |  | ||||||
|         "use_coreml": miniutils.quote(str(int(True))), |  | ||||||
|         "lite_interpreter": miniutils.quote(str(int(True)))}), |  | ||||||
|     IOSJob(XCODE_VERSION, ArchVariant("arm64", "coreml"), extra_props={ |  | ||||||
|         "use_coreml": miniutils.quote(str(int(True))), |  | ||||||
|         "lite_interpreter": miniutils.quote(str(int(True)))}), |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_workflow_jobs(): |  | ||||||
|     return [item.gen_tree() for item in WORKFLOW_DATA] |  | ||||||
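|
| # Illustrative sketch, not part of the original module: how the job names in
| # WORKFLOW_DATA above are derived from the Xcode version and arch variant.
| if __name__ == "__main__":
|     job = IOSJob(XCODE_VERSION, ArchVariant("x86_64"), is_org_member_context=False)
|     print(job.gen_job_name())  # pytorch_ios_12_5_1_x86_64_build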
| @ -1,52 +0,0 @@ | |||||||
| class MacOsJob: |  | ||||||
|     def __init__(self, os_version, is_build=False, is_test=False, extra_props=tuple()): |  | ||||||
|         # extra_props is a tuple because mutable data structures are not
|         # recommended as argument defaults.
|         self.os_version = os_version |  | ||||||
|         self.is_build = is_build |  | ||||||
|         self.is_test = is_test |  | ||||||
|         self.extra_props = dict(extra_props) |  | ||||||
|  |  | ||||||
|     def gen_tree(self): |  | ||||||
|         non_phase_parts = ["pytorch", "macos", self.os_version, "py3"] |  | ||||||
|  |  | ||||||
|         extra_name_list = [name for name, exist in self.extra_props.items() if exist] |  | ||||||
|         full_job_name_list = non_phase_parts + extra_name_list + [ |  | ||||||
|             'build' if self.is_build else None, |  | ||||||
|             'test' if self.is_test else None, |  | ||||||
|         ] |  | ||||||
|  |  | ||||||
|         full_job_name = "_".join(list(filter(None, full_job_name_list))) |  | ||||||
|  |  | ||||||
|         test_build_dependency = "_".join(non_phase_parts + ["build"]) |  | ||||||
|         extra_dependencies = [test_build_dependency] if self.is_test else [] |  | ||||||
|         job_dependencies = extra_dependencies |  | ||||||
|  |  | ||||||
|         # Yes, we name the job after itself; it needs a non-empty value here
|         # for the YAML output to work.
|         props_dict = {"requires": job_dependencies, "name": full_job_name} |  | ||||||
|  |  | ||||||
|         return [{full_job_name: props_dict}] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| WORKFLOW_DATA = [ |  | ||||||
|     MacOsJob("10_15", is_build=True), |  | ||||||
|     MacOsJob("10_13", is_build=True), |  | ||||||
|     MacOsJob( |  | ||||||
|         "10_13", |  | ||||||
|         is_build=False, |  | ||||||
|         is_test=True, |  | ||||||
|     ), |  | ||||||
|     MacOsJob( |  | ||||||
|         "10_13", |  | ||||||
|         is_build=True, |  | ||||||
|         is_test=True, |  | ||||||
|         extra_props=tuple({ |  | ||||||
|             "lite_interpreter": True |  | ||||||
|         }.items()), |  | ||||||
|     ) |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_workflow_jobs(): |  | ||||||
|     return [item.gen_tree() for item in WORKFLOW_DATA] |  | ||||||
| @ -1,53 +0,0 @@ | |||||||
| """ |  | ||||||
| PyTorch Mobile PR builds (use linux host toolchain + mobile build options) |  | ||||||
| """ |  | ||||||
|  |  | ||||||
| import cimodel.lib.miniutils as miniutils |  | ||||||
| import cimodel.data.simple.util.branch_filters |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class MobileJob: |  | ||||||
|     def __init__( |  | ||||||
|             self, |  | ||||||
|             docker_image, |  | ||||||
|             docker_requires, |  | ||||||
|             variant_parts, |  | ||||||
|             is_master_only=False): |  | ||||||
|         self.docker_image = docker_image |  | ||||||
|         self.docker_requires = docker_requires |  | ||||||
|         self.variant_parts = variant_parts |  | ||||||
|         self.is_master_only = is_master_only |  | ||||||
|  |  | ||||||
|     def gen_tree(self): |  | ||||||
|         non_phase_parts = [ |  | ||||||
|             "pytorch", |  | ||||||
|             "linux", |  | ||||||
|             "xenial", |  | ||||||
|             "py3", |  | ||||||
|             "clang5", |  | ||||||
|             "mobile", |  | ||||||
|         ] + self.variant_parts |  | ||||||
|  |  | ||||||
|         full_job_name = "_".join(non_phase_parts) |  | ||||||
|         build_env_name = "-".join(non_phase_parts) |  | ||||||
|  |  | ||||||
|         props_dict = { |  | ||||||
|             "build_environment": build_env_name, |  | ||||||
|             "build_only": miniutils.quote(str(int(True))), |  | ||||||
|             "docker_image": self.docker_image, |  | ||||||
|             "requires": self.docker_requires, |  | ||||||
|             "name": full_job_name, |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|         if self.is_master_only: |  | ||||||
|             props_dict["filters"] = cimodel.data.simple.util.branch_filters.gen_filter_dict() |  | ||||||
|  |  | ||||||
|         return [{"pytorch_linux_build": props_dict}] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| WORKFLOW_DATA = [ |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_workflow_jobs(): |  | ||||||
|     return [item.gen_tree() for item in WORKFLOW_DATA] |  | ||||||
| @ -1,85 +0,0 @@ | |||||||
| import cimodel.data.simple.ios_definitions as ios_definitions |  | ||||||
| import cimodel.lib.miniutils as miniutils |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class IOSNightlyJob: |  | ||||||
|     def __init__(self, |  | ||||||
|                  variant, |  | ||||||
|                  is_full_jit=False, |  | ||||||
|                  is_upload=False): |  | ||||||
|  |  | ||||||
|         self.variant = variant |  | ||||||
|         self.is_full_jit = is_full_jit |  | ||||||
|         self.is_upload = is_upload |  | ||||||
|  |  | ||||||
|     def get_phase_name(self): |  | ||||||
|         return "upload" if self.is_upload else "build" |  | ||||||
|  |  | ||||||
|     def get_common_name_pieces(self, with_version_dots): |  | ||||||
|  |  | ||||||
|         extra_name_suffix = [self.get_phase_name()] if self.is_upload else [] |  | ||||||
|  |  | ||||||
|         extra_name = ["full_jit"] if self.is_full_jit else [] |  | ||||||
|  |  | ||||||
|         common_name_pieces = [ |  | ||||||
|             "ios", |  | ||||||
|         ] + extra_name + [ |  | ||||||
|         ] + ios_definitions.XCODE_VERSION.render_dots_or_parts(with_version_dots) + [ |  | ||||||
|             "nightly", |  | ||||||
|             self.variant, |  | ||||||
|             "build", |  | ||||||
|         ] + extra_name_suffix |  | ||||||
|  |  | ||||||
|         return common_name_pieces |  | ||||||
|  |  | ||||||
|     def gen_job_name(self): |  | ||||||
|         return "_".join(["pytorch"] + self.get_common_name_pieces(False)) |  | ||||||
|  |  | ||||||
|     def gen_tree(self): |  | ||||||
|         build_configs = BUILD_CONFIGS_FULL_JIT if self.is_full_jit else BUILD_CONFIGS |  | ||||||
|         extra_requires = [x.gen_job_name() for x in build_configs] if self.is_upload else [] |  | ||||||
|  |  | ||||||
|         props_dict = { |  | ||||||
|             "build_environment": "-".join(["libtorch"] + self.get_common_name_pieces(True)), |  | ||||||
|             "requires": extra_requires, |  | ||||||
|             "context": "org-member", |  | ||||||
|             "filters": {"branches": {"only": "nightly"}}, |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|         if not self.is_upload: |  | ||||||
|             props_dict["ios_arch"] = self.variant |  | ||||||
|             props_dict["ios_platform"] = ios_definitions.get_platform(self.variant) |  | ||||||
|             props_dict["name"] = self.gen_job_name() |  | ||||||
|             props_dict["use_metal"] = miniutils.quote(str(int(True))) |  | ||||||
|             props_dict["use_coreml"] = miniutils.quote(str(int(True))) |  | ||||||
|  |  | ||||||
|         if self.is_full_jit: |  | ||||||
|             props_dict["lite_interpreter"] = miniutils.quote(str(int(False))) |  | ||||||
|  |  | ||||||
|         template_name = "_".join([ |  | ||||||
|             "binary", |  | ||||||
|             "ios", |  | ||||||
|             self.get_phase_name(), |  | ||||||
|         ]) |  | ||||||
|  |  | ||||||
|         return [{template_name: props_dict}] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| BUILD_CONFIGS = [ |  | ||||||
|     IOSNightlyJob("x86_64"), |  | ||||||
|     IOSNightlyJob("arm64"), |  | ||||||
| ] |  | ||||||
|  |  | ||||||
| BUILD_CONFIGS_FULL_JIT = [ |  | ||||||
|     IOSNightlyJob("x86_64", is_full_jit=True), |  | ||||||
|     IOSNightlyJob("arm64", is_full_jit=True), |  | ||||||
| ] |  | ||||||
|  |  | ||||||
| WORKFLOW_DATA = BUILD_CONFIGS + BUILD_CONFIGS_FULL_JIT + [ |  | ||||||
|     IOSNightlyJob("binary", is_full_jit=False, is_upload=True), |  | ||||||
|     IOSNightlyJob("binary", is_full_jit=True, is_upload=True), |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def get_workflow_jobs(): |  | ||||||
|     return [item.gen_tree() for item in WORKFLOW_DATA] |  | ||||||
| @ -1,28 +0,0 @@ | |||||||
| NON_PR_BRANCH_LIST = [ |  | ||||||
|     "main", |  | ||||||
|     "master", |  | ||||||
|     r"/ci-all\/.*/", |  | ||||||
|     r"/release\/.*/", |  | ||||||
| ] |  | ||||||
|  |  | ||||||
| PR_BRANCH_LIST = [ |  | ||||||
|     r"/gh\/.*\/head/", |  | ||||||
|     r"/pull\/.*/", |  | ||||||
| ] |  | ||||||
|  |  | ||||||
| RC_PATTERN = r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/" |  | ||||||
|  |  | ||||||
| def gen_filter_dict( |  | ||||||
|         branches_list=NON_PR_BRANCH_LIST, |  | ||||||
|         tags_list=None |  | ||||||
| ): |  | ||||||
|     """Generates a filter dictionary for use with CircleCI's job filter""" |  | ||||||
|     filter_dict = { |  | ||||||
|         "branches": { |  | ||||||
|             "only": branches_list, |  | ||||||
|         }, |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     if tags_list is not None: |  | ||||||
|         filter_dict["tags"] = {"only": tags_list} |  | ||||||
|     return filter_dict |  | ||||||
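|
| # Illustrative sketch, not part of the original module. Callers elsewhere in
| # this config (e.g. the docs builds and the gcc5.4 docker image job) pass an
| # "all branches" regex plus RC_PATTERN so the same jobs also run on
| # release-candidate tags:
| if __name__ == "__main__":
|     docs_filters = gen_filter_dict(branches_list=r"/.*/", tags_list=RC_PATTERN)
|     print(docs_filters)
|     # {'branches': {'only': '/.*/'}, 'tags': {'only': '/v[0-9]+(\\.[0-9]+)*-rc[0-9]+/'}}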
| @ -1,33 +0,0 @@ | |||||||
| AWS_DOCKER_HOST = "308535385114.dkr.ecr.us-east-1.amazonaws.com" |  | ||||||
|  |  | ||||||
| def gen_docker_image(container_type): |  | ||||||
|     return ( |  | ||||||
|         "/".join([AWS_DOCKER_HOST, "pytorch", container_type]), |  | ||||||
|         f"docker-{container_type}", |  | ||||||
|     ) |  | ||||||
|  |  | ||||||
| def gen_docker_image_requires(image_name): |  | ||||||
|     return [f"docker-{image_name}"] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| DOCKER_IMAGE_BASIC, DOCKER_REQUIREMENT_BASE = gen_docker_image( |  | ||||||
|     "pytorch-linux-xenial-py3.7-gcc5.4" |  | ||||||
| ) |  | ||||||
|  |  | ||||||
| DOCKER_IMAGE_CUDA_10_2, DOCKER_REQUIREMENT_CUDA_10_2 = gen_docker_image( |  | ||||||
|     "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7" |  | ||||||
| ) |  | ||||||
|  |  | ||||||
| DOCKER_IMAGE_GCC7, DOCKER_REQUIREMENT_GCC7 = gen_docker_image( |  | ||||||
|     "pytorch-linux-xenial-py3.7-gcc7" |  | ||||||
| ) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def gen_mobile_docker(specifier): |  | ||||||
|     container_type = "pytorch-linux-xenial-py3-clang5-" + specifier |  | ||||||
|     return gen_docker_image(container_type) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| DOCKER_IMAGE_ASAN, DOCKER_REQUIREMENT_ASAN = gen_mobile_docker("asan") |  | ||||||
|  |  | ||||||
| DOCKER_IMAGE_NDK, DOCKER_REQUIREMENT_NDK = gen_mobile_docker("android-ndk-r19c") |  | ||||||
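|
| # Illustrative sketch, not part of the original module: the concrete values
| # produced for one of the mobile images above.
| if __name__ == "__main__":
|     image, requirement = gen_mobile_docker("asan")
|     print(image)        # 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan
|     print(requirement)  # docker-pytorch-linux-xenial-py3-clang5-asan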
| @ -1,34 +0,0 @@ | |||||||
| class MultiPartVersion: |  | ||||||
|     def __init__(self, parts, prefix=""): |  | ||||||
|         self.parts = parts |  | ||||||
|         self.prefix = prefix |  | ||||||
|  |  | ||||||
|     def prefixed_parts(self): |  | ||||||
|         """ |  | ||||||
|         Prefixes the first element of the version list
|         with the prefix string.
|         """ |  | ||||||
|         if self.parts: |  | ||||||
|             return [self.prefix + str(self.parts[0])] + [str(part) for part in self.parts[1:]] |  | ||||||
|         else: |  | ||||||
|             return [self.prefix] |  | ||||||
|  |  | ||||||
|     def render_dots(self): |  | ||||||
|         return ".".join(self.prefixed_parts()) |  | ||||||
|  |  | ||||||
|     def render_dots_or_parts(self, with_dots): |  | ||||||
|         if with_dots: |  | ||||||
|             return [self.render_dots()] |  | ||||||
|         else: |  | ||||||
|             return self.prefixed_parts() |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class CudaVersion(MultiPartVersion): |  | ||||||
|     def __init__(self, major, minor): |  | ||||||
|         self.major = major |  | ||||||
|         self.minor = minor |  | ||||||
|  |  | ||||||
|         super().__init__([self.major, self.minor], "cuda") |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return f"{self.major}.{self.minor}" |  | ||||||
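|
| # Illustrative sketch, not part of the original module: the two rendering
| # modes used when composing job names, plus what CudaVersion adds on top.
| if __name__ == "__main__":
|     xcode = MultiPartVersion([12, 5, 1])
|     print(xcode.render_dots_or_parts(True))   # ['12.5.1']
|     print(xcode.render_dots_or_parts(False))  # ['12', '5', '1']
|     cuda = CudaVersion(10, 2)
|     print(cuda.render_dots())                 # cuda10.2
|     print(cuda)                               # 10.2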
| @ -1,107 +0,0 @@ | |||||||
| from dataclasses import dataclass, field |  | ||||||
| from typing import Optional, Dict |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def X(val): |  | ||||||
|     """ |  | ||||||
|     Compact way to write a leaf node |  | ||||||
|     """ |  | ||||||
|     return val, [] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def XImportant(name): |  | ||||||
|     """Compact way to write an important (run on PRs) leaf node""" |  | ||||||
|     return (name, [("important", [X(True)])]) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @dataclass |  | ||||||
| class Ver: |  | ||||||
|     """ |  | ||||||
|     Represents a product with a version number |  | ||||||
|     """ |  | ||||||
|     name: str |  | ||||||
|     version: str = "" |  | ||||||
|  |  | ||||||
|     def __str__(self): |  | ||||||
|         return self.name + self.version |  | ||||||
|  |  | ||||||
|  |  | ||||||
| @dataclass |  | ||||||
| class ConfigNode: |  | ||||||
|     parent: Optional['ConfigNode'] |  | ||||||
|     node_name: str |  | ||||||
|     props: Dict[str, str] = field(default_factory=dict) |  | ||||||
|  |  | ||||||
|     def get_label(self): |  | ||||||
|         return self.node_name |  | ||||||
|  |  | ||||||
|     # noinspection PyMethodMayBeStatic |  | ||||||
|     def get_children(self): |  | ||||||
|         return [] |  | ||||||
|  |  | ||||||
|     def get_parents(self): |  | ||||||
|         return (self.parent.get_parents() + [self.parent.get_label()]) if self.parent else [] |  | ||||||
|  |  | ||||||
|     def get_depth(self): |  | ||||||
|         return len(self.get_parents()) |  | ||||||
|  |  | ||||||
|     def get_node_key(self): |  | ||||||
|         return "%".join(self.get_parents() + [self.get_label()]) |  | ||||||
|  |  | ||||||
|     def find_prop(self, propname, searched=None): |  | ||||||
|         """ |  | ||||||
|         Checks if its own dictionary has |  | ||||||
|         the property, otherwise asks parent node. |  | ||||||
|         """ |  | ||||||
|  |  | ||||||
|         if searched is None: |  | ||||||
|             searched = [] |  | ||||||
|  |  | ||||||
|         searched.append(self.node_name) |  | ||||||
|  |  | ||||||
|         if propname in self.props: |  | ||||||
|             return self.props[propname] |  | ||||||
|         elif self.parent: |  | ||||||
|             return self.parent.find_prop(propname, searched) |  | ||||||
|         else: |  | ||||||
|             # raise Exception('Property "%s" does not exist anywhere in the tree! Searched: %s' % (propname, searched)) |  | ||||||
|             return None |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def dfs_recurse( |  | ||||||
|         node, |  | ||||||
|         leaf_callback=lambda x: None, |  | ||||||
|         discovery_callback=lambda x, y, z: None, |  | ||||||
|         child_callback=lambda x, y: None, |  | ||||||
|         sibling_index=0, |  | ||||||
|         sibling_count=1): |  | ||||||
|  |  | ||||||
|     discovery_callback(node, sibling_index, sibling_count) |  | ||||||
|  |  | ||||||
|     node_children = node.get_children() |  | ||||||
|     if node_children: |  | ||||||
|         for i, child in enumerate(node_children): |  | ||||||
|             child_callback(node, child) |  | ||||||
|  |  | ||||||
|             dfs_recurse( |  | ||||||
|                 child, |  | ||||||
|                 leaf_callback, |  | ||||||
|                 discovery_callback, |  | ||||||
|                 child_callback, |  | ||||||
|                 i, |  | ||||||
|                 len(node_children), |  | ||||||
|             ) |  | ||||||
|     else: |  | ||||||
|         leaf_callback(node) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def dfs(toplevel_config_node): |  | ||||||
|  |  | ||||||
|     config_list = [] |  | ||||||
|  |  | ||||||
|     def leaf_callback(node): |  | ||||||
|         config_list.append(node) |  | ||||||
|  |  | ||||||
|     dfs_recurse(toplevel_config_node, leaf_callback) |  | ||||||
|  |  | ||||||
|     return config_list |  | ||||||
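|
| # Illustrative sketch, not part of the original module: a minimal two-level
| # tree built on ConfigNode. The real build-config trees (e.g. in
| # pytorch_build_data.py) subclass ConfigNode the same way and let dfs()
| # collect the leaf configurations.
| if __name__ == "__main__":
|     class _DemoNode(ConfigNode):
|         def __init__(self, parent, name, children_names=()):
|             super().__init__(parent, name)
|             self._children_names = children_names
|
|         def get_children(self):
|             return [_DemoNode(self, n) for n in self._children_names]
|
|     root = _DemoNode(None, "root", ("leaf_a", "leaf_b"))
|     print([leaf.get_node_key() for leaf in dfs(root)])
|     # ['root%leaf_a', 'root%leaf_b']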
| @ -1,10 +0,0 @@ | |||||||
| def quote(s): |  | ||||||
|     return sandwich('"', s) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def sandwich(bread, jam): |  | ||||||
|     return bread + jam + bread |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def override(word, substitutions): |  | ||||||
|     return substitutions.get(word, word) |  | ||||||
| @ -1,52 +0,0 @@ | |||||||
| from collections import OrderedDict |  | ||||||
|  |  | ||||||
| import cimodel.lib.miniutils as miniutils |  | ||||||
|  |  | ||||||
|  |  | ||||||
| LIST_MARKER = "- " |  | ||||||
| INDENTATION_WIDTH = 2 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def is_dict(data): |  | ||||||
|     return type(data) in [dict, OrderedDict] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def is_collection(data): |  | ||||||
|     return is_dict(data) or type(data) is list |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def render(fh, data, depth, is_list_member=False): |  | ||||||
|     """ |  | ||||||
|     PyYaml does not allow precise control over the quoting |  | ||||||
|     behavior, especially for merge references. |  | ||||||
|     Therefore, we use this custom YAML renderer. |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     indentation = " " * INDENTATION_WIDTH * depth |  | ||||||
|  |  | ||||||
|     if is_dict(data): |  | ||||||
|  |  | ||||||
|         tuples = list(data.items()) |  | ||||||
|         if type(data) is not OrderedDict: |  | ||||||
|             tuples.sort() |  | ||||||
|  |  | ||||||
|         for i, (k, v) in enumerate(tuples): |  | ||||||
|             if not v: |  | ||||||
|                 continue |  | ||||||
|             # If this dict is itself a list member, the first key gets prefixed with a list marker |  | ||||||
|             list_marker_prefix = LIST_MARKER if is_list_member and not i else "" |  | ||||||
|  |  | ||||||
|             trailing_whitespace = "\n" if is_collection(v) else " " |  | ||||||
|             fh.write(indentation + list_marker_prefix + k + ":" + trailing_whitespace) |  | ||||||
|  |  | ||||||
|             render(fh, v, depth + 1 + int(is_list_member)) |  | ||||||
|  |  | ||||||
|     elif type(data) is list: |  | ||||||
|         for v in data: |  | ||||||
|             render(fh, v, depth, True) |  | ||||||
|  |  | ||||||
|     else: |  | ||||||
|         # use empty quotes to denote an empty string value instead of blank space |  | ||||||
|         modified_data = miniutils.quote(data) if data == "" else data |  | ||||||
|         list_member_prefix = indentation + LIST_MARKER if is_list_member else "" |  | ||||||
|         fh.write(list_member_prefix + str(modified_data) + "\n") |  | ||||||
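|
| # Illustrative sketch, not part of the original module: rendering a tiny
| # workflow fragment the way the generated config.yml is written, with quoted
| # values coming from miniutils.quote().
| if __name__ == "__main__":
|     import sys
|
|     demo = OrderedDict([
|         ("jobs", [
|             OrderedDict([
|                 ("docker_build_job", OrderedDict([
|                     ("name", miniutils.quote("docker-pytorch-linux-xenial-py3.7-gcc5.4")),
|                     ("image_name", miniutils.quote("pytorch-linux-xenial-py3.7-gcc5.4")),
|                 ])),
|             ]),
|         ]),
|     ])
|     render(sys.stdout, demo, depth=0)
|     # jobs:
|     #   - docker_build_job:
|     #       name: "docker-pytorch-linux-xenial-py3.7-gcc5.4"
|     #       image_name: "pytorch-linux-xenial-py3.7-gcc5.4"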
| @ -1,17 +0,0 @@ | |||||||
| #!/bin/bash -xe |  | ||||||
|  |  | ||||||
|  |  | ||||||
| YAML_FILENAME=verbatim-sources/workflows-pytorch-ge-config-tests.yml |  | ||||||
| DIFF_TOOL=meld |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Allows this script to be invoked from any directory: |  | ||||||
| cd $(dirname "$0") |  | ||||||
|  |  | ||||||
| pushd .. |  | ||||||
|  |  | ||||||
|  |  | ||||||
| $DIFF_TOOL $YAML_FILENAME <(./codegen_validation/normalize_yaml_fragment.py < $YAML_FILENAME) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| popd |  | ||||||
| @ -1,24 +0,0 @@ | |||||||
| #!/usr/bin/env python3 |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import yaml |  | ||||||
|  |  | ||||||
| # Need to import modules that lie on an upward-relative path |  | ||||||
| sys.path.append(os.path.join(sys.path[0], '..')) |  | ||||||
|  |  | ||||||
| import cimodel.lib.miniyaml as miniyaml |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def regurgitate(depth, use_pyyaml_formatter=False): |  | ||||||
|     data = yaml.safe_load(sys.stdin) |  | ||||||
|  |  | ||||||
|     if use_pyyaml_formatter: |  | ||||||
|         output = yaml.dump(data, sort_keys=True) |  | ||||||
|         sys.stdout.write(output) |  | ||||||
|     else: |  | ||||||
|         miniyaml.render(sys.stdout, data, depth) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == "__main__": |  | ||||||
|     regurgitate(3) |  | ||||||
| @ -1,15 +0,0 @@ | |||||||
| #!/bin/bash -xe |  | ||||||
|  |  | ||||||
| YAML_FILENAME=$1 |  | ||||||
|  |  | ||||||
| # Allows this script to be invoked from any directory: |  | ||||||
| cd $(dirname "$0") |  | ||||||
|  |  | ||||||
| pushd .. |  | ||||||
|  |  | ||||||
| TEMP_FILENAME=$(mktemp) |  | ||||||
|  |  | ||||||
| ./codegen_validation/normalize_yaml_fragment.py < $YAML_FILENAME > $TEMP_FILENAME
| mv $TEMP_FILENAME $YAML_FILENAME |  | ||||||
|  |  | ||||||
| popd |  | ||||||
							
								
								
									
.circleci/config.yml: 1732 lines changed (file diff suppressed because it is too large)
							| @ -1,31 +0,0 @@ | |||||||
| # Docker images for Jenkins |  | ||||||
|  |  | ||||||
| This directory contains everything needed to build the Docker images |  | ||||||
| that are used in our CI.
|  |  | ||||||
| The Dockerfiles located in subdirectories are parameterized to |  | ||||||
| conditionally run build stages depending on build arguments passed to |  | ||||||
| `docker build`. This lets us use only a few Dockerfiles for many |  | ||||||
| images. The different configurations are identified by a freeform |  | ||||||
| string that we call a _build environment_. This string is persisted in |  | ||||||
| each image as the `BUILD_ENVIRONMENT` environment variable. |  | ||||||
|  |  | ||||||
| See `build.sh` for valid build environments (it's the giant switch). |  | ||||||
|  |  | ||||||
| Docker builds are now defined in `.circleci/cimodel/data/simple/docker_definitions.py`.
|  |  | ||||||
| ## Contents |  | ||||||
|  |  | ||||||
| * `build.sh` -- dispatch script to launch all builds |  | ||||||
| * `common` -- scripts used to execute individual Docker build stages |  | ||||||
| * `ubuntu-cuda` -- Dockerfile for Ubuntu image with CUDA support for nvidia-docker |  | ||||||
|  |  | ||||||
| ## Usage |  | ||||||
|  |  | ||||||
| ```bash |  | ||||||
| # Build a specific image |  | ||||||
| ./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest |  | ||||||
|  |  | ||||||
| # Set flags (see build.sh) and build image |  | ||||||
| sudo bash -c 'PROTOBUF=1 ./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest'
| ``` |  | ||||||
| @ -1 +0,0 @@ | |||||||
| <manifest package="org.pytorch.deps" /> |  | ||||||
| @ -1,66 +0,0 @@ | |||||||
| buildscript { |  | ||||||
|     ext { |  | ||||||
|         minSdkVersion = 21 |  | ||||||
|         targetSdkVersion = 28 |  | ||||||
|         compileSdkVersion = 28 |  | ||||||
|         buildToolsVersion = '28.0.3' |  | ||||||
|  |  | ||||||
|         coreVersion = "1.2.0" |  | ||||||
|         extJUnitVersion = "1.1.1" |  | ||||||
|         runnerVersion = "1.2.0" |  | ||||||
|         rulesVersion = "1.2.0" |  | ||||||
|         junitVersion = "4.12" |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     repositories { |  | ||||||
|         google() |  | ||||||
|         mavenLocal() |  | ||||||
|         mavenCentral() |  | ||||||
|         jcenter() |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     dependencies { |  | ||||||
|         classpath 'com.android.tools.build:gradle:4.1.2' |  | ||||||
|         classpath 'com.vanniktech:gradle-maven-publish-plugin:0.14.2' |  | ||||||
|     } |  | ||||||
| } |  | ||||||
|  |  | ||||||
| repositories { |  | ||||||
|     google() |  | ||||||
|     jcenter() |  | ||||||
| } |  | ||||||
|  |  | ||||||
| apply plugin: 'com.android.library' |  | ||||||
|  |  | ||||||
| android { |  | ||||||
|     compileSdkVersion rootProject.compileSdkVersion |  | ||||||
|     buildToolsVersion rootProject.buildToolsVersion |  | ||||||
|  |  | ||||||
|     defaultConfig { |  | ||||||
|         minSdkVersion rootProject.minSdkVersion
|         targetSdkVersion rootProject.targetSdkVersion
|     } |  | ||||||
|  |  | ||||||
|     sourceSets { |  | ||||||
|         main { |  | ||||||
|             manifest.srcFile 'AndroidManifest.xml' |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
| } |  | ||||||
|  |  | ||||||
| dependencies { |  | ||||||
|     implementation 'com.android.support:appcompat-v7:28.0.0' |  | ||||||
|     implementation 'androidx.appcompat:appcompat:1.0.0' |  | ||||||
|     implementation 'com.facebook.fbjni:fbjni-java-only:0.2.2' |  | ||||||
|     implementation 'com.google.code.findbugs:jsr305:3.0.1' |  | ||||||
|     implementation 'com.facebook.soloader:nativeloader:0.10.1' |  | ||||||
|  |  | ||||||
|     implementation 'junit:junit:' + rootProject.junitVersion |  | ||||||
|     implementation 'androidx.test:core:' + rootProject.coreVersion |  | ||||||
|     implementation 'androidx.test.ext:junit:' + rootProject.extJUnitVersion |  | ||||||
|     implementation 'androidx.test:rules:' + rootProject.rulesVersion |  | ||||||
|     implementation 'androidx.test:runner:' + rootProject.runnerVersion |  | ||||||
| } |  | ||||||
| @ -1,402 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| image="$1" |  | ||||||
| shift |  | ||||||
|  |  | ||||||
| if [ -z "${image}" ]; then |  | ||||||
|   echo "Usage: $0 IMAGE" |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| function extract_version_from_image_name() { |  | ||||||
|   eval export $2=$(echo "${image}" | perl -n -e"/$1(\d+(\.\d+)?(\.\d+)?)/ && print \$1") |  | ||||||
|   if [ "x${!2}" = x ]; then |  | ||||||
|     echo "variable '$2' not correctly parsed from image='$image'" |  | ||||||
|     exit 1 |  | ||||||
|   fi |  | ||||||
| } |  | ||||||
|  |  | ||||||
| function extract_all_from_image_name() { |  | ||||||
|   # parse $image into an array, splitting on '-'
|   keep_IFS="$IFS" |  | ||||||
|   IFS="-" |  | ||||||
|   declare -a parts=($image) |  | ||||||
|   IFS="$keep_IFS" |  | ||||||
|   unset keep_IFS |  | ||||||
|  |  | ||||||
|   for part in "${parts[@]}"; do |  | ||||||
|     name=$(echo "${part}" | perl -n -e"/([a-zA-Z]+)\d+(\.\d+)?(\.\d+)?/ && print \$1") |  | ||||||
|     vername="${name^^}_VERSION" |  | ||||||
|     # "py" is the odd one out, needs this special case |  | ||||||
|     if [ "x${name}" = xpy ]; then |  | ||||||
|       vername=ANACONDA_PYTHON_VERSION |  | ||||||
|     fi |  | ||||||
|     # skip non-conforming fields such as "pytorch", "linux" or "xenial" without version string |  | ||||||
|     if [ -n "${name}" ]; then |  | ||||||
|       extract_version_from_image_name "${name}" "${vername}" |  | ||||||
|     fi |  | ||||||
|   done |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Use the same pre-built XLA test image from PyTorch/XLA |  | ||||||
| if [[ "$image" == *xla* ]]; then |  | ||||||
|   echo "Using pre-built XLA test image..." |  | ||||||
|   exit 0 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ "$image" == *-xenial* ]]; then |  | ||||||
|   UBUNTU_VERSION=16.04 |  | ||||||
| elif [[ "$image" == *-artful* ]]; then |  | ||||||
|   UBUNTU_VERSION=17.10 |  | ||||||
| elif [[ "$image" == *-bionic* ]]; then |  | ||||||
|   UBUNTU_VERSION=18.04 |  | ||||||
| elif [[ "$image" == *-focal* ]]; then |  | ||||||
|   UBUNTU_VERSION=20.04 |  | ||||||
| elif [[ "$image" == *ubuntu* ]]; then |  | ||||||
|   extract_version_from_image_name ubuntu UBUNTU_VERSION |  | ||||||
| elif [[ "$image" == *centos* ]]; then |  | ||||||
|   extract_version_from_image_name centos CENTOS_VERSION |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [ -n "${UBUNTU_VERSION}" ]; then |  | ||||||
|   OS="ubuntu" |  | ||||||
| elif [ -n "${CENTOS_VERSION}" ]; then |  | ||||||
|   OS="centos" |  | ||||||
| else |  | ||||||
|   echo "Unable to derive operating system base..." |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| DOCKERFILE="${OS}/Dockerfile" |  | ||||||
| if [[ "$image" == *cuda* ]]; then |  | ||||||
|   DOCKERFILE="${OS}-cuda/Dockerfile" |  | ||||||
| elif [[ "$image" == *rocm* ]]; then |  | ||||||
|   DOCKERFILE="${OS}-rocm/Dockerfile" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ "$image" == *xenial* ]] || [[ "$image" == *bionic* ]]; then |  | ||||||
|   CMAKE_VERSION=3.13.5 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| TRAVIS_DL_URL_PREFIX="https://s3.amazonaws.com/travis-python-archives/binaries/ubuntu/14.04/x86_64" |  | ||||||
|  |  | ||||||
| # It's annoying to rename jobs every time you want to rewrite a |  | ||||||
| # configuration, so we hardcode everything here rather than do it |  | ||||||
| # from scratch |  | ||||||
| case "$image" in |  | ||||||
|   pytorch-linux-xenial-py3.8) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.8 |  | ||||||
|     GCC_VERSION=7 |  | ||||||
|     # Do not install PROTOBUF, DB, and VISION as a test |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-xenial-py3.7-gcc5.4) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     GCC_VERSION=5 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-xenial-py3.7-gcc7.2) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     GCC_VERSION=7 |  | ||||||
|     # Do not install PROTOBUF, DB, and VISION as a test |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-xenial-py3.7-gcc7) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     GCC_VERSION=7 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7) |  | ||||||
|     CUDA_VERSION=10.2 |  | ||||||
|     CUDNN_VERSION=7 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     GCC_VERSION=7 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7) |  | ||||||
|     CUDA_VERSION=11.3.0 # Deviating from major.minor to conform to nvidia's Docker image names |  | ||||||
|     CUDNN_VERSION=8 |  | ||||||
|     TENSORRT_VERSION=8.0.1.6 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     GCC_VERSION=7 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-bionic-cuda11.3-cudnn8-py3-clang9) |  | ||||||
|     CUDA_VERSION=11.3.0 # Deviating from major.minor to conform to nvidia's Docker image names |  | ||||||
|     CUDNN_VERSION=8 |  | ||||||
|     TENSORRT_VERSION=8.0.1.6 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     CLANG_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7) |  | ||||||
|     CUDA_VERSION=11.6.0 |  | ||||||
|     CUDNN_VERSION=8 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     GCC_VERSION=7 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-xenial-py3-clang5-asan) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     CLANG_VERSION=5.0 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-xenial-py3-clang7-asan) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     CLANG_VERSION=7 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-xenial-py3-clang7-onnx) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     CLANG_VERSION=7 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-xenial-py3-clang5-android-ndk-r19c) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     CLANG_VERSION=5.0 |  | ||||||
|     LLVMDEV=yes |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     ANDROID=yes |  | ||||||
|     ANDROID_NDK_VERSION=r19c |  | ||||||
|     GRADLE_VERSION=6.8.3 |  | ||||||
|     NINJA_VERSION=1.9.0 |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-xenial-py3.7-clang7) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     CLANG_VERSION=7 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-bionic-py3.7-clang9) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     CLANG_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     VULKAN_SDK_VERSION=1.2.162.1 |  | ||||||
|     SWIFTSHADER=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-bionic-py3.8-gcc9) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.8 |  | ||||||
|     GCC_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-bionic-cuda10.2-cudnn7-py3.7-clang9) |  | ||||||
|     CUDA_VERSION=10.2 |  | ||||||
|     CUDNN_VERSION=7 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     CLANG_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7) |  | ||||||
|     CUDA_VERSION=10.2 |  | ||||||
|     CUDNN_VERSION=7 |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.9 |  | ||||||
|     GCC_VERSION=7 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-bionic-rocm5.0-py3.7) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     GCC_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ROCM_VERSION=5.0 |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-bionic-rocm5.1-py3.7) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     GCC_VERSION=9 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     ROCM_VERSION=5.1.1 |  | ||||||
|     ;; |  | ||||||
|   pytorch-linux-focal-py3.7-gcc7) |  | ||||||
|     ANACONDA_PYTHON_VERSION=3.7 |  | ||||||
|     CMAKE_VERSION=3.12.4  # To make sure XNNPACK is enabled for the BACKWARDS_COMPAT_TEST used with this image |  | ||||||
|     GCC_VERSION=7 |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     KATEX=yes |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     # Catch-all for builds that are not hardcoded. |  | ||||||
|     PROTOBUF=yes |  | ||||||
|     DB=yes |  | ||||||
|     VISION=yes |  | ||||||
|     echo "image '$image' did not match an existing build configuration" |  | ||||||
|     if [[ "$image" == *py* ]]; then |  | ||||||
|       extract_version_from_image_name py ANACONDA_PYTHON_VERSION |  | ||||||
|     fi |  | ||||||
|     if [[ "$image" == *cuda* ]]; then |  | ||||||
|       extract_version_from_image_name cuda CUDA_VERSION |  | ||||||
|       extract_version_from_image_name cudnn CUDNN_VERSION |  | ||||||
|     fi |  | ||||||
|     if [[ "$image" == *rocm* ]]; then |  | ||||||
|       extract_version_from_image_name rocm ROCM_VERSION |  | ||||||
|     fi |  | ||||||
|     if [[ "$image" == *gcc* ]]; then |  | ||||||
|       extract_version_from_image_name gcc GCC_VERSION |  | ||||||
|     fi |  | ||||||
|     if [[ "$image" == *clang* ]]; then |  | ||||||
|       extract_version_from_image_name clang CLANG_VERSION |  | ||||||
|     fi |  | ||||||
|     if [[ "$image" == *devtoolset* ]]; then |  | ||||||
|       extract_version_from_image_name devtoolset DEVTOOLSET_VERSION |  | ||||||
|     fi |  | ||||||
|     if [[ "$image" == *glibc* ]]; then |  | ||||||
|       extract_version_from_image_name glibc GLIBC_VERSION |  | ||||||
|     fi |  | ||||||
|     if [[ "$image" == *cmake* ]]; then |  | ||||||
|       extract_version_from_image_name cmake CMAKE_VERSION |  | ||||||
|     fi |  | ||||||
|   ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
| # Set Jenkins UID and GID if running Jenkins |  | ||||||
| if [ -n "${JENKINS:-}" ]; then |  | ||||||
|   JENKINS_UID=$(id -u jenkins) |  | ||||||
|   JENKINS_GID=$(id -g jenkins) |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]') |  | ||||||
|  |  | ||||||
| # When using cuDNN version 8, install it separately from CUDA
| if [[ "$image" == *cuda*  && ${OS} == "ubuntu" ]]; then |  | ||||||
|   IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}" |  | ||||||
|   if [[ ${CUDNN_VERSION} == 8 ]]; then |  | ||||||
|     IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}" |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Build image |  | ||||||
| # TODO: build-arg THRIFT is not turned on for any image, remove it once we confirm |  | ||||||
| # it's no longer needed. |  | ||||||
| docker build \ |  | ||||||
|        --no-cache \ |  | ||||||
|        --progress=plain \ |  | ||||||
|        --build-arg "TRAVIS_DL_URL_PREFIX=${TRAVIS_DL_URL_PREFIX}" \ |  | ||||||
|        --build-arg "BUILD_ENVIRONMENT=${image}" \ |  | ||||||
|        --build-arg "PROTOBUF=${PROTOBUF:-}" \ |  | ||||||
|        --build-arg "THRIFT=${THRIFT:-}" \ |  | ||||||
|        --build-arg "LLVMDEV=${LLVMDEV:-}" \ |  | ||||||
|        --build-arg "DB=${DB:-}" \ |  | ||||||
|        --build-arg "VISION=${VISION:-}" \ |  | ||||||
|        --build-arg "EC2=${EC2:-}" \ |  | ||||||
|        --build-arg "JENKINS=${JENKINS:-}" \ |  | ||||||
|        --build-arg "JENKINS_UID=${JENKINS_UID:-}" \ |  | ||||||
|        --build-arg "JENKINS_GID=${JENKINS_GID:-}" \ |  | ||||||
|        --build-arg "UBUNTU_VERSION=${UBUNTU_VERSION}" \ |  | ||||||
|        --build-arg "CENTOS_VERSION=${CENTOS_VERSION}" \ |  | ||||||
|        --build-arg "DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}" \ |  | ||||||
|        --build-arg "GLIBC_VERSION=${GLIBC_VERSION}" \ |  | ||||||
|        --build-arg "CLANG_VERSION=${CLANG_VERSION}" \ |  | ||||||
|        --build-arg "ANACONDA_PYTHON_VERSION=${ANACONDA_PYTHON_VERSION}" \ |  | ||||||
|        --build-arg "GCC_VERSION=${GCC_VERSION}" \ |  | ||||||
|        --build-arg "CUDA_VERSION=${CUDA_VERSION}" \ |  | ||||||
|        --build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \ |  | ||||||
|        --build-arg "TENSORRT_VERSION=${TENSORRT_VERSION}" \ |  | ||||||
|        --build-arg "ANDROID=${ANDROID}" \ |  | ||||||
|        --build-arg "ANDROID_NDK=${ANDROID_NDK_VERSION}" \ |  | ||||||
|        --build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \ |  | ||||||
|        --build-arg "VULKAN_SDK_VERSION=${VULKAN_SDK_VERSION}" \ |  | ||||||
|        --build-arg "SWIFTSHADER=${SWIFTSHADER}" \ |  | ||||||
|        --build-arg "CMAKE_VERSION=${CMAKE_VERSION:-}" \ |  | ||||||
|        --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \ |  | ||||||
|        --build-arg "KATEX=${KATEX:-}" \ |  | ||||||
|        --build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \ |  | ||||||
|        --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx900;gfx906}" \ |  | ||||||
|        --build-arg "IMAGE_NAME=${IMAGE_NAME}" \ |  | ||||||
|        -f $(dirname ${DOCKERFILE})/Dockerfile \ |  | ||||||
|        -t "$tmp_tag" \ |  | ||||||
|        "$@" \ |  | ||||||
|        . |  | ||||||
|  |  | ||||||
| # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`, |  | ||||||
| # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could |  | ||||||
| # find the correct image. As a result, here we have to replace the |  | ||||||
| #   "$UBUNTU_VERSION" == "18.04-rc" |  | ||||||
| # with |  | ||||||
| #   "$UBUNTU_VERSION" == "18.04" |  | ||||||
| UBUNTU_VERSION=$(echo ${UBUNTU_VERSION} | sed 's/-rc$//') |  | ||||||
|  |  | ||||||
| function drun() { |  | ||||||
|   docker run --rm "$tmp_tag" $* |  | ||||||
| } |  | ||||||
|  |  | ||||||
| if [[ "$OS" == "ubuntu" ]]; then |  | ||||||
|  |  | ||||||
|   if !(drun lsb_release -a 2>&1 | grep -qF Ubuntu); then |  | ||||||
|     echo "OS=ubuntu, but:" |  | ||||||
|     drun lsb_release -a |  | ||||||
|     exit 1 |  | ||||||
|   fi |  | ||||||
|   if !(drun lsb_release -a 2>&1 | grep -qF "$UBUNTU_VERSION"); then |  | ||||||
|     echo "UBUNTU_VERSION=$UBUNTU_VERSION, but:" |  | ||||||
|     drun lsb_release -a |  | ||||||
|     exit 1 |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [ -n "$ANACONDA_PYTHON_VERSION" ]; then |  | ||||||
|   if !(drun python --version 2>&1 | grep -qF "Python $ANACONDA_PYTHON_VERSION"); then |  | ||||||
|     echo "ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION, but:" |  | ||||||
|     drun python --version |  | ||||||
|     exit 1 |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [ -n "$GCC_VERSION" ]; then |  | ||||||
|   if !(drun gcc --version 2>&1 | grep -q " $GCC_VERSION\\W"); then |  | ||||||
|     echo "GCC_VERSION=$GCC_VERSION, but:" |  | ||||||
|     drun gcc --version |  | ||||||
|     exit 1 |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [ -n "$CLANG_VERSION" ]; then |  | ||||||
|   if !(drun clang --version 2>&1 | grep -qF "clang version $CLANG_VERSION"); then |  | ||||||
|     echo "CLANG_VERSION=$CLANG_VERSION, but:" |  | ||||||
|     drun clang --version |  | ||||||
|     exit 1 |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [ -n "$KATEX" ]; then |  | ||||||
|   if !(drun katex --version); then |  | ||||||
|     echo "KATEX=$KATEX, but:" |  | ||||||
|     drun katex --version |  | ||||||
|     exit 1 |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
| @ -1,55 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| retry () { |  | ||||||
|     $*  || (sleep 1 && $*) || (sleep 2 && $*) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # If UPSTREAM_BUILD_ID is set (see trigger job), then we can |  | ||||||
| # use it to tag this build with the same ID used to tag all other |  | ||||||
| # base image builds. Also, we can try and pull the previous |  | ||||||
| # image first, to avoid rebuilding layers that haven't changed. |  | ||||||
|  |  | ||||||
| # Until we find a way to reliably reuse the previous build, this last_tag is not in use
| # last_tag="$(( CIRCLE_BUILD_NUM - 1 ))" |  | ||||||
| tag="${DOCKER_TAG}" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| registry="308535385114.dkr.ecr.us-east-1.amazonaws.com" |  | ||||||
| image="${registry}/pytorch/${IMAGE_NAME}" |  | ||||||
|  |  | ||||||
| login() { |  | ||||||
|   aws ecr get-authorization-token --region us-east-1 --output text --query 'authorizationData[].authorizationToken' | |  | ||||||
|     base64 -d | |  | ||||||
|     cut -d: -f2 | |  | ||||||
|     docker login -u AWS --password-stdin "$1" |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Only run these steps if not on github actions |  | ||||||
| if [[ -z "${GITHUB_ACTIONS}" ]]; then |  | ||||||
|   # Retry on timeouts (can happen on job stampede). |  | ||||||
|   retry login "${registry}" |  | ||||||
|   # Logout on exit |  | ||||||
|   trap "docker logout ${registry}" EXIT |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # export EC2=1 |  | ||||||
| # export JENKINS=1 |  | ||||||
|  |  | ||||||
| # Try to pull the previous image (perhaps we can reuse some layers) |  | ||||||
| # if [ -n "${last_tag}" ]; then |  | ||||||
| #   docker pull "${image}:${last_tag}" || true |  | ||||||
| # fi |  | ||||||
|  |  | ||||||
| # Build new image |  | ||||||
| ./build.sh ${IMAGE_NAME} -t "${image}:${tag}" |  | ||||||
|  |  | ||||||
| docker push "${image}:${tag}" |  | ||||||
|  |  | ||||||
| if [ -z "${DOCKER_SKIP_S3_UPLOAD:-}" ]; then |  | ||||||
|   trap "rm -rf ${IMAGE_NAME}:${tag}.tar" EXIT |  | ||||||
|   docker save -o "${IMAGE_NAME}:${tag}.tar" "${image}:${tag}" |  | ||||||
|   aws s3 cp "${IMAGE_NAME}:${tag}.tar" "s3://ossci-linux-build/pytorch/base/${IMAGE_NAME}:${tag}.tar" --acl public-read |  | ||||||
| fi |  | ||||||
| @ -1,105 +0,0 @@ | |||||||
| ARG CENTOS_VERSION |  | ||||||
|  |  | ||||||
| FROM centos:${CENTOS_VERSION} |  | ||||||
|  |  | ||||||
| ARG CENTOS_VERSION |  | ||||||
|  |  | ||||||
| # Set AMD gpu targets to build for |  | ||||||
| ARG PYTORCH_ROCM_ARCH |  | ||||||
| ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH} |  | ||||||
|  |  | ||||||
| # Install required packages to build Caffe2 |  | ||||||
|  |  | ||||||
| # Install common dependencies (so that this step can be cached separately) |  | ||||||
| ARG EC2 |  | ||||||
| ADD ./common/install_base.sh install_base.sh |  | ||||||
| RUN bash ./install_base.sh && rm install_base.sh |  | ||||||
|  |  | ||||||
| # Update CentOS git version |  | ||||||
| RUN yum -y remove git |  | ||||||
| RUN yum -y remove git-* |  | ||||||
| RUN yum -y install https://packages.endpoint.com/rhel/7/os/x86_64/endpoint-repo-1.9-1.x86_64.rpm |  | ||||||
| RUN yum install -y git |  | ||||||
|  |  | ||||||
| # Install devtoolset |  | ||||||
| ARG DEVTOOLSET_VERSION |  | ||||||
| ADD ./common/install_devtoolset.sh install_devtoolset.sh |  | ||||||
| RUN bash ./install_devtoolset.sh && rm install_devtoolset.sh |  | ||||||
| ENV BASH_ENV "/etc/profile" |  | ||||||
|  |  | ||||||
| # (optional) Install non-default glibc version |  | ||||||
| ARG GLIBC_VERSION |  | ||||||
| ADD ./common/install_glibc.sh install_glibc.sh |  | ||||||
| RUN if [ -n "${GLIBC_VERSION}" ]; then bash ./install_glibc.sh; fi |  | ||||||
| RUN rm install_glibc.sh |  | ||||||
|  |  | ||||||
| # Install user |  | ||||||
| ADD ./common/install_user.sh install_user.sh |  | ||||||
| RUN bash ./install_user.sh && rm install_user.sh |  | ||||||
|  |  | ||||||
| # Install conda and other packages (e.g., numpy, pytest) |  | ||||||
| ENV PATH /opt/conda/bin:$PATH |  | ||||||
| ARG ANACONDA_PYTHON_VERSION |  | ||||||
| ADD requirements-ci.txt /opt/conda/requirements-ci.txt |  | ||||||
| ADD ./common/install_conda.sh install_conda.sh |  | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh |  | ||||||
| RUN rm /opt/conda/requirements-ci.txt |  | ||||||
|  |  | ||||||
| # (optional) Install protobuf for ONNX |  | ||||||
| ARG PROTOBUF |  | ||||||
| ADD ./common/install_protobuf.sh install_protobuf.sh |  | ||||||
| RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi |  | ||||||
| RUN rm install_protobuf.sh |  | ||||||
| ENV INSTALLED_PROTOBUF ${PROTOBUF} |  | ||||||
|  |  | ||||||
| # (optional) Install database packages like LMDB and LevelDB |  | ||||||
| ARG DB |  | ||||||
| ADD ./common/install_db.sh install_db.sh |  | ||||||
| RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi |  | ||||||
| RUN rm install_db.sh |  | ||||||
| ENV INSTALLED_DB ${DB} |  | ||||||
|  |  | ||||||
| # (optional) Install vision packages like OpenCV and ffmpeg |  | ||||||
| ARG VISION |  | ||||||
| ADD ./common/install_vision.sh install_vision.sh |  | ||||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi |  | ||||||
| RUN rm install_vision.sh |  | ||||||
| ENV INSTALLED_VISION ${VISION} |  | ||||||
|  |  | ||||||
| # Install rocm |  | ||||||
| ARG ROCM_VERSION |  | ||||||
| ADD ./common/install_rocm.sh install_rocm.sh |  | ||||||
| RUN bash ./install_rocm.sh |  | ||||||
| RUN rm install_rocm.sh |  | ||||||
| ENV PATH /opt/rocm/bin:$PATH |  | ||||||
| ENV PATH /opt/rocm/hcc/bin:$PATH |  | ||||||
| ENV PATH /opt/rocm/hip/bin:$PATH |  | ||||||
| ENV PATH /opt/rocm/opencl/bin:$PATH |  | ||||||
| ENV PATH /opt/rocm/llvm/bin:$PATH |  | ||||||
| ENV MAGMA_HOME /opt/rocm/magma |  | ||||||
| ENV LANG en_US.utf8 |  | ||||||
| ENV LC_ALL en_US.utf8 |  | ||||||
|  |  | ||||||
| # (optional) Install non-default CMake version |  | ||||||
| ARG CMAKE_VERSION |  | ||||||
| ADD ./common/install_cmake.sh install_cmake.sh |  | ||||||
| RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi |  | ||||||
| RUN rm install_cmake.sh |  | ||||||
|  |  | ||||||
| # (optional) Install non-default Ninja version |  | ||||||
| ARG NINJA_VERSION |  | ||||||
| ADD ./common/install_ninja.sh install_ninja.sh |  | ||||||
| RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi |  | ||||||
| RUN rm install_ninja.sh |  | ||||||
|  |  | ||||||
| # Install ccache/sccache (do this last, so we get priority in PATH) |  | ||||||
| ADD ./common/install_cache.sh install_cache.sh |  | ||||||
| ENV PATH /opt/cache/bin:$PATH |  | ||||||
| RUN bash ./install_cache.sh && rm install_cache.sh |  | ||||||
|  |  | ||||||
| # Include BUILD_ENVIRONMENT environment variable in image |  | ||||||
| ARG BUILD_ENVIRONMENT |  | ||||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} |  | ||||||
|  |  | ||||||
| USER jenkins |  | ||||||
| CMD ["bash"] |  | ||||||
| @ -1,109 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| [ -n "${ANDROID_NDK}" ] |  | ||||||
|  |  | ||||||
| _https_amazon_aws=https://ossci-android.s3.amazonaws.com |  | ||||||
|  |  | ||||||
| apt-get update |  | ||||||
| apt-get install -y --no-install-recommends autotools-dev autoconf unzip |  | ||||||
| apt-get autoclean && apt-get clean |  | ||||||
| rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |  | ||||||
|  |  | ||||||
| pushd /tmp |  | ||||||
| curl -Os --retry 3 $_https_amazon_aws/android-ndk-${ANDROID_NDK}-linux-x86_64.zip |  | ||||||
| popd |  | ||||||
| _ndk_dir=/opt/ndk |  | ||||||
| mkdir -p "$_ndk_dir" |  | ||||||
| unzip -qo /tmp/android*.zip -d "$_ndk_dir" |  | ||||||
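| # The zip extracts into a single versioned subdirectory; flatten it so the NDK lives directly under /opt/ndk |  | ||||||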
| _versioned_dir=$(find "$_ndk_dir/" -mindepth 1 -maxdepth 1 -type d) |  | ||||||
| mv "$_versioned_dir"/* "$_ndk_dir"/ |  | ||||||
| rmdir "$_versioned_dir" |  | ||||||
| rm -rf /tmp/* |  | ||||||
|  |  | ||||||
| # Install OpenJDK |  | ||||||
| # https://hub.docker.com/r/picoded/ubuntu-openjdk-8-jdk/dockerfile/ |  | ||||||
|  |  | ||||||
| sudo apt-get update && \ |  | ||||||
|     apt-get install -y openjdk-8-jdk && \ |  | ||||||
|     apt-get install -y ant && \ |  | ||||||
|     apt-get clean && \ |  | ||||||
|     rm -rf /var/lib/apt/lists/* && \ |  | ||||||
|     rm -rf /var/cache/oracle-jdk8-installer; |  | ||||||
|  |  | ||||||
| # Fix certificate issues, found as of |  | ||||||
| # https://bugs.launchpad.net/ubuntu/+source/ca-certificates-java/+bug/983302 |  | ||||||
|  |  | ||||||
| sudo apt-get update && \ |  | ||||||
|     apt-get install -y ca-certificates-java && \ |  | ||||||
|     apt-get clean && \ |  | ||||||
|     update-ca-certificates -f && \ |  | ||||||
|     rm -rf /var/lib/apt/lists/* && \ |  | ||||||
|     rm -rf /var/cache/oracle-jdk8-installer; |  | ||||||
|  |  | ||||||
| export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/ |  | ||||||
|  |  | ||||||
| # Installing android sdk |  | ||||||
| # https://github.com/circleci/circleci-images/blob/staging/android/Dockerfile.m4 |  | ||||||
|  |  | ||||||
| _tmp_sdk_zip=/tmp/android-sdk-linux.zip |  | ||||||
| _android_home=/opt/android/sdk |  | ||||||
|  |  | ||||||
| rm -rf $_android_home |  | ||||||
| sudo mkdir -p $_android_home |  | ||||||
| curl --silent --show-error --location --fail --retry 3 --output /tmp/android-sdk-linux.zip $_https_amazon_aws/android-sdk-linux-tools3859397-build-tools2803-2902-platforms28-29.zip |  | ||||||
| sudo unzip -q $_tmp_sdk_zip -d $_android_home |  | ||||||
| rm $_tmp_sdk_zip |  | ||||||
|  |  | ||||||
| sudo chmod -R 777 $_android_home |  | ||||||
|  |  | ||||||
| export ANDROID_HOME=$_android_home |  | ||||||
| export ADB_INSTALL_TIMEOUT=120 |  | ||||||
|  |  | ||||||
| export PATH="${ANDROID_HOME}/tools:${ANDROID_HOME}/tools/bin:${ANDROID_HOME}/platform-tools:${PATH}" |  | ||||||
| echo "PATH:${PATH}" |  | ||||||
|  |  | ||||||
| # Installing Gradle |  | ||||||
| echo "GRADLE_VERSION:${GRADLE_VERSION}" |  | ||||||
| _gradle_home=/opt/gradle |  | ||||||
| sudo rm -rf $_gradle_home |  | ||||||
| sudo mkdir -p $_gradle_home |  | ||||||
|  |  | ||||||
| curl --silent --output /tmp/gradle.zip --retry 3 $_https_amazon_aws/gradle-${GRADLE_VERSION}-bin.zip |  | ||||||
|  |  | ||||||
| sudo unzip -q /tmp/gradle.zip -d $_gradle_home |  | ||||||
| rm /tmp/gradle.zip |  | ||||||
|  |  | ||||||
| sudo chmod -R 777 $_gradle_home |  | ||||||
|  |  | ||||||
| export GRADLE_HOME=$_gradle_home/gradle-$GRADLE_VERSION |  | ||||||
| alias gradle="${GRADLE_HOME}/bin/gradle" |  | ||||||
|  |  | ||||||
| export PATH="${GRADLE_HOME}/bin/:${PATH}" |  | ||||||
| echo "PATH:${PATH}" |  | ||||||
|  |  | ||||||
| gradle --version |  | ||||||
|  |  | ||||||
| mkdir /var/lib/jenkins/gradledeps |  | ||||||
| cp build.gradle /var/lib/jenkins/gradledeps |  | ||||||
| cp AndroidManifest.xml /var/lib/jenkins/gradledeps |  | ||||||
|  |  | ||||||
| pushd /var/lib/jenkins |  | ||||||
|  |  | ||||||
| export GRADLE_LOCAL_PROPERTIES=gradledeps/local.properties |  | ||||||
| rm -f $GRADLE_LOCAL_PROPERTIES |  | ||||||
| echo "sdk.dir=/opt/android/sdk" >> $GRADLE_LOCAL_PROPERTIES |  | ||||||
| echo "ndk.dir=/opt/ndk" >> $GRADLE_LOCAL_PROPERTIES |  | ||||||
|  |  | ||||||
| chown -R jenkins /var/lib/jenkins/gradledeps |  | ||||||
| chgrp -R jenkins /var/lib/jenkins/gradledeps |  | ||||||
|  |  | ||||||
| sudo -H -u jenkins $GRADLE_HOME/bin/gradle -Pandroid.useAndroidX=true -p /var/lib/jenkins/gradledeps -g /var/lib/jenkins/.gradle --refresh-dependencies --debug --stacktrace assemble |  | ||||||
|  |  | ||||||
| chown -R jenkins /var/lib/jenkins/.gradle |  | ||||||
| chgrp -R jenkins /var/lib/jenkins/.gradle |  | ||||||
|  |  | ||||||
| popd |  | ||||||
|  |  | ||||||
| rm -rf /var/lib/jenkins/.gradle/daemon |  | ||||||
| @ -1,134 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| install_ubuntu() { |  | ||||||
|   # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`, |  | ||||||
|   # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile can |  | ||||||
|   # find the correct image. As a result, here we have to check for |  | ||||||
|   #   "$UBUNTU_VERSION" == "18.04"* |  | ||||||
|   # instead of |  | ||||||
|   #   "$UBUNTU_VERSION" == "18.04" |  | ||||||
|   if [[ "$UBUNTU_VERSION" == "18.04"* ]]; then |  | ||||||
|     cmake3="cmake=3.10*" |  | ||||||
|     maybe_libiomp_dev="libiomp-dev" |  | ||||||
|   elif [[ "$UBUNTU_VERSION" == "20.04"* ]]; then |  | ||||||
|     cmake3="cmake=3.16*" |  | ||||||
|     maybe_libiomp_dev="" |  | ||||||
|   else |  | ||||||
|     cmake3="cmake=3.5*" |  | ||||||
|     maybe_libiomp_dev="libiomp-dev" |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   # TODO: Remove this once nvidia package repos are back online |  | ||||||
|   # Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968 |  | ||||||
|   # shellcheck disable=SC2046 |  | ||||||
|   sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list") |  | ||||||
|  |  | ||||||
|   # Install common dependencies |  | ||||||
|   apt-get update |  | ||||||
|   # TODO: Some of these may not be necessary |  | ||||||
|   ccache_deps="asciidoc docbook-xml docbook-xsl xsltproc" |  | ||||||
|   numpy_deps="gfortran" |  | ||||||
|   apt-get install -y --no-install-recommends \ |  | ||||||
|     $ccache_deps \ |  | ||||||
|     $numpy_deps \ |  | ||||||
|     ${cmake3} \ |  | ||||||
|     apt-transport-https \ |  | ||||||
|     autoconf \ |  | ||||||
|     automake \ |  | ||||||
|     build-essential \ |  | ||||||
|     ca-certificates \ |  | ||||||
|     curl \ |  | ||||||
|     git \ |  | ||||||
|     libatlas-base-dev \ |  | ||||||
|     libc6-dbg \ |  | ||||||
|     ${maybe_libiomp_dev} \ |  | ||||||
|     libyaml-dev \ |  | ||||||
|     libz-dev \ |  | ||||||
|     libjpeg-dev \ |  | ||||||
|     libasound2-dev \ |  | ||||||
|     libsndfile-dev \ |  | ||||||
|     software-properties-common \ |  | ||||||
|     wget \ |  | ||||||
|     sudo \ |  | ||||||
|     vim |  | ||||||
|  |  | ||||||
|   # Should resolve issues related to various apt package repository cert issues |  | ||||||
|   # see: https://github.com/pytorch/pytorch/issues/65931 |  | ||||||
|   apt-get install -y libgnutls30 |  | ||||||
|  |  | ||||||
|   # Cleanup package manager |  | ||||||
|   apt-get autoclean && apt-get clean |  | ||||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |  | ||||||
| } |  | ||||||
|  |  | ||||||
| install_centos() { |  | ||||||
|   # Need EPEL for many packages we depend on. |  | ||||||
|   # See http://fedoraproject.org/wiki/EPEL |  | ||||||
|   yum --enablerepo=extras install -y epel-release |  | ||||||
|  |  | ||||||
|   ccache_deps="asciidoc docbook-dtds docbook-style-xsl libxslt" |  | ||||||
|   numpy_deps="gcc-gfortran" |  | ||||||
|   # Note: protobuf-c-{compiler,devel} on CentOS are too old to be used |  | ||||||
|   # for Caffe2. That said, we still install them to make sure the build |  | ||||||
|   # system opts to build/use protoc and libprotobuf from third-party. |  | ||||||
|   yum install -y \ |  | ||||||
|     $ccache_deps \ |  | ||||||
|     $numpy_deps \ |  | ||||||
|     autoconf \ |  | ||||||
|     automake \ |  | ||||||
|     bzip2 \ |  | ||||||
|     cmake \ |  | ||||||
|     cmake3 \ |  | ||||||
|     curl \ |  | ||||||
|     gcc \ |  | ||||||
|     gcc-c++ \ |  | ||||||
|     gflags-devel \ |  | ||||||
|     git \ |  | ||||||
|     glibc-devel \ |  | ||||||
|     glibc-headers \ |  | ||||||
|     glog-devel \ |  | ||||||
|     hiredis-devel \ |  | ||||||
|     libstdc++-devel \ |  | ||||||
|     libsndfile-devel \ |  | ||||||
|     make \ |  | ||||||
|     opencv-devel \ |  | ||||||
|     sudo \ |  | ||||||
|     wget \ |  | ||||||
|     vim |  | ||||||
|  |  | ||||||
|   # Cleanup |  | ||||||
|   yum clean all |  | ||||||
|   rm -rf /var/cache/yum |  | ||||||
|   rm -rf /var/lib/yum/yumdb |  | ||||||
|   rm -rf /var/lib/yum/history |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Install base packages depending on the base OS |  | ||||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') |  | ||||||
| case "$ID" in |  | ||||||
|   ubuntu) |  | ||||||
|     install_ubuntu |  | ||||||
|     ;; |  | ||||||
|   centos) |  | ||||||
|     install_centos |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     echo "Unable to determine OS..." |  | ||||||
|     exit 1 |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
| # Install Valgrind separately since the apt-get version is too old. |  | ||||||
| mkdir valgrind_build && cd valgrind_build |  | ||||||
| VALGRIND_VERSION=3.16.1 |  | ||||||
| wget https://ossci-linux.s3.amazonaws.com/valgrind-${VALGRIND_VERSION}.tar.bz2 |  | ||||||
| tar -xjf valgrind-${VALGRIND_VERSION}.tar.bz2 |  | ||||||
| cd valgrind-${VALGRIND_VERSION} |  | ||||||
| ./configure --prefix=/usr/local |  | ||||||
| make -j6 |  | ||||||
| sudo make install |  | ||||||
| cd ../../ |  | ||||||
| rm -rf valgrind_build |  | ||||||
| alias valgrind="/usr/local/bin/valgrind" |  | ||||||
| @ -1,117 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| install_ubuntu() { |  | ||||||
|   echo "Preparing to build sccache from source" |  | ||||||
|   apt-get update |  | ||||||
|   apt-get install -y cargo pkg-config libssl-dev |  | ||||||
|   echo "Checking out sccache repo" |  | ||||||
|   git clone https://github.com/pytorch/sccache |  | ||||||
|   cd sccache |  | ||||||
|   echo "Building sccache" |  | ||||||
|   cargo build --release |  | ||||||
|   cp target/release/sccache /opt/cache/bin |  | ||||||
|   echo "Cleaning up" |  | ||||||
|   cd .. |  | ||||||
|   rm -rf sccache |  | ||||||
|   apt-get remove -y cargo rustc |  | ||||||
|   apt-get autoclean && apt-get clean |  | ||||||
| } |  | ||||||
|  |  | ||||||
| install_binary() { |  | ||||||
|   echo "Downloading sccache binary from S3 repo" |  | ||||||
|   curl --retry 3 https://s3.amazonaws.com/ossci-linux/sccache -o /opt/cache/bin/sccache |  | ||||||
| } |  | ||||||
|  |  | ||||||
| mkdir -p /opt/cache/bin |  | ||||||
| mkdir -p /opt/cache/lib |  | ||||||
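| # Put /opt/cache/bin at the front of PATH, both persistently via /etc/environment and |  | ||||||
| # for the rest of this script, so the compiler wrappers below shadow the real tools. |  | ||||||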
| sed -e 's|PATH="\(.*\)"|PATH="/opt/cache/bin:\1"|g' -i /etc/environment |  | ||||||
| export PATH="/opt/cache/bin:$PATH" |  | ||||||
|  |  | ||||||
| # Setup compiler cache |  | ||||||
| if [ -n "$ROCM_VERSION" ]; then |  | ||||||
|   curl --retry 3 http://repo.radeon.com/misc/.sccache_amd/sccache -o /opt/cache/bin/sccache |  | ||||||
| else |  | ||||||
|   ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') |  | ||||||
|   case "$ID" in |  | ||||||
|     ubuntu) |  | ||||||
|       install_ubuntu |  | ||||||
|       ;; |  | ||||||
|     *) |  | ||||||
|       install_binary |  | ||||||
|       ;; |  | ||||||
|   esac |  | ||||||
| fi |  | ||||||
| chmod a+x /opt/cache/bin/sccache |  | ||||||
|  |  | ||||||
| function write_sccache_stub() { |  | ||||||
|   printf "#!/bin/sh\nif [ \$(ps -p \$PPID -o comm=) != sccache ]; then\n  exec sccache $(which $1) \"\$@\"\nelse\n  exec $(which $1) \"\$@\"\nfi" > "/opt/cache/bin/$1" |  | ||||||
|   chmod a+x "/opt/cache/bin/$1" |  | ||||||
| } |  | ||||||
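| # For example, `write_sccache_stub gcc` writes /opt/cache/bin/gcc roughly as: |  | ||||||
| #   #!/bin/sh |  | ||||||
| #   if [ $(ps -p $PPID -o comm=) != sccache ]; then |  | ||||||
| #     exec sccache /usr/bin/gcc "$@" |  | ||||||
| #   else |  | ||||||
| #     exec /usr/bin/gcc "$@" |  | ||||||
| #   fi |  | ||||||
| # (the compiler path is whatever `which gcc` resolves to at build time). The $PPID |  | ||||||
| # check makes the wrapper fall through to the real compiler when sccache itself is |  | ||||||
| # the caller, avoiding recursion. |  | ||||||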
|  |  | ||||||
| write_sccache_stub cc |  | ||||||
| write_sccache_stub c++ |  | ||||||
| write_sccache_stub gcc |  | ||||||
| write_sccache_stub g++ |  | ||||||
|  |  | ||||||
| # NOTE: See specific ROCM_VERSION case below. |  | ||||||
| if [ "x$ROCM_VERSION" = x ]; then |  | ||||||
|   write_sccache_stub clang |  | ||||||
|   write_sccache_stub clang++ |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [ -n "$CUDA_VERSION" ]; then |  | ||||||
|   # TODO: This is a workaround for the fact that PyTorch's FindCUDA |  | ||||||
|   # implementation cannot find nvcc if it is set up this way, because it |  | ||||||
|   # appears to search for nvcc in PATH and use its location to infer |  | ||||||
|   # where CUDA is installed.  Instead, we install the nvcc wrapper outside |  | ||||||
|   # of PATH and set CUDA_NVCC_EXECUTABLE so that we still make use of it. |  | ||||||
|  |  | ||||||
|   write_sccache_stub nvcc |  | ||||||
|   mv /opt/cache/bin/nvcc /opt/cache/lib/ |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [ -n "$ROCM_VERSION" ]; then |  | ||||||
|   # ROCm compiler is hcc or clang. However, it is commonly invoked via hipcc wrapper. |  | ||||||
|   # hipcc will call either hcc or clang using an absolute path starting with /opt/rocm, |  | ||||||
|   # causing the /opt/cache/bin to be skipped. We must create the sccache wrappers |  | ||||||
|   # directly under /opt/rocm while also preserving the original compiler names. |  | ||||||
|   # Note symlinks will chain as follows: [hcc or clang++] -> clang -> clang-?? |  | ||||||
|   # Final link in symlink chain must point back to original directory. |  | ||||||
|  |  | ||||||
|   # Original compiler is moved one directory deeper. Wrapper replaces it. |  | ||||||
|   function write_sccache_stub_rocm() { |  | ||||||
|     OLDCOMP=$1 |  | ||||||
|     COMPNAME=$(basename $OLDCOMP) |  | ||||||
|     TOPDIR=$(dirname $OLDCOMP) |  | ||||||
|     WRAPPED="$TOPDIR/original/$COMPNAME" |  | ||||||
|     mv "$OLDCOMP" "$WRAPPED" |  | ||||||
|     printf "#!/bin/sh\nexec sccache $WRAPPED \"\$@\"" > "$OLDCOMP" |  | ||||||
|     chmod a+x "$OLDCOMP" |  | ||||||
|   } |  | ||||||
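|   # For example, write_sccache_stub_rocm /opt/rocm/llvm/bin/clang moves the real |  | ||||||
|   # compiler to /opt/rocm/llvm/bin/original/clang and leaves a wrapper in its place |  | ||||||
|   # that simply runs `exec sccache /opt/rocm/llvm/bin/original/clang "$@"`. |  | ||||||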
|  |  | ||||||
|   if [[ -e "/opt/rocm/hcc/bin/hcc" ]]; then |  | ||||||
|     # ROCm 3.3 or earlier. |  | ||||||
|     mkdir /opt/rocm/hcc/bin/original |  | ||||||
|     write_sccache_stub_rocm /opt/rocm/hcc/bin/hcc |  | ||||||
|     write_sccache_stub_rocm /opt/rocm/hcc/bin/clang |  | ||||||
|     write_sccache_stub_rocm /opt/rocm/hcc/bin/clang++ |  | ||||||
|     # Fix last link in symlink chain, clang points to versioned clang in prior dir |  | ||||||
|     pushd /opt/rocm/hcc/bin/original |  | ||||||
|     ln -s ../$(readlink clang) |  | ||||||
|     popd |  | ||||||
|   elif [[ -e "/opt/rocm/llvm/bin/clang" ]]; then |  | ||||||
|     # ROCm 3.5 and beyond. |  | ||||||
|     mkdir /opt/rocm/llvm/bin/original |  | ||||||
|     write_sccache_stub_rocm /opt/rocm/llvm/bin/clang |  | ||||||
|     write_sccache_stub_rocm /opt/rocm/llvm/bin/clang++ |  | ||||||
|     # Fix last link in symlink chain, clang points to versioned clang in prior dir |  | ||||||
|     pushd /opt/rocm/llvm/bin/original |  | ||||||
|     ln -s ../$(readlink clang) |  | ||||||
|     popd |  | ||||||
|   else |  | ||||||
|     echo "Cannot find ROCm compiler." |  | ||||||
|     exit 1 |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
| @ -1,44 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| if [ -n "$CLANG_VERSION" ]; then |  | ||||||
|  |  | ||||||
|   if [[ $CLANG_VERSION == 7 && $UBUNTU_VERSION == 16.04 ]]; then |  | ||||||
|     wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - |  | ||||||
|     sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main" |  | ||||||
|   elif [[ $CLANG_VERSION == 9 && $UBUNTU_VERSION == 18.04 ]]; then |  | ||||||
|     sudo apt-get update |  | ||||||
|     # gpg-agent is not available by default on 18.04 |  | ||||||
|     sudo apt-get install  -y --no-install-recommends gpg-agent |  | ||||||
|     wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add  - |  | ||||||
|     apt-add-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-${CLANG_VERSION} main" |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   sudo apt-get update |  | ||||||
|   apt-get install -y --no-install-recommends clang-"$CLANG_VERSION" |  | ||||||
|   apt-get install -y --no-install-recommends llvm-"$CLANG_VERSION" |  | ||||||
|  |  | ||||||
|   # Install dev version of LLVM. |  | ||||||
|   if [ -n "$LLVMDEV" ]; then |  | ||||||
|     sudo apt-get install -y --no-install-recommends llvm-"$CLANG_VERSION"-dev |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   # Use update-alternatives to make this version the default |  | ||||||
|   # TODO: Decide if overriding gcc as well is a good idea |  | ||||||
|   # update-alternatives --install /usr/bin/gcc gcc /usr/bin/clang-"$CLANG_VERSION" 50 |  | ||||||
|   # update-alternatives --install /usr/bin/g++ g++ /usr/bin/clang++-"$CLANG_VERSION" 50 |  | ||||||
|   update-alternatives --install /usr/bin/clang clang /usr/bin/clang-"$CLANG_VERSION" 50 |  | ||||||
|   update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-"$CLANG_VERSION" 50 |  | ||||||
|  |  | ||||||
|   # clang's packaging is a little messed up (the runtime libs aren't |  | ||||||
|   # added into the linker path), so give it a little help |  | ||||||
|   clang_lib=("/usr/lib/llvm-$CLANG_VERSION/lib/clang/"*"/lib/linux") |  | ||||||
|   echo "$clang_lib" > /etc/ld.so.conf.d/clang.conf |  | ||||||
|   ldconfig |  | ||||||
|  |  | ||||||
|   # Cleanup package manager |  | ||||||
|   apt-get autoclean && apt-get clean |  | ||||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |  | ||||||
|  |  | ||||||
| fi |  | ||||||
| @ -1,19 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| [ -n "$CMAKE_VERSION" ] |  | ||||||
|  |  | ||||||
| # Remove system cmake install so it won't get used instead |  | ||||||
| apt-get remove cmake -y |  | ||||||
|  |  | ||||||
| # Turn 3.6.3 into v3.6 |  | ||||||
| path=$(echo "${CMAKE_VERSION}" | sed -e 's/\([0-9].[0-9]\+\).*/v\1/') |  | ||||||
| file="cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz" |  | ||||||
|  |  | ||||||
| # Download and install specific CMake version in /usr/local |  | ||||||
| pushd /tmp |  | ||||||
| curl -Os --retry 3 "https://cmake.org/files/${path}/${file}" |  | ||||||
| tar -C /usr/local --strip-components 1 --no-same-owner -zxf cmake-*.tar.gz |  | ||||||
| rm -f cmake-*.tar.gz |  | ||||||
| popd |  | ||||||
| @ -1,112 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| # Optionally install conda |  | ||||||
| if [ -n "$ANACONDA_PYTHON_VERSION" ]; then |  | ||||||
|   BASE_URL="https://repo.anaconda.com/miniconda" |  | ||||||
|  |  | ||||||
|   MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1) |  | ||||||
|  |  | ||||||
|   case "$MAJOR_PYTHON_VERSION" in |  | ||||||
|     2) |  | ||||||
|       CONDA_FILE="Miniconda2-latest-Linux-x86_64.sh" |  | ||||||
|     ;; |  | ||||||
|     3) |  | ||||||
|       CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh" |  | ||||||
|     ;; |  | ||||||
|     *) |  | ||||||
|       echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION" |  | ||||||
|       exit 1 |  | ||||||
|       ;; |  | ||||||
|   esac |  | ||||||
|  |  | ||||||
|   mkdir -p /opt/conda |  | ||||||
|   chown jenkins:jenkins /opt/conda |  | ||||||
|  |  | ||||||
|   # Work around bug where devtoolset replaces sudo and breaks it. |  | ||||||
|   if [ -n "$DEVTOOLSET_VERSION" ]; then |  | ||||||
|     SUDO=/bin/sudo |  | ||||||
|   else |  | ||||||
|     SUDO=sudo |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   as_jenkins() { |  | ||||||
|     # NB: unsetting the environment variables works around a conda bug |  | ||||||
|     # https://github.com/conda/conda/issues/6576 |  | ||||||
|     # NB: Pass on PATH and LD_LIBRARY_PATH to sudo invocation |  | ||||||
|     # NB: This must be run from a directory that jenkins has access to, |  | ||||||
|     # works around https://github.com/conda/conda-package-handling/pull/34 |  | ||||||
|     $SUDO -H -u jenkins env -u SUDO_UID -u SUDO_GID -u SUDO_COMMAND -u SUDO_USER env "PATH=$PATH" "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" $* |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   pushd /tmp |  | ||||||
|   wget -q "${BASE_URL}/${CONDA_FILE}" |  | ||||||
|   chmod +x "${CONDA_FILE}" |  | ||||||
|   as_jenkins ./"${CONDA_FILE}" -b -f -p "/opt/conda" |  | ||||||
|   popd |  | ||||||
|  |  | ||||||
|   # NB: Don't do this, rely on the rpath to get it right |  | ||||||
|   #echo "/opt/conda/lib" > /etc/ld.so.conf.d/conda-python.conf |  | ||||||
|   #ldconfig |  | ||||||
|   sed -e 's|PATH="\(.*\)"|PATH="/opt/conda/bin:\1"|g' -i /etc/environment |  | ||||||
|   export PATH="/opt/conda/bin:$PATH" |  | ||||||
|  |  | ||||||
|   # Ensure we run conda in a directory that jenkins has write access to |  | ||||||
|   pushd /opt/conda |  | ||||||
|  |  | ||||||
|   # Track latest conda update |  | ||||||
|   as_jenkins conda update -y -n base conda |  | ||||||
|  |  | ||||||
|   # Install correct Python version |  | ||||||
|   as_jenkins conda install -y python="$ANACONDA_PYTHON_VERSION" |  | ||||||
|  |  | ||||||
|   conda_install() { |  | ||||||
|     # Ensure that the install command doesn't upgrade/downgrade Python |  | ||||||
|     # This should be called as |  | ||||||
|     #   conda_install pkg1 pkg2 ... [-c channel] |  | ||||||
|     as_jenkins conda install -q -y python="$ANACONDA_PYTHON_VERSION" $* |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   pip_install() { |  | ||||||
|     as_jenkins pip install --progress-bar off $* |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README |  | ||||||
|   # DO NOT install cmake here as it would install a version newer than 3.10, but |  | ||||||
|   # we want to pin to version 3.10. |  | ||||||
|   if [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then |  | ||||||
|     # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source |  | ||||||
|     conda_install numpy=1.19.2 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0 |  | ||||||
|   elif [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then |  | ||||||
|     # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source |  | ||||||
|     conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0 |  | ||||||
|   elif [ "$ANACONDA_PYTHON_VERSION" = "3.7" ]; then |  | ||||||
|     # DO NOT install dataclasses if installing python-3.7, since it's part of the python-3.7 core packages |  | ||||||
|     conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six typing_extensions |  | ||||||
|   else |  | ||||||
|     conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six dataclasses typing_extensions |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   # Magma package names are a concatenation of the CUDA major and minor versions, ignoring the revision, |  | ||||||
|   # i.e. the magma-cuda102 package corresponds to CUDA_VERSION=10.2 and CUDA_VERSION=10.2.89 |  | ||||||
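|   # The parameter expansion below strips the first dot and any trailing ".<revision>", e.g. 11.3.1 -> 113. |  | ||||||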
|   if [ -n "$CUDA_VERSION" ]; then |  | ||||||
|     conda_install magma-cuda$(TMP=${CUDA_VERSION/./};echo ${TMP%.*[0-9]}) -c pytorch |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   # TODO: This isn't working atm |  | ||||||
|   conda_install nnpack -c killeent |  | ||||||
|  |  | ||||||
|   # Install some other packages, including those needed for Python test reporting |  | ||||||
|   pip_install -r /opt/conda/requirements-ci.txt |  | ||||||
|  |  | ||||||
|   # Update scikit-learn to a python-3.8 compatible version |  | ||||||
|   if [[ $(python -c "import sys; print(int(sys.version_info >= (3, 8)))") == "1" ]]; then |  | ||||||
|     pip_install -U scikit-learn |  | ||||||
|   else |  | ||||||
|     # Pinned scikit-learn due to https://github.com/scikit-learn/scikit-learn/issues/14485 (affects gcc 5.5 only) |  | ||||||
|     pip_install scikit-learn==0.20.3 |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   popd |  | ||||||
| fi |  | ||||||
| @ -1,18 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| if [[ ${CUDNN_VERSION} == 8 ]]; then |  | ||||||
|     # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement |  | ||||||
|     mkdir tmp_cudnn && cd tmp_cudnn |  | ||||||
|     CUDNN_NAME="cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive" |  | ||||||
|     curl -OLs  https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/${CUDNN_NAME}.tar.xz |  | ||||||
|     tar xf ${CUDNN_NAME}.tar.xz |  | ||||||
|     cp -a ${CUDNN_NAME}/include/* /usr/include/ |  | ||||||
|     cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/ |  | ||||||
|     cp -a ${CUDNN_NAME}/include/* /usr/include/x86_64-linux-gnu/ |  | ||||||
|  |  | ||||||
|     cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/ |  | ||||||
|     cp -a ${CUDNN_NAME}/lib/* /usr/lib/x86_64-linux-gnu/ |  | ||||||
|     cd .. |  | ||||||
|     rm -rf tmp_cudnn |  | ||||||
|     ldconfig |  | ||||||
| fi |  | ||||||
| @ -1,49 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| install_ubuntu() { |  | ||||||
|   apt-get update |  | ||||||
|   apt-get install -y --no-install-recommends \ |  | ||||||
|           libhiredis-dev \ |  | ||||||
|           libleveldb-dev \ |  | ||||||
|           liblmdb-dev \ |  | ||||||
|           libsnappy-dev |  | ||||||
|  |  | ||||||
|   # Cleanup |  | ||||||
|   apt-get autoclean && apt-get clean |  | ||||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |  | ||||||
| } |  | ||||||
|  |  | ||||||
| install_centos() { |  | ||||||
|   # Need EPEL for many packages we depend on. |  | ||||||
|   # See http://fedoraproject.org/wiki/EPEL |  | ||||||
|   yum --enablerepo=extras install -y epel-release |  | ||||||
|  |  | ||||||
|   yum install -y \ |  | ||||||
|       hiredis-devel \ |  | ||||||
|       leveldb-devel \ |  | ||||||
|       lmdb-devel \ |  | ||||||
|       snappy-devel |  | ||||||
|  |  | ||||||
|   # Cleanup |  | ||||||
|   yum clean all |  | ||||||
|   rm -rf /var/cache/yum |  | ||||||
|   rm -rf /var/lib/yum/yumdb |  | ||||||
|   rm -rf /var/lib/yum/history |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Install base packages depending on the base OS |  | ||||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') |  | ||||||
| case "$ID" in |  | ||||||
|   ubuntu) |  | ||||||
|     install_ubuntu |  | ||||||
|     ;; |  | ||||||
|   centos) |  | ||||||
|     install_centos |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     echo "Unable to determine OS..." |  | ||||||
|     exit 1 |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
| @ -1,10 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| [ -n "$DEVTOOLSET_VERSION" ] |  | ||||||
|  |  | ||||||
| yum install -y centos-release-scl |  | ||||||
| yum install -y devtoolset-$DEVTOOLSET_VERSION |  | ||||||
|  |  | ||||||
| echo "source scl_source enable devtoolset-$DEVTOOLSET_VERSION" > "/etc/profile.d/devtoolset-$DEVTOOLSET_VERSION.sh" |  | ||||||
| @ -1,27 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| if [ -n "$GCC_VERSION" ]; then |  | ||||||
|  |  | ||||||
|   # Need the official toolchain repo to get alternate packages |  | ||||||
|   add-apt-repository ppa:ubuntu-toolchain-r/test |  | ||||||
|   apt-get update |  | ||||||
|   if [[ "$UBUNTU_VERSION" == "16.04" && "${GCC_VERSION:0:1}" == "5" ]]; then |  | ||||||
|     apt-get install -y g++-5=5.4.0-6ubuntu1~16.04.12 |  | ||||||
|     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 50 |  | ||||||
|     update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-5 50 |  | ||||||
|     update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-5 50 |  | ||||||
|   else |  | ||||||
|     apt-get install -y g++-$GCC_VERSION |  | ||||||
|     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50 |  | ||||||
|     update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50 |  | ||||||
|     update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-"$GCC_VERSION" 50 |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|  |  | ||||||
|   # Cleanup package manager |  | ||||||
|   apt-get autoclean && apt-get clean |  | ||||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |  | ||||||
|  |  | ||||||
| fi |  | ||||||
| @ -1,34 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| [ -n "$GLIBC_VERSION" ] |  | ||||||
| if [[ -n "$CENTOS_VERSION" ]]; then |  | ||||||
|   [ -n "$DEVTOOLSET_VERSION" ] |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| yum install -y wget sed |  | ||||||
|  |  | ||||||
| mkdir -p /packages && cd /packages |  | ||||||
| wget -q http://ftp.gnu.org/gnu/glibc/glibc-$GLIBC_VERSION.tar.gz |  | ||||||
| tar xzf glibc-$GLIBC_VERSION.tar.gz |  | ||||||
| if [[ "$GLIBC_VERSION" == "2.26" ]]; then |  | ||||||
|   cd glibc-$GLIBC_VERSION |  | ||||||
|   sed -i 's/$name ne "nss_test1"/$name ne "nss_test1" \&\& $name ne "nss_test2"/' scripts/test-installation.pl |  | ||||||
|   cd .. |  | ||||||
| fi |  | ||||||
| mkdir -p glibc-$GLIBC_VERSION-build && cd glibc-$GLIBC_VERSION-build |  | ||||||
|  |  | ||||||
| if [[ -n "$CENTOS_VERSION" ]]; then |  | ||||||
|   export PATH=/opt/rh/devtoolset-$DEVTOOLSET_VERSION/root/usr/bin:$PATH |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| ../glibc-$GLIBC_VERSION/configure --prefix=/usr CFLAGS='-Wno-stringop-truncation -Wno-format-overflow -Wno-restrict -Wno-format-truncation -g -O2' |  | ||||||
| make -j$(nproc) |  | ||||||
| make install |  | ||||||
|  |  | ||||||
| # Cleanup |  | ||||||
| rm -rf /packages |  | ||||||
| rm -rf /var/cache/yum/* |  | ||||||
| rm -rf /var/lib/rpm/__db.* |  | ||||||
| yum clean all |  | ||||||
| @ -1,6 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| mkdir -p /usr/local/include |  | ||||||
| cp jni.h /usr/local/include |  | ||||||
| @ -1,23 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| if [ -n "$KATEX" ]; then |  | ||||||
|   apt-get update |  | ||||||
|   # Ignore error if gpg-agent doesn't exist (for Ubuntu 16.04) |  | ||||||
|   apt-get install -y gpg-agent || : |  | ||||||
|  |  | ||||||
|   curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash - |  | ||||||
|   sudo apt-get install -y nodejs |  | ||||||
|  |  | ||||||
|   curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - |  | ||||||
|   echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list |  | ||||||
|  |  | ||||||
|   apt-get update |  | ||||||
|   apt-get install -y --no-install-recommends yarn |  | ||||||
|   yarn global add katex --prefix /usr/local |  | ||||||
|  |  | ||||||
|   apt-get autoclean && apt-get clean |  | ||||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |  | ||||||
|  |  | ||||||
| fi |  | ||||||
| @ -1,8 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| git clone --branch v1.15 https://github.com/linux-test-project/lcov.git |  | ||||||
| pushd lcov |  | ||||||
| sudo make install   # will be installed in /usr/local/bin/lcov |  | ||||||
| popd |  | ||||||
| @ -1,13 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| [ -n "$NINJA_VERSION" ] |  | ||||||
|  |  | ||||||
| url="https://github.com/ninja-build/ninja/releases/download/v${NINJA_VERSION}/ninja-linux.zip" |  | ||||||
|  |  | ||||||
| pushd /tmp |  | ||||||
| wget --no-verbose --output-document=ninja-linux.zip "$url" |  | ||||||
| unzip ninja-linux.zip -d /usr/local/bin |  | ||||||
| rm -f ninja-linux.zip |  | ||||||
| popd |  | ||||||
| @ -1,10 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| sudo apt-get update |  | ||||||
| # also install ssh to avoid error of: |  | ||||||
| # -------------------------------------------------------------------------- |  | ||||||
| # The value of the MCA parameter "plm_rsh_agent" was set to a path |  | ||||||
| # that could not be found: |  | ||||||
| #   plm_rsh_agent: ssh : rsh |  | ||||||
| sudo apt-get install -y ssh |  | ||||||
| sudo apt-get install -y --allow-downgrades --allow-change-held-packages openmpi-bin libopenmpi-dev |  | ||||||
| @ -1,14 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| OPENSSL=openssl-1.1.1k |  | ||||||
|  |  | ||||||
| wget -q -O "${OPENSSL}.tar.gz" "https://ossci-linux.s3.amazonaws.com/${OPENSSL}.tar.gz" |  | ||||||
| tar xf "${OPENSSL}.tar.gz" |  | ||||||
| cd "${OPENSSL}" |  | ||||||
| ./config --prefix=/opt/openssl -d '-Wl,--enable-new-dtags,-rpath,$(LIBRPATH)' |  | ||||||
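| # The single quotes keep $(LIBRPATH) unexpanded here; it is presumably substituted by |  | ||||||
| # OpenSSL's generated Makefile so the installed libraries embed the library dir as their rpath. |  | ||||||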
| # NOTE: openssl's install step errors out when run with the -j option, so install serially |  | ||||||
| make -j6; make install_sw |  | ||||||
| cd .. |  | ||||||
| rm -rf "${OPENSSL}" |  | ||||||
| @ -1,56 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| # This function installs protobuf 3.17 |  | ||||||
| install_protobuf_317() { |  | ||||||
|   pb_dir="/usr/temp_pb_install_dir" |  | ||||||
|   mkdir -p $pb_dir |  | ||||||
|  |  | ||||||
|   # On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or |  | ||||||
|   # else it will fail with |  | ||||||
|   #   g++: error: ./../lib64/crti.o: No such file or directory |  | ||||||
|   ln -s /usr/lib64 "$pb_dir/lib64" |  | ||||||
|  |  | ||||||
|   curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz" |  | ||||||
|   tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-all-3.17.3.tar.gz |  | ||||||
|   # -j6 to balance memory usage and speed. |  | ||||||
|   # naked `-j` seems to use too much memory. |  | ||||||
|   pushd "$pb_dir" && ./configure && make -j6 && make -j6 check && sudo make -j6 install && sudo ldconfig |  | ||||||
|   popd |  | ||||||
|   rm -rf $pb_dir |  | ||||||
| } |  | ||||||
|  |  | ||||||
| install_ubuntu() { |  | ||||||
|   # Ubuntu 14.04 has cmake 2.8.12 as the default option, so we will |  | ||||||
|   # install cmake3 here and use cmake3. |  | ||||||
|   apt-get update |  | ||||||
|   if [[ "$UBUNTU_VERSION" == 14.04 ]]; then |  | ||||||
|     apt-get install -y --no-install-recommends cmake3 |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   # Cleanup |  | ||||||
|   apt-get autoclean && apt-get clean |  | ||||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |  | ||||||
|  |  | ||||||
|   install_protobuf_317 |  | ||||||
| } |  | ||||||
|  |  | ||||||
| install_centos() { |  | ||||||
|   install_protobuf_317 |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Install base packages depending on the base OS |  | ||||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') |  | ||||||
| case "$ID" in |  | ||||||
|   ubuntu) |  | ||||||
|     install_ubuntu |  | ||||||
|     ;; |  | ||||||
|   centos) |  | ||||||
|     install_centos |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     echo "Unable to determine OS..." |  | ||||||
|     exit 1 |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
| @ -1,160 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| install_magma() { |  | ||||||
|     # "install" hipMAGMA into /opt/rocm/magma by copying after build |  | ||||||
|     git clone https://bitbucket.org/icl/magma.git |  | ||||||
|     pushd magma |  | ||||||
|     # Fixes memory leaks of magma found while executing linalg UTs |  | ||||||
|     git checkout 5959b8783e45f1809812ed96ae762f38ee701972 |  | ||||||
|     cp make.inc-examples/make.inc.hip-gcc-mkl make.inc |  | ||||||
|     echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc |  | ||||||
|     echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc |  | ||||||
|     echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc |  | ||||||
|     export PATH="${PATH}:/opt/rocm/bin" |  | ||||||
|     if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then |  | ||||||
|       amdgpu_targets=`echo $PYTORCH_ROCM_ARCH | sed 's/;/ /g'` |  | ||||||
|     else |  | ||||||
|       amdgpu_targets=`rocm_agent_enumerator | grep -v gfx000 | sort -u | xargs` |  | ||||||
|     fi |  | ||||||
|     for arch in $amdgpu_targets; do |  | ||||||
|       echo "DEVCCFLAGS += --amdgpu-target=$arch" >> make.inc |  | ||||||
|     done |  | ||||||
|     # Building with hipcc and the openmp flag may cause isnan() not to be found on __device__; depending on context, the compiler may attempt to match the host definition instead |  | ||||||
|     sed -i 's/^FOPENMP/#FOPENMP/g' make.inc |  | ||||||
|     make -f make.gen.hipMAGMA -j $(nproc) |  | ||||||
|     LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda |  | ||||||
|     make testing/testing_dgemm -j $(nproc) MKLROOT=/opt/conda |  | ||||||
|     popd |  | ||||||
|     mv magma /opt/rocm |  | ||||||
| } |  | ||||||
|  |  | ||||||
| ver() { |  | ||||||
|     printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' '); |  | ||||||
| } |  | ||||||
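| # ver turns a dotted version into a fixed-width number so plain -ge/-lt comparisons work, |  | ||||||
| # e.g. `ver 4.5.2` -> "  4005002000" and `ver 4.5` -> "  4005000000". |  | ||||||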
|  |  | ||||||
| # Map ROCm version to AMDGPU version |  | ||||||
| declare -A AMDGPU_VERSIONS=( ["4.5.2"]="21.40.2" ["5.0"]="21.50" ["5.1.1"]="22.10.1" ) |  | ||||||
|  |  | ||||||
| install_ubuntu() { |  | ||||||
|     apt-get update |  | ||||||
|     if [[ $UBUNTU_VERSION == 18.04 ]]; then |  | ||||||
|       # gpg-agent is not available by default on 18.04 |  | ||||||
|       apt-get install -y --no-install-recommends gpg-agent |  | ||||||
|     fi |  | ||||||
|     if [[ $UBUNTU_VERSION == 20.04 ]]; then |  | ||||||
|       # gpg-agent is not available by default on 20.04 |  | ||||||
|       apt-get install -y --no-install-recommends gpg-agent |  | ||||||
|     fi |  | ||||||
|     apt-get install -y kmod |  | ||||||
|     apt-get install -y wget |  | ||||||
|  |  | ||||||
|     # Need the libc++1 and libc++abi1 libraries to allow torch._C to load at runtime |  | ||||||
|     apt-get install -y libc++1 |  | ||||||
|     apt-get install -y libc++abi1 |  | ||||||
|  |  | ||||||
|     if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then |  | ||||||
|         # Add amdgpu repository |  | ||||||
|         UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'` |  | ||||||
|         local amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/ubuntu" |  | ||||||
|         echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     ROCM_REPO="ubuntu" |  | ||||||
|     if [[ $(ver $ROCM_VERSION) -lt $(ver 4.2) ]]; then |  | ||||||
|         ROCM_REPO="xenial" |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     # Add rocm repository |  | ||||||
|     wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add - |  | ||||||
|     local rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}" |  | ||||||
|     echo "deb [arch=amd64] ${rocm_baseurl} ${ROCM_REPO} main" > /etc/apt/sources.list.d/rocm.list |  | ||||||
|     apt-get update --allow-insecure-repositories |  | ||||||
|  |  | ||||||
|     DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ |  | ||||||
|                    rocm-dev \ |  | ||||||
|                    rocm-utils \ |  | ||||||
|                    rocm-libs \ |  | ||||||
|                    rccl \ |  | ||||||
|                    rocprofiler-dev \ |  | ||||||
|                    roctracer-dev |  | ||||||
|  |  | ||||||
|     # Precompiled MIOpen kernels were added in ROCm 3.5; search for all unversioned packages. |  | ||||||
|     # A failed search would abort this script (set -e), so append `|| true` to tolerate that case. |  | ||||||
|     MIOPENKERNELS=$(apt-cache search --names-only miopenkernels | awk '{print $1}' | grep -F -v . || true) |  | ||||||
|     if [[ "x${MIOPENKERNELS}" = x ]]; then |  | ||||||
|       echo "miopenkernels package not available" |  | ||||||
|     else |  | ||||||
|       DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENKERNELS} |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     install_magma |  | ||||||
|  |  | ||||||
|     # Cleanup |  | ||||||
|     apt-get autoclean && apt-get clean |  | ||||||
|     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |  | ||||||
| } |  | ||||||
|  |  | ||||||
| install_centos() { |  | ||||||
|  |  | ||||||
|   yum update -y |  | ||||||
|   yum install -y kmod |  | ||||||
|   yum install -y wget |  | ||||||
|   yum install -y openblas-devel |  | ||||||
|  |  | ||||||
|   yum install -y epel-release |  | ||||||
|   yum install -y dkms kernel-headers-`uname -r` kernel-devel-`uname -r` |  | ||||||
|  |  | ||||||
|   if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then |  | ||||||
|       # Add amdgpu repository |  | ||||||
|       local amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/rhel/7.9/main/x86_64" |  | ||||||
|       echo "[AMDGPU]" > /etc/yum.repos.d/amdgpu.repo |  | ||||||
|       echo "name=AMDGPU" >> /etc/yum.repos.d/amdgpu.repo |  | ||||||
|       echo "baseurl=${amdgpu_baseurl}" >> /etc/yum.repos.d/amdgpu.repo |  | ||||||
|       echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo |  | ||||||
|       echo "gpgcheck=1" >> /etc/yum.repos.d/amdgpu.repo |  | ||||||
|       echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/amdgpu.repo |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   local rocm_baseurl="http://repo.radeon.com/rocm/yum/${ROCM_VERSION}" |  | ||||||
|   echo "[ROCm]" > /etc/yum.repos.d/rocm.repo |  | ||||||
|   echo "name=ROCm" >> /etc/yum.repos.d/rocm.repo |  | ||||||
|   echo "baseurl=${rocm_baseurl}" >> /etc/yum.repos.d/rocm.repo |  | ||||||
|   echo "enabled=1" >> /etc/yum.repos.d/rocm.repo |  | ||||||
|   echo "gpgcheck=1" >> /etc/yum.repos.d/rocm.repo |  | ||||||
|   echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/rocm.repo |  | ||||||
|  |  | ||||||
|   yum update -y |  | ||||||
|  |  | ||||||
|   yum install -y \ |  | ||||||
|                    rocm-dev \ |  | ||||||
|                    rocm-utils \ |  | ||||||
|                    rocm-libs \ |  | ||||||
|                    rccl \ |  | ||||||
|                    rocprofiler-dev \ |  | ||||||
|                    roctracer-dev |  | ||||||
|  |  | ||||||
|   install_magma |  | ||||||
|  |  | ||||||
|   # Cleanup |  | ||||||
|   yum clean all |  | ||||||
|   rm -rf /var/cache/yum |  | ||||||
|   rm -rf /var/lib/yum/yumdb |  | ||||||
|   rm -rf /var/lib/yum/history |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Install Python packages depending on the base OS |  | ||||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') |  | ||||||
| case "$ID" in |  | ||||||
|   ubuntu) |  | ||||||
|     install_ubuntu |  | ||||||
|     ;; |  | ||||||
|   centos) |  | ||||||
|     install_centos |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     echo "Unable to determine OS..." |  | ||||||
|     exit 1 |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
| @ -1,24 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| [ -n "${SWIFTSHADER}" ] |  | ||||||
|  |  | ||||||
| retry () { |  | ||||||
|     $*  || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| _https_amazon_aws=https://ossci-android.s3.amazonaws.com |  | ||||||
|  |  | ||||||
| # SwiftShader |  | ||||||
| _swiftshader_dir=/var/lib/jenkins/swiftshader |  | ||||||
| _swiftshader_file_targz=swiftshader-abe07b943-prebuilt.tar.gz |  | ||||||
| mkdir -p $_swiftshader_dir |  | ||||||
| _tmp_swiftshader_targz="/tmp/${_swiftshader_file_targz}" |  | ||||||
|  |  | ||||||
| curl --silent --show-error --location --fail --retry 3 \ |  | ||||||
|   --output "${_tmp_swiftshader_targz}" "$_https_amazon_aws/${_swiftshader_file_targz}" |  | ||||||
|  |  | ||||||
| tar -C "${_swiftshader_dir}" -xzf "${_tmp_swiftshader_targz}" |  | ||||||
|  |  | ||||||
| export VK_ICD_FILENAMES="${_swiftshader_dir}/build/Linux/vk_swiftshader_icd.json" |  | ||||||
| @ -1,14 +0,0 @@ | |||||||
| apt-get update |  | ||||||
| apt-get install -y sudo wget libboost-dev libboost-test-dev libboost-program-options-dev libboost-filesystem-dev libboost-thread-dev libevent-dev automake libtool flex bison pkg-config g++ libssl-dev |  | ||||||
| wget https://www-us.apache.org/dist/thrift/0.12.0/thrift-0.12.0.tar.gz |  | ||||||
| tar -xvf thrift-0.12.0.tar.gz |  | ||||||
| cd thrift-0.12.0 |  | ||||||
| for file in ./compiler/cpp/Makefile*; do |  | ||||||
|   sed -i 's/\-Werror//' $file |  | ||||||
| done |  | ||||||
| ./bootstrap.sh |  | ||||||
| ./configure --without-php --without-java --without-python --without-nodejs --without-go --without-ruby |  | ||||||
| sudo make |  | ||||||
| sudo make install |  | ||||||
| cd .. |  | ||||||
| rm thrift-0.12.0.tar.gz |  | ||||||
| @ -1,26 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| # Mirror jenkins user in container |  | ||||||
| # The jenkins user should have the same user-id as ec2-user |  | ||||||
| echo "jenkins:x:1000:1000::/var/lib/jenkins:" >> /etc/passwd |  | ||||||
| echo "jenkins:x:1000:" >> /etc/group |  | ||||||
| # Needed on focal or newer |  | ||||||
| echo "jenkins:*:19110:0:99999:7:::" >>/etc/shadow |  | ||||||
|  |  | ||||||
| # Create $HOME |  | ||||||
| mkdir -p /var/lib/jenkins |  | ||||||
| chown jenkins:jenkins /var/lib/jenkins |  | ||||||
| mkdir -p /var/lib/jenkins/.ccache |  | ||||||
| chown jenkins:jenkins /var/lib/jenkins/.ccache |  | ||||||
|  |  | ||||||
| # Allow writing to /usr/local (for make install) |  | ||||||
| chown jenkins:jenkins /usr/local |  | ||||||
|  |  | ||||||
| # Allow sudo |  | ||||||
| # TODO: Maybe we shouldn't |  | ||||||
| echo 'jenkins ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/jenkins |  | ||||||
|  |  | ||||||
| # Test that sudo works |  | ||||||
| sudo -u jenkins sudo -v |  | ||||||
| @ -1,45 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| install_ubuntu() { |  | ||||||
|   apt-get update |  | ||||||
|   apt-get install -y --no-install-recommends \ |  | ||||||
|           libopencv-dev \ |  | ||||||
|           libavcodec-dev |  | ||||||
|  |  | ||||||
|   # Cleanup |  | ||||||
|   apt-get autoclean && apt-get clean |  | ||||||
|   rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* |  | ||||||
| } |  | ||||||
|  |  | ||||||
| install_centos() { |  | ||||||
|   # Need EPEL for many packages we depend on. |  | ||||||
|   # See http://fedoraproject.org/wiki/EPEL |  | ||||||
|   yum --enablerepo=extras install -y epel-release |  | ||||||
|  |  | ||||||
|   yum install -y \ |  | ||||||
|       opencv-devel \ |  | ||||||
|       ffmpeg-devel |  | ||||||
|  |  | ||||||
|   # Cleanup |  | ||||||
|   yum clean all |  | ||||||
|   rm -rf /var/cache/yum |  | ||||||
|   rm -rf /var/lib/yum/yumdb |  | ||||||
|   rm -rf /var/lib/yum/history |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Install base packages depending on the base OS |  | ||||||
| ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') |  | ||||||
| case "$ID" in |  | ||||||
|   ubuntu) |  | ||||||
|     install_ubuntu |  | ||||||
|     ;; |  | ||||||
|   centos) |  | ||||||
|     install_centos |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     echo "Unable to determine OS..." |  | ||||||
|     exit 1 |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
| @ -1,24 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| [ -n "${VULKAN_SDK_VERSION}" ] |  | ||||||
|  |  | ||||||
| retry () { |  | ||||||
|     $*  || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| _vulkansdk_dir=/var/lib/jenkins/vulkansdk |  | ||||||
| _tmp_vulkansdk_targz=/tmp/vulkansdk.tar.gz |  | ||||||
|  |  | ||||||
| curl \ |  | ||||||
|   --silent \ |  | ||||||
|   --show-error \ |  | ||||||
|   --location \ |  | ||||||
|   --fail \ |  | ||||||
|   --retry 3 \ |  | ||||||
|   --output "${_tmp_vulkansdk_targz}" "https://ossci-android.s3.amazonaws.com/vulkansdk-linux-x86_64-${VULKAN_SDK_VERSION}.tar.gz" |  | ||||||
|  |  | ||||||
| mkdir -p "${_vulkansdk_dir}" |  | ||||||
| tar -C "${_vulkansdk_dir}" -xzf "${_tmp_vulkansdk_targz}" --strip-components 1 |  | ||||||
| rm -rf "${_tmp_vulkansdk_targz}" |  | ||||||
										
											
File diff suppressed because it is too large
							| @ -1,212 +0,0 @@ | |||||||
| # Python dependencies required for unit tests |  | ||||||
|  |  | ||||||
| #awscli==1.6 #this breaks some platforms |  | ||||||
| #Description: AWS command line interface |  | ||||||
| #Pinned versions: 1.6 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| boto3==1.19.12 |  | ||||||
| #Description: AWS SDK for python |  | ||||||
| #Pinned versions: 1.19.12, 1.16.34 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| click |  | ||||||
| #Description: Command Line Interface Creation Kit |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| coremltools==5.0b5 |  | ||||||
| #Description: Apple framework for ML integration |  | ||||||
| #Pinned versions: 5.0b5 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #dataclasses #this breaks some platforms |  | ||||||
| #Description: Provides decorators for auto adding special methods to user classes |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| expecttest==0.1.3 |  | ||||||
| #Description: method for writing tests where the test framework auto-populates |  | ||||||
| # the expected output based on previous runs |  | ||||||
| #Pinned versions: 0.1.3 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| flatbuffers==2.0 |  | ||||||
| #Description: cross platform serialization library |  | ||||||
| #Pinned versions: 2.0 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #future #this breaks linux-bionic-rocm4.5-py3.7 |  | ||||||
| #Description: compatibility layer between python 2 and python 3 |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| hypothesis==4.53.2 |  | ||||||
| # Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136 |  | ||||||
| #Description: advanced library for generating parametrized tests |  | ||||||
| #Pinned versions: 3.44.6, 4.53.2 |  | ||||||
| #test that import: test_xnnpack_integration.py, test_pruning_op.py, test_nn.py |  | ||||||
|  |  | ||||||
| junitparser==2.1.1 |  | ||||||
| #Description: junitparser handles JUnit/xUnit Result XML files |  | ||||||
| #Pinned versions: 2.1.1 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| librosa>=0.6.2 |  | ||||||
| #Description: A python package for music and audio analysis |  | ||||||
| #Pinned versions: >=0.6.2 |  | ||||||
| #test that import: test_spectral_ops.py |  | ||||||
|  |  | ||||||
| #mkl #this breaks linux-bionic-rocm4.5-py3.7 |  | ||||||
| #Description: Intel oneAPI Math Kernel Library |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: test_profiler.py, test_public_bindings.py, test_testing.py, |  | ||||||
| #test_nn.py, test_mkldnn.py, test_jit.py, test_fx_experimental.py, |  | ||||||
| #test_autograd.py |  | ||||||
|  |  | ||||||
| #mkl-devel |  | ||||||
| # see mkl |  | ||||||
|  |  | ||||||
| #mock # breaks ci/circleci: docker-pytorch-linux-xenial-py3-clang5-android-ndk-r19c |  | ||||||
| #Description: A testing library that allows you to replace parts of your |  | ||||||
| #system under test with mock objects |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: test_module_init.py, test_modules.py, test_nn.py, |  | ||||||
| #test_testing.py |  | ||||||
|  |  | ||||||
| #MonkeyType # breaks pytorch-xla-linux-bionic-py3.7-clang8 |  | ||||||
| #Description: collects runtime types of function arguments and return |  | ||||||
| #values, and can automatically generate stub files |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| mypy==0.812 |  | ||||||
| # Pin MyPy version because new errors are likely to appear with each release |  | ||||||
| #Description: linter |  | ||||||
| #Pinned versions: 0.812 |  | ||||||
| #test that import: test_typing.py, test_type_hints.py |  | ||||||
|  |  | ||||||
| #networkx |  | ||||||
| #Description: creation, manipulation, and study of |  | ||||||
| #the structure, dynamics, and functions of complex networks |  | ||||||
| #Pinned versions: 2.0 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #ninja |  | ||||||
| #Description: build system.  Note that installing it from |  | ||||||
| #here breaks things, so it is commented out |  | ||||||
| #Pinned versions: 1.10.0.post1 |  | ||||||
| #test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py |  | ||||||
|  |  | ||||||
| numba==0.49.0 ; python_version < "3.9" |  | ||||||
| numba==0.54.1 ; python_version == "3.9" |  | ||||||
| #Description: Just-In-Time Compiler for Numerical Functions |  | ||||||
| #Pinned versions: 0.54.1, 0.49.0, <=0.49.1 |  | ||||||
| #test that import: test_numba_integration.py |  | ||||||
| #For numba issue see https://github.com/pytorch/pytorch/issues/51511 |  | ||||||
|  |  | ||||||
| #numpy |  | ||||||
| #Description: Provides N-dimensional arrays and linear algebra |  | ||||||
| #Pinned versions: 1.20 |  | ||||||
| #test that import: test_view_ops.py, test_unary_ufuncs.py, test_type_promotion.py, |  | ||||||
| #test_type_info.py, test_torch.py, test_tensorexpr_pybind.py, test_tensorexpr.py, |  | ||||||
| #test_tensorboard.py, test_tensor_creation_ops.py, test_static_runtime.py, |  | ||||||
| #test_spectral_ops.py, test_sort_and_select.py, test_shape_ops.py, |  | ||||||
| #test_segment_reductions.py, test_reductions.py, test_pruning_op.py, |  | ||||||
| #test_overrides.py, test_numpy_interop.py, test_numba_integration.py |  | ||||||
| #test_nn.py, test_namedtensor.py, test_linalg.py, test_jit_cuda_fuser.py, |  | ||||||
| #test_jit.py, test_indexing.py, test_datapipe.py, test_dataloader.py, |  | ||||||
| #test_binary_ufuncs.py |  | ||||||
|  |  | ||||||
| #onnxruntime |  | ||||||
| #Description: scoring engine for Open Neural Network Exchange (ONNX) models |  | ||||||
| #Pinned versions: 1.9.0 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #pillow |  | ||||||
| #Description:  Python Imaging Library fork |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #protobuf |  | ||||||
| #Description:  Google’s data interchange format |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: test_tensorboard.py |  | ||||||
|  |  | ||||||
| psutil |  | ||||||
| #Description: information on running processes and system utilization |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: test_profiler.py, test_openmp.py, test_dataloader.py |  | ||||||
|  |  | ||||||
| pytest |  | ||||||
| #Description: testing framework |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: test_typing.py, test_cpp_extensions_aot.py, run_test.py |  | ||||||
|  |  | ||||||
| #pytest-benchmark |  | ||||||
| #Description: fixture for benchmarking code |  | ||||||
| #Pinned versions: 3.2.3 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #pytest-sugar |  | ||||||
| #Description: shows failures and errors instantly |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #PyYAML |  | ||||||
| #Description: data serialization format |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #requests |  | ||||||
| #Description: HTTP library |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: test_type_promotion.py |  | ||||||
|  |  | ||||||
| #rich |  | ||||||
| #Description: rich text and beautiful formatting in the terminal |  | ||||||
| #Pinned versions: 10.9.0 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| scikit-image |  | ||||||
| #Description: image processing routines |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: test_nn.py |  | ||||||
|  |  | ||||||
| #scikit-learn |  | ||||||
| #Description: machine learning package |  | ||||||
| #Pinned versions: 0.20.3 |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| scipy==1.6.3 |  | ||||||
| # Pin SciPy because of failing distribution tests (see #60347) |  | ||||||
| #Description: scientific python |  | ||||||
| #Pinned versions: 1.6.3 |  | ||||||
| #test that import: test_unary_ufuncs.py, test_torch.py, test_tensor_creation_ops.py |  | ||||||
| #test_spectral_ops.py, test_sparse_csr.py, test_reductions.py, test_nn.py |  | ||||||
| #test_linalg.py, test_binary_ufuncs.py |  | ||||||
|  |  | ||||||
| #tabulate |  | ||||||
| #Description: Pretty-print tabular data |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| tb-nightly |  | ||||||
| #Description: TensorBoard |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #typing-extensions |  | ||||||
| #Description: type hints for python |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| #virtualenv |  | ||||||
| #Description: virtual environment for python |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
|  |  | ||||||
| unittest-xml-reporting<=3.2.0,>=2.0.0 |  | ||||||
| #Description: saves unit test results to xml |  | ||||||
| #Pinned versions: |  | ||||||
| #test that import: |  | ||||||
| @ -1,110 +0,0 @@ | |||||||
| ARG UBUNTU_VERSION |  | ||||||
| ARG CUDA_VERSION |  | ||||||
| ARG IMAGE_NAME |  | ||||||
|  |  | ||||||
| FROM ${IMAGE_NAME} |  | ||||||
|  |  | ||||||
| ARG UBUNTU_VERSION |  | ||||||
| ARG CUDA_VERSION |  | ||||||
|  |  | ||||||
| ENV DEBIAN_FRONTEND noninteractive |  | ||||||
|  |  | ||||||
| # Install common dependencies (so that this step can be cached separately) |  | ||||||
| ARG EC2 |  | ||||||
| ADD ./common/install_base.sh install_base.sh |  | ||||||
| RUN bash ./install_base.sh && rm install_base.sh |  | ||||||
|  |  | ||||||
| # Install user |  | ||||||
| ADD ./common/install_user.sh install_user.sh |  | ||||||
| RUN bash ./install_user.sh && rm install_user.sh |  | ||||||
|  |  | ||||||
| # Install katex |  | ||||||
| ARG KATEX |  | ||||||
| ADD ./common/install_katex.sh install_katex.sh |  | ||||||
| RUN bash ./install_katex.sh && rm install_katex.sh |  | ||||||
|  |  | ||||||
| # Install conda and other packages (e.g., numpy, pytest) |  | ||||||
| ENV PATH /opt/conda/bin:$PATH |  | ||||||
| ARG ANACONDA_PYTHON_VERSION |  | ||||||
| ADD requirements-ci.txt /opt/conda/requirements-ci.txt |  | ||||||
| ADD ./common/install_conda.sh install_conda.sh |  | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh |  | ||||||
| RUN rm /opt/conda/requirements-ci.txt |  | ||||||
|  |  | ||||||
| # Install gcc |  | ||||||
| ARG GCC_VERSION |  | ||||||
| ADD ./common/install_gcc.sh install_gcc.sh |  | ||||||
| RUN bash ./install_gcc.sh && rm install_gcc.sh |  | ||||||
|  |  | ||||||
| # Install clang |  | ||||||
| ARG CLANG_VERSION |  | ||||||
| ADD ./common/install_clang.sh install_clang.sh |  | ||||||
| RUN bash ./install_clang.sh && rm install_clang.sh |  | ||||||
|  |  | ||||||
| # (optional) Install protobuf for ONNX |  | ||||||
| ARG PROTOBUF |  | ||||||
| ADD ./common/install_protobuf.sh install_protobuf.sh |  | ||||||
| RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi |  | ||||||
| RUN rm install_protobuf.sh |  | ||||||
| ENV INSTALLED_PROTOBUF ${PROTOBUF} |  | ||||||
|  |  | ||||||
| # (optional) Install database packages like LMDB and LevelDB |  | ||||||
| ARG DB |  | ||||||
| ADD ./common/install_db.sh install_db.sh |  | ||||||
| RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi |  | ||||||
| RUN rm install_db.sh |  | ||||||
| ENV INSTALLED_DB ${DB} |  | ||||||
|  |  | ||||||
| # (optional) Install vision packages like OpenCV and ffmpeg |  | ||||||
| ARG VISION |  | ||||||
| ADD ./common/install_vision.sh install_vision.sh |  | ||||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi |  | ||||||
| RUN rm install_vision.sh |  | ||||||
| ENV INSTALLED_VISION ${VISION} |  | ||||||
|  |  | ||||||
| ADD ./common/install_openssl.sh install_openssl.sh |  | ||||||
| ENV OPENSSL_ROOT_DIR /opt/openssl |  | ||||||
| RUN bash ./install_openssl.sh |  | ||||||
|  |  | ||||||
| # (optional) Install non-default CMake version |  | ||||||
| ARG CMAKE_VERSION |  | ||||||
| ADD ./common/install_cmake.sh install_cmake.sh |  | ||||||
| RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi |  | ||||||
| RUN rm install_cmake.sh |  | ||||||
|  |  | ||||||
| # Install ccache/sccache (do this last, so we get priority in PATH) |  | ||||||
| ADD ./common/install_cache.sh install_cache.sh |  | ||||||
| ENV PATH /opt/cache/bin:$PATH |  | ||||||
| RUN bash ./install_cache.sh && rm install_cache.sh |  | ||||||
| ENV CMAKE_CUDA_COMPILER_LAUNCHER=/opt/cache/bin/sccache |  | ||||||
|  |  | ||||||
| # Add jni.h for java host build |  | ||||||
| ADD ./common/install_jni.sh install_jni.sh |  | ||||||
| ADD ./java/jni.h jni.h |  | ||||||
| RUN bash ./install_jni.sh && rm install_jni.sh |  | ||||||
|  |  | ||||||
| # Install Open MPI for CUDA |  | ||||||
| ADD ./common/install_openmpi.sh install_openmpi.sh |  | ||||||
| RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi |  | ||||||
| RUN rm install_openmpi.sh |  | ||||||
|  |  | ||||||
| # Include BUILD_ENVIRONMENT environment variable in image |  | ||||||
| ARG BUILD_ENVIRONMENT |  | ||||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} |  | ||||||
|  |  | ||||||
| # AWS specific CUDA build guidance |  | ||||||
| ENV TORCH_CUDA_ARCH_LIST Maxwell |  | ||||||
| ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all" |  | ||||||
| ENV CUDA_PATH /usr/local/cuda |  | ||||||
|  |  | ||||||
| # Install LLVM dev version (Defined in the pytorch/builder github repository) |  | ||||||
| COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm |  | ||||||
|  |  | ||||||
| # Install CUDNN |  | ||||||
| ARG CUDNN_VERSION |  | ||||||
| ADD ./common/install_cudnn.sh install_cudnn.sh |  | ||||||
| RUN if [ "${CUDNN_VERSION}" -eq 8 ]; then bash install_cudnn.sh; fi |  | ||||||
| RUN rm install_cudnn.sh |  | ||||||
|  |  | ||||||
| USER jenkins |  | ||||||
| CMD ["bash"] |  | ||||||
							
								
								
									
.circleci/docker/ubuntu-rocm/.gitignore (1 line, vendored)
							| @ -1 +0,0 @@ | |||||||
| *.sh |  | ||||||
| @ -1,98 +0,0 @@ | |||||||
| ARG UBUNTU_VERSION |  | ||||||
|  |  | ||||||
| FROM ubuntu:${UBUNTU_VERSION} |  | ||||||
|  |  | ||||||
| ARG UBUNTU_VERSION |  | ||||||
|  |  | ||||||
| ENV DEBIAN_FRONTEND noninteractive |  | ||||||
|  |  | ||||||
| # Set AMD gpu targets to build for |  | ||||||
| ARG PYTORCH_ROCM_ARCH |  | ||||||
| ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH} |  | ||||||
|  |  | ||||||
| # Install common dependencies (so that this step can be cached separately) |  | ||||||
| ARG EC2 |  | ||||||
| ADD ./common/install_base.sh install_base.sh |  | ||||||
| RUN bash ./install_base.sh && rm install_base.sh |  | ||||||
|  |  | ||||||
| # Install clang |  | ||||||
| ARG LLVMDEV |  | ||||||
| ARG CLANG_VERSION |  | ||||||
| ADD ./common/install_clang.sh install_clang.sh |  | ||||||
| RUN bash ./install_clang.sh && rm install_clang.sh |  | ||||||
|  |  | ||||||
| # Install user |  | ||||||
| ADD ./common/install_user.sh install_user.sh |  | ||||||
| RUN bash ./install_user.sh && rm install_user.sh |  | ||||||
|  |  | ||||||
| # Install conda and other packages (e.g., numpy, pytest) |  | ||||||
| ENV PATH /opt/conda/bin:$PATH |  | ||||||
| ARG ANACONDA_PYTHON_VERSION |  | ||||||
| ADD requirements-ci.txt /opt/conda/requirements-ci.txt |  | ||||||
| ADD ./common/install_conda.sh install_conda.sh |  | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh |  | ||||||
| RUN rm /opt/conda/requirements-ci.txt |  | ||||||
|  |  | ||||||
| # Install gcc |  | ||||||
| ARG GCC_VERSION |  | ||||||
| ADD ./common/install_gcc.sh install_gcc.sh |  | ||||||
| RUN bash ./install_gcc.sh && rm install_gcc.sh |  | ||||||
|  |  | ||||||
| # (optional) Install protobuf for ONNX |  | ||||||
| ARG PROTOBUF |  | ||||||
| ADD ./common/install_protobuf.sh install_protobuf.sh |  | ||||||
| RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi |  | ||||||
| RUN rm install_protobuf.sh |  | ||||||
| ENV INSTALLED_PROTOBUF ${PROTOBUF} |  | ||||||
|  |  | ||||||
| # (optional) Install database packages like LMDB and LevelDB |  | ||||||
| ARG DB |  | ||||||
| ADD ./common/install_db.sh install_db.sh |  | ||||||
| RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi |  | ||||||
| RUN rm install_db.sh |  | ||||||
| ENV INSTALLED_DB ${DB} |  | ||||||
|  |  | ||||||
| # (optional) Install vision packages like OpenCV and ffmpeg |  | ||||||
| ARG VISION |  | ||||||
| ADD ./common/install_vision.sh install_vision.sh |  | ||||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi |  | ||||||
| RUN rm install_vision.sh |  | ||||||
| ENV INSTALLED_VISION ${VISION} |  | ||||||
|  |  | ||||||
| # Install rocm |  | ||||||
| ARG ROCM_VERSION |  | ||||||
| ADD ./common/install_rocm.sh install_rocm.sh |  | ||||||
| RUN bash ./install_rocm.sh |  | ||||||
| RUN rm install_rocm.sh |  | ||||||
| ENV PATH /opt/rocm/bin:$PATH |  | ||||||
| ENV PATH /opt/rocm/hcc/bin:$PATH |  | ||||||
| ENV PATH /opt/rocm/hip/bin:$PATH |  | ||||||
| ENV PATH /opt/rocm/opencl/bin:$PATH |  | ||||||
| ENV PATH /opt/rocm/llvm/bin:$PATH |  | ||||||
| ENV MAGMA_HOME /opt/rocm/magma |  | ||||||
| ENV LANG C.UTF-8 |  | ||||||
| ENV LC_ALL C.UTF-8 |  | ||||||
|  |  | ||||||
| # (optional) Install non-default CMake version |  | ||||||
| ARG CMAKE_VERSION |  | ||||||
| ADD ./common/install_cmake.sh install_cmake.sh |  | ||||||
| RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi |  | ||||||
| RUN rm install_cmake.sh |  | ||||||
|  |  | ||||||
| # (optional) Install non-default Ninja version |  | ||||||
| ARG NINJA_VERSION |  | ||||||
| ADD ./common/install_ninja.sh install_ninja.sh |  | ||||||
| RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi |  | ||||||
| RUN rm install_ninja.sh |  | ||||||
|  |  | ||||||
| # Install ccache/sccache (do this last, so we get priority in PATH) |  | ||||||
| ADD ./common/install_cache.sh install_cache.sh |  | ||||||
| ENV PATH /opt/cache/bin:$PATH |  | ||||||
| RUN bash ./install_cache.sh && rm install_cache.sh |  | ||||||
|  |  | ||||||
| # Include BUILD_ENVIRONMENT environment variable in image |  | ||||||
| ARG BUILD_ENVIRONMENT |  | ||||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} |  | ||||||
|  |  | ||||||
| USER jenkins |  | ||||||
| CMD ["bash"] |  | ||||||
| @ -1,133 +0,0 @@ | |||||||
| ARG UBUNTU_VERSION |  | ||||||
|  |  | ||||||
| FROM ubuntu:${UBUNTU_VERSION} |  | ||||||
|  |  | ||||||
| ARG UBUNTU_VERSION |  | ||||||
|  |  | ||||||
| ENV DEBIAN_FRONTEND noninteractive |  | ||||||
|  |  | ||||||
| # Install common dependencies (so that this step can be cached separately) |  | ||||||
| ARG EC2 |  | ||||||
| ADD ./common/install_base.sh install_base.sh |  | ||||||
| RUN bash ./install_base.sh && rm install_base.sh |  | ||||||
|  |  | ||||||
| # Install clang |  | ||||||
| ARG LLVMDEV |  | ||||||
| ARG CLANG_VERSION |  | ||||||
| ADD ./common/install_clang.sh install_clang.sh |  | ||||||
| RUN bash ./install_clang.sh && rm install_clang.sh |  | ||||||
|  |  | ||||||
| # (optional) Install thrift. |  | ||||||
| ARG THRIFT |  | ||||||
| ADD ./common/install_thrift.sh install_thrift.sh |  | ||||||
| RUN if [ -n "${THRIFT}" ]; then bash ./install_thrift.sh; fi |  | ||||||
| RUN rm install_thrift.sh |  | ||||||
| ENV INSTALLED_THRIFT ${THRIFT} |  | ||||||
|  |  | ||||||
| # Install user |  | ||||||
| ADD ./common/install_user.sh install_user.sh |  | ||||||
| RUN bash ./install_user.sh && rm install_user.sh |  | ||||||
|  |  | ||||||
| # Install katex |  | ||||||
| ARG KATEX |  | ||||||
| ADD ./common/install_katex.sh install_katex.sh |  | ||||||
| RUN bash ./install_katex.sh && rm install_katex.sh |  | ||||||
|  |  | ||||||
| # Install conda and other packages (e.g., numpy, pytest) |  | ||||||
| ENV PATH /opt/conda/bin:$PATH |  | ||||||
| ARG ANACONDA_PYTHON_VERSION |  | ||||||
| ADD requirements-ci.txt /opt/conda/requirements-ci.txt |  | ||||||
| ADD ./common/install_conda.sh install_conda.sh |  | ||||||
| RUN bash ./install_conda.sh && rm install_conda.sh |  | ||||||
| RUN rm /opt/conda/requirements-ci.txt |  | ||||||
|  |  | ||||||
| # Install gcc |  | ||||||
| ARG GCC_VERSION |  | ||||||
| ADD ./common/install_gcc.sh install_gcc.sh |  | ||||||
| RUN bash ./install_gcc.sh && rm install_gcc.sh |  | ||||||
|  |  | ||||||
| # Install lcov for C++ code coverage |  | ||||||
| ADD ./common/install_lcov.sh install_lcov.sh |  | ||||||
| RUN  bash ./install_lcov.sh && rm install_lcov.sh |  | ||||||
|  |  | ||||||
| # (optional) Install protobuf for ONNX |  | ||||||
| ARG PROTOBUF |  | ||||||
| ADD ./common/install_protobuf.sh install_protobuf.sh |  | ||||||
| RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi |  | ||||||
| RUN rm install_protobuf.sh |  | ||||||
| ENV INSTALLED_PROTOBUF ${PROTOBUF} |  | ||||||
|  |  | ||||||
| # (optional) Install database packages like LMDB and LevelDB |  | ||||||
| ARG DB |  | ||||||
| ADD ./common/install_db.sh install_db.sh |  | ||||||
| RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi |  | ||||||
| RUN rm install_db.sh |  | ||||||
| ENV INSTALLED_DB ${DB} |  | ||||||
|  |  | ||||||
| # (optional) Install vision packages like OpenCV and ffmpeg |  | ||||||
| ARG VISION |  | ||||||
| ADD ./common/install_vision.sh install_vision.sh |  | ||||||
| RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi |  | ||||||
| RUN rm install_vision.sh |  | ||||||
| ENV INSTALLED_VISION ${VISION} |  | ||||||
|  |  | ||||||
| # (optional) Install Android NDK |  | ||||||
| ARG ANDROID |  | ||||||
| ARG ANDROID_NDK |  | ||||||
| ARG GRADLE_VERSION |  | ||||||
| ADD ./common/install_android.sh install_android.sh |  | ||||||
| ADD ./android/AndroidManifest.xml AndroidManifest.xml |  | ||||||
| ADD ./android/build.gradle build.gradle |  | ||||||
| RUN if [ -n "${ANDROID}" ]; then bash ./install_android.sh; fi |  | ||||||
| RUN rm install_android.sh |  | ||||||
| RUN rm AndroidManifest.xml |  | ||||||
| RUN rm build.gradle |  | ||||||
| ENV INSTALLED_ANDROID ${ANDROID} |  | ||||||
|  |  | ||||||
| # (optional) Install Vulkan SDK |  | ||||||
| ARG VULKAN_SDK_VERSION |  | ||||||
| ADD ./common/install_vulkan_sdk.sh install_vulkan_sdk.sh |  | ||||||
| RUN if [ -n "${VULKAN_SDK_VERSION}" ]; then bash ./install_vulkan_sdk.sh; fi |  | ||||||
| RUN rm install_vulkan_sdk.sh |  | ||||||
|  |  | ||||||
| # (optional) Install swiftshader |  | ||||||
| ARG SWIFTSHADER |  | ||||||
| ADD ./common/install_swiftshader.sh install_swiftshader.sh |  | ||||||
| RUN if [ -n "${SWIFTSHADER}" ]; then bash ./install_swiftshader.sh; fi |  | ||||||
| RUN rm install_swiftshader.sh |  | ||||||
|  |  | ||||||
| # (optional) Install non-default CMake version |  | ||||||
| ARG CMAKE_VERSION |  | ||||||
| ADD ./common/install_cmake.sh install_cmake.sh |  | ||||||
| RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi |  | ||||||
| RUN rm install_cmake.sh |  | ||||||
|  |  | ||||||
| # (optional) Install non-default Ninja version |  | ||||||
| ARG NINJA_VERSION |  | ||||||
| ADD ./common/install_ninja.sh install_ninja.sh |  | ||||||
| RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi |  | ||||||
| RUN rm install_ninja.sh |  | ||||||
|  |  | ||||||
| ADD ./common/install_openssl.sh install_openssl.sh |  | ||||||
| RUN bash ./install_openssl.sh |  | ||||||
| ENV OPENSSL_ROOT_DIR /opt/openssl |  | ||||||
|  |  | ||||||
| # Install ccache/sccache (do this last, so we get priority in PATH) |  | ||||||
| ADD ./common/install_cache.sh install_cache.sh |  | ||||||
| ENV PATH /opt/cache/bin:$PATH |  | ||||||
| RUN bash ./install_cache.sh && rm install_cache.sh |  | ||||||
|  |  | ||||||
| # Add jni.h for java host build |  | ||||||
| ADD ./common/install_jni.sh install_jni.sh |  | ||||||
| ADD ./java/jni.h jni.h |  | ||||||
| RUN bash ./install_jni.sh && rm install_jni.sh |  | ||||||
|  |  | ||||||
| # Include BUILD_ENVIRONMENT environment variable in image |  | ||||||
| ARG BUILD_ENVIRONMENT |  | ||||||
| ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} |  | ||||||
|  |  | ||||||
| # Install LLVM dev version (Defined in the pytorch/builder github repository) |  | ||||||
| COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm |  | ||||||
|  |  | ||||||
| USER jenkins |  | ||||||
| CMD ["bash"] |  | ||||||
| @ -1,39 +0,0 @@ | |||||||
| #!/usr/bin/env python3 |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| import subprocess |  | ||||||
| import sys |  | ||||||
| import tempfile |  | ||||||
|  |  | ||||||
| import generate_config_yml |  | ||||||
|  |  | ||||||
|  |  | ||||||
| CHECKED_IN_FILE = "config.yml" |  | ||||||
| REGENERATION_SCRIPT = "regenerate.sh" |  | ||||||
|  |  | ||||||
| PARENT_DIR = os.path.basename(os.path.dirname(os.path.abspath(__file__))) |  | ||||||
| README_PATH = os.path.join(PARENT_DIR, "README.md") |  | ||||||
|  |  | ||||||
| ERROR_MESSAGE_TEMPLATE = """ |  | ||||||
| The checked-in CircleCI "%s" file does not match what was generated by the scripts. |  | ||||||
| Please re-run the "%s" script in the "%s" directory and commit the result. See "%s" for more information. |  | ||||||
| """ |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def check_consistency(): |  | ||||||
|  |  | ||||||
|     _, temp_filename = tempfile.mkstemp("-generated-config.yml") |  | ||||||
|  |  | ||||||
|     with open(temp_filename, "w") as fh: |  | ||||||
|         generate_config_yml.stitch_sources(fh) |  | ||||||
|  |  | ||||||
|     try: |  | ||||||
|         subprocess.check_call(["cmp", temp_filename, CHECKED_IN_FILE]) |  | ||||||
|     except subprocess.CalledProcessError: |  | ||||||
|         sys.exit(ERROR_MESSAGE_TEMPLATE % (CHECKED_IN_FILE, REGENERATION_SCRIPT, PARENT_DIR, README_PATH)) |  | ||||||
|     finally: |  | ||||||
|         os.remove(temp_filename) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == "__main__": |  | ||||||
|     check_consistency() |  | ||||||
| @ -1,191 +0,0 @@ | |||||||
| #!/usr/bin/env python3 |  | ||||||
|  |  | ||||||
| """ |  | ||||||
| This script is the source of truth for config.yml. |  | ||||||
| Please see README.md in this directory for details. |  | ||||||
| """ |  | ||||||
|  |  | ||||||
| import os |  | ||||||
| import shutil |  | ||||||
| import sys |  | ||||||
| from collections import namedtuple |  | ||||||
|  |  | ||||||
| import cimodel.data.simple.docker_definitions |  | ||||||
| import cimodel.data.simple.mobile_definitions |  | ||||||
| import cimodel.data.simple.nightly_ios |  | ||||||
| import cimodel.data.simple.anaconda_prune_defintions |  | ||||||
| import cimodel.lib.miniutils as miniutils |  | ||||||
| import cimodel.lib.miniyaml as miniyaml |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class File(object): |  | ||||||
|     """ |  | ||||||
|     Verbatim copy the contents of a file into config.yml |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def __init__(self, filename): |  | ||||||
|         self.filename = filename |  | ||||||
|  |  | ||||||
|     def write(self, output_filehandle): |  | ||||||
|         with open(os.path.join("verbatim-sources", self.filename)) as fh: |  | ||||||
|             shutil.copyfileobj(fh, output_filehandle) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class FunctionGen(namedtuple("FunctionGen", "function depth")): |  | ||||||
|     __slots__ = () |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Treegen(FunctionGen): |  | ||||||
|     """ |  | ||||||
|     Insert the content of a YAML tree into config.yml |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def write(self, output_filehandle): |  | ||||||
|         miniyaml.render(output_filehandle, self.function(), self.depth) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Listgen(FunctionGen): |  | ||||||
|     """ |  | ||||||
|     Insert the content of a YAML list into config.yml |  | ||||||
|     """ |  | ||||||
|  |  | ||||||
|     def write(self, output_filehandle): |  | ||||||
|         miniyaml.render(output_filehandle, self.function(), self.depth) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def horizontal_rule(): |  | ||||||
|     return "#" * 78 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| class Header(object): |  | ||||||
|     def __init__(self, title, summary=None): |  | ||||||
|         self.title = title |  | ||||||
|         self.summary_lines = summary or [] |  | ||||||
|  |  | ||||||
|     def write(self, output_filehandle): |  | ||||||
|         text_lines = [self.title] + self.summary_lines |  | ||||||
|         comment_lines = ["# " + x for x in text_lines] |  | ||||||
|         lines = miniutils.sandwich([horizontal_rule()], comment_lines) |  | ||||||
|  |  | ||||||
|         for line in filter(None, lines): |  | ||||||
|             output_filehandle.write(line + "\n") |  | ||||||
|  |  | ||||||
| def _for_all_items(items, functor) -> None: |  | ||||||
|     if isinstance(items, list): |  | ||||||
|         for item in items: |  | ||||||
|             _for_all_items(item, functor) |  | ||||||
|     if isinstance(items, dict) and len(items) == 1: |  | ||||||
|         item_type, item = next(iter(items.items())) |  | ||||||
|         functor(item_type, item) |  | ||||||
|  |  | ||||||
| def filter_master_only_jobs(items): |  | ||||||
|     def _is_main_or_master_item(item): |  | ||||||
|         filters = item.get('filters', None) |  | ||||||
|         branches = filters.get('branches', None) if filters is not None else None |  | ||||||
|         branches_only = branches.get('only', None) if branches is not None else None |  | ||||||
|         return ('main' in branches_only or 'master' in branches_only) if branches_only is not None else False |  | ||||||
|  |  | ||||||
|     master_deps = set() |  | ||||||
|  |  | ||||||
|     def _save_requires_if_master(item_type, item): |  | ||||||
|         requires = item.get('requires', None) |  | ||||||
|         item_name = item.get("name", None) |  | ||||||
|         if not isinstance(requires, list): |  | ||||||
|             return |  | ||||||
|         if _is_main_or_master_item(item) or item_name in master_deps: |  | ||||||
|             master_deps.update([n.strip('"') for n in requires]) |  | ||||||
|  |  | ||||||
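|     # Keep only jobs that are gated to main/master, or that such a job (transitively) requires, |  | ||||||
|     # and strip their branch filters so they can run in the master-only workflow |  | ||||||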
|     def _do_filtering(items): |  | ||||||
|         if isinstance(items, list): |  | ||||||
|             rc = [_do_filtering(item) for item in items] |  | ||||||
|             return [item for item in rc if len(item if item is not None else []) > 0] |  | ||||||
|         assert isinstance(items, dict) and len(items) == 1 |  | ||||||
|         item_type, item = next(iter(items.items())) |  | ||||||
|         item_name = item.get("name", None) |  | ||||||
|         item_name = item_name.strip('"') if item_name is not None else None |  | ||||||
|         if not _is_main_or_master_item(item) and item_name not in master_deps: |  | ||||||
|             return None |  | ||||||
|         if 'filters' in item: |  | ||||||
|             item = item.copy() |  | ||||||
|             item.pop('filters') |  | ||||||
|         return {item_type: item} |  | ||||||
|  |  | ||||||
|     # Scan dependencies twice to pick up nested required jobs, |  | ||||||
|     # i.e. jobs depending on jobs that main-only jobs depend on |  | ||||||
|     _for_all_items(items, _save_requires_if_master) |  | ||||||
|     _for_all_items(items, _save_requires_if_master) |  | ||||||
|     return _do_filtering(items) |  | ||||||
|  |  | ||||||
| def generate_required_docker_images(items): |  | ||||||
|     required_docker_images = set() |  | ||||||
|  |  | ||||||
|     def _requires_docker_image(item_type, item): |  | ||||||
|         requires = item.get('requires', None) |  | ||||||
|         if not isinstance(requires, list): |  | ||||||
|             return |  | ||||||
|         for requirement in requires: |  | ||||||
|             requirement = requirement.replace('"', '') |  | ||||||
|             if requirement.startswith('docker-'): |  | ||||||
|                 required_docker_images.add(requirement) |  | ||||||
|  |  | ||||||
|     _for_all_items(items, _requires_docker_image) |  | ||||||
|     return required_docker_images |  | ||||||
|  |  | ||||||
| def gen_build_workflows_tree(): |  | ||||||
|     build_workflows_functions = [ |  | ||||||
|         cimodel.data.simple.mobile_definitions.get_workflow_jobs, |  | ||||||
|         cimodel.data.simple.nightly_ios.get_workflow_jobs, |  | ||||||
|         cimodel.data.simple.anaconda_prune_defintions.get_workflow_jobs, |  | ||||||
|     ] |  | ||||||
|     build_jobs = [f() for f in build_workflows_functions] |  | ||||||
|     build_jobs.extend( |  | ||||||
|         cimodel.data.simple.docker_definitions.get_workflow_jobs( |  | ||||||
|             # sort for consistency |  | ||||||
|             sorted(generate_required_docker_images(build_jobs)) |  | ||||||
|         ) |  | ||||||
|     ) |  | ||||||
|     master_build_jobs = filter_master_only_jobs(build_jobs) |  | ||||||
|  |  | ||||||
|     rc = { |  | ||||||
|         "workflows": { |  | ||||||
|             "build": { |  | ||||||
|                 "when": r"<< pipeline.parameters.run_build >>", |  | ||||||
|                 "jobs": build_jobs, |  | ||||||
|             }, |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
|     if len(master_build_jobs) > 0: |  | ||||||
|         rc["workflows"]["master_build"] = { |  | ||||||
|             "when": r"<< pipeline.parameters.run_master_build >>", |  | ||||||
|             "jobs": master_build_jobs, |  | ||||||
|         } |  | ||||||
|     return rc |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Order of this list matters to the generated config.yml. |  | ||||||
| YAML_SOURCES = [ |  | ||||||
|     File("header-section.yml"), |  | ||||||
|     File("commands.yml"), |  | ||||||
|     File("nightly-binary-build-defaults.yml"), |  | ||||||
|     Header("Build parameters"), |  | ||||||
|     File("build-parameters/pytorch-build-params.yml"), |  | ||||||
|     File("build-parameters/binary-build-params.yml"), |  | ||||||
|     Header("Job specs"), |  | ||||||
|     File("job-specs/binary-job-specs.yml"), |  | ||||||
|     File("job-specs/job-specs-custom.yml"), |  | ||||||
|     File("job-specs/binary_update_htmls.yml"), |  | ||||||
|     File("job-specs/binary-build-tests.yml"), |  | ||||||
|     File("job-specs/docker_jobs.yml"), |  | ||||||
|     Header("Workflows"), |  | ||||||
|     Treegen(gen_build_workflows_tree, 0), |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def stitch_sources(output_filehandle): |  | ||||||
|     for f in YAML_SOURCES: |  | ||||||
|         f.write(output_filehandle) |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if __name__ == "__main__": |  | ||||||
|  |  | ||||||
|     stitch_sources(sys.stdout) |  | ||||||
| @ -1,5 +0,0 @@ | |||||||
| cd $PSScriptRoot; |  | ||||||
| $NewFile = New-TemporaryFile; |  | ||||||
| python generate_config_yml.py > $NewFile.name |  | ||||||
| (Get-Content $NewFile.name -Raw).TrimEnd().Replace("`r`n","`n") | Set-Content config.yml -Force |  | ||||||
| Remove-Item $NewFile.name |  | ||||||
| @ -1,17 +0,0 @@ | |||||||
| #!/bin/bash -e |  | ||||||
|  |  | ||||||
| # Allows this script to be invoked from any directory: |  | ||||||
| cd "$(dirname "$0")" |  | ||||||
|  |  | ||||||
| UNCOMMIT_CHANGE=$(git status -s | grep " config.yml" | wc -l | xargs) |  | ||||||
| if [[ $UNCOMMIT_CHANGE != 0 ]]; then |  | ||||||
|     OLD_FILE=$(mktemp) |  | ||||||
|     cp config.yml "$OLD_FILE" |  | ||||||
|     echo "Uncommitted change detected in .circleci/config.yml" |  | ||||||
|     echo "It has been backed up to $OLD_FILE" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| NEW_FILE=$(mktemp) |  | ||||||
| ./generate_config_yml.py > "$NEW_FILE" |  | ||||||
| cp "$NEW_FILE" config.yml |  | ||||||
| echo "New config generated in .circleci/config.yml" |  | ||||||
| @ -1,4 +0,0 @@ | |||||||
| All the scripts in this directory are callable from `~/workspace/.circleci/scripts/foo.sh`. |  | ||||||
| Don't try to call them as `.circleci/scripts/foo.sh`; that won't |  | ||||||
| (necessarily) work.  See Note [Workspace for CircleCI scripts] in |  | ||||||
| job-specs-setup.yml for more details. |  | ||||||
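|  |  | ||||||
| As a minimal illustration (the script name below is only an example of one of the |  | ||||||
| scripts kept in this directory; the snippet is a sketch, not copied from any real job): |  | ||||||
|  |  | ||||||
| ```sh |  | ||||||
| # From inside a CircleCI job step, call the workspace copy of the script: |  | ||||||
| ~/workspace/.circleci/scripts/binary_checkout.sh |  | ||||||
| # not the repo-relative path, which won't necessarily resolve from the job's working dir: |  | ||||||
| # .circleci/scripts/binary_checkout.sh |  | ||||||
| ``` |  | ||||||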
| @ -1,69 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
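| # Retry the given command up to five times, sleeping 1, 2, 4 and 8 seconds between attempts |  | ||||||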
| retry () { |  | ||||||
|     $*  || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # This step runs on multiple executors with different envfile locations |  | ||||||
| if [[ "$(uname)" == Darwin ]]; then |  | ||||||
|   # macos executor (builds and tests) |  | ||||||
|   workdir="/Users/distiller/project" |  | ||||||
| elif [[ "$OSTYPE" == "msys" ]]; then |  | ||||||
|   # windows executor (builds and tests) |  | ||||||
|   rm -rf /c/w |  | ||||||
|   ln -s "/c/Users/circleci/project" /c/w |  | ||||||
|   workdir="/c/w" |  | ||||||
| elif [[ -d "/home/circleci/project" ]]; then |  | ||||||
|   # machine executor (binary tests) |  | ||||||
|   workdir="/home/circleci/project" |  | ||||||
| else |  | ||||||
|   # docker executor (binary builds) |  | ||||||
|   workdir="/" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # It is very important that this stays in sync with binary_populate_env.sh |  | ||||||
| if [[ "$OSTYPE" == "msys" ]]; then |  | ||||||
|   # We need to make the paths as short as possible on Windows |  | ||||||
|   export PYTORCH_ROOT="$workdir/p" |  | ||||||
|   export BUILDER_ROOT="$workdir/b" |  | ||||||
| else |  | ||||||
|   export PYTORCH_ROOT="$workdir/pytorch" |  | ||||||
|   export BUILDER_ROOT="$workdir/builder" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Try to extract PR number from branch if not already set |  | ||||||
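| # (e.g. a hypothetical branch named "pull/12345" would yield CIRCLE_PR_NUMBER=12345) |  | ||||||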
| if [[ -z "${CIRCLE_PR_NUMBER:-}" ]]; then |  | ||||||
|   CIRCLE_PR_NUMBER="$(echo ${CIRCLE_BRANCH} | sed -E -n 's/pull\/([0-9]*).*/\1/p')" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Clone the PyTorch branch |  | ||||||
| retry git clone https://github.com/pytorch/pytorch.git "$PYTORCH_ROOT" |  | ||||||
| pushd "$PYTORCH_ROOT" |  | ||||||
| if [[ -n "${CIRCLE_PR_NUMBER:-}" ]]; then |  | ||||||
|   # "smoke" binary build on PRs |  | ||||||
|   git fetch --force origin "pull/${CIRCLE_PR_NUMBER}/head:remotes/origin/pull/${CIRCLE_PR_NUMBER}" |  | ||||||
|   git reset --hard "$CIRCLE_SHA1" |  | ||||||
|   git checkout -q -B "$CIRCLE_BRANCH" |  | ||||||
|   git reset --hard "$CIRCLE_SHA1" |  | ||||||
| elif [[ -n "${CIRCLE_SHA1:-}" ]]; then |  | ||||||
|   # Scheduled workflows & "smoke" binary build on master on PR merges |  | ||||||
|   DEFAULT_BRANCH="$(git remote show $CIRCLE_REPOSITORY_URL | awk '/HEAD branch/ {print $NF}')" |  | ||||||
|   git reset --hard "$CIRCLE_SHA1" |  | ||||||
|   git checkout -q -B $DEFAULT_BRANCH |  | ||||||
| else |  | ||||||
|   echo "Can't tell what to checkout" |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
| retry git submodule update --init --recursive --jobs 0 |  | ||||||
| echo "Using Pytorch from " |  | ||||||
| git --no-pager log --max-count 1 |  | ||||||
| popd |  | ||||||
|  |  | ||||||
| # Clone the builder repo (release/1.12 branch) |  | ||||||
| retry git clone -q https://github.com/pytorch/builder.git -b release/1.12 "$BUILDER_ROOT" |  | ||||||
| pushd "$BUILDER_ROOT" |  | ||||||
| echo "Using builder from " |  | ||||||
| git --no-pager log --max-count 1 |  | ||||||
| popd |  | ||||||
| @ -1,44 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
| # This step runs on multiple executors with different envfile locations |  | ||||||
| if [[ "$(uname)" == Darwin ]]; then |  | ||||||
|   envfile="/Users/distiller/project/env" |  | ||||||
| elif [[ -d "/home/circleci/project" ]]; then |  | ||||||
|   # machine executor (binary tests) |  | ||||||
|   envfile="/home/circleci/project/env" |  | ||||||
| else |  | ||||||
|   # docker executor (binary builds) |  | ||||||
|   envfile="/env" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # TODO this is super hacky and ugly. Basically, the binary_update_html job does |  | ||||||
| # not have an env file, since it does not call binary_populate_env.sh, since it |  | ||||||
| # does not have a BUILD_ENVIRONMENT. So for this one case, which we detect by a |  | ||||||
| # lack of an env file, we manually export the environment variables that we |  | ||||||
| # need to install miniconda |  | ||||||
| if [[ ! -f "$envfile" ]]; then |  | ||||||
|   MINICONDA_ROOT="/home/circleci/project/miniconda" |  | ||||||
|   workdir="/home/circleci/project" |  | ||||||
|   retry () { |  | ||||||
|       $*  || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) |  | ||||||
|   } |  | ||||||
|   export -f retry |  | ||||||
| else |  | ||||||
|   source "$envfile" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| conda_sh="$workdir/install_miniconda.sh" |  | ||||||
| if [[ "$(uname)" == Darwin ]]; then |  | ||||||
|   curl --retry 3 -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh |  | ||||||
| else |  | ||||||
|   curl --retry 3 -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh |  | ||||||
| fi |  | ||||||
| chmod +x "$conda_sh" |  | ||||||
| "$conda_sh" -b -p "$MINICONDA_ROOT" |  | ||||||
| rm -f "$conda_sh" |  | ||||||
|  |  | ||||||
| # We can't actually add miniconda to the PATH in the envfile, because that |  | ||||||
| # breaks 'unbuffer' in Mac jobs. This is probably because conda comes with |  | ||||||
| # a tclsh, which then gets inserted before the tclsh needed in /usr/bin |  | ||||||
| @ -1,47 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -ex -o pipefail |  | ||||||
|  |  | ||||||
| echo "" |  | ||||||
| echo "DIR: $(pwd)" |  | ||||||
| WORKSPACE=/Users/distiller/workspace |  | ||||||
| PROJ_ROOT=/Users/distiller/project |  | ||||||
| export TCLLIBPATH="/usr/local/lib" |  | ||||||
|  |  | ||||||
| # Install conda |  | ||||||
| curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh |  | ||||||
| chmod +x ~/conda.sh |  | ||||||
| /bin/bash ~/conda.sh -b -p ~/anaconda |  | ||||||
| export PATH="~/anaconda/bin:${PATH}" |  | ||||||
| source ~/anaconda/bin/activate |  | ||||||
|  |  | ||||||
| # Install dependencies |  | ||||||
| conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests typing_extensions --yes |  | ||||||
| conda install -c conda-forge valgrind --yes |  | ||||||
| export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} |  | ||||||
|  |  | ||||||
| # sync submodules |  | ||||||
| cd ${PROJ_ROOT} |  | ||||||
| git submodule sync |  | ||||||
| git submodule update --init --recursive --jobs 0 |  | ||||||
|  |  | ||||||
| # run build script |  | ||||||
| chmod a+x ${PROJ_ROOT}/scripts/build_ios.sh |  | ||||||
| echo "########################################################" |  | ||||||
| cat ${PROJ_ROOT}/scripts/build_ios.sh |  | ||||||
| echo "########################################################" |  | ||||||
| echo "IOS_ARCH: ${IOS_ARCH}" |  | ||||||
| echo "IOS_PLATFORM: ${IOS_PLATFORM}" |  | ||||||
| echo "USE_PYTORCH_METAL: ${USE_PYTORCH_METAL}" |  | ||||||
| echo "USE_COREML_DELEGATE: ${USE_COREML_DELEGATE}" |  | ||||||
| export IOS_ARCH=${IOS_ARCH} |  | ||||||
| export IOS_PLATFORM=${IOS_PLATFORM} |  | ||||||
| export USE_PYTORCH_METAL=${USE_PYTORCH_METAL} |  | ||||||
| export USE_COREML_DELEGATE=${USE_COREML_DELEGATE} |  | ||||||
| unbuffer ${PROJ_ROOT}/scripts/build_ios.sh 2>&1 | ts |  | ||||||
|  |  | ||||||
| #store the binary |  | ||||||
| cd ${WORKSPACE} |  | ||||||
| DEST_DIR=${WORKSPACE}/ios |  | ||||||
| mkdir -p ${DEST_DIR} |  | ||||||
| cp -R ${PROJ_ROOT}/build_ios/install ${DEST_DIR} |  | ||||||
| mv ${DEST_DIR}/install ${DEST_DIR}/${IOS_ARCH} |  | ||||||
| @ -1,30 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -ex -o pipefail |  | ||||||
|  |  | ||||||
| echo "" |  | ||||||
| echo "DIR: $(pwd)" |  | ||||||
| PROJ_ROOT=/Users/distiller/project |  | ||||||
| cd ${PROJ_ROOT}/ios/TestApp |  | ||||||
| # install fastlane |  | ||||||
| sudo gem install bundler && bundle install |  | ||||||
| # install certificates |  | ||||||
| echo "${IOS_CERT_KEY_2022}" >> cert.txt |  | ||||||
| base64 --decode cert.txt -o Certificates.p12 |  | ||||||
| rm cert.txt |  | ||||||
| bundle exec fastlane install_root_cert |  | ||||||
| bundle exec fastlane install_dev_cert |  | ||||||
| # install the provisioning profile |  | ||||||
| PROFILE=PyTorch_CI_2022.mobileprovision |  | ||||||
| PROVISIONING_PROFILES=~/Library/MobileDevice/Provisioning\ Profiles |  | ||||||
| mkdir -pv "${PROVISIONING_PROFILES}" |  | ||||||
| cd "${PROVISIONING_PROFILES}" |  | ||||||
| echo "${IOS_SIGN_KEY_2022}" >> cert.txt |  | ||||||
| base64 --decode cert.txt -o ${PROFILE} |  | ||||||
| rm cert.txt |  | ||||||
| # run the ruby build script |  | ||||||
| if ! [ -x "$(command -v xcodebuild)" ]; then |  | ||||||
|     echo 'Error: xcodebuild is not installed.' |  | ||||||
|     exit 1 |  | ||||||
| fi |  | ||||||
| PROFILE=PyTorch_CI_2022 |  | ||||||
| ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM} -c ${PROFILE} -t ${IOS_DEV_TEAM_ID} |  | ||||||
| @ -1,75 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -ex -o pipefail |  | ||||||
|  |  | ||||||
| echo "" |  | ||||||
| echo "DIR: $(pwd)" |  | ||||||
| WORKSPACE=/Users/distiller/workspace |  | ||||||
| PROJ_ROOT=/Users/distiller/project |  | ||||||
| ARTIFACTS_DIR=${WORKSPACE}/ios |  | ||||||
| ls ${ARTIFACTS_DIR} |  | ||||||
| ZIP_DIR=${WORKSPACE}/zip |  | ||||||
| mkdir -p ${ZIP_DIR}/install/lib |  | ||||||
| mkdir -p ${ZIP_DIR}/src |  | ||||||
| # copy header files |  | ||||||
| cp -R ${ARTIFACTS_DIR}/arm64/include ${ZIP_DIR}/install/ |  | ||||||
| # build a FAT binary |  | ||||||
| cd ${ZIP_DIR}/install/lib |  | ||||||
| target_libs=(libc10.a libclog.a libcpuinfo.a libeigen_blas.a libpthreadpool.a libpytorch_qnnpack.a libtorch_cpu.a libtorch.a libXNNPACK.a) |  | ||||||
| for lib in ${target_libs[*]} |  | ||||||
| do |  | ||||||
|     if [ -f "${ARTIFACTS_DIR}/x86_64/lib/${lib}" ] && [ -f "${ARTIFACTS_DIR}/arm64/lib/${lib}" ]; then |  | ||||||
|         libs=("${ARTIFACTS_DIR}/x86_64/lib/${lib}" "${ARTIFACTS_DIR}/arm64/lib/${lib}") |  | ||||||
|         lipo -create "${libs[@]}" -o ${ZIP_DIR}/install/lib/${lib} |  | ||||||
|     fi |  | ||||||
| done |  | ||||||
| lipo -i ${ZIP_DIR}/install/lib/*.a |  | ||||||
| echo "BUILD_LITE_INTERPRETER: ${BUILD_LITE_INTERPRETER}" |  | ||||||
| # copy the umbrella header and license |  | ||||||
| if [ "${BUILD_LITE_INTERPRETER}" == "1" ]; then |  | ||||||
|     cp ${PROJ_ROOT}/ios/LibTorch-Lite.h ${ZIP_DIR}/src/ |  | ||||||
| else |  | ||||||
|     cp ${PROJ_ROOT}/ios/LibTorch.h ${ZIP_DIR}/src/ |  | ||||||
| fi |  | ||||||
| cp ${PROJ_ROOT}/LICENSE ${ZIP_DIR}/ |  | ||||||
| # zip the library |  | ||||||
| export DATE="$(date -u +%Y%m%d)" |  | ||||||
| export IOS_NIGHTLY_BUILD_VERSION="1.12.0.${DATE}" |  | ||||||
| if [ "${BUILD_LITE_INTERPRETER}" == "1" ]; then |  | ||||||
|     # libtorch_lite_ios_nightly_1.11.0.20210810.zip |  | ||||||
|     ZIPFILE="libtorch_lite_ios_nightly_${IOS_NIGHTLY_BUILD_VERSION}.zip" |  | ||||||
| else |  | ||||||
|     ZIPFILE="libtorch_ios_nightly_build.zip" |  | ||||||
| fi |  | ||||||
| cd ${ZIP_DIR} |  | ||||||
| #for testing |  | ||||||
| touch version.txt |  | ||||||
| echo "${IOS_NIGHTLY_BUILD_VERSION}" > version.txt |  | ||||||
| zip -r ${ZIPFILE} install src version.txt LICENSE |  | ||||||
| # upload to aws |  | ||||||
| # Install conda then 'conda install' awscli |  | ||||||
| curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh |  | ||||||
| chmod +x ~/conda.sh |  | ||||||
| /bin/bash ~/conda.sh -b -p ~/anaconda |  | ||||||
| export PATH="~/anaconda/bin:${PATH}" |  | ||||||
| source ~/anaconda/bin/activate |  | ||||||
| conda install -c conda-forge awscli --yes |  | ||||||
| set +x |  | ||||||
| export AWS_ACCESS_KEY_ID=${AWS_S3_ACCESS_KEY_FOR_PYTORCH_BINARY_UPLOAD} |  | ||||||
| export AWS_SECRET_ACCESS_KEY=${AWS_S3_ACCESS_SECRET_FOR_PYTORCH_BINARY_UPLOAD} |  | ||||||
| set +x |  | ||||||
| # echo "AWS KEY: ${AWS_ACCESS_KEY_ID}" |  | ||||||
| # echo "AWS SECRET: ${AWS_SECRET_ACCESS_KEY}" |  | ||||||
| aws s3 cp ${ZIPFILE} s3://ossci-ios-build/ --acl public-read |  | ||||||
|  |  | ||||||
| if [ "${BUILD_LITE_INTERPRETER}" == "1" ]; then |  | ||||||
|     # create a new LibTorch-Lite-Nightly.podspec from the template |  | ||||||
|     echo "cp ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec.template ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec" |  | ||||||
|     cp ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec.template ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec |  | ||||||
|  |  | ||||||
|     # update pod version |  | ||||||
|     sed -i '' -e "s/IOS_NIGHTLY_BUILD_VERSION/${IOS_NIGHTLY_BUILD_VERSION}/g" ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec |  | ||||||
|     cat ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec |  | ||||||
|  |  | ||||||
|     # push the new LibTorch-Lite-Nightly.podspec to CocoaPods |  | ||||||
|     pod trunk push --verbose --allow-warnings --use-libraries --skip-import-validation ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec |  | ||||||
| fi |  | ||||||
| @ -1,34 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| echo "RUNNING ON $(uname -a) WITH $(nproc) CPUS AND $(free -m)" |  | ||||||
| set -eux -o pipefail |  | ||||||
| source /env |  | ||||||
|  |  | ||||||
| # Because most Circle executors only have 20 CPUs, using more causes OOMs w/ Ninja and nvcc parallelization |  | ||||||
| MEMORY_LIMIT_MAX_JOBS=18 |  | ||||||
| NUM_CPUS=$(( $(nproc) - 2 )) |  | ||||||
|  |  | ||||||
| # Defaults here for **binary** linux builds so they can be changed in one place |  | ||||||
| export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))} |  | ||||||
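| # e.g. on a 20-CPU Circle executor: NUM_CPUS = 20 - 2 = 18, so MAX_JOBS = min(18, 18) = 18; |  | ||||||
| # on a 96-CPU machine NUM_CPUS = 94, so MAX_JOBS is capped at MEMORY_LIMIT_MAX_JOBS = 18 |  | ||||||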
|  |  | ||||||
| if [[ "${DESIRED_CUDA}" =~ cu11[0-9] ]]; then |  | ||||||
|   export BUILD_SPLIT_CUDA="ON" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Parse the parameters |  | ||||||
| if [[ "$PACKAGE_TYPE" == 'conda' ]]; then |  | ||||||
|   build_script='conda/build_pytorch.sh' |  | ||||||
| elif [[ "$DESIRED_CUDA" == cpu ]]; then |  | ||||||
|   build_script='manywheel/build_cpu.sh' |  | ||||||
| elif [[ "$DESIRED_CUDA" == *"rocm"* ]]; then |  | ||||||
|   build_script='manywheel/build_rocm.sh' |  | ||||||
| else |  | ||||||
|   build_script='manywheel/build.sh' |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ "$CIRCLE_BRANCH" == "main" ]] || [[ "$CIRCLE_BRANCH" == "master" ]] || [[ "$CIRCLE_BRANCH" == release/* ]]; then |  | ||||||
|   export BUILD_DEBUG_INFO=1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Build the package |  | ||||||
| SKIP_ALL_TESTS=1 "/builder/$build_script" |  | ||||||
| @ -1,119 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| OUTPUT_SCRIPT=${OUTPUT_SCRIPT:-/home/circleci/project/ci_test_script.sh} |  | ||||||
|  |  | ||||||
| # only source if file exists |  | ||||||
| if [[ -f /home/circleci/project/env ]]; then |  | ||||||
|   source /home/circleci/project/env |  | ||||||
| fi |  | ||||||
| cat >"${OUTPUT_SCRIPT}" <<EOL |  | ||||||
| # =================== The following code will be executed inside Docker container =================== |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
| retry () { |  | ||||||
|     "\$@"  || (sleep 1 && "\$@") || (sleep 2 && "\$@") |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Source binary env file here if exists |  | ||||||
| if [[ -e "${BINARY_ENV_FILE:-/nofile}" ]]; then |  | ||||||
|   source "${BINARY_ENV_FILE:-/nofile}" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| python_nodot="\$(echo $DESIRED_PYTHON | tr -d m.u)" |  | ||||||
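| # e.g. DESIRED_PYTHON=3.7m gives python_nodot=37; DESIRED_PYTHON=3.10 gives 310 |  | ||||||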
|  |  | ||||||
| # Set up Python |  | ||||||
| if [[ "$PACKAGE_TYPE" == conda ]]; then |  | ||||||
|   retry conda create -qyn testenv python="$DESIRED_PYTHON" |  | ||||||
|   source activate testenv >/dev/null |  | ||||||
| elif [[ "$PACKAGE_TYPE" != libtorch ]]; then |  | ||||||
|   python_path="/opt/python/cp\$python_nodot-cp\${python_nodot}" |  | ||||||
|   # Prior to Python 3.8 paths were suffixed with an 'm' |  | ||||||
|   if [[ -d  "\${python_path}/bin" ]]; then |  | ||||||
|     export PATH="\${python_path}/bin:\$PATH" |  | ||||||
|   elif [[ -d "\${python_path}m/bin" ]]; then |  | ||||||
|     export PATH="\${python_path}m/bin:\$PATH" |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| EXTRA_CONDA_FLAGS="" |  | ||||||
| NUMPY_PIN="" |  | ||||||
| PROTOBUF_PACKAGE="defaults::protobuf" |  | ||||||
| if [[ "\$python_nodot" = *310* ]]; then |  | ||||||
|   EXTRA_CONDA_FLAGS="-c=conda-forge" |  | ||||||
|   # There's an issue with conda channel priority where it'll randomly pick 1.19 over 1.20 |  | ||||||
|   # we set a lower boundary here just to be safe |  | ||||||
|   NUMPY_PIN=">=1.21.2" |  | ||||||
|   PROTOBUF_PACKAGE="protobuf>=3.19.0" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ "\$python_nodot" = *39*  ]]; then |  | ||||||
|   EXTRA_CONDA_FLAGS="-c=conda-forge" |  | ||||||
|   # There's an issue with conda channel priority where it'll randomly pick 1.19 over 1.20 |  | ||||||
|   # we set a lower boundary here just to be safe |  | ||||||
|   NUMPY_PIN=">=1.20" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ "$DESIRED_CUDA" == "cu116" ]]; then |  | ||||||
|   EXTRA_CONDA_FLAGS="-c=conda-forge" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Move debug wheels out of the package dir so they don't get installed |  | ||||||
| mkdir -p /tmp/debug_final_pkgs |  | ||||||
| mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to move" |  | ||||||
|  |  | ||||||
| # Install the package |  | ||||||
| # These network calls should not have 'retry's because they are installing |  | ||||||
| # locally and aren't actually network calls |  | ||||||
| # TODO there is duplicated and inconsistent test-python-env setup across this |  | ||||||
| #   file, builder/smoke_test.sh, and builder/run_tests.sh, and also in the |  | ||||||
| #   conda build scripts themselves. These should really be consolidated |  | ||||||
| # Pick only one package of multiple available (which happens as result of workflow re-runs) |  | ||||||
| pkg="/final_pkgs/\$(ls -1 /final_pkgs|sort|tail -1)" |  | ||||||
| if [[ "$PACKAGE_TYPE" == conda ]]; then |  | ||||||
|   ( |  | ||||||
|     # For some reason conda likes to re-activate the conda environment when attempting this install |  | ||||||
|     # which means that a deactivate is run and some variables might not exist when that happens, |  | ||||||
|     # namely CONDA_MKL_INTERFACE_LAYER_BACKUP from libblas so let's just ignore unbound variables when |  | ||||||
|     # it comes to the conda installation commands |  | ||||||
|     set +u |  | ||||||
|     retry conda install \${EXTRA_CONDA_FLAGS} -yq \ |  | ||||||
|       "numpy\${NUMPY_PIN}" \ |  | ||||||
|       future \ |  | ||||||
|       "mkl>=2018" \ |  | ||||||
|       ninja \ |  | ||||||
|       dataclasses \ |  | ||||||
|       typing-extensions \ |  | ||||||
|       "\${PROTOBUF_PACKAGE}" \ |  | ||||||
|       six |  | ||||||
|     if [[ "$DESIRED_CUDA" == 'cpu' ]]; then |  | ||||||
|       retry conda install -c pytorch -y cpuonly |  | ||||||
|     else |  | ||||||
|       # DESIRED_CUDA is in format cu90 or cu102 |  | ||||||
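|       # e.g. cu90 -> cu_ver=9.0, cu102 -> cu_ver=10.2 |  | ||||||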
|       if [[ "${#DESIRED_CUDA}" == 4 ]]; then |  | ||||||
|         cu_ver="${DESIRED_CUDA:2:1}.${DESIRED_CUDA:3}" |  | ||||||
|       else |  | ||||||
|         cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}" |  | ||||||
|       fi |  | ||||||
|       retry conda install \${EXTRA_CONDA_FLAGS} -yq -c nvidia -c pytorch "cudatoolkit=\${cu_ver}" |  | ||||||
|     fi |  | ||||||
|     conda install \${EXTRA_CONDA_FLAGS} -y "\$pkg" --offline |  | ||||||
|   ) |  | ||||||
| elif [[ "$PACKAGE_TYPE" != libtorch ]]; then |  | ||||||
|   pip install "\$pkg" |  | ||||||
|   retry pip install -q future numpy protobuf typing-extensions six |  | ||||||
| fi |  | ||||||
| if [[ "$PACKAGE_TYPE" == libtorch ]]; then |  | ||||||
|   pkg="\$(ls /final_pkgs/*-latest.zip)" |  | ||||||
|   unzip "\$pkg" -d /tmp |  | ||||||
|   cd /tmp/libtorch |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Test the package |  | ||||||
| /builder/check_binary.sh |  | ||||||
|  |  | ||||||
| # =================== The above code will be executed inside Docker container =================== |  | ||||||
| EOL |  | ||||||
| echo |  | ||||||
| echo |  | ||||||
| echo "The script that will run in the next step is:" |  | ||||||
| cat "${OUTPUT_SCRIPT}" |  | ||||||
| @ -1,19 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
| source "${BINARY_ENV_FILE:-/Users/distiller/project/env}" |  | ||||||
| mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" |  | ||||||
|  |  | ||||||
| if [[ -z "${IS_GHA:-}" ]]; then |  | ||||||
|   export PATH="${workdir:-${HOME}}/miniconda/bin:${PATH}" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Build |  | ||||||
| export USE_PYTORCH_METAL_EXPORT=1 |  | ||||||
| export USE_COREML_DELEGATE=1 |  | ||||||
| if [[ "$PACKAGE_TYPE" == conda ]]; then |  | ||||||
|   "${BUILDER_ROOT}/conda/build_pytorch.sh" |  | ||||||
| else |  | ||||||
|   export TORCH_PACKAGE_NAME="$(echo $TORCH_PACKAGE_NAME | tr '-' '_')" |  | ||||||
|   "${BUILDER_ROOT}/wheel/build_wheel.sh" |  | ||||||
| fi |  | ||||||
| @ -1,34 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
| source "/Users/distiller/project/env" |  | ||||||
| export "PATH=$workdir/miniconda/bin:$PATH" |  | ||||||
| pkg="$workdir/final_pkgs/$(ls $workdir/final_pkgs)" |  | ||||||
|  |  | ||||||
| # Create a new test env |  | ||||||
| # TODO cut all this out into a separate test job and have an entirely different |  | ||||||
| # miniconda |  | ||||||
| if [[ "$PACKAGE_TYPE" != libtorch ]]; then |  | ||||||
|   source deactivate || true |  | ||||||
|   conda create -qyn test python="$DESIRED_PYTHON" |  | ||||||
|   source activate test >/dev/null |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Install the package |  | ||||||
| if [[ "$PACKAGE_TYPE" == libtorch ]]; then |  | ||||||
|   pkg="$(ls $workdir/final_pkgs/*-latest.zip)" |  | ||||||
|   unzip "$pkg" -d /tmp |  | ||||||
|   cd /tmp/libtorch |  | ||||||
| elif [[ "$PACKAGE_TYPE" == conda ]]; then |  | ||||||
|   conda install -y "$pkg" |  | ||||||
| else |  | ||||||
|   pip install "$pkg" -v |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Test |  | ||||||
| if [[ "$PACKAGE_TYPE" == libtorch ]]; then |  | ||||||
|   $workdir/builder/check_binary.sh |  | ||||||
| else |  | ||||||
|   pushd "$workdir/pytorch" |  | ||||||
|   $workdir/builder/run_tests.sh "$PACKAGE_TYPE" "$DESIRED_PYTHON" "$DESIRED_CUDA" |  | ||||||
| fi |  | ||||||
| @ -1,230 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -eux -o pipefail |  | ||||||
| export TZ=UTC |  | ||||||
|  |  | ||||||
| tagged_version() { |  | ||||||
|   # Grabs version from either the env variable CIRCLE_TAG |  | ||||||
|   # or the pytorch git described version |  | ||||||
|   if [[ "$OSTYPE" == "msys" &&  -z "${IS_GHA:-}" ]]; then |  | ||||||
|     GIT_DIR="${workdir}/p/.git" |  | ||||||
|   else |  | ||||||
|     GIT_DIR="${workdir}/pytorch/.git" |  | ||||||
|   fi |  | ||||||
|   GIT_DESCRIBE="git --git-dir ${GIT_DIR} describe --tags --match v[0-9]*.[0-9]*.[0-9]*" |  | ||||||
|   if [[ -n "${CIRCLE_TAG:-}" ]]; then |  | ||||||
|     echo "${CIRCLE_TAG}" |  | ||||||
|   elif [[ ! -d "${GIT_DIR}" ]]; then |  | ||||||
|     echo "Abort, abort! Git dir ${GIT_DIR} does not exists!" |  | ||||||
|     kill $$ |  | ||||||
|   elif ${GIT_DESCRIBE} --exact >/dev/null; then |  | ||||||
|     ${GIT_DESCRIBE} |  | ||||||
|   else |  | ||||||
|     return 1 |  | ||||||
|   fi |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # These are only relevant for CircleCI |  | ||||||
| # TODO: Remove these later once migrated fully to GHA |  | ||||||
| if [[ -z ${IS_GHA:-} ]]; then |  | ||||||
|   # We need to write an envfile to persist these variables to following |  | ||||||
|   # steps, but the location of the envfile depends on the circleci executor |  | ||||||
|   if [[ "$(uname)" == Darwin ]]; then |  | ||||||
|     # macos executor (builds and tests) |  | ||||||
|     workdir="/Users/distiller/project" |  | ||||||
|   elif [[ "$OSTYPE" == "msys" ]]; then |  | ||||||
|     # windows executor (builds and tests) |  | ||||||
|     workdir="/c/w" |  | ||||||
|   elif [[ -d "/home/circleci/project" ]]; then |  | ||||||
|     # machine executor (binary tests) |  | ||||||
|     workdir="/home/circleci/project" |  | ||||||
|   else |  | ||||||
|     # docker executor (binary builds) |  | ||||||
|     workdir="/" |  | ||||||
|   fi |  | ||||||
|   envfile="$workdir/env" |  | ||||||
|   touch "$envfile" |  | ||||||
|   chmod +x "$envfile" |  | ||||||
|  |  | ||||||
|   # Parse the BUILD_ENVIRONMENT to package type, python, and cuda |  | ||||||
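|   # (hypothetical example on Linux/macOS: BUILD_ENVIRONMENT="manywheel 3.7m cu102 devtoolset7" |  | ||||||
|   #  would yield PACKAGE_TYPE=manywheel, DESIRED_PYTHON=3.7m, DESIRED_CUDA=cu102, DESIRED_DEVTOOLSET=devtoolset7) |  | ||||||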
|   configs=($BUILD_ENVIRONMENT) |  | ||||||
|   export PACKAGE_TYPE="${configs[0]}" |  | ||||||
|   export DESIRED_PYTHON="${configs[1]}" |  | ||||||
|   export DESIRED_CUDA="${configs[2]}" |  | ||||||
|   if [[ "${OSTYPE}" == "msys" ]]; then |  | ||||||
|     export DESIRED_DEVTOOLSET="" |  | ||||||
|     export LIBTORCH_CONFIG="${configs[3]:-}" |  | ||||||
|     if [[ "$LIBTORCH_CONFIG" == 'debug' ]]; then |  | ||||||
|       export DEBUG=1 |  | ||||||
|     fi |  | ||||||
|   else |  | ||||||
|     export DESIRED_DEVTOOLSET="${configs[3]:-}" |  | ||||||
|   fi |  | ||||||
| else |  | ||||||
|   envfile=${BINARY_ENV_FILE:-/tmp/env} |  | ||||||
|   if [[ -n "${PYTORCH_ROOT}"  ]]; then |  | ||||||
|     workdir=$(dirname "${PYTORCH_ROOT}") |  | ||||||
|   else |  | ||||||
|     # docker executor (binary builds) |  | ||||||
|     workdir="/" |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then |  | ||||||
|   export BUILD_PYTHONLESS=1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Pick docker image |  | ||||||
| export DOCKER_IMAGE=${DOCKER_IMAGE:-} |  | ||||||
| if [[ -z "$DOCKER_IMAGE" ]]; then |  | ||||||
|   if [[ "$PACKAGE_TYPE" == conda ]]; then |  | ||||||
|     export DOCKER_IMAGE="pytorch/conda-cuda" |  | ||||||
|   elif [[ "$DESIRED_CUDA" == cpu ]]; then |  | ||||||
|     export DOCKER_IMAGE="pytorch/manylinux-cpu" |  | ||||||
|   else |  | ||||||
|     export DOCKER_IMAGE="pytorch/manylinux-cuda${DESIRED_CUDA:2}" |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| USE_GOLD_LINKER="OFF" |  | ||||||
| # GOLD linker can not be used if CUPTI is statically linked into PyTorch, see https://github.com/pytorch/pytorch/issues/57744 |  | ||||||
| if [[ ${DESIRED_CUDA} == "cpu" ]]; then |  | ||||||
|   USE_GOLD_LINKER="ON" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
|  |  | ||||||
| # Default to nightly, since that's where this normally uploads to |  | ||||||
| PIP_UPLOAD_FOLDER='nightly/' |  | ||||||
| # We put this here so that OVERRIDE_PACKAGE_VERSION below can read from it |  | ||||||
| export DATE="$(date -u +%Y%m%d)" |  | ||||||
| #TODO: We should be pulling semver version from the base version.txt |  | ||||||
| BASE_BUILD_VERSION="1.12.0.dev$DATE" |  | ||||||
| # Change BASE_BUILD_VERSION to git tag when on a git tag |  | ||||||
| # Use 'git -C' to make doubly sure we're in the correct directory for checking |  | ||||||
| # the git tag |  | ||||||
| if tagged_version >/dev/null; then |  | ||||||
|   # Switch upload folder to 'test/' if we are on a tag |  | ||||||
|   PIP_UPLOAD_FOLDER='test/' |  | ||||||
|   # Grab git tag, remove prefixed v and remove everything after - |  | ||||||
|   # Used to clean up tags that are for release candidates like v1.6.0-rc1 |  | ||||||
|   # Turns tag v1.6.0-rc1 -> v1.6.0 |  | ||||||
|   BASE_BUILD_VERSION="$(tagged_version | sed -e 's/^v//' -e 's/-.*$//')" |  | ||||||
| fi |  | ||||||
| if [[ "$(uname)" == 'Darwin' ]] || [[ "$PACKAGE_TYPE" == conda ]]; then |  | ||||||
|   export PYTORCH_BUILD_VERSION="${BASE_BUILD_VERSION}" |  | ||||||
| else |  | ||||||
|   export PYTORCH_BUILD_VERSION="${BASE_BUILD_VERSION}+$DESIRED_CUDA" |  | ||||||
| fi |  | ||||||
| export PYTORCH_BUILD_NUMBER=1 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| JAVA_HOME= |  | ||||||
| BUILD_JNI=OFF |  | ||||||
| if [[ "$PACKAGE_TYPE" == libtorch ]]; then |  | ||||||
|   POSSIBLE_JAVA_HOMES=() |  | ||||||
|   POSSIBLE_JAVA_HOMES+=(/usr/local) |  | ||||||
|   POSSIBLE_JAVA_HOMES+=(/usr/lib/jvm/java-8-openjdk-amd64) |  | ||||||
|   POSSIBLE_JAVA_HOMES+=(/Library/Java/JavaVirtualMachines/*.jdk/Contents/Home) |  | ||||||
|   # Add the Windows-specific JNI path |  | ||||||
|   POSSIBLE_JAVA_HOMES+=("$PWD/.circleci/windows-jni/") |  | ||||||
|   for JH in "${POSSIBLE_JAVA_HOMES[@]}" ; do |  | ||||||
|     if [[ -e "$JH/include/jni.h" ]] ; then |  | ||||||
|       # Skip the Windows-specific JNI path if we're not actually on Windows |  | ||||||
|       if [[ "$JH" == "$PWD/.circleci/windows-jni/" && "$OSTYPE" != "msys" ]] ; then |  | ||||||
|         break |  | ||||||
|       fi |  | ||||||
|       echo "Found jni.h under $JH" |  | ||||||
|       JAVA_HOME="$JH" |  | ||||||
|       BUILD_JNI=ON |  | ||||||
|       break |  | ||||||
|     fi |  | ||||||
|   done |  | ||||||
|   if [ -z "$JAVA_HOME" ]; then |  | ||||||
|     echo "Did not find jni.h" |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| cat >"$envfile" <<EOL |  | ||||||
| # =================== The following code will be executed inside Docker container =================== |  | ||||||
| export TZ=UTC |  | ||||||
| echo "Running on $(uname -a) at $(date)" |  | ||||||
|  |  | ||||||
| export PACKAGE_TYPE="$PACKAGE_TYPE" |  | ||||||
| export DESIRED_PYTHON="${DESIRED_PYTHON:-}" |  | ||||||
| export DESIRED_CUDA="$DESIRED_CUDA" |  | ||||||
| export LIBTORCH_VARIANT="${LIBTORCH_VARIANT:-}" |  | ||||||
| export BUILD_PYTHONLESS="${BUILD_PYTHONLESS:-}" |  | ||||||
| if [[ "${OSTYPE}" == "msys" ]]; then |  | ||||||
|   export LIBTORCH_CONFIG="${LIBTORCH_CONFIG:-}" |  | ||||||
|   if [[ "${LIBTORCH_CONFIG:-}" == 'debug' ]]; then |  | ||||||
|     export DEBUG=1 |  | ||||||
|   fi |  | ||||||
|   export DESIRED_DEVTOOLSET="" |  | ||||||
| else |  | ||||||
|   export DESIRED_DEVTOOLSET="${DESIRED_DEVTOOLSET:-}" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| export DATE="$DATE" |  | ||||||
| export NIGHTLIES_DATE_PREAMBLE=1.12.0.dev |  | ||||||
| export PYTORCH_BUILD_VERSION="$PYTORCH_BUILD_VERSION" |  | ||||||
| export PYTORCH_BUILD_NUMBER="$PYTORCH_BUILD_NUMBER" |  | ||||||
| export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION" |  | ||||||
|  |  | ||||||
| # TODO: We don't need this anymore IIUC |  | ||||||
| export TORCH_PACKAGE_NAME='torch' |  | ||||||
| export TORCH_CONDA_BUILD_FOLDER='pytorch-nightly' |  | ||||||
| export ANACONDA_USER='pytorch' |  | ||||||
|  |  | ||||||
| export USE_FBGEMM=1 |  | ||||||
| export JAVA_HOME=$JAVA_HOME |  | ||||||
| export BUILD_JNI=$BUILD_JNI |  | ||||||
| export PIP_UPLOAD_FOLDER="$PIP_UPLOAD_FOLDER" |  | ||||||
| export DOCKER_IMAGE="$DOCKER_IMAGE" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| export USE_GOLD_LINKER="${USE_GOLD_LINKER}" |  | ||||||
| export USE_GLOO_WITH_OPENSSL="ON" |  | ||||||
| # =================== The above code will be executed inside Docker container =================== |  | ||||||
| EOL |  | ||||||
|  |  | ||||||
| # nproc doesn't exist on darwin |  | ||||||
| if [[ "$(uname)" != Darwin ]]; then |  | ||||||
|   # Because most Circle executors only have 20 CPUs, using more causes OOMs w/ Ninja and nvcc parallelization |  | ||||||
|   MEMORY_LIMIT_MAX_JOBS=18 |  | ||||||
|   NUM_CPUS=$(( $(nproc) - 2 )) |  | ||||||
|  |  | ||||||
|   # Defaults here for **binary** linux builds so they can be changed in one place |  | ||||||
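|   # i.e. MAX_JOBS defaults to min(nproc - 2, 18) |  | ||||||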
|   export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))} |  | ||||||
|  |  | ||||||
|   cat >>"$envfile" <<EOL |  | ||||||
|   export MAX_JOBS="${MAX_JOBS}" |  | ||||||
| EOL |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ -z "${IS_GHA:-}" ]]; then |  | ||||||
|   cat >>"$envfile" <<EOL |  | ||||||
|   export workdir="$workdir" |  | ||||||
|   export MAC_PACKAGE_WORK_DIR="$workdir" |  | ||||||
|   if [[ "$OSTYPE" == "msys" ]]; then |  | ||||||
|     export PYTORCH_ROOT="$workdir/p" |  | ||||||
|     export BUILDER_ROOT="$workdir/b" |  | ||||||
|   else |  | ||||||
|     export PYTORCH_ROOT="$workdir/pytorch" |  | ||||||
|     export BUILDER_ROOT="$workdir/builder" |  | ||||||
|   fi |  | ||||||
|   export MINICONDA_ROOT="$workdir/miniconda" |  | ||||||
|   export PYTORCH_FINAL_PACKAGE_DIR="$workdir/final_pkgs" |  | ||||||
|  |  | ||||||
|   export CIRCLE_TAG="${CIRCLE_TAG:-}" |  | ||||||
|   export CIRCLE_SHA1="$CIRCLE_SHA1" |  | ||||||
|   export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-}" |  | ||||||
|   export CIRCLE_BRANCH="$CIRCLE_BRANCH" |  | ||||||
|   export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID" |  | ||||||
| EOL |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| echo 'retry () {' >> "$envfile" |  | ||||||
| echo '    $*  || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)' >> "$envfile" |  | ||||||
| echo '}' >> "$envfile" |  | ||||||
| echo 'export -f retry' >> "$envfile" |  | ||||||
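| # retry runs the given command up to five times, sleeping 1, 2, 4 and 8 seconds between attempts |  | ||||||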
|  |  | ||||||
| cat "$envfile" |  | ||||||
| @ -1,29 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
|  |  | ||||||
| # This section is used in the binary_test and smoke_test jobs. It expects |  | ||||||
| # 'binary_populate_env' to have populated /home/circleci/project/env and it |  | ||||||
| # expects another section to populate /home/circleci/project/ci_test_script.sh |  | ||||||
| # with the code to run inside the Docker container |  | ||||||
|  |  | ||||||
| # Expect all needed environment variables to be written to this file |  | ||||||
| source /home/circleci/project/env |  | ||||||
| echo "Running the following code in Docker" |  | ||||||
| cat /home/circleci/project/ci_test_script.sh |  | ||||||
| echo |  | ||||||
| echo |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
| # Expect actual code to be written to this file |  | ||||||
| chmod +x /home/circleci/project/ci_test_script.sh |  | ||||||
|  |  | ||||||
| VOLUME_MOUNTS="-v /home/circleci/project/:/circleci_stuff -v /home/circleci/project/final_pkgs:/final_pkgs -v ${PYTORCH_ROOT}:/pytorch -v ${BUILDER_ROOT}:/builder" |  | ||||||
| # Run the Docker container |  | ||||||
| if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then |  | ||||||
|   export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --gpus all ${VOLUME_MOUNTS} -t -d "${DOCKER_IMAGE}") |  | ||||||
| else |  | ||||||
|   export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined ${VOLUME_MOUNTS} -t -d "${DOCKER_IMAGE}") |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Execute the test script that was populated by an earlier section |  | ||||||
| export COMMAND='((echo "source /circleci_stuff/env && /circleci_stuff/ci_test_script.sh") | docker exec -i "$id" bash) 2>&1' |  | ||||||
| echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts |  | ||||||
| @ -1,102 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
|  |  | ||||||
| set -euo pipefail |  | ||||||
|  |  | ||||||
| PACKAGE_TYPE=${PACKAGE_TYPE:-conda} |  | ||||||
|  |  | ||||||
| PKG_DIR=${PKG_DIR:-/tmp/workspace/final_pkgs} |  | ||||||
|  |  | ||||||
| # Designates whether to submit as a release candidate or a nightly build |  | ||||||
| # Value should be `test` when uploading release candidates |  | ||||||
| # currently set within `designate_upload_channel` |  | ||||||
| UPLOAD_CHANNEL=${UPLOAD_CHANNEL:-nightly} |  | ||||||
| # Designates what subfolder to put packages into |  | ||||||
| UPLOAD_SUBFOLDER=${UPLOAD_SUBFOLDER:-cpu} |  | ||||||
| UPLOAD_BUCKET="s3://pytorch" |  | ||||||
| BACKUP_BUCKET="s3://pytorch-backup" |  | ||||||
|  |  | ||||||
| DRY_RUN=${DRY_RUN:-enabled} |  | ||||||
| # Don't actually do work unless explicit |  | ||||||
| ANACONDA="true anaconda" |  | ||||||
| AWS_S3_CP="aws s3 cp --dryrun" |  | ||||||
| if [[ "${DRY_RUN}" = "disabled" ]]; then |  | ||||||
|   ANACONDA="anaconda" |  | ||||||
|   AWS_S3_CP="aws s3 cp" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| do_backup() { |  | ||||||
|   local backup_dir |  | ||||||
|   backup_dir=$1 |  | ||||||
|   ( |  | ||||||
|     pushd /tmp/workspace |  | ||||||
|     set -x |  | ||||||
|     ${AWS_S3_CP} --recursive . "${BACKUP_BUCKET}/${CIRCLE_TAG}/${backup_dir}/" |  | ||||||
|   ) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| conda_upload() { |  | ||||||
|   ( |  | ||||||
|     set -x |  | ||||||
|     ${ANACONDA} \ |  | ||||||
|       upload  \ |  | ||||||
|       ${PKG_DIR}/*.tar.bz2 \ |  | ||||||
|       -u "pytorch-${UPLOAD_CHANNEL}" \ |  | ||||||
|       --label main \ |  | ||||||
|       --no-progress \ |  | ||||||
|       --force |  | ||||||
|   ) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| s3_upload() { |  | ||||||
|   local extension |  | ||||||
|   local pkg_type |  | ||||||
|   extension="$1" |  | ||||||
|   pkg_type="$2" |  | ||||||
|   s3_dir="${UPLOAD_BUCKET}/${pkg_type}/${UPLOAD_CHANNEL}/${UPLOAD_SUBFOLDER}/" |  | ||||||
|   ( |  | ||||||
|     for pkg in ${PKG_DIR}/*.${extension}; do |  | ||||||
|       ( |  | ||||||
|         set -x |  | ||||||
|         ${AWS_S3_CP} --no-progress --acl public-read "${pkg}" "${s3_dir}" |  | ||||||
|       ) |  | ||||||
|     done |  | ||||||
|   ) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Install dependencies (should be a no-op if previously installed) |  | ||||||
| conda install -yq anaconda-client |  | ||||||
| pip install -q awscli |  | ||||||
|  |  | ||||||
| case "${PACKAGE_TYPE}" in |  | ||||||
|   conda) |  | ||||||
|     conda_upload |  | ||||||
|     # Fetch platform (e.g. win-64, linux-64, etc.) from index file |  | ||||||
|     # Because there's no actual conda command to read this |  | ||||||
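|     # e.g. an index.json entry "subdir": "linux-64", yields subdir=linux-64 |  | ||||||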
|     subdir=$(\ |  | ||||||
|       tar -xOf ${PKG_DIR}/*.bz2 info/index.json \ |  | ||||||
|         | grep subdir  \ |  | ||||||
|         | cut -d ':' -f2 \ |  | ||||||
|         | sed -e 's/[[:space:]]//' -e 's/"//g' -e 's/,//' \ |  | ||||||
|     ) |  | ||||||
|     BACKUP_DIR="conda/${subdir}" |  | ||||||
|     ;; |  | ||||||
|   libtorch) |  | ||||||
|     s3_upload "zip" "libtorch" |  | ||||||
|     BACKUP_DIR="libtorch/${UPLOAD_CHANNEL}/${UPLOAD_SUBFOLDER}" |  | ||||||
|     ;; |  | ||||||
|   # wheel can refer to either wheel or manywheel |  | ||||||
|   *wheel) |  | ||||||
|     s3_upload "whl" "whl" |  | ||||||
|     BACKUP_DIR="whl/${UPLOAD_CHANNEL}/${UPLOAD_SUBFOLDER}" |  | ||||||
|     ;; |  | ||||||
|   *) |  | ||||||
|     echo "ERROR: unknown package type: ${PACKAGE_TYPE}" |  | ||||||
|     exit 1 |  | ||||||
|     ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
| # CIRCLE_TAG is defined by upstream circleci, |  | ||||||
| # this can be changed to recognize tagged versions |  | ||||||
| if [[ -n "${CIRCLE_TAG:-}" ]]; then |  | ||||||
|   do_backup "${BACKUP_DIR}" |  | ||||||
| fi |  | ||||||
| @ -1,80 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
| source "${BINARY_ENV_FILE:-/c/w/env}" |  | ||||||
| mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" |  | ||||||
|  |  | ||||||
| export CUDA_VERSION="${DESIRED_CUDA/cu/}" |  | ||||||
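| # e.g. DESIRED_CUDA=cu113 gives CUDA_VERSION=113 |  | ||||||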
| export USE_SCCACHE=1 |  | ||||||
| export SCCACHE_BUCKET=ossci-compiler-cache-windows |  | ||||||
| export SCCACHE_IGNORE_SERVER_IO_ERROR=1 |  | ||||||
| export VC_YEAR=2019 |  | ||||||
|  |  | ||||||
| if [[ "${DESIRED_CUDA}" == *"cu11"* ]]; then |  | ||||||
|     export BUILD_SPLIT_CUDA=ON |  | ||||||
| fi |  | ||||||
|  |  | ||||||
|  |  | ||||||
| echo "Free Space for CUDA DEBUG BUILD" |  | ||||||
| if [[ "${CIRCLECI:-}" == 'true' ]]; then |  | ||||||
|     export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT" |  | ||||||
|     if [[ -d "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community" ]]; then |  | ||||||
|         rm -rf "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community" |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     if [[ -d "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0" ]]; then |  | ||||||
|         rm -rf "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0" |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     if [[ -d "C:\\Program Files (x86)\\Microsoft.NET" ]]; then |  | ||||||
|         rm -rf "C:\\Program Files (x86)\\Microsoft.NET" |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     if [[ -d "C:\\Program Files\\dotnet" ]]; then |  | ||||||
|         rm -rf "C:\\Program Files\\dotnet" |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     if [[ -d "C:\\Program Files (x86)\\dotnet" ]]; then |  | ||||||
|         rm -rf "C:\\Program Files (x86)\\dotnet" |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     if [[ -d "C:\\Program Files (x86)\\Microsoft SQL Server" ]]; then |  | ||||||
|         rm -rf "C:\\Program Files (x86)\\Microsoft SQL Server" |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     if [[ -d "C:\\Program Files (x86)\\Xamarin" ]]; then |  | ||||||
|         rm -rf "C:\\Program Files (x86)\\Xamarin" |  | ||||||
|     fi |  | ||||||
|  |  | ||||||
|     if [[ -d "C:\\Program Files (x86)\\Google" ]]; then |  | ||||||
|         rm -rf "C:\\Program Files (x86)\\Google" |  | ||||||
|     fi |  | ||||||
|     set +x |  | ||||||
|     export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4:-} |  | ||||||
|     export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4:-} |  | ||||||
|     set -x |  | ||||||
|     if [[ -d "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages\\_Instances" ]]; then |  | ||||||
|         mv "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages\\_Instances" . |  | ||||||
|         rm -rf "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages" |  | ||||||
|         mkdir -p "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages" |  | ||||||
|         mv _Instances "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages" |  | ||||||
|     fi |  | ||||||
|     if [[ -d "C:\\Microsoft" ]]; then |  | ||||||
|         # don't use quotes here |  | ||||||
|         rm -rf /c/Microsoft/AndroidNDK* |  | ||||||
|     fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| echo "Free space on filesystem before build:" |  | ||||||
| df -h |  | ||||||
|  |  | ||||||
| pushd "$BUILDER_ROOT" |  | ||||||
| if [[ "$PACKAGE_TYPE" == 'conda' ]]; then |  | ||||||
|     ./windows/internal/build_conda.bat |  | ||||||
| elif [[ "$PACKAGE_TYPE" == 'wheel' || "$PACKAGE_TYPE" == 'libtorch' ]]; then |  | ||||||
|     export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT" |  | ||||||
|     ./windows/internal/build_wheels.bat |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| echo "Free space on filesystem after build:" |  | ||||||
| df -h |  | ||||||
| @ -1,13 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
| source "${BINARY_ENV_FILE:-/c/w/env}" |  | ||||||
|  |  | ||||||
| export CUDA_VERSION="${DESIRED_CUDA/cu/}" |  | ||||||
| export VC_YEAR=2019 |  | ||||||
|  |  | ||||||
| pushd "$BUILDER_ROOT" |  | ||||||
|  |  | ||||||
| ./windows/internal/smoke_test.bat |  | ||||||
|  |  | ||||||
| popd |  | ||||||
| @ -1,97 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
| env |  | ||||||
| echo "BUILD_ENVIRONMENT:$BUILD_ENVIRONMENT" |  | ||||||
|  |  | ||||||
| export ANDROID_NDK_HOME=/opt/ndk |  | ||||||
| export ANDROID_NDK=/opt/ndk |  | ||||||
| export ANDROID_HOME=/opt/android/sdk |  | ||||||
|  |  | ||||||
| # Must be in sync with GRADLE_VERSION in docker image for android |  | ||||||
| # https://github.com/pietern/pytorch-dockerfiles/blob/master/build.sh#L155 |  | ||||||
| export GRADLE_VERSION=6.8.3 |  | ||||||
| export GRADLE_HOME=/opt/gradle/gradle-$GRADLE_VERSION |  | ||||||
| export GRADLE_PATH=$GRADLE_HOME/bin/gradle |  | ||||||
|  |  | ||||||
| # touch gradle cache files to prevent expiration |  | ||||||
| while IFS= read -r -d '' file |  | ||||||
| do |  | ||||||
|   touch "$file" || true |  | ||||||
| done < <(find /var/lib/jenkins/.gradle -type f -print0) |  | ||||||
|  |  | ||||||
| export GRADLE_LOCAL_PROPERTIES=~/workspace/android/local.properties |  | ||||||
| rm -f $GRADLE_LOCAL_PROPERTIES |  | ||||||
| echo "sdk.dir=/opt/android/sdk" >> $GRADLE_LOCAL_PROPERTIES |  | ||||||
| echo "ndk.dir=/opt/ndk" >> $GRADLE_LOCAL_PROPERTIES |  | ||||||
| echo "cmake.dir=/usr/local" >> $GRADLE_LOCAL_PROPERTIES |  | ||||||
|  |  | ||||||
| retry () { |  | ||||||
|   $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Run custom build script |  | ||||||
| if [[ "${BUILD_ENVIRONMENT}" == *-gradle-custom-build* ]]; then |  | ||||||
|   # Install torch & torchvision - used to download & dump used ops from test model. |  | ||||||
|   retry pip install torch torchvision --progress-bar off |  | ||||||
|  |  | ||||||
|   exec "$(dirname "${BASH_SOURCE[0]}")/../../android/build_test_app_custom.sh" armeabi-v7a |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Run default build |  | ||||||
| BUILD_ANDROID_INCLUDE_DIR_x86=~/workspace/build_android/install/include |  | ||||||
| BUILD_ANDROID_LIB_DIR_x86=~/workspace/build_android/install/lib |  | ||||||
|  |  | ||||||
| BUILD_ANDROID_INCLUDE_DIR_x86_64=~/workspace/build_android_install_x86_64/install/include |  | ||||||
| BUILD_ANDROID_LIB_DIR_x86_64=~/workspace/build_android_install_x86_64/install/lib |  | ||||||
|  |  | ||||||
| BUILD_ANDROID_INCLUDE_DIR_arm_v7a=~/workspace/build_android_install_arm_v7a/install/include |  | ||||||
| BUILD_ANDROID_LIB_DIR_arm_v7a=~/workspace/build_android_install_arm_v7a/install/lib |  | ||||||
|  |  | ||||||
| BUILD_ANDROID_INCLUDE_DIR_arm_v8a=~/workspace/build_android_install_arm_v8a/install/include |  | ||||||
| BUILD_ANDROID_LIB_DIR_arm_v8a=~/workspace/build_android_install_arm_v8a/install/lib |  | ||||||
|  |  | ||||||
| PYTORCH_ANDROID_SRC_MAIN_DIR=~/workspace/android/pytorch_android/src/main |  | ||||||
|  |  | ||||||
| JNI_INCLUDE_DIR=${PYTORCH_ANDROID_SRC_MAIN_DIR}/cpp/libtorch_include |  | ||||||
| mkdir -p $JNI_INCLUDE_DIR |  | ||||||
|  |  | ||||||
| JNI_LIBS_DIR=${PYTORCH_ANDROID_SRC_MAIN_DIR}/jniLibs |  | ||||||
| mkdir -p $JNI_LIBS_DIR |  | ||||||
|  |  | ||||||
| ln -s ${BUILD_ANDROID_INCLUDE_DIR_x86} ${JNI_INCLUDE_DIR}/x86 |  | ||||||
| ln -s ${BUILD_ANDROID_LIB_DIR_x86} ${JNI_LIBS_DIR}/x86 |  | ||||||
|  |  | ||||||
| if [[ "${BUILD_ENVIRONMENT}" != *-gradle-build-only-x86_32* ]]; then |  | ||||||
| ln -s ${BUILD_ANDROID_INCLUDE_DIR_x86_64} ${JNI_INCLUDE_DIR}/x86_64 |  | ||||||
| ln -s ${BUILD_ANDROID_LIB_DIR_x86_64} ${JNI_LIBS_DIR}/x86_64 |  | ||||||
|  |  | ||||||
| ln -s ${BUILD_ANDROID_INCLUDE_DIR_arm_v7a} ${JNI_INCLUDE_DIR}/armeabi-v7a |  | ||||||
| ln -s ${BUILD_ANDROID_LIB_DIR_arm_v7a} ${JNI_LIBS_DIR}/armeabi-v7a |  | ||||||
|  |  | ||||||
| ln -s ${BUILD_ANDROID_INCLUDE_DIR_arm_v8a} ${JNI_INCLUDE_DIR}/arm64-v8a |  | ||||||
| ln -s ${BUILD_ANDROID_LIB_DIR_arm_v8a} ${JNI_LIBS_DIR}/arm64-v8a |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| GRADLE_PARAMS="-p android assembleRelease --debug --stacktrace" |  | ||||||
| if [[ "${BUILD_ENVIRONMENT}" == *-gradle-build-only-x86_32* ]]; then |  | ||||||
|     GRADLE_PARAMS+=" -PABI_FILTERS=x86" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [ -n "{GRADLE_OFFLINE:-}" ]; then |  | ||||||
|     GRADLE_PARAMS+=" --offline" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| $GRADLE_PATH $GRADLE_PARAMS |  | ||||||
|  |  | ||||||
| find . -type f -name "*.a" -exec ls -lh {} \; |  | ||||||
|  |  | ||||||
| while IFS= read -r -d '' file |  | ||||||
| do |  | ||||||
|   echo |  | ||||||
|   echo "$file" |  | ||||||
|   ls -lah "$file" |  | ||||||
|   zipinfo -l "$file" |  | ||||||
| done < <(find . -type f -name '*.aar' -print0) |  | ||||||
|  |  | ||||||
| find . -type f -name '*.aar' -print | xargs tar cfvz ~/workspace/android/artifacts.tgz |  | ||||||
| @ -1,107 +0,0 @@ | |||||||
| # =================== The following code **should** be executed inside Docker container =================== |  | ||||||
|  |  | ||||||
| # Install dependencies |  | ||||||
| sudo apt-get -y update |  | ||||||
| sudo apt-get -y install expect-dev |  | ||||||
|  |  | ||||||
| # This is where the local pytorch install in the docker image is located |  | ||||||
| pt_checkout="/var/lib/jenkins/workspace" |  | ||||||
|  |  | ||||||
| # Since we're cat-ing this file, we need to escape all $'s |  | ||||||
| echo "cpp_doc_push_script.sh: Invoked with $*" |  | ||||||
|  |  | ||||||
| # for statements like ${1:-${DOCS_INSTALL_PATH:-docs/}} |  | ||||||
| # the order of operations goes: |  | ||||||
| #   1. Check if there's an argument $1 |  | ||||||
| #   2. If no argument check for environment var DOCS_INSTALL_PATH |  | ||||||
| #   3. If no environment var fall back to default 'docs/' |  | ||||||
|  |  | ||||||
| # NOTE: It might seem weird to gather the second argument before gathering the first argument |  | ||||||
| #       but since DOCS_INSTALL_PATH can be derived from DOCS_VERSION it's probably better to |  | ||||||
| #       try and gather it first, just so we don't potentially break people who rely on this script |  | ||||||
| # Argument 2: What version of the C++ API docs we are building. |  | ||||||
| version="${2:-${DOCS_VERSION:-master}}" |  | ||||||
| if [ -z "$version" ]; then |  | ||||||
| echo "error: cpp_doc_push_script.sh: version (arg2) not specified" |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Argument 1: Where to copy the built documentation for the C++ API to |  | ||||||
| # (pytorch.github.io/$install_path) |  | ||||||
| install_path="${1:-${DOCS_INSTALL_PATH:-docs/${DOCS_VERSION}}}" |  | ||||||
| if [ -z "$install_path" ]; then |  | ||||||
| echo "error: cpp_doc_push_script.sh: install_path (arg1) not specified" |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| is_main_doc=false |  | ||||||
| if [ "$version" == "master" ]; then |  | ||||||
|   is_main_doc=true |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| echo "install_path: $install_path  version: $version" |  | ||||||
|  |  | ||||||
| # ======================== Building PyTorch C++ API Docs ======================== |  | ||||||
|  |  | ||||||
| echo "Building PyTorch C++ API docs..." |  | ||||||
|  |  | ||||||
| # Clone the cppdocs repo |  | ||||||
| rm -rf cppdocs |  | ||||||
| git clone https://github.com/pytorch/cppdocs |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| sudo apt-get -y install doxygen |  | ||||||
|  |  | ||||||
| # Generate ATen files |  | ||||||
| pushd "${pt_checkout}" |  | ||||||
| pip install -r requirements.txt |  | ||||||
| time python -m torchgen.gen \ |  | ||||||
|   -s aten/src/ATen \ |  | ||||||
|   -d build/aten/src/ATen |  | ||||||
|  |  | ||||||
| # Copy some required files |  | ||||||
| cp torch/_utils_internal.py tools/shared |  | ||||||
|  |  | ||||||
| # Generate PyTorch files |  | ||||||
| time python tools/setup_helpers/generate_code.py \ |  | ||||||
|   --native-functions-path aten/src/ATen/native/native_functions.yaml \ |  | ||||||
|   --tags-path aten/src/ATen/native/tags.yaml |  | ||||||
|  |  | ||||||
| # Build the docs |  | ||||||
| pushd docs/cpp |  | ||||||
| pip install -r requirements.txt |  | ||||||
| time make VERBOSE=1 html -j |  | ||||||
|  |  | ||||||
| popd |  | ||||||
| popd |  | ||||||
|  |  | ||||||
| pushd cppdocs |  | ||||||
|  |  | ||||||
| # Purge everything with some exceptions |  | ||||||
| mkdir /tmp/cppdocs-sync |  | ||||||
| mv _config.yml README.md /tmp/cppdocs-sync/ |  | ||||||
| rm -rf * |  | ||||||
|  |  | ||||||
| # Copy over all the newly generated HTML |  | ||||||
| cp -r "${pt_checkout}"/docs/cpp/build/html/* . |  | ||||||
|  |  | ||||||
| # Copy back _config.yml |  | ||||||
| rm -rf _config.yml |  | ||||||
| mv /tmp/cppdocs-sync/* . |  | ||||||
|  |  | ||||||
| # Make a new commit |  | ||||||
| git add . || true |  | ||||||
| git status |  | ||||||
| git config user.email "soumith+bot@pytorch.org" |  | ||||||
| git config user.name "pytorchbot" |  | ||||||
| # If there aren't changes, don't make a commit; push is no-op |  | ||||||
| git commit -m "Generate C++ docs from pytorch/pytorch@${GITHUB_SHA}" || true |  | ||||||
| git status |  | ||||||
|  |  | ||||||
| if [[ "${WITH_PUSH:-}" == true ]]; then |  | ||||||
|   git push -u origin |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| popd |  | ||||||
| # =================== The above code **should** be executed inside Docker container =================== |  | ||||||
| @ -1,8 +0,0 @@ | |||||||
| set "DRIVER_DOWNLOAD_LINK=https://s3.amazonaws.com/ossci-windows/452.39-data-center-tesla-desktop-win10-64bit-international.exe" |  | ||||||
| curl --retry 3 -kL %DRIVER_DOWNLOAD_LINK% --output 452.39-data-center-tesla-desktop-win10-64bit-international.exe |  | ||||||
| if errorlevel 1 exit /b 1 |  | ||||||
|  |  | ||||||
| start /wait 452.39-data-center-tesla-desktop-win10-64bit-international.exe -s -noreboot |  | ||||||
| if errorlevel 1 exit /b 1 |  | ||||||
|  |  | ||||||
| del 452.39-data-center-tesla-desktop-win10-64bit-international.exe || ver > NUL |  | ||||||
| @ -1,46 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
| # DO NOT ADD 'set -x', so as not to reveal CircleCI secret context environment variables |  | ||||||
| set -eu -o pipefail |  | ||||||
|  |  | ||||||
| export ANDROID_NDK_HOME=/opt/ndk |  | ||||||
| export ANDROID_HOME=/opt/android/sdk |  | ||||||
|  |  | ||||||
| export GRADLE_VERSION=6.8.3 |  | ||||||
| export GRADLE_HOME=/opt/gradle/gradle-$GRADLE_VERSION |  | ||||||
| export GRADLE_PATH=$GRADLE_HOME/bin/gradle |  | ||||||
|  |  | ||||||
| echo "BUILD_ENVIRONMENT:$BUILD_ENVIRONMENT" |  | ||||||
| ls -la ~/workspace |  | ||||||
|  |  | ||||||
| GRADLE_PROPERTIES=~/workspace/android/gradle.properties |  | ||||||
|  |  | ||||||
| IS_SNAPSHOT="$(grep 'VERSION_NAME=[0-9\.]\+-SNAPSHOT' "$GRADLE_PROPERTIES")" |  | ||||||
| echo "IS_SNAPSHOT:$IS_SNAPSHOT" |  | ||||||
|  |  | ||||||
| if [ -z "$IS_SNAPSHOT" ]; then |  | ||||||
|   echo "Error: version is not snapshot." |  | ||||||
| elif [ -z "$SONATYPE_NEXUS_USERNAME" ]; then |  | ||||||
|   echo "Error: missing env variable SONATYPE_NEXUS_USERNAME." |  | ||||||
| elif [ -z "$SONATYPE_NEXUS_PASSWORD" ]; then |  | ||||||
|   echo "Error: missing env variable SONATYPE_NEXUS_PASSWORD." |  | ||||||
| elif [ -z "$ANDROID_SIGN_KEY" ]; then |  | ||||||
|   echo "Error: missing env variable ANDROID_SIGN_KEY." |  | ||||||
| elif [ -z "$ANDROID_SIGN_PASS" ]; then |  | ||||||
|   echo "Error: missing env variable ANDROID_SIGN_PASS." |  | ||||||
| else |  | ||||||
|   GRADLE_LOCAL_PROPERTIES=~/workspace/android/local.properties |  | ||||||
|   rm -f $GRADLE_LOCAL_PROPERTIES |  | ||||||
|  |  | ||||||
|   echo "sdk.dir=/opt/android/sdk" >> $GRADLE_LOCAL_PROPERTIES |  | ||||||
|   echo "ndk.dir=/opt/ndk" >> $GRADLE_LOCAL_PROPERTIES |  | ||||||
|  |  | ||||||
|   echo "SONATYPE_NEXUS_USERNAME=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES |  | ||||||
|   echo "mavenCentralRepositoryUsername=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES |  | ||||||
|   echo "SONATYPE_NEXUS_PASSWORD=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES |  | ||||||
|   echo "mavenCentralRepositoryPassword=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES |  | ||||||
|  |  | ||||||
|   echo "signing.keyId=${ANDROID_SIGN_KEY}" >> $GRADLE_PROPERTIES |  | ||||||
|   echo "signing.password=${ANDROID_SIGN_PASS}" >> $GRADLE_PROPERTIES |  | ||||||
|  |  | ||||||
|   $GRADLE_PATH -p ~/workspace/android/ uploadArchives |  | ||||||
| fi |  | ||||||
| @ -1,142 +0,0 @@ | |||||||
| # =================== The following code **should** be executed inside Docker container =================== |  | ||||||
|  |  | ||||||
| # Install dependencies |  | ||||||
| sudo apt-get -y update |  | ||||||
| sudo apt-get -y install expect-dev |  | ||||||
|  |  | ||||||
| # This is where the local pytorch install in the docker image is located |  | ||||||
| pt_checkout="/var/lib/jenkins/workspace" |  | ||||||
|  |  | ||||||
| source "$pt_checkout/.jenkins/pytorch/common_utils.sh" |  | ||||||
|  |  | ||||||
| echo "python_doc_push_script.sh: Invoked with $*" |  | ||||||
|  |  | ||||||
| set -ex |  | ||||||
|  |  | ||||||
| # for statements like ${1:-${DOCS_INSTALL_PATH:-docs/}} |  | ||||||
| # the order of operations goes: |  | ||||||
| #   1. Check if there's an argument $1 |  | ||||||
| #   2. If no argument check for environment var DOCS_INSTALL_PATH |  | ||||||
| #   3. If no environment var fall back to default 'docs/' |  | ||||||
|  |  | ||||||
| # NOTE: It might seem weird to gather the second argument before gathering the first argument |  | ||||||
| #       but since DOCS_INSTALL_PATH can be derived from DOCS_VERSION it's probably better to |  | ||||||
| #       try and gather it first, just so we don't potentially break people who rely on this script |  | ||||||
| # Argument 2: What version of the docs we are building. |  | ||||||
| version="${2:-${DOCS_VERSION:-master}}" |  | ||||||
| if [ -z "$version" ]; then |  | ||||||
| echo "error: python_doc_push_script.sh: version (arg2) not specified" |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Argument 1: Where to copy the built documentation to |  | ||||||
| # (pytorch.github.io/$install_path) |  | ||||||
| install_path="${1:-${DOCS_INSTALL_PATH:-docs/${DOCS_VERSION}}}" |  | ||||||
| if [ -z "$install_path" ]; then |  | ||||||
| echo "error: python_doc_push_script.sh: install_path (arg1) not specified" |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| is_main_doc=false |  | ||||||
| if [ "$version" == "master" ]; then |  | ||||||
|   is_main_doc=true |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Argument 3: The branch to push to. Usually is "site" |  | ||||||
| branch="${3:-${DOCS_BRANCH:-site}}" |  | ||||||
| if [ -z "$branch" ]; then |  | ||||||
| echo "error: python_doc_push_script.sh: branch (arg3) not specified" |  | ||||||
|   exit 1 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| echo "install_path: $install_path  version: $version" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| build_docs () { |  | ||||||
|   set +e |  | ||||||
|   set -o pipefail |  | ||||||
|   make $1 2>&1 | tee /tmp/docs_build.txt |  | ||||||
|   code=$? |  | ||||||
|   if [ $code -ne 0 ]; then |  | ||||||
|     set +x |  | ||||||
|     echo ========================= |  | ||||||
|     grep "WARNING:" /tmp/docs_build.txt |  | ||||||
|     echo ========================= |  | ||||||
|     echo Docs build failed. If the failure is not clear, scan back in the log |  | ||||||
|     echo for any WARNINGS or for the line "build finished with problems" |  | ||||||
|     echo "(tried to echo the WARNINGS above the ==== line)" |  | ||||||
|     echo ========================= |  | ||||||
|   fi |  | ||||||
|   set -ex |  | ||||||
|   return $code |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  |  | ||||||
| git clone https://github.com/pytorch/pytorch.github.io -b $branch --depth 1 |  | ||||||
| pushd pytorch.github.io |  | ||||||
|  |  | ||||||
| export LC_ALL=C |  | ||||||
| export PATH=/opt/conda/bin:$PATH |  | ||||||
|  |  | ||||||
| rm -rf pytorch || true |  | ||||||
|  |  | ||||||
| # Get all the documentation sources, put them in one place |  | ||||||
| pushd "$pt_checkout" |  | ||||||
| pushd docs |  | ||||||
|  |  | ||||||
| # Build the docs |  | ||||||
| pip -q install -r requirements.txt |  | ||||||
| if [ "$is_main_doc" = true ]; then |  | ||||||
|   build_docs html |  | ||||||
|   [ $? -eq 0 ] || exit $? |  | ||||||
|   make coverage |  | ||||||
|   # Now we have the coverage report, we need to make sure it is empty. |  | ||||||
|   # Count the number of lines in the file and turn that number into a variable |  | ||||||
|   # $lines. The `cut -f1 ...` is to only parse the number, not the filename |  | ||||||
|   # Skip the report header by subtracting 2: the header will be output even if |  | ||||||
|   # there are no undocumented items. |  | ||||||
|   # |  | ||||||
|   # Also: see docs/source/conf.py for "coverage_ignore*" items, which should |  | ||||||
|   # be documented then removed from there. |  | ||||||
|   lines=$(wc -l build/coverage/python.txt 2>/dev/null |cut -f1 -d' ') |  | ||||||
|   undocumented=$(($lines - 2)) |  | ||||||
|   if [ $undocumented -lt 0 ]; then |  | ||||||
|     echo coverage output not found |  | ||||||
|     exit 1 |  | ||||||
|   elif [ $undocumented -gt 0 ]; then |  | ||||||
|     echo undocumented objects found: |  | ||||||
|     cat build/coverage/python.txt |  | ||||||
|     exit 1 |  | ||||||
|   fi |  | ||||||
| else |  | ||||||
|   # skip coverage, format for stable or tags |  | ||||||
|   build_docs html-stable |  | ||||||
|   [ $? -eq 0 ] || exit $? |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # Move them into the docs repo |  | ||||||
| popd |  | ||||||
| popd |  | ||||||
| git rm -rf "$install_path" || true |  | ||||||
| mv "$pt_checkout/docs/build/html" "$install_path" |  | ||||||
|  |  | ||||||
| # Prevent Google from indexing $install_path/_modules. This folder contains |  | ||||||
| # generated source files. |  | ||||||
| # NB: the following only works with GNU sed. The sed shipped with macOS is different. |  | ||||||
| # One can `brew install gnu-sed` on a Mac and then use "gsed" instead of "sed". |  | ||||||
| find "$install_path/_modules" -name "*.html" -print0 | xargs -0 sed -i '/<head>/a \ \ <meta name="robots" content="noindex">' |  | ||||||
|  |  | ||||||
| git add "$install_path" || true |  | ||||||
| git status |  | ||||||
| git config user.email "soumith+bot@pytorch.org" |  | ||||||
| git config user.name "pytorchbot" |  | ||||||
| # If there aren't changes, don't make a commit; push is no-op |  | ||||||
| git commit -m "Generate Python docs from pytorch/pytorch@${GITHUB_SHA}" || true |  | ||||||
| git status |  | ||||||
|  |  | ||||||
| if [[ "${WITH_PUSH:-}" == true ]]; then |  | ||||||
|   git push -u origin "${branch}" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| popd |  | ||||||
| # =================== The above code **should** be executed inside Docker container =================== |  | ||||||
| @ -1,112 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
| set -ex -o pipefail |  | ||||||
|  |  | ||||||
| # Remove unnecessary sources |  | ||||||
| sudo rm -f /etc/apt/sources.list.d/google-chrome.list |  | ||||||
| sudo rm -f /etc/apt/heroku.list |  | ||||||
| sudo rm -f /etc/apt/openjdk-r-ubuntu-ppa-xenial.list |  | ||||||
| sudo rm -f /etc/apt/partner.list |  | ||||||
|  |  | ||||||
| # To increase the network reliability, let apt decide which mirror is best to use |  | ||||||
| sudo sed -i -e 's/http:\/\/.*archive/mirror:\/\/mirrors/' -e 's/\/ubuntu\//\/mirrors.txt/' /etc/apt/sources.list |  | ||||||
|  |  | ||||||
| retry () { |  | ||||||
|   $*  || $* || $* || $* || $* |  | ||||||
| } |  | ||||||
|  |  | ||||||
| # Method adapted from here: https://askubuntu.com/questions/875213/apt-get-to-retry-downloading |  | ||||||
| # (with use of tee to avoid permissions problems) |  | ||||||
| # This is better than retrying the whole apt-get command |  | ||||||
| echo "APT::Acquire::Retries \"3\";" | sudo tee /etc/apt/apt.conf.d/80-retries |  | ||||||
|  |  | ||||||
| retry sudo apt-get update -qq |  | ||||||
| retry sudo apt-get -y install \ |  | ||||||
|   moreutils \ |  | ||||||
|   expect-dev |  | ||||||
|  |  | ||||||
| echo "== DOCKER VERSION ==" |  | ||||||
| docker version |  | ||||||
|  |  | ||||||
| if ! command -v aws >/dev/null; then |  | ||||||
|   retry sudo pip3 -q install awscli==1.19.64 |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then |  | ||||||
|   DRIVER_FN="NVIDIA-Linux-x86_64-510.60.02.run" |  | ||||||
|   wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN" |  | ||||||
|   sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false) |  | ||||||
|   nvidia-smi |  | ||||||
|  |  | ||||||
|   # Taken directly from https://github.com/NVIDIA/nvidia-docker |  | ||||||
|   # Add the package repositories |  | ||||||
|   distribution=$(. /etc/os-release;echo "$ID$VERSION_ID") |  | ||||||
|   curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - |  | ||||||
|   curl -s -L "https://nvidia.github.io/nvidia-docker/${distribution}/nvidia-docker.list" | sudo tee /etc/apt/sources.list.d/nvidia-docker.list |  | ||||||
|  |  | ||||||
|   retry sudo apt-get update -qq |  | ||||||
|   # Necessary to get the `--gpus` flag to function within docker |  | ||||||
|   retry sudo apt-get install -y nvidia-container-toolkit |  | ||||||
|   sudo systemctl restart docker |  | ||||||
| else |  | ||||||
|   # Explicitly remove nvidia docker apt repositories if not building for cuda |  | ||||||
|   sudo rm -rf /etc/apt/sources.list.d/nvidia-docker.list |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| add_to_env_file() { |  | ||||||
|   local name=$1 |  | ||||||
|   local value=$2 |  | ||||||
|   case "$value" in |  | ||||||
|     *\ *) |  | ||||||
|       # BASH_ENV should be set by CircleCI |  | ||||||
|       echo "${name}='${value}'" >> "${BASH_ENV:-/tmp/env}" |  | ||||||
|       ;; |  | ||||||
|     *) |  | ||||||
|       echo "${name}=${value}" >> "${BASH_ENV:-/tmp/env}" |  | ||||||
|       ;; |  | ||||||
|   esac |  | ||||||
| } |  | ||||||
|  |  | ||||||
| add_to_env_file IN_CI 1 |  | ||||||
| add_to_env_file CI_MASTER "${CI_MASTER:-}" |  | ||||||
| add_to_env_file COMMIT_SOURCE "${CIRCLE_BRANCH:-}" |  | ||||||
| add_to_env_file BUILD_ENVIRONMENT "${BUILD_ENVIRONMENT}" |  | ||||||
| add_to_env_file CIRCLE_PULL_REQUEST "${CIRCLE_PULL_REQUEST}" |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if [[ "${BUILD_ENVIRONMENT}" == *-build ]]; then |  | ||||||
|   add_to_env_file SCCACHE_BUCKET ossci-compiler-cache-circleci-v2 |  | ||||||
|  |  | ||||||
|   SCCACHE_MAX_JOBS=$(( $(nproc) - 1 )) |  | ||||||
|   MEMORY_LIMIT_MAX_JOBS=8  # the "large" resource class on CircleCI has 32 CPU cores, if we use all of them we'll OOM |  | ||||||
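|   # i.e. MAX_JOBS = min(nproc - 1, 8) |  | ||||||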
|   MAX_JOBS=$(( ${SCCACHE_MAX_JOBS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${SCCACHE_MAX_JOBS} )) |  | ||||||
|   add_to_env_file MAX_JOBS "${MAX_JOBS}" |  | ||||||
|  |  | ||||||
|   if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then |  | ||||||
|     add_to_env_file TORCH_CUDA_ARCH_LIST 5.2 |  | ||||||
|   fi |  | ||||||
|  |  | ||||||
|   if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then |  | ||||||
|     # This IAM user allows write access to S3 bucket for sccache & bazels3cache |  | ||||||
|     set +x |  | ||||||
|     add_to_env_file XLA_CLANG_CACHE_S3_BUCKET_NAME "${XLA_CLANG_CACHE_S3_BUCKET_NAME:-}" |  | ||||||
|     add_to_env_file AWS_ACCESS_KEY_ID "${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2:-}" |  | ||||||
|     add_to_env_file AWS_SECRET_ACCESS_KEY "${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2:-}" |  | ||||||
|     set -x |  | ||||||
|   else |  | ||||||
|     # This IAM user allows write access to S3 bucket for sccache |  | ||||||
|     set +x |  | ||||||
|     add_to_env_file XLA_CLANG_CACHE_S3_BUCKET_NAME "${XLA_CLANG_CACHE_S3_BUCKET_NAME:-}" |  | ||||||
|     add_to_env_file AWS_ACCESS_KEY_ID "${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}" |  | ||||||
|     add_to_env_file AWS_SECRET_ACCESS_KEY "${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}" |  | ||||||
|     set -x |  | ||||||
|   fi |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| # This IAM user only allows read-write access to ECR |  | ||||||
| set +x |  | ||||||
| export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V4:-} |  | ||||||
| export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V4:-} |  | ||||||
| export AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\") |  | ||||||
| export AWS_REGION=us-east-1 |  | ||||||
| aws ecr get-login-password --region $AWS_REGION|docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com |  | ||||||
| set -x |  | ||||||
| @ -1,50 +0,0 @@ | |||||||
| #!/usr/bin/env bash |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
| # Set up CircleCI GPG keys for apt, if needed |  | ||||||
| curl --retry 3 -s -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add - |  | ||||||
|  |  | ||||||
| # Stop background apt updates.  Hypothetically, the kill should not |  | ||||||
| # be necessary, because stop is supposed to send a kill signal to |  | ||||||
| # the process, but we've added it for good luck.  Also |  | ||||||
| # hypothetically, it's supposed to be unnecessary to wait for |  | ||||||
| # the process to block.  We also have that line for good luck. |  | ||||||
| # If you like, try deleting them and seeing if it works. |  | ||||||
| sudo systemctl stop apt-daily.service || true |  | ||||||
| sudo systemctl kill --kill-who=all apt-daily.service || true |  | ||||||
|  |  | ||||||
| sudo systemctl stop unattended-upgrades.service || true |  | ||||||
| sudo systemctl kill --kill-who=all unattended-upgrades.service || true |  | ||||||
|  |  | ||||||
| # wait until `apt-get update` has been killed |  | ||||||
| while systemctl is-active --quiet apt-daily.service |  | ||||||
| do |  | ||||||
|     sleep 1; |  | ||||||
| done |  | ||||||
| while systemctl is-active --quiet unattended-upgrades.service |  | ||||||
| do |  | ||||||
|     sleep 1; |  | ||||||
| done |  | ||||||
|  |  | ||||||
| # See if we actually were successful |  | ||||||
| systemctl list-units --all | cat |  | ||||||
|  |  | ||||||
| # For good luck, try even harder to kill apt-get |  | ||||||
| sudo pkill apt-get || true |  | ||||||
|  |  | ||||||
| # For even better luck, purge unattended-upgrades |  | ||||||
| sudo apt-get purge -y unattended-upgrades || true |  | ||||||
|  |  | ||||||
| cat /etc/apt/sources.list |  | ||||||
|  |  | ||||||
| # For the bestest luck, kill again now |  | ||||||
| sudo pkill apt || true |  | ||||||
| sudo pkill dpkg || true |  | ||||||
|  |  | ||||||
| # Try to detect if apt/dpkg is stuck |  | ||||||
| if ps auxfww | grep '[a]pt'; then |  | ||||||
|   echo "WARNING: There are leftover apt processes; subsequent apt update will likely fail" |  | ||||||
| fi |  | ||||||
| if ps auxfww | grep '[d]pkg'; then |  | ||||||
|   echo "WARNING: There are leftover dpkg processes; subsequent apt update will likely fail" |  | ||||||
| fi |  | ||||||
| @ -1,140 +0,0 @@ | |||||||
| # Documentation: https://docs.microsoft.com/en-us/rest/api/azure/devops/build/?view=azure-devops-rest-6.0 |  | ||||||
|  |  | ||||||
| import re |  | ||||||
| import json |  | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| import requests |  | ||||||
| import time |  | ||||||
|  |  | ||||||
| AZURE_PIPELINE_BASE_URL = "https://aiinfra.visualstudio.com/PyTorch/" |  | ||||||
| AZURE_DEVOPS_PAT_BASE64 = os.environ.get("AZURE_DEVOPS_PAT_BASE64_SECRET", "") |  | ||||||
| PIPELINE_ID = "911" |  | ||||||
| PROJECT_ID = "0628bce4-2d33-499e-bac5-530e12db160f" |  | ||||||
| TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "main") |  | ||||||
| TARGET_COMMIT = os.environ.get("CIRCLE_SHA1", "") |  | ||||||
|  |  | ||||||
| build_base_url = AZURE_PIPELINE_BASE_URL + "_apis/build/builds?api-version=6.0" |  | ||||||
|  |  | ||||||
| s = requests.Session() |  | ||||||
| s.headers.update({"Authorization": "Basic " + AZURE_DEVOPS_PAT_BASE64}) |  | ||||||
|  |  | ||||||
| def submit_build(pipeline_id, project_id, source_branch, source_version): |  | ||||||
|     print("Submitting build for branch: " + source_branch) |  | ||||||
|     print("Commit SHA1: ", source_version) |  | ||||||
|  |  | ||||||
|     run_build_raw = s.post(build_base_url, json={ |  | ||||||
|         "definition": {"id": pipeline_id}, |  | ||||||
|         "project": {"id": project_id}, |  | ||||||
|         "sourceBranch": source_branch, |  | ||||||
|         "sourceVersion": source_version |  | ||||||
|     }) |  | ||||||
|  |  | ||||||
|     try: |  | ||||||
|         run_build_json = run_build_raw.json() |  | ||||||
|     except json.decoder.JSONDecodeError as e: |  | ||||||
|         print(e) |  | ||||||
|         print("Failed to parse the response. Check if the Azure DevOps PAT is incorrect or expired.") |  | ||||||
|         sys.exit(-1) |  | ||||||
|  |  | ||||||
|     build_id = run_build_json['id'] |  | ||||||
|  |  | ||||||
|     print("Submitted bulid: " + str(build_id)) |  | ||||||
|     print("Bulid URL: " + run_build_json['url']) |  | ||||||
|     return build_id |  | ||||||
|  |  | ||||||
| def get_build(_id): |  | ||||||
|     get_build_url = AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}?api-version=6.0" |  | ||||||
|     get_build_raw = s.get(get_build_url) |  | ||||||
|     return get_build_raw.json() |  | ||||||
|  |  | ||||||
| def get_build_logs(_id): |  | ||||||
|     get_build_logs_url = AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}/logs?api-version=6.0" |  | ||||||
|     get_build_logs_raw = s.get(get_build_logs_url) |  | ||||||
|     return get_build_logs_raw.json() |  | ||||||
|  |  | ||||||
| def get_log_content(url): |  | ||||||
|     resp = s.get(url) |  | ||||||
|     return resp.text |  | ||||||
|  |  | ||||||
| def wait_for_build(_id): |  | ||||||
|     build_detail = get_build(_id) |  | ||||||
|     build_status = build_detail['status'] |  | ||||||
|  |  | ||||||
|     while build_status == 'notStarted': |  | ||||||
|         print('Waiting for run to start: ' + str(_id)) |  | ||||||
|         sys.stdout.flush() |  | ||||||
|         try: |  | ||||||
|             build_detail = get_build(_id) |  | ||||||
|             build_status = build_detail['status'] |  | ||||||
|         except Exception as e: |  | ||||||
|             print("Error getting build") |  | ||||||
|             print(e) |  | ||||||
|  |  | ||||||
|         time.sleep(30) |  | ||||||
|  |  | ||||||
|     print("Bulid started: ", str(_id)) |  | ||||||
|  |  | ||||||
|     handled_logs = set() |  | ||||||
|     while build_status == 'inProgress': |  | ||||||
|         try: |  | ||||||
|             print("Waiting for log: " + str(_id)) |  | ||||||
|             logs = get_build_logs(_id) |  | ||||||
|         except Exception as e: |  | ||||||
|             print("Error fetching logs") |  | ||||||
|             print(e) |  | ||||||
|             time.sleep(30) |  | ||||||
|             continue |  | ||||||
|  |  | ||||||
|         for log in logs['value']: |  | ||||||
|             log_id = log['id'] |  | ||||||
|             if log_id in handled_logs: |  | ||||||
|                 continue |  | ||||||
|             handled_logs.add(log_id) |  | ||||||
|             print('Fetching log: \n' + log['url']) |  | ||||||
|             try: |  | ||||||
|                 log_content = get_log_content(log['url']) |  | ||||||
|                 print(log_content) |  | ||||||
|             except Exception as e: |  | ||||||
|                 print("Error getting log content") |  | ||||||
|                 print(e) |  | ||||||
|             sys.stdout.flush() |  | ||||||
|         build_detail = get_build(_id) |  | ||||||
|         build_status = build_detail['status'] |  | ||||||
|         time.sleep(30) |  | ||||||
|  |  | ||||||
|     build_result = build_detail['result'] |  | ||||||
|  |  | ||||||
|     print("Bulid status: " + build_status) |  | ||||||
|     print("Bulid result: " + build_result) |  | ||||||
|  |  | ||||||
|     return build_status, build_result |  | ||||||
|  |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     # Convert the branch name for Azure DevOps |  | ||||||
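|     # e.g. a CIRCLE_BRANCH of "pull/12345" becomes "refs/pull/12345/head"; other branches become "refs/heads/<branch>" |  | ||||||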
|     match = re.search(r'pull/(\d+)', TARGET_BRANCH) |  | ||||||
|     if match is not None: |  | ||||||
|         pr_num = match.group(1) |  | ||||||
|         SOURCE_BRANCH = f'refs/pull/{pr_num}/head' |  | ||||||
|     else: |  | ||||||
|         SOURCE_BRANCH = f'refs/heads/{TARGET_BRANCH}' |  | ||||||
|  |  | ||||||
|     MAX_RETRY = 2 |  | ||||||
|     retry = MAX_RETRY |  | ||||||
|  |  | ||||||
|     while retry > 0: |  | ||||||
|         build_id = submit_build(PIPELINE_ID, PROJECT_ID, SOURCE_BRANCH, TARGET_COMMIT) |  | ||||||
|         build_status, build_result = wait_for_build(build_id) |  | ||||||
|  |  | ||||||
|         if build_result != 'succeeded': |  | ||||||
|             retry = retry - 1 |  | ||||||
|             if retry > 0: |  | ||||||
|                 print("Retrying... remaining attempt: " + str(retry)) |  | ||||||
|                 # Wait a bit before retrying |  | ||||||
|                 time.sleep((MAX_RETRY - retry) * 120) |  | ||||||
|                 continue |  | ||||||
|             else: |  | ||||||
|                 print("No more chance to retry. Giving up.") |  | ||||||
|                 sys.exit(-1) |  | ||||||
|         else: |  | ||||||
|             break |  | ||||||
| @ -1,65 +0,0 @@ | |||||||
| # https://developercommunity.visualstudio.com/t/install-specific-version-of-vs-component/1142479 |  | ||||||
| # Where to find the links: https://docs.microsoft.com/en-us/visualstudio/releases/2019/history#release-dates-and-build-numbers |  | ||||||
|  |  | ||||||
| # BuildTools from S3 |  | ||||||
| $VS_DOWNLOAD_LINK = "https://s3.amazonaws.com/ossci-windows/vs${env:VS_VERSION}_BuildTools.exe" |  | ||||||
| $COLLECT_DOWNLOAD_LINK = "https://aka.ms/vscollect.exe" |  | ||||||
| $VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStudio.Workload.VCTools", |  | ||||||
|                                                      "--add Microsoft.Component.MSBuild", |  | ||||||
|                                                      "--add Microsoft.VisualStudio.Component.Roslyn.Compiler", |  | ||||||
|                                                      "--add Microsoft.VisualStudio.Component.TextTemplating", |  | ||||||
|                                                      "--add Microsoft.VisualStudio.Component.VC.CoreIde", |  | ||||||
|                                                      "--add Microsoft.VisualStudio.Component.VC.Redist.14.Latest", |  | ||||||
|                                                      "--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core", |  | ||||||
|                                                      "--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64", |  | ||||||
|                                                      "--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Win81") |  | ||||||
|  |  | ||||||
| if (${env:INSTALL_WINDOWS_SDK} -eq "1") { |  | ||||||
|     $VS_INSTALL_ARGS += "--add Microsoft.VisualStudio.Component.Windows10SDK.19041" |  | ||||||
| } |  | ||||||
|  |  | ||||||
| if (Test-Path "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe") { |  | ||||||
|     $VS_VERSION_major = [int] ${env:VS_VERSION}.split(".")[0] |  | ||||||
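|     # e.g. VS_VERSION=16.8.6 gives a search range of [16.8.6, 17) |  | ||||||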
|     $existingPath = & "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -products "Microsoft.VisualStudio.Product.BuildTools" -version "[${env:VS_VERSION}, $($VS_VERSION_major + 1))" -property installationPath |  | ||||||
|     if (($existingPath -ne $null) -and (!${env:CIRCLECI})) { |  | ||||||
|         echo "Found correctly versioned existing BuildTools installation in $existingPath" |  | ||||||
|         exit 0 |  | ||||||
|     } |  | ||||||
|     $pathToRemove = & "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -products "Microsoft.VisualStudio.Product.BuildTools" -property installationPath |  | ||||||
| } |  | ||||||
|  |  | ||||||
| echo "Downloading VS installer from S3." |  | ||||||
| curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe |  | ||||||
| if ($LASTEXITCODE -ne 0) { |  | ||||||
|     echo "Download of the VS 2019 Version ${env:VS_VERSION} installer failed" |  | ||||||
|     exit 1 |  | ||||||
| } |  | ||||||
|  |  | ||||||
| if ($pathToRemove -ne $null) { |  | ||||||
|     echo "Uninstalling $pathToRemove." |  | ||||||
|     $VS_UNINSTALL_ARGS = @("uninstall", "--installPath", "`"$pathToRemove`"", "--quiet","--wait") |  | ||||||
|     $process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_UNINSTALL_ARGS -NoNewWindow -Wait -PassThru |  | ||||||
|     $exitCode = $process.ExitCode |  | ||||||
|     if (($exitCode -ne 0) -and ($exitCode -ne 3010)) { |  | ||||||
|         echo "Original BuildTools uninstall failed with code $exitCode" |  | ||||||
|         exit 1 |  | ||||||
|     } |  | ||||||
|     echo "Other versioned BuildTools uninstalled." |  | ||||||
| } |  | ||||||
|  |  | ||||||
| echo "Installing Visual Studio version ${env:VS_VERSION}." |  | ||||||
| $process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru |  | ||||||
| Remove-Item -Path vs_installer.exe -Force |  | ||||||
| $exitCode = $process.ExitCode |  | ||||||
| if (($exitCode -ne 0) -and ($exitCode -ne 3010)) { |  | ||||||
|     echo "VS 2019 installer exited with code $exitCode, which should be one of [0, 3010]." |  | ||||||
|     curl.exe --retry 3 -kL $COLLECT_DOWNLOAD_LINK --output Collect.exe |  | ||||||
|     if ($LASTEXITCODE -ne 0) { |  | ||||||
|         echo "Download of the VS Collect tool failed." |  | ||||||
|         exit 1 |  | ||||||
|     } |  | ||||||
|     Start-Process "${PWD}\Collect.exe" -NoNewWindow -Wait -PassThru |  | ||||||
|     New-Item -Path "C:\w\build-results" -ItemType "directory" -Force |  | ||||||
|     Copy-Item -Path "${env:TEMP}\vslogs.zip" -Destination "C:\w\build-results\" |  | ||||||
|     exit 1 |  | ||||||
| } |  | ||||||
| @ -1,5 +0,0 @@ | |||||||
| $CMATH_DOWNLOAD_LINK = "https://raw.githubusercontent.com/microsoft/STL/12c684bba78f9b032050526abdebf14f58ca26a3/stl/inc/cmath" |  | ||||||
| $VC14_28_INSTALL_PATH="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.28.29910\include" |  | ||||||
|  |  | ||||||
| curl.exe --retry 3 -kL $CMATH_DOWNLOAD_LINK --output "$home\cmath" |  | ||||||
| Move-Item -Path "$home\cmath" -Destination "$VC14_28_INSTALL_PATH" -Force |  | ||||||
| @ -1,70 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
| case ${CUDA_VERSION} in |  | ||||||
|     10.2) |  | ||||||
|         cuda_installer_name="cuda_10.2.89_441.22_win10" |  | ||||||
|         cuda_install_packages="nvcc_10.2 cuobjdump_10.2 nvprune_10.2 cupti_10.2 cublas_10.2 cublas_dev_10.2 cudart_10.2 cufft_10.2 cufft_dev_10.2 curand_10.2 curand_dev_10.2 cusolver_10.2 cusolver_dev_10.2 cusparse_10.2 cusparse_dev_10.2 nvgraph_10.2 nvgraph_dev_10.2 npp_10.2 npp_dev_10.2 nvrtc_10.2 nvrtc_dev_10.2 nvml_dev_10.2" |  | ||||||
|         ;; |  | ||||||
|     11.3) |  | ||||||
|         cuda_installer_name="cuda_11.3.0_465.89_win10" |  | ||||||
|         cuda_install_packages="thrust_11.3 nvcc_11.3 cuobjdump_11.3 nvprune_11.3 nvprof_11.3 cupti_11.3 cublas_11.3 cublas_dev_11.3 cudart_11.3 cufft_11.3 cufft_dev_11.3 curand_11.3 curand_dev_11.3 cusolver_11.3 cusolver_dev_11.3 cusparse_11.3 cusparse_dev_11.3 npp_11.3 npp_dev_11.3 nvrtc_11.3 nvrtc_dev_11.3 nvml_dev_11.3" |  | ||||||
|         ;; |  | ||||||
|     11.6) |  | ||||||
|         cuda_installer_name="cuda_11.6.0_511.23_windows" |  | ||||||
|         cuda_install_packages="thrust_11.6 nvcc_11.6 cuobjdump_11.6 nvprune_11.6 nvprof_11.6 cupti_11.6 cublas_11.6 cublas_dev_11.6 cudart_11.6 cufft_11.6 cufft_dev_11.6 curand_11.6 curand_dev_11.6 cusolver_11.6 cusolver_dev_11.6 cusparse_11.6 cusparse_dev_11.6 npp_11.6 npp_dev_11.6 nvrtc_11.6 nvrtc_dev_11.6 nvml_dev_11.6" |  | ||||||
|         ;; |  | ||||||
|     *) |  | ||||||
|         echo "CUDA_VERSION $CUDA_VERSION is not supported yet" |  | ||||||
|         exit 1 |  | ||||||
|         ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
|  |  | ||||||
| if [[ -f "/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${CUDA_VERSION}/bin/nvcc.exe" ]]; then |  | ||||||
|     echo "Existing CUDA v${CUDA_VERSION} installation found, skipping install" |  | ||||||
| else |  | ||||||
|     tmp_dir=$(mktemp -d) |  | ||||||
|     ( |  | ||||||
|         # no need to popd after, the subshell shouldn't affect the parent shell |  | ||||||
|         pushd "${tmp_dir}" |  | ||||||
|         cuda_installer_link="https://ossci-windows.s3.amazonaws.com/${cuda_installer_name}.exe" |  | ||||||
|  |  | ||||||
|         curl --retry 3 -kLO $cuda_installer_link |  | ||||||
|         7z x ${cuda_installer_name}.exe -o${cuda_installer_name} |  | ||||||
|         pushd ${cuda_installer_name} |  | ||||||
|         mkdir cuda_install_logs |  | ||||||
|  |  | ||||||
|         set +e |  | ||||||
|  |  | ||||||
|         # cuda_install_packages is a space-separated package list that has to word-split into |  | ||||||
|         # individual arguments for setup.exe, so it must not be quoted here. |  | ||||||
|         # shellcheck disable=SC2086 |  | ||||||
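|         # Note: "pwd -W" makes Git Bash print a Windows-style path, which the native installer expects for -log. |  | ||||||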
|         ./setup.exe -s ${cuda_install_packages} -loglevel:6 -log:"$(pwd -W)/cuda_install_logs" |  | ||||||
|  |  | ||||||
|         set -e |  | ||||||
|  |  | ||||||
|         if [[ ! -f "/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${CUDA_VERSION}/bin/nvcc.exe" ]]; then |  | ||||||
|             echo "CUDA installation failed" |  | ||||||
|             mkdir -p /c/w/build-results |  | ||||||
|             7z a "c:\\w\\build-results\\cuda_install_logs.7z" cuda_install_logs |  | ||||||
|             exit 1 |  | ||||||
|         fi |  | ||||||
|     ) |  | ||||||
|     rm -rf "${tmp_dir}" |  | ||||||
| fi |  | ||||||
|  |  | ||||||
| if [[ -f "/c/Program Files/NVIDIA Corporation/NvToolsExt/bin/x64/nvToolsExt64_1.dll" ]]; then |  | ||||||
|     echo "Existing nvtools installation found, skipping install" |  | ||||||
| else |  | ||||||
|     # create tmp dir for download |  | ||||||
|     tmp_dir=$(mktemp -d) |  | ||||||
|     ( |  | ||||||
|         # no need to popd after, the subshell shouldn't affect the parent shell |  | ||||||
|         pushd "${tmp_dir}" |  | ||||||
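|         # NVTX (NvToolsExt) is not among the CUDA packages selected above, so it is fetched and copied in separately. |  | ||||||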
|         curl --retry 3 -kLO https://ossci-windows.s3.amazonaws.com/NvToolsExt.7z |  | ||||||
|         7z x NvToolsExt.7z -oNvToolsExt |  | ||||||
|         mkdir -p "C:/Program Files/NVIDIA Corporation/NvToolsExt" |  | ||||||
|         cp -r NvToolsExt/* "C:/Program Files/NVIDIA Corporation/NvToolsExt/" |  | ||||||
|     ) |  | ||||||
|     rm -rf "${tmp_dir}" |  | ||||||
| fi |  | ||||||
| @ -1,48 +0,0 @@ | |||||||
| #!/bin/bash |  | ||||||
| set -eux -o pipefail |  | ||||||
|  |  | ||||||
|  |  | ||||||
| windows_s3_link="https://ossci-windows.s3.amazonaws.com" |  | ||||||
|  |  | ||||||
| case ${CUDA_VERSION} in |  | ||||||
|     10.2) |  | ||||||
|         cudnn_file_name="cudnn-${CUDA_VERSION}-windows10-x64-v7.6.5.32" |  | ||||||
|         ;; |  | ||||||
|     11.3) |  | ||||||
|         # Use cudnn8.3 with hard-coded cuda11.5 version |  | ||||||
|         cudnn_file_name="cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive" |  | ||||||
|         ;; |  | ||||||
|     11.6) |  | ||||||
|         # Use cudnn8.3 with hard-coded cuda11.5 version |  | ||||||
|         cudnn_file_name="cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive" |  | ||||||
|         ;; |  | ||||||
|     *) |  | ||||||
|         echo "CUDA_VERSION: ${CUDA_VERSION} not supported yet" |  | ||||||
|         exit 1 |  | ||||||
|         ;; |  | ||||||
| esac |  | ||||||
|  |  | ||||||
| cudnn_installer_name="cudnn_installer.zip" |  | ||||||
| cudnn_installer_link="${windows_s3_link}/${cudnn_file_name}.zip" |  | ||||||
| cudnn_install_folder="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${CUDA_VERSION}/" |  | ||||||
|  |  | ||||||
| if [[ -f "${cudnn_install_folder}/include/cudnn.h" ]]; then |  | ||||||
|     echo "Existing cudnn installation found, skipping install..." |  | ||||||
| else |  | ||||||
|     tmp_dir=$(mktemp -d) |  | ||||||
|     ( |  | ||||||
|         pushd "${tmp_dir}" |  | ||||||
|         curl --retry 3 -o "${cudnn_installer_name}" "$cudnn_installer_link" |  | ||||||
|         7z x "${cudnn_installer_name}" -ocudnn |  | ||||||
|         # Remove any existing contents before copying the new files in. |  | ||||||
|         # "${var:?}" aborts if the variable is unset or empty, so this can never expand to '/*'; |  | ||||||
|         # the glob has to stay outside the quotes for it to expand at all. |  | ||||||
|         rm -rf "${cudnn_install_folder:?}"/* |  | ||||||
|         cp -rf cudnn/cuda/* "${cudnn_install_folder}" |  | ||||||
|  |  | ||||||
|         # cuDNN 8.3+ needs a zlib DLL on the Windows PATH, so copy it into System32 |  | ||||||
|         curl -k -L "${windows_s3_link}/zlib123dllx64.zip" --output "${tmp_dir}\zlib123dllx64.zip" |  | ||||||
|         7z x "${tmp_dir}\zlib123dllx64.zip" -o"${tmp_dir}\zlib" |  | ||||||
|         xcopy /Y "${tmp_dir}\zlib\dll_x64\*.dll" "C:\Windows\System32" |  | ||||||
|     ) |  | ||||||
|     rm -rf "${tmp_dir}" |  | ||||||
| fi |  | ||||||
| @ -1,65 +0,0 @@ | |||||||
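| # These top-level mappings are YAML anchors; job definitions pull them in with a merge key, e.g. "<<: *binary_linux_build_params". |  | ||||||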
| binary_linux_build_params: &binary_linux_build_params |  | ||||||
|   parameters: |  | ||||||
|     build_environment: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     docker_image: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     libtorch_variant: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     resource_class: |  | ||||||
|       type: string |  | ||||||
|       default: "2xlarge+" |  | ||||||
|   environment: |  | ||||||
|     BUILD_ENVIRONMENT: << parameters.build_environment >> |  | ||||||
|     LIBTORCH_VARIANT: << parameters.libtorch_variant >> |  | ||||||
|     ANACONDA_USER: pytorch |  | ||||||
|   resource_class: << parameters.resource_class >> |  | ||||||
|   docker: |  | ||||||
|     - image: << parameters.docker_image >> |  | ||||||
|  |  | ||||||
| binary_linux_test_upload_params: &binary_linux_test_upload_params |  | ||||||
|   parameters: |  | ||||||
|     build_environment: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     docker_image: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     libtorch_variant: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     resource_class: |  | ||||||
|       type: string |  | ||||||
|       default: "medium" |  | ||||||
|     use_cuda_docker_runtime: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|   environment: |  | ||||||
|     BUILD_ENVIRONMENT: << parameters.build_environment >> |  | ||||||
|     DOCKER_IMAGE: << parameters.docker_image >> |  | ||||||
|     USE_CUDA_DOCKER_RUNTIME: << parameters.use_cuda_docker_runtime >> |  | ||||||
|     LIBTORCH_VARIANT: << parameters.libtorch_variant >> |  | ||||||
|   resource_class: << parameters.resource_class >> |  | ||||||
|  |  | ||||||
| binary_mac_params: &binary_mac_params |  | ||||||
|   parameters: |  | ||||||
|     build_environment: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|   environment: |  | ||||||
|     BUILD_ENVIRONMENT: << parameters.build_environment >> |  | ||||||
|  |  | ||||||
| binary_windows_params: &binary_windows_params |  | ||||||
|   parameters: |  | ||||||
|     build_environment: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     executor: |  | ||||||
|       type: string |  | ||||||
|       default: "windows-xlarge-cpu-with-nvidia-cuda" |  | ||||||
|   environment: |  | ||||||
|     BUILD_ENVIRONMENT: << parameters.build_environment >> |  | ||||||
|     JOB_EXECUTOR: <<parameters.executor>> |  | ||||||
| @ -1,105 +0,0 @@ | |||||||
| pytorch_params: &pytorch_params |  | ||||||
|   parameters: |  | ||||||
|     build_environment: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     docker_image: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     resource_class: |  | ||||||
|       type: string |  | ||||||
|       default: "large" |  | ||||||
|     use_cuda_docker_runtime: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     build_only: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     ci_master: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|   environment: |  | ||||||
|     BUILD_ENVIRONMENT: << parameters.build_environment >> |  | ||||||
|     DOCKER_IMAGE: << parameters.docker_image >> |  | ||||||
|     USE_CUDA_DOCKER_RUNTIME: << parameters.use_cuda_docker_runtime >> |  | ||||||
|     BUILD_ONLY: << parameters.build_only >> |  | ||||||
|     CI_MASTER: << pipeline.parameters.run_master_build >> |  | ||||||
|   resource_class: << parameters.resource_class >> |  | ||||||
|  |  | ||||||
| pytorch_ios_params: &pytorch_ios_params |  | ||||||
|   parameters: |  | ||||||
|     build_environment: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     ios_arch: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     ios_platform: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     op_list: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     use_metal: |  | ||||||
|       type: string |  | ||||||
|       default: "0" |  | ||||||
|     lite_interpreter: |  | ||||||
|       type: string |  | ||||||
|       default: "1" |  | ||||||
|     use_coreml: |  | ||||||
|       type: string |  | ||||||
|       default: "0" |  | ||||||
|   environment: |  | ||||||
|     BUILD_ENVIRONMENT: << parameters.build_environment >> |  | ||||||
|     IOS_ARCH: << parameters.ios_arch >> |  | ||||||
|     IOS_PLATFORM: << parameters.ios_platform >> |  | ||||||
|     SELECTED_OP_LIST: << parameters.op_list >> |  | ||||||
|     USE_PYTORCH_METAL: << parameters.use_metal >> |  | ||||||
|     BUILD_LITE_INTERPRETER: << parameters.lite_interpreter >> |  | ||||||
|     USE_COREML_DELEGATE: << parameters.use_coreml >> |  | ||||||
|  |  | ||||||
| pytorch_windows_params: &pytorch_windows_params |  | ||||||
|   parameters: |  | ||||||
|     executor: |  | ||||||
|       type: string |  | ||||||
|       default: "windows-xlarge-cpu-with-nvidia-cuda" |  | ||||||
|     build_environment: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     test_name: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|     cuda_version: |  | ||||||
|       type: string |  | ||||||
|       default: "10.1" |  | ||||||
|     python_version: |  | ||||||
|       type: string |  | ||||||
|       default: "3.8" |  | ||||||
|     vs_version: |  | ||||||
|       type: string |  | ||||||
|       default: "16.8.6" |  | ||||||
|     vc_version: |  | ||||||
|       type: string |  | ||||||
|       default: "14.16" |  | ||||||
|     vc_year: |  | ||||||
|       type: string |  | ||||||
|       default: "2019" |  | ||||||
|     vc_product: |  | ||||||
|       type: string |  | ||||||
|       default: "BuildTools" |  | ||||||
|     use_cuda: |  | ||||||
|       type: string |  | ||||||
|       default: "" |  | ||||||
|   environment: |  | ||||||
|     BUILD_ENVIRONMENT: <<parameters.build_environment>> |  | ||||||
|     SCCACHE_BUCKET: "ossci-compiler-cache" |  | ||||||
|     CUDA_VERSION: <<parameters.cuda_version>> |  | ||||||
|     PYTHON_VERSION: <<parameters.python_version>> |  | ||||||
|     VS_VERSION: <<parameters.vs_version>> |  | ||||||
|     VC_VERSION: <<parameters.vc_version>> |  | ||||||
|     VC_YEAR: <<parameters.vc_year>> |  | ||||||
|     VC_PRODUCT: <<parameters.vc_product>> |  | ||||||
|     USE_CUDA: <<parameters.use_cuda>> |  | ||||||
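|     # TORCH_CUDA_ARCH_LIST limits the CUDA architectures built in CI to compute capabilities 5.2 and 7.5. |  | ||||||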
|     TORCH_CUDA_ARCH_LIST: "5.2 7.5" |  | ||||||
|     JOB_BASE_NAME: <<parameters.test_name>> |  | ||||||
|     JOB_EXECUTOR: <<parameters.executor>> |  | ||||||
| @ -1,174 +0,0 @@ | |||||||
| commands: |  | ||||||
|  |  | ||||||
|   calculate_docker_image_tag: |  | ||||||
|     description: "Calculates the docker image tag" |  | ||||||
|     steps: |  | ||||||
|       - run: |  | ||||||
|           name: "Calculate docker image hash" |  | ||||||
|           command: | |  | ||||||
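|             # "git rev-parse HEAD:.circleci/docker" resolves to the tree hash of that directory, |  | ||||||
|             # so the image tag only changes when the Docker build files themselves change. |  | ||||||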
|             DOCKER_TAG=$(git rev-parse HEAD:.circleci/docker) |  | ||||||
|             echo "DOCKER_TAG=${DOCKER_TAG}" >> "${BASH_ENV}" |  | ||||||
|  |  | ||||||
|   designate_upload_channel: |  | ||||||
|     description: "inserts the correct upload channel into ${BASH_ENV}" |  | ||||||
|     steps: |  | ||||||
|       - run: |  | ||||||
|           name: adding UPLOAD_CHANNEL to BASH_ENV |  | ||||||
|           command: | |  | ||||||
|             our_upload_channel=nightly |  | ||||||
|             # On tags upload to test instead |  | ||||||
|             if [[ -n "${CIRCLE_TAG}" ]]; then |  | ||||||
|               our_upload_channel=test |  | ||||||
|             fi |  | ||||||
|             echo "export UPLOAD_CHANNEL=${our_upload_channel}" >> ${BASH_ENV} |  | ||||||
|  |  | ||||||
|   # This system setup script is meant to run before the CI-related scripts, e.g., |  | ||||||
|   # installing Git client, checking out code, setting up CI env, and |  | ||||||
|   # building/testing. |  | ||||||
|   setup_linux_system_environment: |  | ||||||
|     steps: |  | ||||||
|       - run: |  | ||||||
|           name: Set Up System Environment |  | ||||||
|           no_output_timeout: "1h" |  | ||||||
|           command: .circleci/scripts/setup_linux_system_environment.sh |  | ||||||
|  |  | ||||||
|   setup_ci_environment: |  | ||||||
|     steps: |  | ||||||
|       - run: |  | ||||||
|           name: Set Up CI Environment After attach_workspace |  | ||||||
|           no_output_timeout: "1h" |  | ||||||
|           command: .circleci/scripts/setup_ci_environment.sh |  | ||||||
|  |  | ||||||
|   brew_update: |  | ||||||
|     description: "Update Homebrew and install base formulae" |  | ||||||
|     steps: |  | ||||||
|       - run: |  | ||||||
|           name: Update Homebrew |  | ||||||
|           no_output_timeout: "10m" |  | ||||||
|           command: | |  | ||||||
|             set -ex |  | ||||||
|  |  | ||||||
|             # Update repositories manually. |  | ||||||
|             # Running `brew update` produces a comparison between the |  | ||||||
|             # current checkout and the updated checkout, which takes a |  | ||||||
|             # very long time because the existing checkout is about two years old. |  | ||||||
|             for path in $(find /usr/local/Homebrew -type d -name .git) |  | ||||||
|             do |  | ||||||
|               cd $path/.. |  | ||||||
|               git fetch --depth=1 origin |  | ||||||
|               git reset --hard origin/master |  | ||||||
|             done |  | ||||||
|  |  | ||||||
|             export HOMEBREW_NO_AUTO_UPDATE=1 |  | ||||||
|  |  | ||||||
|             # Install expect and moreutils so that we can call `unbuffer` and `ts`. |  | ||||||
|             # moreutils installs a `parallel` executable by default, which conflicts |  | ||||||
|             # with the executable from the GNU `parallel`, so we must unlink GNU |  | ||||||
|             # `parallel` first, and relink it afterwards. |  | ||||||
|             brew unlink parallel |  | ||||||
|             brew install moreutils |  | ||||||
|             brew link parallel --overwrite |  | ||||||
|             brew install expect |  | ||||||
|  |  | ||||||
|   brew_install: |  | ||||||
|     description: "Install Homebrew formulae" |  | ||||||
|     parameters: |  | ||||||
|       formulae: |  | ||||||
|         type: string |  | ||||||
|         default: "" |  | ||||||
|     steps: |  | ||||||
|       - run: |  | ||||||
|           name: Install << parameters.formulae >> |  | ||||||
|           no_output_timeout: "10m" |  | ||||||
|           command: | |  | ||||||
|             set -ex |  | ||||||
|             export HOMEBREW_NO_AUTO_UPDATE=1 |  | ||||||
|             brew install << parameters.formulae >> |  | ||||||
|  |  | ||||||
|   run_brew_for_macos_build: |  | ||||||
|     steps: |  | ||||||
|       - brew_update |  | ||||||
|       - brew_install: |  | ||||||
|           formulae: libomp |  | ||||||
|  |  | ||||||
|   run_brew_for_ios_build: |  | ||||||
|     steps: |  | ||||||
|       - brew_update |  | ||||||
|       - brew_install: |  | ||||||
|           formulae: libtool |  | ||||||
|  |  | ||||||
|   optional_merge_target_branch: |  | ||||||
|     steps: |  | ||||||
|       - run: |  | ||||||
|           name: (Optional) Merge target branch |  | ||||||
|           no_output_timeout: "10m" |  | ||||||
|           command: | |  | ||||||
|             if [[ -n "$CIRCLE_PULL_REQUEST" && "$CIRCLE_BRANCH" != "nightly" ]]; then |  | ||||||
|               PR_NUM=$(basename $CIRCLE_PULL_REQUEST) |  | ||||||
|               CIRCLE_PR_BASE_BRANCH=$(curl -s https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pulls/$PR_NUM | jq -r '.base.ref') |  | ||||||
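|               # Only xla and gcc5 builds merge the base branch in first, so those jobs test the prospective merge result. |  | ||||||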
|               if [[ "${BUILD_ENVIRONMENT}" == *"xla"* || "${BUILD_ENVIRONMENT}" == *"gcc5"* ]] ; then |  | ||||||
|                 set -x |  | ||||||
|                 git config --global user.email "circleci.ossci@gmail.com" |  | ||||||
|                 git config --global user.name "CircleCI" |  | ||||||
|                 git config remote.origin.url https://github.com/pytorch/pytorch.git |  | ||||||
|                 git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master |  | ||||||
|                 git fetch --tags --progress https://github.com/pytorch/pytorch.git +refs/heads/master:refs/remotes/origin/master --depth=100 --quiet |  | ||||||
|                 # PRs generated from ghstack have base branches of the form CIRCLE_PR_BASE_BRANCH=gh/xxx/1234/base |  | ||||||
|                 if [[ "${CIRCLE_PR_BASE_BRANCH}" == "gh/"* ]]; then |  | ||||||
|                   CIRCLE_PR_BASE_BRANCH=master |  | ||||||
|                 fi |  | ||||||
|                 export GIT_MERGE_TARGET=`git log -n 1 --pretty=format:"%H" origin/$CIRCLE_PR_BASE_BRANCH` |  | ||||||
|                 echo "GIT_MERGE_TARGET: " ${GIT_MERGE_TARGET} |  | ||||||
|                 export GIT_COMMIT=${CIRCLE_SHA1} |  | ||||||
|                 echo "GIT_COMMIT: " ${GIT_COMMIT} |  | ||||||
|                 git checkout -f ${GIT_COMMIT} |  | ||||||
|                 git reset --hard ${GIT_COMMIT} |  | ||||||
|                 git merge --allow-unrelated-histories --no-edit --no-ff ${GIT_MERGE_TARGET} |  | ||||||
|                 echo "Merged $CIRCLE_PR_BASE_BRANCH branch before building in environment $BUILD_ENVIRONMENT" |  | ||||||
|                 set +x |  | ||||||
|               else |  | ||||||
|                 echo "No need to merge with $CIRCLE_PR_BASE_BRANCH, skipping..." |  | ||||||
|               fi |  | ||||||
|             else |  | ||||||
|               echo "This is not a pull request, skipping..." |  | ||||||
|             fi |  | ||||||
|  |  | ||||||
|   upload_binary_size_for_android_build: |  | ||||||
|     description: "Upload binary size data for Android build" |  | ||||||
|     parameters: |  | ||||||
|       build_type: |  | ||||||
|         type: string |  | ||||||
|         default: "" |  | ||||||
|       artifacts: |  | ||||||
|         type: string |  | ||||||
|         default: "" |  | ||||||
|     steps: |  | ||||||
|       - run: |  | ||||||
|           name: "Binary Size - Install Dependencies" |  | ||||||
|           no_output_timeout: "5m" |  | ||||||
|           command: | |  | ||||||
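|             # retry: run the given command up to five times with exponential backoff (1, 2, 4, 8 second sleeps). |  | ||||||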
|             retry () { |  | ||||||
|               $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) |  | ||||||
|             } |  | ||||||
|             retry pip3 install requests |  | ||||||
|       - run: |  | ||||||
|           name: "Binary Size - Untar Artifacts" |  | ||||||
|           no_output_timeout: "5m" |  | ||||||
|           command: | |  | ||||||
|             # The artifact file is created inside the Docker container and contains the result binaries. |  | ||||||
|             # Unpack it into the project folder; the subsequent script scans that folder to locate the |  | ||||||
|             # binaries and report their sizes. |  | ||||||
|             # If no artifact file is provided, the project folder is assumed to have been mounted into |  | ||||||
|             # the container during the build and to already contain the binaries, so this step is skipped. |  | ||||||
|             export ARTIFACTS="<< parameters.artifacts >>" |  | ||||||
|             if [ -n "${ARTIFACTS}" ]; then |  | ||||||
|               tar xf "${ARTIFACTS}" -C ~/project |  | ||||||
|             fi |  | ||||||
|       - run: |  | ||||||
|           name: "Binary Size - Upload << parameters.build_type >>" |  | ||||||
|           no_output_timeout: "5m" |  | ||||||
|           command: | |  | ||||||
|             cd ~/project |  | ||||||
|             export ANDROID_BUILD_TYPE="<< parameters.build_type >>" |  | ||||||
|             export COMMIT_TIME=$(git log --max-count=1 --format=%ct || echo 0) |  | ||||||
|             python3 -m tools.stats.upload_binary_size_to_scuba android |  | ||||||
| @ -1,41 +0,0 @@ | |||||||
| # WARNING: DO NOT EDIT THIS FILE DIRECTLY!!! |  | ||||||
| # See the README.md in this directory. |  | ||||||
|  |  | ||||||
| # IMPORTANT: To update Docker image version, please follow |  | ||||||
| # the instructions at |  | ||||||
| # https://github.com/pytorch/pytorch/wiki/Docker-image-build-on-CircleCI |  | ||||||
|  |  | ||||||
| version: 2.1 |  | ||||||
|  |  | ||||||
| parameters: |  | ||||||
|   run_binary_tests: |  | ||||||
|     type: boolean |  | ||||||
|     default: false |  | ||||||
|   run_build: |  | ||||||
|     type: boolean |  | ||||||
|     default: true |  | ||||||
|   run_master_build: |  | ||||||
|     type: boolean |  | ||||||
|     default: false |  | ||||||
|   run_slow_gradcheck_build: |  | ||||||
|     type: boolean |  | ||||||
|     default: false |  | ||||||
|  |  | ||||||
| executors: |  | ||||||
|   windows-with-nvidia-gpu: |  | ||||||
|     machine: |  | ||||||
|       resource_class: windows.gpu.nvidia.medium |  | ||||||
|       image: windows-server-2019-nvidia:previous |  | ||||||
|       shell: bash.exe |  | ||||||
|  |  | ||||||
|   windows-xlarge-cpu-with-nvidia-cuda: |  | ||||||
|     machine: |  | ||||||
|       resource_class: windows.xlarge |  | ||||||
|       image: windows-server-2019-vs2019:stable |  | ||||||
|       shell: bash.exe |  | ||||||
|  |  | ||||||
|   windows-medium-cpu-with-nvidia-cuda: |  | ||||||
|     machine: |  | ||||||
|       resource_class: windows.medium |  | ||||||
|       image: windows-server-2019-vs2019:stable |  | ||||||
|       shell: bash.exe |  | ||||||
| @ -1,14 +0,0 @@ | |||||||
|  |  | ||||||
| # TODO: there is currently no testing for libtorch |  | ||||||
| #  binary_linux_libtorch_3.6m_cpu_test: |  | ||||||
| #    environment: |  | ||||||
| #      BUILD_ENVIRONMENT: "libtorch 3.6m cpu" |  | ||||||
| #    resource_class: gpu.nvidia.small |  | ||||||
| #    <<: *binary_linux_test |  | ||||||
| # |  | ||||||
| #  binary_linux_libtorch_3.6m_cu90_test: |  | ||||||
| #    environment: |  | ||||||
| #      BUILD_ENVIRONMENT: "libtorch 3.6m cu90" |  | ||||||
| #    resource_class: gpu.nvidia.small |  | ||||||
| #    <<: *binary_linux_test |  | ||||||
| # |  | ||||||
Some files were not shown because too many files have changed in this diff.