Build and test ExecuTorch on PyTorch (#113364)
This is the first part of building and testing ExecuTorch on PyTorch using a pinned commit. A later PR will update the pinned commit periodically.

* The pinned commit lives in `.ci/docker/ci_commit_pins/executorch.txt` and is baked into the PyTorch Docker image.
* I add one simple test: `source .ci/scripts/test.sh mv3 cmake xnnpack-quantization-delegation ''`. More could be added later; in fact, any ExecuTorch test on Linux could run here.
* torchvision and torchaudio need to be built and installed in CI after PyTorch is built, because they would otherwise be broken.

Next steps, in sequence:

* [ ] Update this pinned commit periodically, similar to https://github.com/pytorch/pytorch/pull/113499
* [ ] Increase ExecuTorch coverage on PyTorch CI; ideally, we should run all ExecuTorch pull jobs
* [ ] Switch ExecuTorch's torch, vision, and audio nightly pins to commit pins
* [ ] Update ExecuTorch's torch, vision, and audio commit pins periodically

Pull Request resolved: https://github.com/pytorch/pytorch/pull/113364
Approved by: https://github.com/ZainRizvi, https://github.com/malfet, https://github.com/guangy10
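For reference, the smoke test added here boils down to the following invocation from the root of an ExecuTorch checkout. This is a minimal sketch: the positional-argument meanings are inferred from ExecuTorch CI conventions rather than stated in this PR, so treat the annotations as assumptions.

```bash
# Run inside an ExecuTorch checkout that already has the matching torch installed.
export PYTHON_EXECUTABLE=python
# shellcheck disable=SC1091
source .ci/scripts/test.sh mv3 cmake xnnpack-quantization-delegation ''
# Positional arguments (assumed semantics):
#   mv3                              -> model to export (MobileNetV3)
#   cmake                            -> build system used for the ExecuTorch runner
#   xnnpack-quantization-delegation  -> backend/quantization flavor to exercise
#   ''                               -> empty slot for extra flags
```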
@@ -266,6 +266,12 @@ case "$image" in
     TRITON=yes
     DOCS=yes
     ;;
+  pytorch-linux-jammy-py3-clang12-executorch)
+    ANACONDA_PYTHON_VERSION=3.10
+    CLANG_VERSION=12
+    CONDA_CMAKE=yes
+    EXECUTORCH=yes
+    ;;
   pytorch-linux-focal-linter)
     # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
     # We will need to update mypy version eventually, but that's for another day. The task
@@ -367,6 +373,7 @@ docker build \
   --build-arg "ONNX=${ONNX}" \
   --build-arg "DOCS=${DOCS}" \
   --build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \
+  --build-arg "EXECUTORCH=${EXECUTORCH}" \
   -f $(dirname ${DOCKERFILE})/Dockerfile \
   -t "$tmp_tag" \
   "$@" \
.ci/docker/ci_commit_pins/executorch.txt (new file, 1 line)
@@ -0,0 +1 @@
+9682172576d5d9a10f3162ad91e0a32b384a3b7c
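The pin is consumed by `get_pinned_commit` from `common_utils.sh`, which is not part of this diff. Judging from the Dockerfile hunk further below, where `ci_commit_pins/executorch.txt` is copied next to the install script as `executorch.txt`, a minimal sketch of that helper could look as follows (an assumption for illustration, not the actual implementation):

```bash
# Hypothetical sketch of the helper used by install_executorch.sh below:
# resolve the pinned commit for a component from <component>.txt in the
# current directory, which the Dockerfile populates via COPY.
get_pinned_commit() {
  cat "${1}.txt"
}
```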
.ci/docker/common/install_executorch.sh (new executable file, 62 lines)
@@ -0,0 +1,62 @@
#!/bin/bash

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

clone_executorch() {
  EXECUTORCH_PINNED_COMMIT=$(get_pinned_commit executorch)

  # Clone ExecuTorch
  git clone https://github.com/pytorch/executorch.git

  # and fetch the target commit
  pushd executorch
  git checkout "${EXECUTORCH_PINNED_COMMIT}"
  git submodule update --init
  popd

  chown -R jenkins executorch
}

install_buck2() {
  pushd executorch/.ci/docker

  BUCK2_VERSION=$(cat ci_commit_pins/buck2.txt)
  source common/install_buck.sh

  popd
}

install_conda_dependencies() {
  pushd executorch/.ci/docker
  # Install conda dependencies like flatbuffer
  conda_install --file conda-env-ci.txt
  popd
}

install_pip_dependencies() {
  pushd executorch/.ci/docker
  # Install all Python dependencies
  pip_install -r requirements-ci.txt
  popd
}

setup_executorch() {
  pushd executorch
  source .ci/scripts/utils.sh

  install_flatc_from_source
  pip_install .
  build_executorch_runner "cmake"

  # Make sure that all the newly generated files are owned by Jenkins
  chown -R jenkins .
  popd
}

clone_executorch
install_buck2
install_conda_dependencies
install_pip_dependencies
setup_executorch
@@ -146,6 +146,14 @@ COPY ci_commit_pins/triton.txt triton.txt
 RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
 RUN rm install_triton.sh common_utils.sh triton.txt
 
+ARG EXECUTORCH
+# Build and install executorch
+COPY ./common/install_executorch.sh install_executorch.sh
+COPY ./common/common_utils.sh common_utils.sh
+COPY ci_commit_pins/executorch.txt executorch.txt
+RUN if [ -n "${EXECUTORCH}" ]; then bash ./install_executorch.sh; fi
+RUN rm install_executorch.sh common_utils.sh executorch.txt
+
 ARG ONNX
 # Install ONNX dependencies
 COPY ./common/install_onnx.sh ./common/common_utils.sh ./
@@ -63,6 +63,12 @@ else
   export LLVM_DIR=/opt/llvm/lib/cmake/llvm
 fi
 
+if [[ "$BUILD_ENVIRONMENT" == *executorch* ]]; then
+  # To build test_edge_op_registration
+  export BUILD_EXECUTORCH=ON
+  export USE_CUDA=0
+fi
+
 if ! which conda; then
   # In ROCm CIs, we are doing cross compilation on build machines with
   # intel cpu and later run tests on machines with amd cpu.
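To reproduce this build configuration outside CI, the same toggles can be exported before a regular PyTorch source build. This is a rough sketch: the `python setup.py develop` step is the standard from-source build command and is assumed here rather than taken from this diff.

```bash
# Mirror the CI branch above: enable the ExecuTorch op-registration test target
# (test_edge_op_registration) and build a CPU-only PyTorch.
export BUILD_EXECUTORCH=ON
export USE_CUDA=0
python setup.py develop
```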
@@ -999,9 +999,28 @@ test_docs_test() {
 }
 
 test_executorch() {
+  pushd /executorch
+
+  echo "Install torchvision and torchaudio"
+  # TODO(huydhn): Switch this to the pinned commits on ExecuTorch once they are
+  # there. These libraries need to be built here, and not part of the Docker
+  # image because they require the target version of torch to be installed first
+  pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git"
+  pip_install --no-use-pep517 --user "git+https://github.com/pytorch/vision.git"
+
+  echo "Run ExecuTorch regression tests for some models"
+  # NB: This is a sample model, more can be added here
+  export PYTHON_EXECUTABLE=python
+  # TODO(huydhn): Add more coverage here using ExecuTorch's gather models script
+  # shellcheck disable=SC1091
+  source .ci/scripts/test.sh mv3 cmake xnnpack-quantization-delegation ''
+
+  popd
+
   # Test torchgen generated code for Executorch.
-  echo "Testing Executorch op registration"
+  echo "Testing ExecuTorch op registration"
   "$BUILD_BIN_DIR"/test_edge_op_registration
 
   assert_git_not_dirty
 }
@@ -1016,6 +1035,8 @@ elif [[ "${TEST_CONFIG}" == *xla* ]]; then
   install_torchvision
   build_xla
   test_xla
+elif [[ "${TEST_CONFIG}" == *executorch* ]]; then
+  test_executorch
 elif [[ "$TEST_CONFIG" == 'jit_legacy' ]]; then
   test_python_legacy_jit
 elif [[ "${BUILD_ENVIRONMENT}" == *libtorch* ]]; then
@@ -1117,5 +1138,4 @@ else
   test_custom_backend
   test_torch_function_benchmark
   test_benchmarks
-  test_executorch
 fi
.github/workflows/docker-builds.yml (1 line changed, vendored)
@@ -49,6 +49,7 @@ jobs:
           - docker-image-name: pytorch-linux-focal-py3-clang10-onnx
           - docker-image-name: pytorch-linux-focal-linter
           - docker-image-name: pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter
+          - docker-image-name: pytorch-linux-jammy-py3-clang12-executorch
     env:
       DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/${{ matrix.docker-image-name }}
     steps:
.github/workflows/trunk.yml (20 lines changed, vendored)
@@ -175,6 +175,26 @@ jobs:
           { config: "force_on_cpu", shard: 1, num_shards: 1, runner: "windows.4xlarge.nonephemeral" },
         ]}
 
+  linux-jammy-py3-clang12-executorch-build:
+    name: linux-jammy-py3-clang12-executorch
+    uses: ./.github/workflows/_linux-build.yml
+    with:
+      build-environment: linux-jammy-py3-clang12-executorch
+      docker-image-name: pytorch-linux-jammy-py3-clang12-executorch
+      test-matrix: |
+        { include: [
+          { config: "executorch", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+        ]}
+
+  linux-jammy-py3-clang12-executorch-test:
+    name: linux-jammy-py3-clang12-executorch
+    uses: ./.github/workflows/_linux-test.yml
+    needs: linux-jammy-py3-clang12-executorch-build
+    with:
+      build-environment: linux-jammy-py3-clang12-executorch
+      docker-image: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.docker-image }}
+      test-matrix: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.test-matrix }}
+
   linux-focal-rocm5_7-py3_8-build:
     name: linux-focal-rocm5.7-py3.8
     uses: ./.github/workflows/_linux-build.yml
@@ -1,6 +1,8 @@
 # Owner(s): ["oncall: mobile"]
 import copy
 
+import pytest
+
 import torch
 import torch._export as export
@@ -17,6 +19,7 @@ def _get_ops_list(m: torch.fx.GraphModule):
 
 
 class TestQuantizePT2EModels(TestCase):
+    @pytest.mark.xfail()
     @skip_if_no_torchvision
     def test_vit_aten_export(self):
         from torchvision.models import vit_b_16  # @manual
@@ -34,3 +37,9 @@ class TestQuantizePT2EModels(TestCase):
             if "scaled_dot_product" in str(op):
                 non_core_aten_op_found = True
         self.assertFalse(non_core_aten_op_found)
+
+
+if __name__ == "__main__":
+    from torch.testing._internal.common_utils import run_tests
+
+    run_tests()