Compare commits


6 Commits

74832f12fa  Revert "CI: Specify libc and libstdcxx versions in conda environments" (#122497)
    This reverts commit b4f90aae1b375bfe06d3c4a099240e06f93c81c4.
    2024-03-22 11:27:50 -04:00

02cdb400d7  Use temporary name for triton package, fix lint (#122438)
    * Use temporary name for triton package
    * Fix lint
    2024-03-21 17:30:38 -04:00

37257774c6  Triton wheel build using 2.3.x branch (#122403)
    * Triton build 2.3.x
    * Revert "[Release Only] Build triton using pinned version rather branch (#121765)"
      This reverts commit d69c4219127e2cf5d9637b0daacc0a24e65f8133.
    * Triton wheel change
    * release
    2024-03-21 12:52:21 -04:00

c4e5434423  necessary change to make torch2.3 work with triton2.2 (#122139)
    2024-03-21 08:24:53 -04:00

b4f90aae1b  CI: Specify libc and libstdcxx versions in conda environments (#121929)
    Without this we get mismatches between the GLIBC and GLIBCXX ABI used
    by conda packages vs pytorch.
    Pull Request resolved: https://github.com/pytorch/pytorch/pull/121556
    Approved by: https://github.com/isuruf, https://github.com/malfet
    (cherry picked from commit 7a53dedb07ed72b85d1e083ce38c43c7810fc5f1)
    Co-authored-by: Peter Bell <peterbell10@live.co.uk>
    2024-03-14 17:56:46 -04:00

94d6463255  [RELEASE ONLY CHANGES] Increase timeout for linux binary jobs, fix workflow lint (#121851)
    * [release only] Increase timeout job for linux binary builds by 30min
    * fix lint
    2024-03-13 19:50:57 -04:00
9 changed files with 95 additions and 27 deletions

@@ -1 +1 @@
-a9bc1a36470eefafe0e2ab2503b8698f1e89e7e3
+79c6c9b209a5692b9a895398f4f3a033f8f80415

@@ -1 +1 @@
-3.0.0
+2.3.0

@@ -78,8 +78,8 @@ TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
 if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
   # Only linux Python < 3.12 are supported wheels for triton
   TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.12'"
-  TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
-  if [[ -n "$PYTORCH_BUILD_VERSION" ]]; then
+  TRITON_REQUIREMENT="pytorch-triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
+  if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
     TRITON_SHORTHASH=$(cut -c1-10 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton.txt)
     TRITON_REQUIREMENT="pytorch-triton==${TRITON_VERSION}+${TRITON_SHORTHASH}; ${TRITON_CONSTRAINT}"
   fi

@@ -89,7 +89,7 @@ fi
 # Set triton via PYTORCH_EXTRA_INSTALL_REQUIREMENTS for triton rocm package
 if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*rocm.* && $(uname) == "Linux" && "$DESIRED_PYTHON" != "3.12" ]]; then
   TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}"
-  if [[ -n "$PYTORCH_BUILD_VERSION" ]]; then
+  if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
     TRITON_SHORTHASH=$(cut -c1-10 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton-rocm.txt)
     TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}+${TRITON_SHORTHASH}"
   fi

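The effect of the two hunks above: release builds now depend on a plain pytorch-triton==<version>, and only dev (nightly) builds append a +<shorthash> local version cut from the commit pin. A minimal Python sketch of that selection; the nightly version string below is made up for illustration, only the pin hashes come from this compare:

# Sketch of the TRITON_REQUIREMENT selection logic above.
def triton_requirement(build_version, triton_version, pin):
    constraint = (
        "platform_system == 'Linux' and platform_machine == 'x86_64' "
        "and python_version < '3.12'"
    )
    requirement = f"pytorch-triton=={triton_version}; {constraint}"
    # Only dev (nightly) builds pin a specific triton commit via a local version.
    if build_version and "dev" in build_version:
        shorthash = pin[:10]  # mirrors: cut -c1-10 .ci/docker/ci_commit_pins/triton.txt
        requirement = f"pytorch-triton=={triton_version}+{shorthash}; {constraint}"
    return requirement

# Release build: plain pinned version.
print(triton_requirement("2.3.0", "2.3.0", "79c6c9b209a5692b9a895398f4f3a033f8f80415"))
# Hypothetical nightly build: local version carries the first ten characters of the pin.
print(triton_requirement("2.4.0.dev20240322", "3.0.0", "a9bc1a36470eefafe0e2ab2503b8698f1e89e7e3"))
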
@@ -11,7 +11,7 @@ SCRIPT_DIR = Path(__file__).parent
 REPO_DIR = SCRIPT_DIR.parent.parent

 # TODO: Remove me once Triton version is again in sync for vanilla and ROCm
-ROCM_TRITION_VERSION = "2.1.0"
+ROCM_TRITION_VERSION = "2.2.0"

 def read_triton_pin(rocm_hash: bool = False) -> str:

@@ -99,7 +99,17 @@ def build_triton(
             triton_repo = "https://github.com/openai/triton"
             triton_pkg_name = "pytorch-triton"
         check_call(["git", "clone", triton_repo], cwd=tmpdir)
-        check_call(["git", "checkout", commit_hash], cwd=triton_basedir)
+        if release:
+            ver, rev, patch = version.split(".")
+            if build_rocm:
+                check_call(["git", "checkout", "release/2.2.x"], cwd=triton_basedir)
+            else:
+                check_call(
+                    ["git", "checkout", f"release/{ver}.{rev}.x"], cwd=triton_basedir
+                )
+        else:
+            check_call(["git", "checkout", commit_hash], cwd=triton_basedir)
         if build_conda:
             with open(triton_basedir / "meta.yaml", "w") as meta:
                 print(

@@ -109,7 +119,7 @@ def build_triton(
             print("source:\n  path: .\n", file=meta)
             print(
                 "build:\n  string: py{{py}}\n  number: 1\n  script: cd python; "
-                "python setup.py install --record=record.txt\n",
+                "python setup.py install --single-version-externally-managed --record=record.txt\n",
                 " script_env:\n   - MAX_JOBS\n",
                 file=meta,
             )

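The new release path checks out a release/<major>.<minor>.x branch derived from the version string, with ROCm pinned to release/2.2.x. A small sketch of that mapping; versions other than 2.3.0 are extrapolations of the same rule, not values from this compare:

def release_branch(version, build_rocm):
    # ROCm triton stays pinned to its own release branch.
    if build_rocm:
        return "release/2.2.x"
    ver, rev, _patch = version.split(".")
    return f"release/{ver}.{rev}.x"

assert release_branch("2.3.0", build_rocm=False) == "release/2.3.x"
assert release_branch("2.3.0", build_rocm=True) == "release/2.2.x"
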
@@ -78,7 +78,7 @@ on:
 jobs:
   build:
     runs-on: ${{ inputs.runs_on }}
-    timeout-minutes: 180
+    timeout-minutes: 210
     env:
       PYTORCH_ROOT: ${{ inputs.PYTORCH_ROOT }}
       BUILDER_ROOT: ${{ inputs.BUILDER_ROOT }}

@@ -107,9 +107,14 @@ jobs:
             BUILD_ROCM="--build-rocm"
           fi

+          RELEASE=""
+          if [[ "${IS_RELEASE_TAG}" == true ]]; then
+            RELEASE="--release"
+          fi
+
           docker exec -t "${container_name}" yum install -y zlib-devel zip
           docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" -m pip install -U setuptools==67.4.0
-          docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" /pytorch/.github/scripts/build_triton_wheel.py $BUILD_ROCM
+          docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" /pytorch/.github/scripts/build_triton_wheel.py $BUILD_ROCM $RELEASE
           docker exec -t "${container_name}" chown -R 1000.1000 /artifacts

       - uses: actions/upload-artifact@v3

@@ -230,8 +235,13 @@ jobs:
             "${DOCKER_IMAGE}" \
           )

+          RELEASE=""
+          if [[ "${IS_RELEASE_TAG}" == true ]]; then
+            RELEASE="--release"
+          fi
+
           docker exec -t "${container_name}" yum install -y llvm11 llvm11-devel llvm11-static llvm11-libs zlib-devel
-          docker exec -t "${container_name}" python /pytorch/.github/scripts/build_triton_wheel.py --build-conda --py-version="${PY_VERS}"
+          docker exec -t "${container_name}" python /pytorch/.github/scripts/build_triton_wheel.py --build-conda --py-version="${PY_VERS}" $RELEASE
           docker exec -t "${container_name}" chown -R 1000.1000 /artifacts

       - uses: actions/upload-artifact@v3

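Both jobs now pass $RELEASE, which expands to --release only when IS_RELEASE_TAG is true. A hypothetical sketch of the script side of that contract; only the four flags visible in this compare are confirmed, the argparse wiring itself is an assumption:

from argparse import ArgumentParser

# Hypothetical CLI wiring for build_triton_wheel.py; flag names are taken
# from this compare, everything else is assumed.
parser = ArgumentParser(description="Build triton wheel or conda package")
parser.add_argument("--build-rocm", action="store_true")
parser.add_argument("--build-conda", action="store_true")
parser.add_argument("--py-version", type=str)
parser.add_argument("--release", action="store_true")
args = parser.parse_args()
# release=True switches the checkout from the pinned commit to the
# release/<ver>.<rev>.x branch, per the build_triton_wheel.py diff above.
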
@@ -113,6 +113,7 @@ jobs:
           CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
           conda activate "${CONDA_ENV}"
+          export RELEASE_VERSION_TAG="2.3"

           # Regenerate workflows
           .github/scripts/generate_ci_workflows.py
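
Exporting RELEASE_VERSION_TAG="2.3" before regenerating workflows lets the generator emit release-branch variants. A hypothetical illustration of the pattern; how generate_ci_workflows.py actually consumes the variable is not shown in this compare:

import os

# Hypothetical reader of RELEASE_VERSION_TAG; not the real generator API.
def ci_channel():
    tag = os.environ.get("RELEASE_VERSION_TAG", "")
    return f"release/{tag}" if tag else "nightly"

os.environ["RELEASE_VERSION_TAG"] = "2.3"
print(ci_channel())  # release/2.3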

@@ -95,7 +95,6 @@ def generate_ttir(kernel, kwargs):
     """
     Uses Triton's internal code generation to create TTIR
     """
-    import triton
     from triton.compiler.compiler import ASTSource
     from triton.runtime.autotuner import Autotuner
     from triton.runtime.jit import JITFunction

@@ -145,15 +144,21 @@ def generate_ttir(kernel, kwargs):
         if i not in kernel.constexprs
     }

-    context = triton._C.libtriton.ir.context()
-    target = triton.runtime.driver.active.get_current_target()
-    backend = triton.compiler.compiler.make_backend(target)
+    def get_backend():
+        from triton.compiler.backends.cuda import CUDABackend
+        from triton.runtime.driver import driver
+
+        target = driver.get_current_target()
+        return CUDABackend(target)
+
+    backend = get_backend()
+
     options = backend.parse_options(dict())
-    triton._C.libtriton.ir.load_dialects(context)
-    backend.load_dialects(context)
+
+    # triton._C.libtriton.triton.ir.load_dialects(context)
+    # backend.load_dialects(context)

     src = ASTSource(kernel, signature, constants, specialization)
-    ttir_module = src.make_ir(options, context)
+    ttir_module = src.make_ir(options)
     if not ttir_module.verify():
         raise Exception("Verification for TTIR module has failed")

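The rewrite swaps triton 3.0's make_backend/driver.active API for the 2.2-era CUDABackend. A hedged compatibility sketch; both import paths appear in the diff above, but the runtime fallback between them is this sketch's own addition, not code from the compare:

# Pick whichever backend API the installed triton exposes.
def get_cuda_backend():
    try:
        # triton 3.0-era API (the lines this diff removes)
        from triton.compiler.compiler import make_backend
        from triton.runtime.driver import driver

        return make_backend(driver.active.get_current_target())
    except ImportError:
        # triton 2.2-era API (the lines this diff adds)
        from triton.compiler.backends.cuda import CUDABackend
        from triton.runtime.driver import driver

        return CUDABackend(driver.get_current_target())
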
@@ -1,5 +1,6 @@
 import functools
 import hashlib
+import os

 from torch._dynamo.device_interface import get_interface_for_device

@@ -32,18 +33,61 @@ def has_triton() -> bool:

 @functools.lru_cache(None)
-def triton_backend():
+def triton_backend_hash():
+    from triton.common.backend import get_backend, get_cuda_version_key
+
     import torch

     if torch.version.hip:
         # Does not work with ROCm
         return None

-    from triton.compiler.compiler import make_backend
-    from triton.runtime.driver import driver
+    if not torch.cuda.is_available():
+        return None

-    target = driver.active.get_current_target()
-    return make_backend(target)
+    backend = get_backend("cuda")
+    if backend is None:
+        return get_cuda_version_key()
+    else:
+        return backend.get_version_key()
+
+
+@functools.lru_cache
+def triton_key():
+    import pkgutil
+
+    import triton
+
+    TRITON_PATH = os.path.dirname(os.path.abspath(triton.__file__))
+    contents = []
+    # This is redundant. Doing it to be consistent with upstream.
+    # frontend
+    with open(os.path.join(TRITON_PATH, "compiler", "compiler.py"), "rb") as f:
+        contents += [hashlib.sha256(f.read()).hexdigest()]
+    # compiler
+    compiler_path = os.path.join(TRITON_PATH, "compiler")
+    backends_path = os.path.join(TRITON_PATH, "compiler", "backends")
+    for lib in pkgutil.iter_modules([compiler_path, backends_path]):
+        with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:  # type: ignore[call-arg, union-attr, arg-type]
+            contents += [hashlib.sha256(f.read()).hexdigest()]
+    # backend
+    libtriton_hash = hashlib.sha256()
+    with open(os.path.join(TRITON_PATH, "_C/libtriton.so"), "rb") as f:
+        while True:
+            chunk = f.read(1024**2)
+            if not chunk:
+                break
+            libtriton_hash.update(chunk)
+    contents.append(libtriton_hash.hexdigest())
+    # language
+    language_path = os.path.join(TRITON_PATH, "language")
+    for lib in pkgutil.iter_modules([language_path]):
+        with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:  # type: ignore[call-arg, union-attr, arg-type]
+            contents += [hashlib.sha256(f.read()).hexdigest()]
+    from triton import __version__
+
+    return f"{__version__}" + "-".join(contents)

@@ -54,8 +98,6 @@ def triton_hash_with_backend():
         # Does not work with ROCm
         return None

-    from triton.compiler.compiler import triton_key
-
-    backend = triton_backend()
-    key = f"{triton_key()}-{backend.hash()}"
+    backend_hash = triton_backend_hash()
+    key = f"{triton_key()}-{backend_hash}"
     return hashlib.sha256(key.encode("utf-8")).hexdigest()
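
Taken together, the vendored triton_key fingerprints the installed triton's sources and libtriton.so, and triton_hash_with_backend folds in the backend version key. A hypothetical illustration of consuming that fingerprint, assuming these helpers live in torch.utils._triton (the file path is not shown in this compare) and with an invented directory layout:

import os

from torch.utils._triton import triton_hash_with_backend

# Hypothetical cache namespace keyed on the triton fingerprint.
def triton_cache_dir(base="/tmp/torch_cache"):
    fingerprint = triton_hash_with_backend()  # sha256 hex digest, or None on ROCm
    return os.path.join(base, fingerprint or "no-triton")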