Compare commits


7 Commits

Author SHA1 Message Date
dcc4f0bbb6 Update README.md (Co-authored-by: Daniël de Kok <me@danieldk.eu>) 2025-02-24 17:20:32 +01:00
bbd5151f34 Improve readme 2025-02-24 16:19:32 +01:00
9426e7e290 Fix package name & add CUDA shield (#27) 2025-02-24 14:10:54 +01:00
  * package_name should not depend on build.toml
  * Raise when CUDA not installed
df2c165d61 hf-kernels: error out when no build is available (#25) 2025-02-14 20:20:44 +01:00
d89239464a Update README.md (#24) 2025-02-07 17:52:36 +01:00
3212affd9e Set version to 0.1.6 (#23) 2025-02-05 15:18:50 +01:00
7ff40a859c write_egg_lockfile: bail out if the project does not have pyproject.toml (#22) 2025-02-05 14:55:51 +01:00
5 changed files with 76 additions and 13 deletions

View File

@@ -1,6 +1,22 @@
# hf-kernels
Make sure you have `torch==2.5.1+cu124` installed.
The Kernel Hub allows Python libraries and applications to load compute
kernels directly from the [Hub](https://hf.co/). To support this kind
of dynamic loading, Hub kernels differ from traditional Python kernel
packages in that they are made to be:
- Portable: a kernel can be loaded from paths outside `PYTHONPATH`.
- Unique: multiple versions of the same kernel can be loaded in the
same Python process.
- Compatible: kernels must support all recent versions of Python and
the different PyTorch build configurations (various CUDA versions
and C++ ABIs). Furthermore, older C library versions must be supported.
## Usage
`hf-kernels` depends on `torch>=2.5` and CUDA for now.
Here is how you would use the [activation](https://huggingface.co/kernels-community/activation) kernels from the Hugging Face Hub:
```python
import torch
@@ -20,9 +36,28 @@ activation.gelu_fast(y, x)
print(y)
```
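Because the hunk above skips the unchanged middle of the example, the snippet reads as broken here. A complete sketch of the flow it refers to (assuming the `get_kernel` entry point; the tensor shape and dtype are illustrative):

```python
import torch

from hf_kernels import get_kernel

# Fetch the activation kernels from the Hub (cached locally after the first download).
activation = get_kernel("kernels-community/activation")

# gelu_fast writes its result into the output tensor passed first, as in the hunk context above.
x = torch.randn((10, 10), dtype=torch.float16, device="cuda")
y = torch.empty_like(x)
activation.gelu_fast(y, x)
print(y)
```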
These kernels can be built with the [kernel-builder library](https://github.com/huggingface/kernel-builder).
If you want to understand how these kernels are structured, or to build your own,
please take a look at the following guide:
[writing kernels](https://github.com/huggingface/kernel-builder/blob/main/docs/writing-kernels.md).
## Installation
To install `hf-kernels`, we recommend installing the package from PyPI:
```bash
pip install hf-kernels
```
You should then be able to run the script above (also in [examples/basic.py](examples/basic.py)):
```bash
python examples/basic.py
```
## Docker Reference
build and run the reference [example/basic.py](example/basic.py) in a Docker container with the following commands:
build and run the reference [examples/basic.py](examples/basic.py) in a Docker container with the following commands:
```bash
docker build --platform linux/amd64 -t kernels-reference -f docker/Dockerfile.reference .
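# The run step is cut off by this hunk; roughly, assuming the image tag built
# above and a CUDA-capable Docker runtime:
docker run --gpus all kernels-reference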
@@ -68,9 +103,8 @@ The pre-downloaded kernels are used by the `get_locked_kernel` function.
If you want kernel loading to error when a kernel is not pre-downloaded, you can use
the `load_kernel` function instead:
````python
```python
from hf_kernels import load_kernel
activation = load_kernel("kernels-community/activation")
````
```
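For contrast with `load_kernel`, a minimal sketch of the pre-downloaded path this hunk mentions (assuming `get_locked_kernel` takes the same repository id and resolves the revision from the project's `hf-kernels.lock`):

```python
from hf_kernels import get_locked_kernel

# Uses the locked revision; downloads it first if it is not already cached.
activation = get_locked_kernel("kernels-community/activation")
```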

View File

@@ -1,6 +1,6 @@
[project]
name = "hf-kernels"
version = "0.1.5"
version = "0.1.6"
description = "Download cuda kernels"
authors = [
{ name = "OlivierDehaene", email = "olivier@huggingface.co" },

View File

@@ -50,6 +50,8 @@ def download_kernels(args):
    with open(args.project_dir / "hf-kernels.lock", "r") as f:
        lock_json = json.load(f)
    all_successful = True
    for kernel_lock_json in lock_json:
        kernel_lock = KernelLock.from_json(kernel_lock_json)
        print(
@@ -59,7 +61,14 @@ def download_kernels(args):
        if args.all_variants:
            install_kernel_all_variants(kernel_lock.repo_id, kernel_lock.sha)
        else:
            install_kernel(kernel_lock.repo_id, kernel_lock.sha)
            try:
                install_kernel(kernel_lock.repo_id, kernel_lock.sha)
            except FileNotFoundError as e:
                print(e, file=sys.stderr)
                all_successful = False
    if not all_successful:
        sys.exit(1)
def lock_kernels(args):

View File

@@ -89,7 +89,12 @@ def write_egg_lockfile(cmd, basename, filename):
    import logging
    cwd = Path.cwd()
    with open(cwd / "pyproject.toml", "rb") as f:
    pyproject_path = cwd / "pyproject.toml"
    if not pyproject_path.exists():
        # Nothing to do if the project doesn't have pyproject.toml.
        return
    with open(pyproject_path, "rb") as f:
        data = tomllib.load(f)
    kernel_versions = data.get("tool", {}).get("kernels", {}).get("dependencies", None)

View File

@@ -4,11 +4,12 @@ import importlib.metadata
import inspect
import json
import os
from pathlib import Path
import platform
import sys
from importlib.metadata import Distribution
from types import ModuleType
from typing import List, Optional
from typing import List, Optional, Tuple
from huggingface_hub import hf_hub_download, snapshot_download
from packaging.version import parse
@@ -22,6 +23,9 @@ CACHE_DIR: Optional[str] = os.environ.get("HF_KERNELS_CACHE", None)
def build_variant():
    import torch
    if torch.version.cuda is None:
        raise AssertionError("This kernel requires CUDA to be installed. Torch was not compiled with CUDA enabled.")
    torch_version = parse(torch.__version__)
    cuda_version = parse(torch.version.cuda)
    cxxabi = "cxx11" if torch.compiled_with_cxx11_abi() else "cxx98"
@@ -45,10 +49,12 @@ def import_from_path(module_name: str, file_path):
    return module
def install_kernel(repo_id: str, revision: str, local_files_only: bool = False):
    package_name = get_metadata(repo_id, revision, local_files_only=local_files_only)[
        "torch"
    ]["name"]
def install_kernel(
    repo_id: str, revision: str, local_files_only: bool = False
) -> Tuple[str, str]:
    """Download a kernel for the current environment to the cache."""
    package_name = repo_id.split('/')[-1]
    package_name = package_name.replace('-', '_')
    repo_path = snapshot_download(
        repo_id,
        allow_patterns=f"build/{build_variant()}/*",
@@ -56,7 +62,16 @@ def install_kernel(repo_id: str, revision: str, local_files_only: bool = False):
        revision=revision,
        local_files_only=local_files_only,
    )
    return package_name, f"{repo_path}/build/{build_variant()}"
    variant_path = f"{repo_path}/build/{build_variant()}"
    module_init_path = f"{variant_path}/{package_name}/__init__.py"
    if not os.path.exists(module_init_path):
        raise FileNotFoundError(
            f"Kernel `{repo_id}` at revision {revision} does not have build: {build_variant()}"
        )
    return package_name, variant_path
def install_kernel_all_variants(
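Taken together, this last hunk makes `install_kernel` derive the package name from the repository id and fail fast when the requested build variant is absent. A rough usage sketch (the import path and the example variant string are assumptions, not taken from this diff; the variant format comes from `build_variant()` in the earlier hunk):

```python
from hf_kernels.utils import install_kernel  # import path assumed

# Downloads only the build matching the current torch/CUDA/platform combination.
package_name, variant_path = install_kernel(
    "kernels-community/activation", revision="main"
)
print(package_name)  # "activation": repo name with "-" replaced by "_"
print(variant_path)  # ".../build/<variant>", e.g. ".../build/torch25-cxx11-cu124-x86_64-linux" (format assumed)
```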