**Background:**
There are two principles governing operator registration in PyTorch:
- A namespace can be registered only once via `TORCH_LIBRARY`.
- An operator signature (schema) can be registered only once via `def`.
All custom operators defined in this repo are used only on Ascend, so there is no need for vLLM to define a common operator schema that every accelerator then implements for its own hardware (the approach that would favor functional abstraction).
Therefore, we can rename the operator registration namespace to an Ascend-specific namespace (**_C_ascend**).
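For illustration only, the sketch below shows the same two principles at the Python level with `torch.library` (the repo itself registers its kernels through `TORCH_LIBRARY`); the namespace `_C_ascend_demo`, the `scale` operator, and its CPU implementation are hypothetical, chosen just to show the namespace-qualified call path.

```python
import torch
from torch.library import Library

# Create the namespace; per the first principle, a namespace can be
# created with "DEF" only once per process.
demo_lib = Library("_C_ascend_demo", "DEF")

# Register the operator schema; per the second principle, a given
# schema can be defined only once.
demo_lib.define("scale(Tensor x, float s) -> Tensor")

def scale_cpu(x: torch.Tensor, s: float) -> torch.Tensor:
    # Reference implementation bound to the CPU dispatch key.
    return x * s

demo_lib.impl("scale", scale_cpu, "CPU")

# The operator is now reachable under its namespace-qualified path,
# analogous to torch.ops._C_ascend.bgmv_shrink in the test below.
print(torch.ops._C_ascend_demo.scale(torch.ones(3), 0.5))
```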
Related ISSUE: https://github.com/vllm-project/vllm-ascend/issues/2742
- vLLM version: main
- vLLM main: f592b3174b
Signed-off-by: FFFrog <ljw1101.vip@gmail.com>
import gc

import torch

from vllm_ascend.utils import enable_custom_op

enable_custom_op()

DEFAULT_ATOL = 1e-3
DEFAULT_RTOL = 1e-3


def bgmv_shrink_cpu_impl(x: torch.Tensor, w: torch.Tensor,
                         indices: torch.Tensor, y: torch.Tensor,
                         scaling: float) -> torch.Tensor:
    # CPU reference for the bgmv_shrink operator.
    # Gather each token's weight matrix and transpose: [B, 16, 128] -> [B, 128, 16].
    W = w[indices, :, :].transpose(-1, -2).to(torch.float32)
    # Batched matmul: [B, 1, 128] @ [B, 128, 16] -> [B, 1, 16], squeezed.
    z = torch.bmm(x.unsqueeze(1).to(torch.float32), W).squeeze()
    y[:, :] += z * scaling
    return y


@torch.inference_mode()
def test_bgmv_shrink():
    B = 1
    x = torch.randn([B, 128], dtype=torch.float16)
    w = torch.randn([64, 16, 128], dtype=torch.float16)
    indices = torch.zeros([B], dtype=torch.int64)
    y = torch.zeros([B, 16])

    # Copies of the inputs on the NPU for the custom kernel.
    x_npu = x.npu()
    w_npu = w.npu()
    indices_npu = indices.npu()
    y_npu = y.npu()

    # Reference result on CPU vs. the custom Ascend operator.
    y = bgmv_shrink_cpu_impl(x, w, indices, y, 0.5)
    torch.ops._C_ascend.bgmv_shrink(x_npu, w_npu, indices_npu, y_npu, 0.5)

    # Compare the results.
    torch.testing.assert_close(y_npu.cpu(),
                               y,
                               atol=DEFAULT_ATOL,
                               rtol=DEFAULT_RTOL)
    gc.collect()
    torch.npu.empty_cache()
    torch.npu.reset_peak_memory_stats()