mirror of
https://github.com/vllm-project/vllm.git
synced 2025-10-20 14:53:52 +08:00
[Intel GPU] fix xpu not support punica kernel (which use torch.library.custom_op) (#7685)
This commit is contained in:
@@ -10,8 +10,10 @@ from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union

 import torch

 from vllm.triton_utils import HAS_TRITON
+from vllm.utils import is_xpu

-if HAS_TRITON:
+# FIXME: xpu path doesn't support torch.library.custom_op
+if HAS_TRITON and not is_xpu():
     from vllm.lora.ops.bgmv_expand import bgmv_expand
     from vllm.lora.ops.bgmv_expand_slice import bgmv_expand_slice
     from vllm.lora.ops.bgmv_shrink import bgmv_shrink
|
Reference in New Issue
Block a user