mirror of
https://github.com/vllm-project/vllm.git
synced 2025-10-20 14:53:52 +08:00
[BugFix] logger is not callable (#16312)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
This commit is contained in:
@@ -149,8 +149,8 @@ class HPUAttentionImpl(AttentionImpl, torch.nn.Module):
             self.fused_scaled_dot_product_attention = ModuleFusedSDPA(
                 FusedSDPA)
         except ImportError:
-            logger().warning("Could not import HPU FusedSDPA kernel. "
-                             "vLLM will use native implementation.")
+            logger.warning("Could not import HPU FusedSDPA kernel. "
+                           "vLLM will use native implementation.")

         suppored_head_sizes = HPUPagedAttention.get_supported_head_sizes()
         if head_size not in suppored_head_sizes:
Reference in New Issue
Block a user