[Bugfix]: Fix is_embedding_layer condition in VocabParallelEmbedding (#15824)

Signed-off-by: alexwl <alexey.a.kiryushin@gmail.com>
Alexey Kiryushin committed 2025-04-01 05:57:59 +00:00 (via GitHub)
parent e830b01383
commit 63d8eabed0


@@ -235,7 +235,7 @@ class VocabParallelEmbedding(torch.nn.Module):
         # If we are making an embedding layer, then our quantization linear
         # method must implement the embedding operation. If we are another
         # layer type like ParallelLMHead, this is not important.
-        is_embedding_layer = type(self.__class__) is VocabParallelEmbedding
+        is_embedding_layer = type(self) is VocabParallelEmbedding
         quant_method_implements_embedding = method_has_implemented_embedding(
             type(quant_method))
         if is_embedding_layer and not quant_method_implements_embedding:
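
A minimal standalone sketch (using stand-in classes outside vLLM) of why the old condition never held: `type(self.__class__)` evaluates to the metaclass (`type`), which is never `VocabParallelEmbedding`, so `is_embedding_layer` was always False. `type(self)` returns the instance's concrete class, giving the intended behavior.

class VocabParallelEmbedding:
    def check(self):
        # Before the fix: self.__class__ is a class object, so type(...) of it
        # is the metaclass `type`. The comparison was always False.
        old = type(self.__class__) is VocabParallelEmbedding
        # After the fix: type(self) is the instance's actual class, so the check
        # is True for a plain VocabParallelEmbedding and False for subclasses
        # such as ParallelLMHead.
        new = type(self) is VocabParallelEmbedding
        return old, new

class ParallelLMHead(VocabParallelEmbedding):
    pass

print(VocabParallelEmbedding().check())  # (False, True)
print(ParallelLMHead().check())          # (False, False)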