Mirror of https://github.com/vllm-project/vllm.git (synced 2025-10-20 14:53:52 +08:00)
[ Misc ] Refactor w8a8 to use process_weights_after_load (Simplify Weight Loading) (#5940)
Co-authored-by: Robert Shaw <rshaw@neuralmagic>
@@ -11,14 +11,18 @@ from vllm.model_executor.layers.quantization.compressed_tensors.compressed_tenso
     CompressedTensorsLinearMethod, CompressedTensorsW4A16Sparse24,
     CompressedTensorsW8A8DynamicToken, CompressedTensorsW8A8StaticTensor,
     CompressedTensorsWNA16)
+from vllm.model_executor.layers.quantization.compressed_tensors.utils import (
+    QuantizationType)


 @pytest.mark.parametrize("model_args", [
-    ("nm-testing/tinyllama-oneshot-w8w8-test-static-shape-change", "tensor"),
-    ("nm-testing/tinyllama-oneshot-w8-channel-a8-tensor", "channel"),
+    ("nm-testing/tinyllama-oneshot-w8w8-test-static-shape-change", "tensor",
+     QuantizationType.INT, 2560),
+    ("nm-testing/tinyllama-oneshot-w8-channel-a8-tensor", "channel",
+     QuantizationType.INT, 2560),
 ])
 def test_compressed_tensors_w8a8_static_setup(vllm_runner, model_args):
-    model_path, strategy = model_args
+    model_path, strategy, quant_type, shape_0 = model_args
     with vllm_runner(model_path, enforce_eager=True) as llm:
         model = llm.model.llm_engine.model_executor.driver_worker.model_runner.model  # noqa: E501
         layer = model.model.layers[0]
@@ -34,17 +38,23 @@ def test_compressed_tensors_w8a8_static_setup(vllm_runner, model_args):
                           CompressedTensorsLinearMethod)
         assert isinstance(down_proj.quant_method,
                           CompressedTensorsLinearMethod)

         assert isinstance(qkv_proj.scheme, CompressedTensorsW8A8StaticTensor)

         assert qkv_proj.scheme.strategy == strategy
-        assert qkv_proj.weight.dtype is torch.int8
-        assert o_proj.weight.dtype is torch.int8
-        assert gate_up_proj.weight.dtype is torch.int8
+        expected_type = (torch.int8 if quant_type == QuantizationType.INT else
+                         torch.float8_e4m3fn)
+
+        assert qkv_proj.weight.dtype is expected_type
+        assert o_proj.weight.dtype is expected_type
+        assert gate_up_proj.weight.dtype is expected_type

         if qkv_proj.scheme.strategy == "tensor":
-            assert qkv_proj.weight_scale.shard_splitter is not None
-            assert qkv_proj.weight_scale.logical_widths is not None
+            # Make sure it is a channelwise buffer
+            # After running process_weights_after_loading
+            assert len(qkv_proj.weight_scale.shape) == 2
+            assert qkv_proj.weight_scale.shape[0] == shape_0
+            assert qkv_proj.weight_scale.shape[1] == 1
+            assert qkv_proj.weight_scale.dtype is torch.float32
         assert qkv_proj.input_scale.dtype is torch.float32

@@ -9,6 +9,23 @@ from tests.quantization.utils import is_quant_method_supported
 from vllm._custom_ops import scaled_fp8_quant
 from vllm.model_executor.layers.quantization.fp8 import Fp8LinearMethod

+MODELS = [
+    "neuralmagic/Meta-Llama-3-8B-Instruct-FP8",
+    "nm-testing/Phi-3-mini-128k-instruct-FP8",
+]
+
+
+@pytest.mark.skipif(not is_quant_method_supported("fp8"),
+                    reason="FP8 is not supported on this GPU type.")
+@pytest.mark.parametrize("model", MODELS)
+def test_model_load_and_run(vllm_runner, model: str):
+    with vllm_runner(model) as llm:
+        # note: this does not test accuracy, just that we can run through
+        # see lm-eval tests for accuracy
+        outputs = llm.generate_greedy(prompts=["Hello my name is"],
+                                      max_tokens=10)
+        print(outputs[0][1])
+
+
 @pytest.mark.skipif(not is_quant_method_supported("fp8"),
                     reason="FP8 is not supported on this GPU type.")
@@ -41,6 +41,29 @@ def adjust_bitsandbytes_shard(param: Parameter,
     return quantized_size, quantized_offset


+def adjust_scalar_to_fused_array(param, loaded_weight, shard_id):
+    """For fused modules (QKV and MLP) we have an array of length
+    N that holds 1 scale for each "logical" matrix. So the param
+    is an array of length N. The loaded_weight corresponds to
+    one of the shards on disk. Here, we slice the param based on
+    the shard_id for loading.
+    """
+    qkv_idxs = {"q": 0, "k": 1, "v": 2}
+
+    if isinstance(shard_id, str):
+        shard_id = qkv_idxs[shard_id]
+    elif not isinstance(shard_id, int):
+        raise ValueError(f"Unknown Shard Id {shard_id}")
+
+    # AutoFP8 scales do not have a shape
+    # compressed-tensors scales do have a shape
+    if len(loaded_weight.shape) != 0:
+        assert loaded_weight.shape[0] == 1
+        loaded_weight = loaded_weight[0]
+
+    return param[shard_id], loaded_weight
+
+
 class LinearMethodBase(QuantizeMethodBase):
     """Base class for different (maybe quantized) linear methods."""

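
The helper above replaces the scheme-specific shard_splitter / fp8_scales_shard_indexer hooks with one shared slicing rule: the fused parameter is a length-N array of per-tensor scales and each shard's scalar lands in its own slot. A rough usage sketch with made-up values, assuming the function is importable from vllm.model_executor.layers.linear once this commit is applied:

import torch

from vllm.model_executor.layers.linear import adjust_scalar_to_fused_array

# Hypothetical fused QKV scale parameter: one float32 slot per logical matrix.
param = torch.zeros(3, dtype=torch.float32)

# Simulate loading the "k" shard's per-tensor scale from disk.
# compressed-tensors checkpoints store it with shape (1,); AutoFP8 stores a
# 0-d scalar. Either way the helper returns the matching slot and a scalar
# to copy into it.
loaded_weight = torch.tensor([0.02], dtype=torch.float32)
param_slice, scalar = adjust_scalar_to_fused_array(param, loaded_weight, "k")
param_slice.copy_(scalar)

print(param)  # tensor([0.0000, 0.0200, 0.0000])
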
@@ -358,37 +381,15 @@ class MergedColumnParallelLinear(ColumnParallelLinear):
         output_dim = getattr(param, "output_dim", None)
         # Special case for AQLM codebooks.
         is_metadata = getattr(param, "is_metadata", False)
-
-        param_shard_splitter = getattr(param, "shard_splitter", None)
-
-        if output_dim is not None and param_shard_splitter is not None:
-            raise NotImplementedError(
-                "We do not currently support output_dim != None and "
-                "shard_splitter != None for a parameter. Please open an issue."
-            )
-        # If a parameter has defined a shard_splitter to be used for
-        # the weight, it should be applied before the weight is
-        # loaded/copied to the parameter. The shard_splitter applies
-        # logic by using the loaded_shard_id to ensure that the loaded
-        # param is loaded to the correct location
-        # within the parameter defined by the linear method.
-        if loaded_shard_id is None and param_shard_splitter is not None:
-            raise NotImplementedError(
-                "We do not currently support loaded_shard_id == None and "
-                "shard_splitter != None for a parameter. Please open an issue."
-            )
-
-        # Special case for Fp8 scales.
-        fp8_scales_shard_indexer = getattr(param, "fp8_scales_shard_indexer",
-                                           None)
+        # Special case for per-tensor scale to load scalar into fused array.
+        needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)

         if loaded_shard_id is None:
             # Loaded weight is already fused on disk (qkv/mlp).
             if output_dim is None:
-                # If fp8 + scale, need to send to each shard.
-                if fp8_scales_shard_indexer is not None:
-                    param_data, loaded_weight = fp8_scales_shard_indexer(
-                        param_data, loaded_weight, loaded_shard_id)
+                if needs_scalar_to_array is not None:
+                    param_data, loaded_weight = adjust_scalar_to_fused_array(
+                        param_data, loaded_weight, 0)

                 assert param_data.shape == loaded_weight.shape
                 param_data.copy_(loaded_weight)
@@ -450,15 +451,9 @@ class MergedColumnParallelLinear(ColumnParallelLinear):
             shard_offset = loaded_shard_id * shard_size
             param_data = param_data.narrow(0, shard_offset, shard_size)

-        # If a param_shard_splitter is defined by the LinearMethod, use it.
-        elif param_shard_splitter is not None:
-            logical_widths = getattr(param, "logical_widths", None)
-            param_data, loaded_weight = param_shard_splitter(
-                param_data, loaded_weight, loaded_shard_id, logical_widths)
-
-        # Special case for Fp8 scales.
-        elif fp8_scales_shard_indexer is not None:
-            param_data, loaded_weight = fp8_scales_shard_indexer(
+        # Special case for per-tensor scales in fused case.
+        elif needs_scalar_to_array:
+            param_data, loaded_weight = adjust_scalar_to_fused_array(
                 param_data, loaded_weight, loaded_shard_id)

         else:
@@ -548,36 +543,15 @@ class QKVParallelLinear(ColumnParallelLinear):
         # Special case for AQLM codebooks.
         is_metadata = getattr(param, "is_metadata", False)

-        param_shard_splitter = getattr(param, "shard_splitter", None)
-
-        if output_dim is not None and param_shard_splitter is not None:
-            raise NotImplementedError(
-                "We do not currently support output_dim != None and "
-                "shard_splitter != None for a parameter. Please open an issue."
-            )
-        # If a parameter has defined a shard_splitter to be used for
-        # the weight, it should be applied before the weight is
-        # loaded/copied to the parameter. The shard_splitter applies
-        # logic by using the loaded_shard_id to ensure that the loaded
-        # param is loaded to the correct location
-        # within the parameter defined by the linear method.
-        if loaded_shard_id is None and param_shard_splitter is not None:
-            raise NotImplementedError(
-                "We do not currently support loaded_shard_id == None and "
-                "shard_splitter != None for a parameter. Please open an issue."
-            )
-
-        # Special case for Fp8 scales.
-        fp8_scales_shard_indexer = getattr(param, "fp8_scales_shard_indexer",
-                                           None)
+        # Special case for per-tensor scales in fused case.
+        needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)

         if loaded_shard_id is None:
             # Loaded weight is already fused on disk (qkv/mlp).
             if output_dim is None:
-                # If fp8 + scale, need to send to each shard.
-                if fp8_scales_shard_indexer is not None:
-                    param_data, loaded_weight = fp8_scales_shard_indexer(
-                        param_data, loaded_weight, loaded_shard_id)
+                if needs_scalar_to_array is not None:
+                    param_data, loaded_weight = adjust_scalar_to_fused_array(
+                        param_data, loaded_weight, 0)

                 assert param_data.shape == loaded_weight.shape
                 param_data.copy_(loaded_weight)
@@ -667,15 +641,9 @@ class QKVParallelLinear(ColumnParallelLinear):
             shard_index = ["q", "k", "v"].index(loaded_shard_id)
             param_data = param_data.narrow(0, shard_index * shard_size,
                                            shard_size)
-        # If a param_shard_splitter is defined by the LinearMethod, use it.
-        elif param_shard_splitter is not None:
-            logical_widths = getattr(param, "logical_widths", None)
-            param_data, loaded_weight = param_shard_splitter(
-                param_data, loaded_weight, loaded_shard_id, logical_widths)
-
-        # Special case for Fp8 scales.
-        elif fp8_scales_shard_indexer is not None:
-            param_data, loaded_weight = fp8_scales_shard_indexer(
+        # Special case for per-tensor scales in fused case.
+        elif needs_scalar_to_array:
+            param_data, loaded_weight = adjust_scalar_to_fused_array(
                 param_data, loaded_weight, loaded_shard_id)
         else:
             ignore_warning = getattr(param, "ignore_warning", False)
@@ -186,6 +186,9 @@ class CompressedTensorsLinearMethod(LinearMethodBase):
     def __init__(self, quantization_config: CompressedTensorsConfig):
         self.quantization_config = quantization_config

+    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
+        return layer.scheme.process_weights_after_loading(layer)
+
     def create_weights(self, layer: torch.nn.Module,
                        input_size_per_partition: int,
                        output_partition_sizes: List[int], input_size: int,
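
CompressedTensorsLinearMethod.process_weights_after_loading simply delegates to the layer's scheme, so each scheme owns its own post-load fixup. The loader-side contract is just "walk the modules and call the hook once all weights are copied in"; the loop below is a hedged illustration of that contract, not a copy of vLLM's loader code:

import torch


def run_post_load_hooks(model: torch.nn.Module) -> None:
    # After every checkpoint tensor has been copied into the model, give each
    # quantized module a chance to repack or convert its parameters (e.g.
    # broadcasting per-tensor scales into a channelwise buffer).
    for module in model.modules():
        quant_method = getattr(module, "quant_method", None)
        if quant_method is not None and hasattr(
                quant_method, "process_weights_after_loading"):
            quant_method.process_weights_after_loading(module)
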
@@ -31,3 +31,11 @@ class CompressedTensorsScheme(ABC):

         """
         raise NotImplementedError
+
+    @abstractmethod
+    def process_weights_after_loading(self, layer: torch.nn.Module):
+        """
+        Called after weight loading is complete for any cleanup that
+        needs to occur.
+        """
+        raise NotImplementedError
@@ -18,6 +18,9 @@ class CompressedTensorsUnquantized(CompressedTensorsScheme):
     in a linear transformation.
     """

+    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
+        pass
+
     def create_weights(self, layer: torch.nn.Module,
                        output_partition_sizes: List[int],
                        input_size_per_partition: int,
@@ -29,6 +29,9 @@ class CompressedTensorsW4A16Sparse24(CompressedTensorsScheme):
             raise ValueError(
                 "group_size must be given when using strategy group")

+    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
+        pass
+
     def create_weights(self, layer: torch.nn.Module, input_size: int,
                        output_partition_sizes: List[int],
                        input_size_per_partition: int,
@@ -15,70 +15,63 @@ class CompressedTensorsW8A8(CompressedTensorsScheme):
     def __init__(self, strategy: str):
         self.strategy = strategy

-    def _shard_id_as_int(self, shard_id: Union[str, int]) -> int:
-        if isinstance(shard_id, int):
-            return shard_id
+    # Cutlass kernels support only per-tensor and per-channel cases.
+    # So if we have a fused module (QKV, MLP) with per tensor scales (thus N
+    # scales being passed to the kernel), we convert to the per-channel case.
+    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
+        if (self.strategy == QuantizationStrategy.TENSOR
+                and len(self.logical_widths) > 1):

-        assert isinstance(shard_id, str)
-        qkv_idxs = {"q": 0, "k": 1, "v": 2}
-        assert shard_id in qkv_idxs
-        return qkv_idxs[shard_id]
+            # Load the N per-tensor scales into the channelwise buffer.
+            weight_scale_channel = torch.empty(
+                (sum(self.logical_widths), 1),
+                dtype=torch.float32,
+                device=layer.weight_scale.device)
+            start = 0
+            for idx, logical_width in enumerate(self.logical_widths):
+                end = start + logical_width
+                weight_scale_channel[start:end, :] = layer.weight_scale[idx]
+                start = end

-    def scales_shard_splitter(
-            self, param: torch.Tensor, loaded_weight: torch.Tensor,
-            shard_id: Union[str, int],
-            logical_widths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
-        shard_id = self._shard_id_as_int(shard_id)
-        offset = sum(logical_widths[:shard_id])
-        size = logical_widths[shard_id]
-        # update loaded weight with copies for broadcast.
-        loaded_weight = loaded_weight.repeat(size)
-        return param[offset:offset + size], loaded_weight
+            layer.weight_scale = Parameter(weight_scale_channel,
+                                           requires_grad=False)

     def create_weights(self, layer: torch.nn.Module,
                        output_partition_sizes: List[int],
                        input_size_per_partition: int,
                        params_dtype: torch.dtype, weight_loader: Callable,
                        **kwargs):
         self.logical_widths = output_partition_sizes

-        is_tensor_partitioned = len(output_partition_sizes) != 1
-        weight_scale_dim = sum(output_partition_sizes) if (
-            is_tensor_partitioned
-            or self.strategy == QuantizationStrategy.CHANNEL) else 1
-
-        shape: Union[Tuple[int], Tuple[int, int]] = (weight_scale_dim, )
+        # WEIGHT SCALE
+        shape: Union[Tuple[int], Tuple[int, int]]
         if self.strategy == QuantizationStrategy.CHANNEL:
-            shape = (weight_scale_dim, 1)
+            shape = (sum(self.logical_widths), 1)
+        else:
+            shape = (len(self.logical_widths), )

         weight_scale = Parameter(torch.empty(*shape, dtype=torch.float32),
                                  requires_grad=False)

         layer.register_parameter("weight_scale", weight_scale)
-        set_weight_attrs(weight_scale, {"weight_loader": weight_loader})
+        if self.strategy == QuantizationStrategy.CHANNEL:
+            set_weight_attrs(weight_scale, {
+                "weight_loader": weight_loader,
+                "output_dim": 0,
+            })
+        else:
+            set_weight_attrs(weight_scale, {
+                "weight_loader": weight_loader,
+                "needs_scalar_to_array": True,
+            })

+        # WEIGHT
         weight = Parameter(torch.empty(sum(output_partition_sizes),
                                        input_size_per_partition,
                                        dtype=torch.int8),
                            requires_grad=False)

         layer.register_parameter("weight", weight)
-        set_weight_attrs(
-            weight, {
-                "input_dim": 1,
-                "output_dim": 0,
-                "weight_loader": weight_loader,
-                "logical_widths": output_partition_sizes
-            })
-
-        # Don't need a shard_splitter for channel-wise quantization
-        # Use the default loading method
-        if self.strategy == QuantizationStrategy.CHANNEL:
-            set_weight_attrs(weight_scale, {
-                "output_dim": 0,
-            })
-        else:
-            set_weight_attrs(
-                weight_scale, {
-                    "logical_widths": output_partition_sizes,
-                    "shard_splitter": self.scales_shard_splitter,
-                })
+        set_weight_attrs(weight, {
+            "input_dim": 1,
+            "output_dim": 0,
+            "weight_loader": weight_loader,
+        })
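
The conversion in process_weights_after_loading above is the heart of the refactor: when a fused module ends up with N per-tensor scales, they are broadcast into a single channelwise buffer so the CUTLASS kernel only ever sees the per-tensor or per-channel layouts it supports. A standalone sketch of that expansion, using illustrative widths that happen to sum to the 2560 checked in the test (2048 + 256 + 256 for a TinyLlama-style QKV projection):

import torch

# Illustrative fused QKV projection: three logical matrices, each with its
# own per-tensor scale (the scale values here are made up).
logical_widths = [2048, 256, 256]
per_tensor_scales = torch.tensor([0.011, 0.020, 0.017], dtype=torch.float32)

# Expand the N per-tensor scales into one (sum(logical_widths), 1) channelwise
# buffer by repeating each scale across the rows of its logical matrix.
weight_scale_channel = torch.empty((sum(logical_widths), 1),
                                   dtype=torch.float32)
start = 0
for idx, logical_width in enumerate(logical_widths):
    end = start + logical_width
    weight_scale_channel[start:end, :] = per_tensor_scales[idx]
    start = end

assert weight_scale_channel.shape == (2560, 1)
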
@@ -29,6 +29,9 @@ class CompressedTensorsWNA16(CompressedTensorsScheme):
             raise ValueError(
                 "group_size must be given when using strategy group")

+    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
+        pass
+
     def create_weights(self, layer: torch.nn.Module, input_size: int,
                        output_partition_sizes: List[int],
                        input_size_per_partition: int,
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Union

 import torch
 from torch.nn import Module
@@ -98,7 +98,6 @@ class Fp8LinearMethod(LinearMethodBase):
     """

     def __init__(self, quant_config: Fp8Config):
-        self.fused_module_in_checkpoint = False
         self.quant_config = quant_config
         self.cutlass_fp8_supported = cutlass_fp8_supported()

@@ -114,12 +113,10 @@ class Fp8LinearMethod(LinearMethodBase):
                           requires_grad=False)
         scale[:] = torch.finfo(torch.float8_e4m3fn).min
         layer.register_parameter(scale_name, scale)
-        set_weight_attrs(
-            scale, {
-                **extra_weight_attrs,
-                "fp8_scales_shard_indexer":
-                self.scales_shard_indexer,
-            })
+        set_weight_attrs(scale, {
+            **extra_weight_attrs,
+            "needs_scalar_to_array": True,
+        })

     def create_weights(
         self,
@@ -170,26 +167,6 @@ class Fp8LinearMethod(LinearMethodBase):
             output_partition_sizes=output_partition_sizes,
             **extra_weight_attrs)

-    def scales_shard_indexer(
-            self, param: torch.Tensor, loaded_weight: torch.Tensor,
-            shard_id: Optional[Union[str,
-                                     int]]) -> Tuple[torch.Tensor, torch.Tensor]:
-        qkv_idxs = {"q": 0, "k": 1, "v": 2}
-
-        if shard_id is None:
-            shard_id = 0
-            self.fused_module_in_checkpoint = True
-        elif isinstance(shard_id, int):
-            pass
-        elif isinstance(shard_id, str):
-            if shard_id not in qkv_idxs:
-                raise ValueError(f"Unknown shard_id: {shard_id}")
-            shard_id = qkv_idxs[shard_id]
-        else:
-            ValueError(f"Shard id must be int or str but got {type(shard_id)}")
-
-        return param[shard_id], loaded_weight
-
     def process_weights_after_loading(self, layer: Module) -> None:
         if (not hasattr(layer, "process_after_load")
                 or not layer.process_after_load):
@@ -212,7 +189,17 @@ class Fp8LinearMethod(LinearMethodBase):
             # Loop over logical weights, requantizing with single scale.
             max_w_scale = layer.weight_scale.max()

-            if not self.fused_module_in_checkpoint:
+            # QKV / MLP is fused in the on disk checkpoint if any of the
+            # weight scales are still set to the default since we initialize
+            # N weight scales for N shards but we only load 1 weight scale
+            # from disk in this case. As a result, we skip dequant -> requant
+            # since we already have quantized QKV together.
+            # Sample Model with fused checkpoint:
+            # * nm-testing/Phi-3-mini-128k-instruct-FP8
+            unfused_module_in_checkpoint = (
+                layer.weight_scale[-1] > torch.finfo(torch.float8_e4m3fn).min)
+
+            if unfused_module_in_checkpoint:
                 start = 0
                 for idx, logical_width in enumerate(layer.logical_widths):
                     end = start + logical_width
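
The replacement for the removed fused_module_in_checkpoint flag is a pure tensor check: the scales are initialized to torch.finfo(torch.float8_e4m3fn).min in create_weights, so if a fused checkpoint supplied only one scale for N logical shards, the last slot is still at that sentinel and the dequant -> requant pass can be skipped. A small sketch of that detection with made-up scale values:

import torch

# Three scale slots for a fused QKV module, initialized to the same sentinel
# value used when the parameter is created.
sentinel = torch.finfo(torch.float8_e4m3fn).min
weight_scale = torch.full((3, ), sentinel, dtype=torch.float32)

# Fused checkpoint: a single scalar scale only ever fills slot 0.
weight_scale[0] = 0.05
print(bool(weight_scale[-1] > sentinel))  # False -> skip requantization

# Unfused checkpoint: every logical shard loaded its own scale.
weight_scale[1] = 0.04
weight_scale[2] = 0.06
print(bool(weight_scale[-1] > sentinel))  # True -> requantize to one max scale
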