mirror of https://github.com/vllm-project/vllm.git
Revert "[CI/Build] Add is_quant_method_supported to control quantization test configurations" (#5463)
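The revert replaces skip conditions of the form `not is_quant_method_supported("<method>")` with per-module capability flags. A rough sketch of the restored pattern (names follow the test_aqlm.py hunk below; the test function is a placeholder and the snippet is illustrative, not part of the diff):

import pytest
import torch

from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS

# Each test module recomputes its own "not supported" flag at import time.
aqlm_not_supported = True
if torch.cuda.is_available():
    major, minor = torch.cuda.get_device_capability()
    aqlm_not_supported = (major * 10 + minor <
                          QUANTIZATION_METHODS["aqlm"].get_min_capability())


@pytest.mark.skipif(aqlm_not_supported,
                    reason="AQLM is not supported on this GPU type.")
def test_aqlm_model():  # placeholder test body
    ...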
@@ -4,8 +4,17 @@ Run `pytest tests/models/test_aqlm.py`.
 """
 
 import pytest
+import torch
 
-from tests.quantization.utils import is_quant_method_supported
+from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
+
+aqlm_not_supported = True
+
+if torch.cuda.is_available():
+    capability = torch.cuda.get_device_capability()
+    capability = capability[0] * 10 + capability[1]
+    aqlm_not_supported = (capability <
+                          QUANTIZATION_METHODS["aqlm"].get_min_capability())
 
 # In this test we hardcode prompts and generations for the model so we don't
 # need to require the AQLM package as a dependency
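In the restored block above, `torch.cuda.get_device_capability()` returns a `(major, minor)` tuple, and `major * 10 + minor` folds it into a single integer on the same scale that `get_min_capability()` uses, so the support check is a plain integer comparison; since the flag is a module-level value, it is computed once at import and pytest evaluates the `skipif` condition at collection time. A small worked illustration (the device values are examples, not taken from the diff):

import torch

from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS

if torch.cuda.is_available():
    # e.g. (7, 5) on a T4 or (8, 0) on an A100
    major, minor = torch.cuda.get_device_capability()
    capability = major * 10 + minor  # (7, 5) -> 75, (8, 0) -> 80
    min_cap = QUANTIZATION_METHODS["aqlm"].get_min_capability()
    print(f"device capability {capability}, aqlm requires >= {min_cap}")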
@@ -58,7 +67,7 @@ ground_truth_generations = [
 ]
 
 
-@pytest.mark.skipif(not is_quant_method_supported("aqlm"),
+@pytest.mark.skipif(aqlm_not_supported,
                     reason="AQLM is not supported on this GPU type.")
 @pytest.mark.parametrize("model", ["ISTA-DASLab/Llama-2-7b-AQLM-2Bit-1x16-hf"])
 @pytest.mark.parametrize("dtype", ["half"])
@@ -8,8 +8,8 @@ import pytest
 import torch
 from transformers import AutoTokenizer
 
-from tests.quantization.utils import is_quant_method_supported
 from vllm import LLM, SamplingParams
+from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
 
 os.environ["TOKENIZERS_PARALLELISM"] = "true"
 
@@ -67,8 +67,16 @@ EXPECTED_STRS_MAP = {
     },
 }
 
+fp8_not_supported = True
 
-@pytest.mark.skipif(not is_quant_method_supported("fp8"),
+if torch.cuda.is_available():
+    capability = torch.cuda.get_device_capability()
+    capability = capability[0] * 10 + capability[1]
+    fp8_not_supported = (capability <
+                         QUANTIZATION_METHODS["fp8"].get_min_capability())
+
+
+@pytest.mark.skipif(fp8_not_supported,
                     reason="fp8 is not supported on this GPU type.")
 @pytest.mark.parametrize("model_name", MODELS)
 @pytest.mark.parametrize("kv_cache_dtype", ["auto", "fp8"])
@@ -11,8 +11,9 @@ Run `pytest tests/models/test_gptq_marlin.py`.
 import os
 
 import pytest
+import torch
 
-from tests.quantization.utils import is_quant_method_supported
+from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
 from vllm.model_executor.layers.rotary_embedding import _ROPE_DICT
 
 from .utils import check_logprobs_close
@@ -21,6 +22,14 @@ os.environ["TOKENIZERS_PARALLELISM"] = "true"
 
 MAX_MODEL_LEN = 1024
 
+gptq_marlin_not_supported = True
+
+if torch.cuda.is_available():
+    capability = torch.cuda.get_device_capability()
+    capability = capability[0] * 10 + capability[1]
+    gptq_marlin_not_supported = (
+        capability < QUANTIZATION_METHODS["gptq_marlin"].get_min_capability())
+
 MODELS = [
     # act_order==False, group_size=channelwise
     ("robertgshaw2/zephyr-7b-beta-channelwise-gptq", "main"),
@@ -44,7 +53,7 @@ MODELS = [
 
 
 @pytest.mark.flaky(reruns=3)
-@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
+@pytest.mark.skipif(gptq_marlin_not_supported,
                     reason="gptq_marlin is not supported on this GPU type.")
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["half", "bfloat16"])
@@ -9,9 +9,18 @@ Run `pytest tests/models/test_marlin_24.py`.
 from dataclasses import dataclass
 
 import pytest
+import torch
 
 from tests.models.utils import check_logprobs_close
-from tests.quantization.utils import is_quant_method_supported
+from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
+
+marlin_not_supported = True
+
+if torch.cuda.is_available():
+    capability = torch.cuda.get_device_capability()
+    capability = capability[0] * 10 + capability[1]
+    marlin_not_supported = (
+        capability < QUANTIZATION_METHODS["marlin"].get_min_capability())
 
 
 @dataclass
@@ -38,7 +47,7 @@ model_pairs = [
 
 
 @pytest.mark.flaky(reruns=2)
-@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin_24"),
+@pytest.mark.skipif(marlin_not_supported,
                     reason="Marlin24 is not supported on this GPU type.")
 @pytest.mark.parametrize("model_pair", model_pairs)
 @pytest.mark.parametrize("dtype", ["half"])
@@ -13,11 +13,20 @@ Run `pytest tests/models/test_marlin.py`.
 from dataclasses import dataclass
 
 import pytest
+import torch
 
-from tests.quantization.utils import is_quant_method_supported
+from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
 
 from .utils import check_logprobs_close
 
+marlin_not_supported = True
+
+if torch.cuda.is_available():
+    capability = torch.cuda.get_device_capability()
+    capability = capability[0] * 10 + capability[1]
+    marlin_not_supported = (
+        capability < QUANTIZATION_METHODS["marlin"].get_min_capability())
+
 
 @dataclass
 class ModelPair:
@@ -36,7 +45,7 @@ model_pairs = [
 
 
 @pytest.mark.flaky(reruns=2)
-@pytest.mark.skipif(not is_quant_method_supported("marlin"),
+@pytest.mark.skipif(marlin_not_supported,
                     reason="Marlin is not supported on this GPU type.")
 @pytest.mark.parametrize("model_pair", model_pairs)
 @pytest.mark.parametrize("dtype", ["half"])
@@ -5,11 +5,15 @@ Run `pytest tests/quantization/test_bitsandbytes.py`.
 import pytest
 import torch
 
-from tests.quantization.utils import is_quant_method_supported
 from vllm import SamplingParams
+from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
+
+capability = torch.cuda.get_device_capability()
+capability = capability[0] * 10 + capability[1]
 
 
-@pytest.mark.skipif(not is_quant_method_supported("bitsandbytes"),
-                    reason='bitsandbytes is not supported on this GPU type.')
+@pytest.mark.skipif(
+    capability < QUANTIZATION_METHODS['bitsandbytes'].get_min_capability(),
+    reason='bitsandbytes is not supported on this GPU type.')
 def test_load_bnb_model(vllm_runner) -> None:
     with vllm_runner('huggyllama/llama-7b',
@@ -5,11 +5,15 @@ Run `pytest tests/quantization/test_fp8.py --forked`.
 import pytest
 import torch
 
-from tests.quantization.utils import is_quant_method_supported
+from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
 from vllm.model_executor.layers.quantization.fp8 import Fp8LinearMethod
 
+capability = torch.cuda.get_device_capability()
+capability = capability[0] * 10 + capability[1]
+
 
-@pytest.mark.skipif(not is_quant_method_supported("fp8"),
-                    reason="FP8 is not supported on this GPU type.")
+@pytest.mark.skipif(
+    capability < QUANTIZATION_METHODS["fp8"].get_min_capability(),
+    reason="FP8 is not supported on this GPU type.")
 def test_load_fp16_model(vllm_runner) -> None:
     with vllm_runner("facebook/opt-125m", quantization="fp8") as llm:
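Unlike the tests/models hunks above, the two tests/quantization hunks restore a module-level `torch.cuda.get_device_capability()` call with no `torch.cuda.is_available()` guard, so importing those modules presumes a CUDA device is present. A guarded variant would look roughly like the following (the flag name is illustrative and the sketch is not part of this commit):

import torch

from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS

# Default to "not supported" so the skipif fires on CPU-only machines.
fp8_below_min_capability = True
if torch.cuda.is_available():
    major, minor = torch.cuda.get_device_capability()
    fp8_below_min_capability = (major * 10 + minor <
                                QUANTIZATION_METHODS["fp8"].get_min_capability())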
@@ -1,14 +0,0 @@
-import torch
-
-from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
-
-
-def is_quant_method_supported(quant_method: str) -> bool:
-    # Currently, all quantization methods require Nvidia or AMD GPUs
-    if not torch.cuda.is_available():
-        return False
-
-    capability = torch.cuda.get_device_capability()
-    capability = capability[0] * 10 + capability[1]
-    return (capability <
-            QUANTIZATION_METHODS[quant_method].get_min_capability())
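One detail worth noting in the deleted helper: it returns True when the capability is below the method's minimum, i.e. when the method is not supported, while the skip conditions removed elsewhere in this commit were written as `not is_quant_method_supported(...)`; taken together, those two pieces skip the tests precisely on GPUs that do meet the minimum capability. A helper consistent with its name would presumably invert the comparison, roughly as follows (a hypothetical corrected sketch, not part of this commit):

import torch

from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS


def is_quant_method_supported(quant_method: str) -> bool:
    # All current quantization methods require an NVIDIA or AMD GPU.
    if not torch.cuda.is_available():
        return False

    major, minor = torch.cuda.get_device_capability()
    return (major * 10 + minor >=
            QUANTIZATION_METHODS[quant_method].get_min_capability())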