From e3c12bf6d22999cfbe267a7c788f6875340616cd Mon Sep 17 00:00:00 2001 From: Simon Mo Date: Wed, 12 Jun 2024 12:03:24 -0500 Subject: [PATCH] Revert "[CI/Build] Add `is_quant_method_supported` to control quantization test configurations" (#5463) --- tests/models/test_aqlm.py | 13 +++++++++++-- tests/models/test_fp8.py | 12 ++++++++++-- tests/models/test_gptq_marlin.py | 13 +++++++++++-- tests/models/test_gptq_marlin_24.py | 13 +++++++++++-- tests/models/test_marlin.py | 13 +++++++++++-- tests/quantization/test_bitsandbytes.py | 10 +++++++--- tests/quantization/test_fp8.py | 10 +++++++--- tests/quantization/utils.py | 14 -------------- 8 files changed, 68 insertions(+), 30 deletions(-) delete mode 100644 tests/quantization/utils.py diff --git a/tests/models/test_aqlm.py b/tests/models/test_aqlm.py index 80034a51188..c4ecf846e63 100644 --- a/tests/models/test_aqlm.py +++ b/tests/models/test_aqlm.py @@ -4,8 +4,17 @@ Run `pytest tests/models/test_aqlm.py`. """ import pytest +import torch -from tests.quantization.utils import is_quant_method_supported +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS + +aqlm_not_supported = True + +if torch.cuda.is_available(): + capability = torch.cuda.get_device_capability() + capability = capability[0] * 10 + capability[1] + aqlm_not_supported = (capability < + QUANTIZATION_METHODS["aqlm"].get_min_capability()) # In this test we hardcode prompts and generations for the model so we don't # need to require the AQLM package as a dependency @@ -58,7 +67,7 @@ ground_truth_generations = [ ] -@pytest.mark.skipif(not is_quant_method_supported("aqlm"), +@pytest.mark.skipif(aqlm_not_supported, reason="AQLM is not supported on this GPU type.") @pytest.mark.parametrize("model", ["ISTA-DASLab/Llama-2-7b-AQLM-2Bit-1x16-hf"]) @pytest.mark.parametrize("dtype", ["half"]) diff --git a/tests/models/test_fp8.py b/tests/models/test_fp8.py index b24c17cf38f..61aee0d0a6e 100644 --- a/tests/models/test_fp8.py +++ b/tests/models/test_fp8.py @@ -8,8 +8,8 @@ import pytest import torch from transformers import AutoTokenizer -from tests.quantization.utils import is_quant_method_supported from vllm import LLM, SamplingParams +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS os.environ["TOKENIZERS_PARALLELISM"] = "true" @@ -67,8 +67,16 @@ EXPECTED_STRS_MAP = { }, } +fp8_not_supported = True -@pytest.mark.skipif(not is_quant_method_supported("fp8"), +if torch.cuda.is_available(): + capability = torch.cuda.get_device_capability() + capability = capability[0] * 10 + capability[1] + fp8_not_supported = (capability < + QUANTIZATION_METHODS["fp8"].get_min_capability()) + + +@pytest.mark.skipif(fp8_not_supported, reason="fp8 is not supported on this GPU type.") @pytest.mark.parametrize("model_name", MODELS) @pytest.mark.parametrize("kv_cache_dtype", ["auto", "fp8"]) diff --git a/tests/models/test_gptq_marlin.py b/tests/models/test_gptq_marlin.py index e30100d9bf5..e957450cce9 100644 --- a/tests/models/test_gptq_marlin.py +++ b/tests/models/test_gptq_marlin.py @@ -11,8 +11,9 @@ Run `pytest tests/models/test_gptq_marlin.py`. 
import os import pytest +import torch -from tests.quantization.utils import is_quant_method_supported +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS from vllm.model_executor.layers.rotary_embedding import _ROPE_DICT from .utils import check_logprobs_close @@ -21,6 +22,14 @@ os.environ["TOKENIZERS_PARALLELISM"] = "true" MAX_MODEL_LEN = 1024 +gptq_marlin_not_supported = True + +if torch.cuda.is_available(): + capability = torch.cuda.get_device_capability() + capability = capability[0] * 10 + capability[1] + gptq_marlin_not_supported = ( + capability < QUANTIZATION_METHODS["gptq_marlin"].get_min_capability()) + MODELS = [ # act_order==False, group_size=channelwise ("robertgshaw2/zephyr-7b-beta-channelwise-gptq", "main"), @@ -44,7 +53,7 @@ MODELS = [ @pytest.mark.flaky(reruns=3) -@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"), +@pytest.mark.skipif(gptq_marlin_not_supported, reason="gptq_marlin is not supported on this GPU type.") @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("dtype", ["half", "bfloat16"]) diff --git a/tests/models/test_gptq_marlin_24.py b/tests/models/test_gptq_marlin_24.py index 60d9ae2f1c6..195c3e5b586 100644 --- a/tests/models/test_gptq_marlin_24.py +++ b/tests/models/test_gptq_marlin_24.py @@ -9,9 +9,18 @@ Run `pytest tests/models/test_marlin_24.py`. from dataclasses import dataclass import pytest +import torch from tests.models.utils import check_logprobs_close -from tests.quantization.utils import is_quant_method_supported +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS + +marlin_not_supported = True + +if torch.cuda.is_available(): + capability = torch.cuda.get_device_capability() + capability = capability[0] * 10 + capability[1] + marlin_not_supported = ( + capability < QUANTIZATION_METHODS["marlin"].get_min_capability()) @dataclass @@ -38,7 +47,7 @@ model_pairs = [ @pytest.mark.flaky(reruns=2) -@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin_24"), +@pytest.mark.skipif(marlin_not_supported, reason="Marlin24 is not supported on this GPU type.") @pytest.mark.parametrize("model_pair", model_pairs) @pytest.mark.parametrize("dtype", ["half"]) diff --git a/tests/models/test_marlin.py b/tests/models/test_marlin.py index e86f6e29d15..761ba6aa4d5 100644 --- a/tests/models/test_marlin.py +++ b/tests/models/test_marlin.py @@ -13,11 +13,20 @@ Run `pytest tests/models/test_marlin.py`. 
from dataclasses import dataclass import pytest +import torch -from tests.quantization.utils import is_quant_method_supported +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS from .utils import check_logprobs_close +marlin_not_supported = True + +if torch.cuda.is_available(): + capability = torch.cuda.get_device_capability() + capability = capability[0] * 10 + capability[1] + marlin_not_supported = ( + capability < QUANTIZATION_METHODS["marlin"].get_min_capability()) + @dataclass class ModelPair: @@ -36,7 +45,7 @@ model_pairs = [ @pytest.mark.flaky(reruns=2) -@pytest.mark.skipif(not is_quant_method_supported("marlin"), +@pytest.mark.skipif(marlin_not_supported, reason="Marlin is not supported on this GPU type.") @pytest.mark.parametrize("model_pair", model_pairs) @pytest.mark.parametrize("dtype", ["half"]) diff --git a/tests/quantization/test_bitsandbytes.py b/tests/quantization/test_bitsandbytes.py index 953fd9ba939..31e938d15a1 100644 --- a/tests/quantization/test_bitsandbytes.py +++ b/tests/quantization/test_bitsandbytes.py @@ -5,12 +5,16 @@ Run `pytest tests/quantization/test_bitsandbytes.py`. import pytest import torch -from tests.quantization.utils import is_quant_method_supported from vllm import SamplingParams +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS + +capability = torch.cuda.get_device_capability() +capability = capability[0] * 10 + capability[1] -@pytest.mark.skipif(not is_quant_method_supported("bitsandbytes"), - reason='bitsandbytes is not supported on this GPU type.') +@pytest.mark.skipif( + capability < QUANTIZATION_METHODS['bitsandbytes'].get_min_capability(), + reason='bitsandbytes is not supported on this GPU type.') def test_load_bnb_model(vllm_runner) -> None: with vllm_runner('huggyllama/llama-7b', quantization='bitsandbytes', diff --git a/tests/quantization/test_fp8.py b/tests/quantization/test_fp8.py index 3db12f3798b..fccce7f7b59 100644 --- a/tests/quantization/test_fp8.py +++ b/tests/quantization/test_fp8.py @@ -5,12 +5,16 @@ Run `pytest tests/quantization/test_fp8.py --forked`. import pytest import torch -from tests.quantization.utils import is_quant_method_supported +from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS from vllm.model_executor.layers.quantization.fp8 import Fp8LinearMethod +capability = torch.cuda.get_device_capability() +capability = capability[0] * 10 + capability[1] -@pytest.mark.skipif(not is_quant_method_supported("fp8"), - reason="FP8 is not supported on this GPU type.") + +@pytest.mark.skipif( + capability < QUANTIZATION_METHODS["fp8"].get_min_capability(), + reason="FP8 is not supported on this GPU type.") def test_load_fp16_model(vllm_runner) -> None: with vllm_runner("facebook/opt-125m", quantization="fp8") as llm: diff --git a/tests/quantization/utils.py b/tests/quantization/utils.py deleted file mode 100644 index 0c92d565d0d..00000000000 --- a/tests/quantization/utils.py +++ /dev/null @@ -1,14 +0,0 @@ -import torch - -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS - - -def is_quant_method_supported(quant_method: str) -> bool: - # Currently, all quantization methods require Nvidia or AMD GPUs - if not torch.cuda.is_available(): - return False - - capability = torch.cuda.get_device_capability() - capability = capability[0] * 10 + capability[1] - return (capability < - QUANTIZATION_METHODS[quant_method].get_min_capability())
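
Note on the code being reverted: in the deleted tests/quantization/utils.py above, is_quant_method_supported() returns `capability < QUANTIZATION_METHODS[quant_method].get_min_capability()`, i.e. True only when the device does *not* meet the method's minimum compute capability. That is the opposite of what the function name and the `skipif(not is_quant_method_supported(...))` call sites in the removed lines expect, so the hunks above restore explicit per-file checks instead. Purely as an illustration (not part of this patch), a minimal corrected helper under the same assumptions would flip the comparison:

    import torch

    from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS


    def is_quant_method_supported(quant_method: str) -> bool:
        # All supported quantization methods currently require a CUDA/ROCm device.
        if not torch.cuda.is_available():
            return False

        # Compute capability encoded as major*10 + minor, e.g. 8.0 -> 80,
        # matching the inline checks restored by this revert.
        major, minor = torch.cuda.get_device_capability()
        capability = major * 10 + minor

        # Supported when the device meets the method's minimum capability,
        # hence `>=` rather than the original `<`.
        return (capability >=
                QUANTIZATION_METHODS[quant_method].get_min_capability())

The per-file pattern restored by this revert duplicates the capability arithmetic in each test module, but it keeps the skip condition explicit at every call site rather than relying on the shared helper.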