Mirror of https://github.com/vllm-project/vllm.git, synced 2025-10-20 14:53:52 +08:00
[Bug] Temporarily Disable VLLM_ALLREDUCE_USE_SYMM_MEM by Default (#26925)
Signed-off-by: yewentao256 <zhyanwentao@126.com>
@@ -198,7 +198,7 @@ if TYPE_CHECKING:
     VLLM_USE_FLASHINFER_MOE_MXFP4_BF16: bool = False
     VLLM_ROCM_FP8_MFMA_PAGE_ATTN: bool = False
     VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8_CUTLASS: bool = False
-    VLLM_ALLREDUCE_USE_SYMM_MEM: bool = True
+    VLLM_ALLREDUCE_USE_SYMM_MEM: bool = False
     VLLM_TUNED_CONFIG_FOLDER: str | None = None
     VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS: bool = False
     VLLM_CUSTOM_SCOPES_FOR_PROFILING: bool = False
@@ -1343,7 +1343,7 @@ environment_variables: dict[str, Callable[[], Any]] = {
     ),
     # Whether to use pytorch symmetric memory for allreduce
     "VLLM_ALLREDUCE_USE_SYMM_MEM": lambda: bool(
-        int(os.getenv("VLLM_ALLREDUCE_USE_SYMM_MEM", "1"))
+        int(os.getenv("VLLM_ALLREDUCE_USE_SYMM_MEM", "0"))
     ),
     # Allows vllm to find tuned config under customized folder
     "VLLM_TUNED_CONFIG_FOLDER": lambda: os.getenv("VLLM_TUNED_CONFIG_FOLDER", None),
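As a quick illustration of what the new default means at runtime (this sketch is not part of the commit, and the helper name below is mine), the env-var lambda in the second hunk now falls back to "0" when the variable is unset, so symmetric-memory allreduce is off unless explicitly opted into:

import os

# Minimal standalone sketch mirroring the lambda in the diff above.
# After this commit the fallback string is "0", so an unset variable yields False.
def allreduce_use_symm_mem() -> bool:
    return bool(int(os.getenv("VLLM_ALLREDUCE_USE_SYMM_MEM", "0")))

if __name__ == "__main__":
    # Prints False unless the environment exports VLLM_ALLREDUCE_USE_SYMM_MEM=1.
    print(allreduce_use_symm_mem())

To restore the previous behavior, export VLLM_ALLREDUCE_USE_SYMM_MEM=1 before launching vLLM.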