[Misc] Clean up local imports (#23420)

commit 325aa3dee9
parent a073be6d87
Author: Ning Xie
Date: 2025-08-22 22:01:35 +08:00
Committed by: GitHub

Signed-off-by: Andy Xie <andy.xning@gmail.com>

2 changed files with 1 addition and 3 deletions

File 1 of 2:

@@ -292,7 +292,6 @@ class Worker(WorkerBase):
             allocator = CuMemAllocator.get_instance()
             context = allocator.use_memory_pool(tag="kv_cache")
         else:
-            from contextlib import nullcontext
             context = nullcontext()
         with context:
             self.model_runner.initialize_kv_cache(kv_cache_config)

File 2 of 2:

@@ -3,6 +3,7 @@
 """A GPU worker class."""
 import gc
 import os
+from contextlib import nullcontext
 from typing import Dict, List, Optional, Set, Tuple, Type, Union
 
 import torch
@@ -206,7 +207,6 @@ class Worker(LocalOrDistributedWorkerBase):
                 "used for one instance per process.")
             context = allocator.use_memory_pool(tag="weights")
         else:
-            from contextlib import nullcontext
             context = nullcontext()
         with context:
             self.model_runner.load_model()
@@ -330,7 +330,6 @@ class Worker(LocalOrDistributedWorkerBase):
             allocator = CuMemAllocator.get_instance()
             context = allocator.use_memory_pool(tag="kv_cache")
         else:
-            from contextlib import nullcontext
             context = nullcontext()
         with context:
             self._init_cache_engine()
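
All three hunks use the same pattern: pick either the real CuMemAllocator memory-pool context manager or a no-op one, so the with block reads identically in both branches, and import nullcontext once at module level instead of repeating a function-local import. Behavior is unchanged; only the import placement moves. A minimal standalone sketch of that pattern (sleep_mode_enabled and fake_memory_pool are illustrative placeholders, not vLLM APIs):

from contextlib import contextmanager, nullcontext


@contextmanager
def fake_memory_pool(tag: str):
    # Placeholder standing in for something like
    # CuMemAllocator.use_memory_pool(tag=...); it only marks
    # entry and exit so the control flow is visible.
    print(f"entering memory pool ({tag})")
    try:
        yield
    finally:
        print(f"leaving memory pool ({tag})")


def initialize(sleep_mode_enabled: bool) -> None:
    # Choose the real context manager or a no-op one; the body of the
    # with block is identical either way, which is what nullcontext()
    # is for.
    if sleep_mode_enabled:
        context = fake_memory_pool(tag="kv_cache")
    else:
        context = nullcontext()
    with context:
        print("initializing KV cache")


initialize(sleep_mode_enabled=True)
initialize(sleep_mode_enabled=False)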