Mirror of
https://github.com/vllm-project/vllm.git
synced 2025-10-20 14:53:52 +08:00
[Bugfix] Add strong reference to CUDA pluggable allocator callbacks (#23477)
Signed-off-by: 22quinn <33176974+22quinn@users.noreply.github.com> Signed-off-by: youkaichao <youkaichao@gmail.com> Co-authored-by: Eric Marcus <eric.marcus@kaiko.ai> Co-authored-by: youkaichao <youkaichao@gmail.com>
This commit is contained in:
@@ -152,8 +152,13 @@ class CuMemAllocator:
|
||||
self.pointer_to_data: dict[int, AllocationData] = {}
|
||||
self.current_tag: str = CuMemAllocator.default_tag
|
||||
self.allocator_and_pools: dict[str, Any] = {}
|
||||
# Creating strong references to the two callbacks here to prevent
|
||||
# these ephemeral bound-method objects being garbage collected.
|
||||
# See discussions in https://github.com/vllm-project/vllm/pull/22724
|
||||
self.python_malloc_callback = self._python_malloc_callback
|
||||
self.python_free_callback = self._python_free_callback
|
||||
|
||||
def python_malloc_callback(self, allocation_handle: HandleType) -> None:
|
||||
def _python_malloc_callback(self, allocation_handle: HandleType) -> None:
|
||||
"""
|
||||
Internal method to store the allocation data
|
||||
when memory is allocated in the memory pool."""
|
||||
@@ -162,7 +167,7 @@ class CuMemAllocator:
|
||||
allocation_handle, self.current_tag)
|
||||
return
|
||||
|
||||
def python_free_callback(self, ptr: int) -> HandleType:
|
||||
def _python_free_callback(self, ptr: int) -> HandleType:
|
||||
"""
|
||||
Internal method to look up the allocation data
|
||||
when memory is freed in the memory pool."""
|
||||
@@ -212,9 +217,9 @@ class CuMemAllocator:
|
||||
def wake_up(self, tags: Optional[list[str]] = None) -> None:
|
||||
"""
|
||||
Wake up the allocator from sleep mode.
|
||||
All data that was previously offloaded will be loaded back to GPU
|
||||
All data that was previously offloaded will be loaded back to GPU
|
||||
memory, and the rest of the data will have empty memory.
|
||||
|
||||
|
||||
:param tags: The tags of the memory allocation that will be loaded
|
||||
back to GPU memory. If None, all memory allocation will be loaded
|
||||
back to GPU memory.
|
||||
|
Reference in New Issue
Block a user