// Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
// Pull Request resolved: https://github.com/pytorch/pytorch/pull/161238
// Approved by: https://github.com/kwen2501, https://github.com/syed-ahmed
#include <torch/csrc/python_headers.h>

#include <torch/csrc/jit/python/pybind_utils.h>
#include <torch/csrc/utils/device_lazy_init.h>
#include <torch/csrc/utils/pybind.h>

#include <c10/cuda/CUDACachingAllocator.h>

// Convenience alias: bind T to Python with std::shared_ptr as the holder
// type, so Python-side references participate in C++ shared ownership.
template <typename T>
using shared_ptr_class_ = py::class_<T, std::shared_ptr<T>>;
|
// NOLINTNEXTLINE(misc-use-internal-linkage)
|
|
void THCPMemPool_init(PyObject* module) {
|
|
auto torch_C_m = py::handle(module).cast<py::module>();
|
|
shared_ptr_class_<::c10::cuda::MemPool>(torch_C_m, "_MemPool")
|
|
.def(
|
|
py::init([](c10::cuda::CUDACachingAllocator::CUDAAllocator* allocator,
|
|
bool is_user_created,
|
|
bool use_on_oom) {
|
|
torch::utils::device_lazy_init(at::kCUDA);
|
|
return std::make_shared<::c10::cuda::MemPool>(
|
|
allocator, is_user_created, use_on_oom);
|
|
}))
|
|
.def_property_readonly("id", &::c10::cuda::MemPool::id)
|
|
.def_property_readonly("allocator", &::c10::cuda::MemPool::allocator)
|
|
.def("use_count", &::c10::cuda::MemPool::use_count);
|
|
}
|