mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-21 13:44:15 +08:00
Summary: This is to fix #16141 and similar issues. The idea is to track a reference to every shared CUDA Storage and deallocate memory only after a consumer process deallocates received Storage. ezyang Done with cleanup. Same (insignificantly better) performance as in file-per-share solution, but handles millions of shared tensors easily. Note [ ] documentation in progress. Pull Request resolved: https://github.com/pytorch/pytorch/pull/16854 Differential Revision: D13994490 Pulled By: VitalyFedyunin fbshipit-source-id: 565148ec3ac4fafb32d37fde0486b325bed6fbd1
22 lines · 618 B · C++
#define __STDC_FORMAT_MACROS
#include <torch/csrc/python_headers.h>
#include <structmember.h>
// See Note [TH abstraction violation]
// - Used to get at allocator from storage
#include <TH/THTensor.hpp>
#include <THC/THCTensor.hpp>
#include <torch/csrc/cuda/THCP.h>
#include <torch/csrc/cuda/override_macros.h>
#include <torch/csrc/copy_utils.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/CudaIPCTypes.h>
#define THC_GENERIC_FILE "torch/csrc/generic/Storage.cpp"
#include <THC/THCGenerateAllTypes.h>
#define THC_GENERIC_FILE "torch/csrc/generic/Storage.cpp"
#include <THC/THCGenerateBoolType.h>