Guard NumPy usage using USE_NUMPY (#11798)
Summary: All usages of the `ndarray` construct are now guarded with `USE_NUMPY`, so NumPy is no longer a hard requirement when building PyTorch from source. Fixes #11757
Reviewed By: Yangqing
Differential Revision: D10031862
Pulled By: SsnL
fbshipit-source-id: 32d84fd770a7714d544e2ca1895a3d7c75b3d712
Commit 39bd73ae51 (parent c064f8a89d), committed by Facebook GitHub Bot.
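The change applies one pattern throughout the bindings: every call site that touches the NumPy C API is wrapped in `#ifdef USE_NUMPY`, with a `CAFFE_THROW` fallback so a build without NumPy still compiles and fails with a clear runtime error instead of a compile error. A minimal sketch of that pattern follows; the function `ExampleFeedNdarray` is illustrative only and not part of this commit, and the `caffe2/core/logging.h` include is assumed to be where `CAFFE_THROW` lives.

#include <Python.h>
#include "caffe2/core/logging.h" // assumed header providing CAFFE_THROW
#ifdef USE_NUMPY
#include <numpy/arrayobject.h>
#endif // USE_NUMPY

// Illustrative only: the guarded call sites in the diff below follow this shape.
bool ExampleFeedNdarray(PyObject* obj) {
#ifdef USE_NUMPY
  // NumPy headers are available, so ndarray objects may be inspected directly.
  if (PyArray_Check(obj)) {
    PyArrayObject* array = reinterpret_cast<PyArrayObject*>(obj);
    (void)array; // ... hand the array off to a feeder here ...
    return true;
  }
  return false;
#else
  // Built with USE_NUMPY=OFF: fail loudly at runtime rather than at compile time.
  CAFFE_THROW("Caffe2 compiled without NumPy support.");
#endif // USE_NUMPY
}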
@@ -99,6 +99,7 @@ option(USE_NUMA "Use NUMA (only available on Linux)" ON)
cmake_dependent_option(
    USE_NVRTC "Use NVRTC. Only available if USE_CUDA is on." OFF
    "USE_CUDA" OFF)
option(USE_NUMPY "Use NumPy" ON)
option(USE_OBSERVERS "Use observers module." OFF)
option(USE_OPENCL "Use OpenCL" OFF)
option(USE_OPENCV "Use OpenCV" ON)
@@ -45,6 +45,10 @@ static_assert(
#cmakedefine CAFFE2_USE_TRT
#cmakedefine CAFFE2_DISABLE_NUMA

#ifndef USE_NUMPY
#cmakedefine USE_NUMPY
#endif

#ifndef EIGEN_MPL2_ONLY
#cmakedefine EIGEN_MPL2_ONLY
#endif
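For context, `#cmakedefine USE_NUMPY` is rewritten by CMake's `configure_file()` when this template becomes the generated header, and the surrounding `#ifndef` keeps the generated `#define` from clashing with a `-DUSE_NUMPY` that may also arrive on the compiler command line. A sketch of the two possible outputs, based on standard `configure_file()` behavior rather than copied from the generated file:

/* Generated header when the CMake option USE_NUMPY is ON: */
#ifndef USE_NUMPY
#define USE_NUMPY
#endif

/* Generated header when USE_NUMPY is OFF -- CMake comments the macro out: */
#ifndef USE_NUMPY
/* #undef USE_NUMPY */
#endif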
@@ -84,6 +84,7 @@ static_assert(
    "We make an assumption that int is always int32 for numpy "
    "type mapping.");
int CaffeToNumpyType(const TypeMeta& meta) {
#ifdef USE_NUMPY
  static std::map<TypeIdentifier, int> numpy_type_map{
      {TypeMeta::Id<bool>(), NPY_BOOL},
      {TypeMeta::Id<double>(), NPY_DOUBLE},
@@ -100,9 +101,13 @@ int CaffeToNumpyType(const TypeMeta& meta) {
  };
  const auto it = numpy_type_map.find(meta.id());
  return it == numpy_type_map.end() ? -1 : it->second;
#else
  CAFFE_THROW("Caffe2 compiled without NumPy support.");
#endif // USE_NUMPY
}

const TypeMeta& NumpyTypeToCaffe(int numpy_type) {
#ifdef USE_NUMPY
  static std::map<int, TypeMeta> caffe_type_map{
      {NPY_BOOL, TypeMeta::Make<bool>()},
      {NPY_DOUBLE, TypeMeta::Make<double>()},
@@ -126,6 +131,9 @@ const TypeMeta& NumpyTypeToCaffe(int numpy_type) {
  static TypeMeta unknown_type;
  const auto it = caffe_type_map.find(numpy_type);
  return it == caffe_type_map.end() ? unknown_type : it->second;
#else
  CAFFE_THROW("Caffe2 compiled without NumPy support.");
#endif // USE_NUMPY
}

template <typename Registry>
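With the guard in place, `CaffeToNumpyType` only returns its -1 sentinel for unmapped types when NumPy is compiled in; without NumPy the same call throws, so callers are expected to reach it only from code that is itself inside `#ifdef USE_NUMPY`. A hypothetical caller, shown only to illustrate that contract (not part of the commit, and assumed to live alongside `CaffeToNumpyType` in the same namespace):

#ifdef USE_NUMPY
// Hypothetical helper: true if `meta` maps onto a concrete NumPy dtype.
bool HasNumpyDtype(const TypeMeta& meta) {
  return CaffeToNumpyType(meta) != -1; // -1 is the "no mapping" sentinel above
}
#endif // USE_NUMPY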
@@ -341,16 +349,19 @@ void addObjectMethods(py::module& m) {
            CAFFE_ENFORCE(ParseProtoFromLargeString(
                py::bytes(device_option).cast<std::string>(), &option));
          }
#ifdef USE_NUMPY
          if (PyArray_Check(arg.ptr())) { // numpy array
            PyArrayObject* array =
                reinterpret_cast<PyArrayObject*>(arg.ptr());
            auto feeder = CreateFeeder(option.device_type());
            CAFFE_ENFORCE(
                feeder, "Unknown device type encountered in FeedBlob.");
            feeder->Feed(option, array, blob);
            return true;
          }
#else
          CAFFE_THROW("Caffe2 compiled without NumPy support.");
#endif // USE_NUMPY
          if (PyBytes_Check(arg.ptr()) || PyUnicode_Check(arg.ptr())) {
            *blob->GetMutable<std::string>() = arg.cast<std::string>();
            return true;
@@ -413,12 +424,16 @@ void addObjectMethods(py::module& m) {
      .def(
          "feed",
          [](TensorCPU* t, py::object obj) {
#ifdef USE_NUMPY
            if (!PyArray_Check(obj.ptr())) {
              CAFFE_THROW(
                  "Unexpected type of argument -- expected numpy array");
            }
            TensorFeeder<CPUContext>().FeedTensor(
                DeviceOption{}, reinterpret_cast<PyArrayObject*>(obj.ptr()), t);
#else
            CAFFE_THROW("Caffe2 compiled without NumPy support.");
#endif // USE_NUMPY
          },
          "Copy data from given numpy array into this tensor.")
      .def(
@@ -733,6 +748,7 @@ void addObjectMethods(py::module& m) {
              const auto& name = pair.first;
              const auto& input = pair.second;
              tensors_data.emplace(name, Tensor(CPU));
#ifdef USE_NUMPY
              CAFFE_ENFORCE(
                  PyArray_Check(input.ptr()),
                  "Input must be of type numpy array.");
@@ -740,6 +756,9 @@ void addObjectMethods(py::module& m) {
                  reinterpret_cast<PyArrayObject*>(input.ptr());
              TensorFeeder<CPUContext>().FeedTensor(
                  DeviceOption(), array, &tensors_data.at(name));
#else
              CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
            }
            caffe2::Predictor::TensorList out;
            instance.RunMap(tensors_data, &out);
@@ -757,6 +776,7 @@ void addObjectMethods(py::module& m) {
            for (auto i = 0; i < inputs.size(); ++i) {
              tensors_data.emplace_back(caffe2::CPU);
            }
#ifdef USE_NUMPY
            for (auto i = 0; i < inputs.size(); ++i) {
              auto input = inputs[i];
              CAFFE_ENFORCE(
@@ -767,6 +787,9 @@ void addObjectMethods(py::module& m) {
              TensorFeeder<CPUContext>().FeedTensor(
                  DeviceOption(), array, &(tensors_data[i]));
            }
#else
            CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
            std::vector<TensorCPU> out;
            instance.Run(tensors_data, &out);
            std::vector<py::object> pyout;
@@ -866,6 +889,7 @@ void addObjectMethods(py::module& m) {
            for (auto i = 0; i < inputs.size(); ++i) {
              tensors_data.emplace_back(CPU);
            }
#ifdef USE_NUMPY
            for (auto i = 0; i < inputs.size(); ++i) {
              auto input = inputs[i];
              CAFFE_ENFORCE(
@@ -876,6 +900,9 @@ void addObjectMethods(py::module& m) {
              TensorFeeder<CPUContext>().FeedTensor(
                  DeviceOption(), array, &(tensors_data[i]));
            }
#else
            CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
            std::vector<TensorCPU> out;
            instance(tensors_data, &out);
            std::vector<py::object> pyout;
@@ -889,6 +916,7 @@ void addObjectMethods(py::module& m) {
          [](Predictor& instance, std::map<std::string, py::object> inputs)
              -> std::vector<py::object> {
            Predictor::TensorMap tensors_data;
#ifdef USE_NUMPY
            for (const auto pair : inputs) {
              const auto& name = pair.first;
              const auto& input = pair.second;
@@ -901,6 +929,9 @@ void addObjectMethods(py::module& m) {
              TensorFeeder<CPUContext>().FeedTensor(
                  DeviceOption(), array, &tensors_data.at(name));
            }
#else
            CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
            Predictor::TensorList out;
            instance(tensors_data, &out);
            std::vector<py::object> pyout;
@@ -1423,6 +1454,7 @@ void addGlobalMethods(py::module& m) {
              py::bytes(device_option).cast<std::string>(), &option));
        }
        auto* blob = gWorkspace->CreateBlob(name);
#ifdef USE_NUMPY
        if (PyArray_Check(arg.ptr())) { // numpy array
          PyArrayObject* array = reinterpret_cast<PyArrayObject*>(arg.ptr());
          auto feeder = CreateFeeder(option.device_type());
@@ -1433,6 +1465,9 @@ void addGlobalMethods(py::module& m) {
          feeder->Feed(option, array, blob);
          return true;
        }
#else
        CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
        if (PyBytes_Check(arg.ptr()) || PyUnicode_Check(arg.ptr())) { // string
          *blob->GetMutable<std::string>() = arg.cast<std::string>();
          return true;
@@ -1748,10 +1783,12 @@ void addGlobalMethods(py::module& m) {

  auto initialize = [&]() {
  // Initialization of the module
#ifdef USE_NUMPY
    ([]() -> void {
      // import_array1() forces a void return value.
      import_array1();
    })();
#endif // USE_NUMPY
    // Single threaded, so safe
    static bool initialized = false;
    if (initialized) {
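`import_array1()` is the NumPy C-API initializer macro: it must run once in the module before any `PyArray_*` call, and the void-lambda wrapper exists because the macro expands to a `return` statement on failure. The guard simply skips that initialization when NumPy is absent. A sketch of the same idiom in a standalone pybind11 module, under the header setup shown further below; the module name `caffe2_pybind_sketch` is made up for illustration:

#include <pybind11/pybind11.h>
#ifdef USE_NUMPY
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
#endif // USE_NUMPY

PYBIND11_MODULE(caffe2_pybind_sketch, m) {
#ifdef USE_NUMPY
  // import_array1() expands to a `return` on failure, so wrap it in a void lambda.
  ([]() -> void { import_array1(); })();
#endif // USE_NUMPY
  m.doc() = "illustrative module; the NumPy C API is initialized only when available";
}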
@@ -19,6 +19,9 @@
#include <pybind11/stl.h>

#include <Python.h>

#ifdef USE_NUMPY

#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#define PY_ARRAY_UNIQUE_SYMBOL caffe2_python_ARRAY_API
#include <numpy/arrayobject.h>
@@ -30,6 +33,12 @@
#define PyArray_SetBaseObject(arr, x) (PyArray_BASE(arr) = (x))
#endif

#else

struct PyArrayObject; // Forward declaring PyArrayObject for safety

#endif // USE_NUMPY

namespace caffe2 {
namespace python {
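When NumPy is unavailable, the header falls back to an opaque forward declaration so that feeder and fetcher signatures can keep mentioning `PyArrayObject*` without pulling in `numpy/arrayobject.h`. A small self-contained illustration of why that is sufficient; the class `ExampleFeeder` is hypothetical and the throw stands in for the `CAFFE_THROW` used by the real code:

#include <stdexcept>

struct PyArrayObject; // opaque stand-in, mirroring the fallback in the header above

class ExampleFeeder {
 public:
  void Feed(PyArrayObject* array); // a pointer to an incomplete type is fine here
};

void ExampleFeeder::Feed(PyArrayObject* /*array*/) {
  // The pointer is never dereferenced on this path, so the full definition of
  // PyArrayObject (which only the NumPy headers provide) is never required.
  throw std::runtime_error("compiled without NumPy support");
}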
@@ -100,11 +109,16 @@ class TensorFetcher : public BlobFetcherBase {
  // Checks whether the data with type `meta` needs to be copied in the context
  // of `tensor`
  bool NeedsCopy(const Tensor* tensor, const TypeMeta& meta) const {
#ifdef USE_NUMPY
    return tensor->GetStaticContext() != GetCPUStaticContext() ||
        CaffeToNumpyType(meta) == NPY_OBJECT;
#else
    return tensor->GetStaticContext() != GetCPUStaticContext();
#endif // USE_NUMPY
  }

  FetchedBlob FetchTensor(const Tensor& tensor, bool force_copy) {
#ifdef USE_NUMPY
    FetchedBlob result;
    CAFFE_ENFORCE_GE(tensor.size(), 0, "Trying to fetch uninitialized tensor");
    const int numpy_type = CaffeToNumpyType(tensor.meta());
@@ -153,6 +167,9 @@ class TensorFetcher : public BlobFetcherBase {
      context->FinishDeviceComputation();
    }
    return result;
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
  }
};
@@ -163,6 +180,7 @@ class TensorFeeder : public BlobFeederBase {
      const DeviceOption& option,
      PyArrayObject* original_array,
      Tensor* tensor) {
#ifdef USE_NUMPY
    PyArrayObject* array = PyArray_GETCONTIGUOUS(original_array);
    auto g = MakeGuard([&]() { Py_XDECREF(array); });

@@ -227,6 +245,9 @@ class TensorFeeder : public BlobFeederBase {
            tensor->raw_mutable_data(meta));
    }
    context.FinishDeviceComputation();
#else
    CAFFE_THROW("Caffe2 compiled without NumPy support.");
#endif // USE_NUMPY
  }

  virtual void
@@ -53,6 +53,7 @@ public:
  }

  FetchedBlob FetchTensor(const itensor &atensor, bool force_copy) {
#ifdef USE_NUMPY
    FetchedBlob result;
    CAFFE_ENFORCE(atensor.materialized(),
                  "Trying to fetch uninitialized tensor");
@@ -86,6 +87,9 @@ public:
    }

    return result;
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
  }
};
@@ -108,6 +112,7 @@ public:
      const DeviceOption &option,
      PyArrayObject *original_array,
      itensor *tensor) {
#ifdef USE_NUMPY
    PyArrayObject *array = PyArray_GETCONTIGUOUS(original_array);
    auto g = MakeGuard([&]() { Py_XDECREF(array); });
    const auto npy_type = PyArray_TYPE(array);
@@ -139,17 +144,25 @@ public:
      tensor->reorder_from(adims, type,
                           static_cast<void *>(PyArray_DATA(array)));
    }
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
  }

  bool ZeroDim(PyArrayObject *array) {
#ifdef USE_NUMPY
    int ndim = PyArray_NDIM(array);
    npy_intp *npy_dims = PyArray_DIMS(array);
    return ndim == 0 ||
           std::find(npy_dims, npy_dims + ndim, 0) != npy_dims + ndim;
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif
  }

  void Feed(const DeviceOption &option, PyArrayObject *original_array,
            Blob *blob) {
#ifdef USE_NUMPY
    try {
      PyArrayObject *array = PyArray_GETCONTIGUOUS(original_array);
      auto g = MakeGuard([&]() { Py_XDECREF(array); });
@@ -170,6 +183,9 @@ public:
      LOG(ERROR) << "IDEEP error: " << e.message;
      throw;
    }
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif
  }
};
|
@ -32,6 +32,7 @@ namespace python {
|
||||
class Int8TensorFetcher : public BlobFetcherBase {
|
||||
public:
|
||||
pybind11::object Fetch(const Blob& blob) override {
|
||||
#ifdef USE_NUMPY
|
||||
const caffe2::int8::Int8TensorCPU& src =
|
||||
blob.template Get<caffe2::int8::Int8TensorCPU>();
|
||||
const int numpy_type = CaffeToNumpyType(src.t.meta());
|
||||
@ -51,6 +52,9 @@ class Int8TensorFetcher : public BlobFetcherBase {
|
||||
auto result = pybind11::cast<pybind11::object>(
|
||||
pybind11::make_tuple(data_array, src.scale, src.zero_point));
|
||||
return result;
|
||||
#else
|
||||
CAFFE_THROW("Caffe2 was compiled without NumPy support.");
|
||||
#endif // USE_NUMPY
|
||||
}
|
||||
};
|
||||
|
||||
|
@@ -23,6 +23,7 @@ template <typename T>
class MKLMemoryFetcher : public BlobFetcherBase {
 public:
  pybind11::object Fetch(const Blob& blob) override {
#ifdef USE_NUMPY
    const MKLMemory<T>& src = blob.Get<MKLMemory<T>>();
    CAFFE_ENFORCE(src.buffer(), "Trying to fetch uninitialized tensor");
    const int numpy_type = CaffeToNumpyType(TypeMeta::Make<T>());
@@ -40,6 +41,9 @@ class MKLMemoryFetcher : public BlobFetcherBase {
        PyArray_DATA(reinterpret_cast<PyArrayObject*>(result.ptr())));
    src.CopyTo(ptr);
    return result;
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
  }
};

@@ -47,6 +51,7 @@ class MKLMemoryFeeder : public BlobFeederBase {
 public:
  void Feed(const DeviceOption&, PyArrayObject* original_array, Blob* blob)
      override {
#ifdef USE_NUMPY
    PyArrayObject* array = PyArray_GETCONTIGUOUS(original_array);
    auto g = MakeGuard([&]() { Py_XDECREF(array); });

@@ -63,10 +68,14 @@ class MKLMemoryFeeder : public BlobFeederBase {
          PyArray_TYPE(array),
          ". Only float and double are supported by MKLDNN.");
    }
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
  }

  template <typename T>
  void FeedMKL(PyArrayObject* array, Blob* blob) {
#ifdef USE_NUMPY
    // numpy requires long int as its dims.
    int ndim = PyArray_NDIM(array);
    npy_intp* npy_dims = PyArray_DIMS(array);
@@ -83,6 +92,9 @@ class MKLMemoryFeeder : public BlobFeederBase {
    }
    blob->GetMutable<MKLMemory<T>>()->CopyFrom(
        static_cast<const void*>(PyArray_DATA(array)));
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
  }
};
@@ -382,9 +382,14 @@ if(BUILD_PYTHON)
  set(Python_ADDITIONAL_VERSIONS 3.7 3.6 3.5 2.8 2.7 2.6)
  find_package(PythonInterp 2.7)
  find_package(PythonLibs 2.7)
  find_package(NumPy REQUIRED)
  if(PYTHONINTERP_FOUND AND PYTHONLIBS_FOUND AND NUMPY_FOUND)
    include_directories(SYSTEM ${PYTHON_INCLUDE_DIR} ${NUMPY_INCLUDE_DIR})
  find_package(NumPy)
  if(PYTHONINTERP_FOUND AND PYTHONLIBS_FOUND)
    include_directories(SYSTEM ${PYTHON_INCLUDE_DIR})
    caffe2_update_option(USE_NUMPY OFF)
    if(NUMPY_FOUND)
      caffe2_update_option(USE_NUMPY ON)
      include_directories(SYSTEM ${NUMPY_INCLUDE_DIR})
    endif()
    # Observers are required in the python build
    caffe2_update_option(USE_OBSERVERS ON)
  else()
@@ -109,6 +109,7 @@ function (caffe2_print_configuration_summary)
    message(STATUS "    NERVANA_GPU version : ${NERVANA_GPU_VERSION}")
  endif()
  message(STATUS "  USE_NNPACK            : ${USE_NNPACK}")
  message(STATUS "  USE_NUMPY             : ${USE_NUMPY}")
  message(STATUS "  USE_OBSERVERS         : ${USE_OBSERVERS}")
  message(STATUS "  USE_OPENCL            : ${USE_OPENCL}")
  message(STATUS "  USE_OPENCV            : ${USE_OPENCV}")
@@ -184,6 +184,7 @@ goto:eof
  -DBUILD_CAFFE2_OPS=%BUILD_CAFFE2_OPS% ^
  -DONNX_NAMESPACE=%ONNX_NAMESPACE% ^
  -DUSE_CUDA=%USE_CUDA% ^
  -DUSE_NUMPY=%USE_NUMPY% ^
  -DUSE_CUDNN=OFF ^
  -DUSE_NNPACK=%USE_NNPACK% ^
  -DUSE_LEVELDB=%USE_LEVELDB% ^
@@ -284,6 +284,7 @@ function build_caffe2() {
      -DBUILD_CAFFE2_OPS=$BUILD_CAFFE2_OPS \
      -DONNX_NAMESPACE=$ONNX_NAMESPACE \
      -DUSE_CUDA=$USE_CUDA \
      -DUSE_NUMPY=$USE_NUMPY \
      -DCAFFE2_STATIC_LINK_CUDA=$CAFFE2_STATIC_LINK_CUDA \
      -DUSE_ROCM=$USE_ROCM \
      -DUSE_NNPACK=$USE_NNPACK \