Files
pytorch/torch/csrc/utils/init.cpp
James Reed 6ba60ec9b0 Add flag to temporarily disable MKL-DNN conv (#23837)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/23837

This is a temporary workaround to an issue in MKL-DNN's Convolution backwards implementation: https://github.com/pytorch/pytorch/issues/23825

It is only used to enable testing of quantization.

Test Plan: Imported from OSS

Differential Revision: D16659081

Pulled By: jamesr66a

fbshipit-source-id: de18ebe98dec2a042f28b23373e20da2b44a42a2
2019-08-06 11:20:26 -07:00

61 lines
2.3 KiB
C++

#include <ATen/core/ivalue.h>
#include <torch/csrc/utils/init.h>
#include <torch/csrc/utils/throughput_benchmark.h>
#include <ATen/native/Convolution.h>
#include <pybind11/functional.h>
namespace torch {
namespace throughput_benchmark {

/// Registers the ThroughputBenchmark pybind11 bindings on `module`:
/// - BenchmarkConfig: read/write knobs for a benchmark run
/// - BenchmarkExecutionStats: read-only results of a run
/// - ThroughputBenchmark: the benchmark driver itself
/// It also registers two private helpers (_enable_mkldnn_conv /
/// _disable_mkldnn_conv) that toggle the temporary MKL-DNN conv
/// workaround for https://github.com/pytorch/pytorch/issues/23825.
void initThroughputBenchmarkBindings(PyObject* module) {
  auto m = py::handle(module).cast<py::module>();
  // NOTE: a redundant `using namespace torch::throughput_benchmark;` was
  // removed here — this function is already defined inside that namespace.
  py::class_<BenchmarkConfig>(m, "BenchmarkConfig")
      .def(py::init<>())
      .def_readwrite(
          "num_calling_threads", &BenchmarkConfig::num_calling_threads)
      .def_readwrite("num_worker_threads", &BenchmarkConfig::num_worker_threads)
      .def_readwrite("num_warmup_iters", &BenchmarkConfig::num_warmup_iters)
      .def_readwrite("num_iters", &BenchmarkConfig::num_iters);
  py::class_<BenchmarkExecutionStats>(m, "BenchmarkExecutionStats")
      .def_readonly("latency_avg_ms", &BenchmarkExecutionStats::latency_avg_ms)
      .def_readonly("num_iters", &BenchmarkExecutionStats::num_iters);
  py::class_<ThroughputBenchmark>(m, "ThroughputBenchmark", py::dynamic_attr())
      .def(py::init<jit::script::Module>())
      .def(py::init<py::object>())
      .def(
          "add_input",
          [](ThroughputBenchmark& self, py::args args, py::kwargs kwargs) {
            self.addInput(std::move(args), std::move(kwargs));
          })
      .def(
          "run_once",
          [](ThroughputBenchmark& self, py::args args, py::kwargs kwargs) {
            // Depending on this being a ScriptModule or nn.Module we will
            // release the GIL or not further down in the stack
            return self.runOnce(std::move(args), std::move(kwargs));
          })
      .def("benchmark", [](ThroughputBenchmark& self, BenchmarkConfig config) {
        // The benchmark always runs without the GIL. GIL will be used where
        // needed. This will happen only in the nn.Module mode when
        // manipulating inputs and running actual inference
        AutoNoGIL no_gil_guard;
        return self.benchmark(config);
      });

  // Temporary toggles for the MKL-DNN conv backward workaround (gh-23825).
  // Use store() instead of exchange(): the previous value of the atomic flag
  // is not needed, and exchange()'s returned value was being discarded.
  m.def("_enable_mkldnn_conv", []() {
    at::native::disable_mkldnn_conv.store(false);
  });
  m.def("_disable_mkldnn_conv", []() {
    at::native::disable_mkldnn_conv.store(true);
  });
}

} // namespace throughput_benchmark
} // namespace torch