Files
pytorch/torch/csrc/Exceptions.cpp
Edward Yang 517c7c9861 Canonicalize all includes in PyTorch. (#14849)
Summary:
Anywhere we used #include "foo.h", we now say #include <foo.h>
Paths are adjusted to be rooted out of aten/src, torch/lib, or
the root level directory.

I modified CMakeLists.txt by hand to remove TH and THC from
the include paths.

I used the following script to do the canonicalization:

```
  import subprocess
  import re
  import os.path

  files = subprocess.check_output(['git', 'ls-files']).decode('utf-8').rstrip().split('\n')
  for fn in files:
      if not any(fn.endswith(suff) for suff in ['.cu', '.cpp', '.in', '.h', '.hpp', '.cu', '.cuh', '.cc']):
          continue
      if not any(fn.startswith(pref) for pref in ["aten/", "torch/"]):
          continue
      with open(fn, 'r') as f:
          c = f.read()
      def fmt(p):
          return "#include <{}>".format(p)
      def repl(m):
          p = m.group(1)
          if p in ["dlfcn.h", "unistd.h", "nvrtc.h", "cuda.h", "cuda_runtime.h", "cstdint", "cudnn.h", "Python.h", "cusparse.h", "cuda_runtime_api.h", "cuda_fp16.h", "cublas_v2.h", "stdint.h", "curand_kernel.h"]:
              return fmt(p)
          if any(p.startswith(pref) for pref in ["torch/csrc", "c10/", "ATen/", "caffe2/", "TH/", "THC/", "Eigen/", "gtest/", "zdl/", "gloo/", "onnx/", "miopen/"]):
              return fmt(p)
          for root in ["aten/src", "torch/lib", ""]:
              for bad_root in [os.path.dirname(fn), "aten/src/TH", "aten/src/THC", "torch/csrc"]:
                  new_p = os.path.relpath(os.path.join(bad_root, p), root)
                  if not new_p.startswith("../") and (os.path.exists(os.path.join(root, new_p)) or os.path.exists(os.path.join(root, new_p + ".in"))):
                      return fmt(new_p)
          print("ERROR: ", fn, p)
          return m.group(0)
      new_c = re.sub(r'#include "([^"]+)"', repl, c)
      if new_c != c:
          print(fn)
          with open(fn, 'w') as f:
              f.write(new_c)
```

Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14849

Reviewed By: dzhulgakov

Differential Revision: D13363445

Pulled By: ezyang

fbshipit-source-id: 52361f878a672785f9306c9e9ab2513128092b68
2018-12-08 19:38:30 -08:00

142 lines
5.4 KiB
C++

#include <torch/csrc/Exceptions.h>
#include <torch/csrc/python_headers.h>
#include <utility>
#include <vector>
#include <cstdarg>
#include <torch/csrc/THP.h>
// Python exception type object registered as torch.FatalError.
PyObject *THPException_FatalError;

// Bail out of the enclosing bool-returning function when `cond` is falsy.
#define ASSERT_TRUE(cond) if (!(cond)) return false

// Creates the torch.FatalError exception type and attaches it to `module`.
// Returns true on success, false if creation or registration failed.
bool THPException_init(PyObject *module)
{
  THPException_FatalError =
      PyErr_NewException("torch.FatalError", nullptr, nullptr);
  ASSERT_TRUE(THPException_FatalError);
  ASSERT_TRUE(
      PyModule_AddObject(module, "FatalError", THPException_FatalError) == 0);
  return true;
}
namespace torch {
// Replaces, in place, every occurrence of `old_str` in `str` with `new_str`.
//
// Scanning resumes just past the inserted text. The previous version resumed
// at the match position itself, which loops forever whenever `new_str`
// contains `old_str` (e.g. replacing "b" with "bb"); an empty `old_str`
// likewise never terminated. Both cases are now handled.
void replaceAll(std::string & str,
    const std::string & old_str,
    const std::string & new_str) {
  // An empty needle matches at every position; treat it as a no-op.
  if (old_str.empty()) {
    return;
  }
  std::string::size_type pos = 0u;
  while ((pos = str.find(old_str, pos)) != std::string::npos) {
    str.replace(pos, old_str.length(), new_str);
    pos += new_str.length();  // skip the replacement so it is never rescanned
  }
}
// Rewrites ATen type names inside an error message into the user-facing
// torch tensor-type names, e.g. "Variable[CPUFloatType]" and "CPUFloatType"
// both become "torch.FloatTensor". All other text passes through untouched.
std::string processErrorMsg(std::string str) {
  // The original hand-written 64-entry table is the cross product of these
  // two axes, so build it programmatically in the same order.
  static const char* const kScalars[] = {
      "Byte", "Char", "Double", "Float", "Int", "Long", "Short", "Half"};
  // ATen backend prefix -> torch module prefix, most specific first so that
  // e.g. "SparseCUDA" is never mistaken for a bare "CUDA".
  static const std::pair<const char*, const char*> kBackends[] = {
      {"SparseCUDA", "torch.cuda.sparse."},
      {"SparseCPU", "torch.sparse."},
      {"CUDA", "torch.cuda."},
      {"CPU", "torch."}};

  std::vector<std::pair<std::string, std::string>> changes;
  // Bracketed "Variable[<Backend><Scalar>Type]" entries must come before the
  // bare "<Backend><Scalar>Type" ones so the wrapped form is consumed whole.
  for (int wrapped = 1; wrapped >= 0; --wrapped) {
    for (const auto& backend : kBackends) {
      for (const char* scalar : kScalars) {
        std::string aten = std::string(backend.first) + scalar + "Type";
        if (wrapped) {
          aten = "Variable[" + aten + "]";
        }
        changes.emplace_back(aten,
                             std::string(backend.second) + scalar + "Tensor");
      }
    }
  }

  // Apply each substitution everywhere in the message.
  for (const auto& change : changes) {
    std::string::size_type at = 0u;
    while ((at = str.find(change.first, at)) != std::string::npos) {
      str.replace(at, change.first.length(), change.second);
    }
  }
  return str;
}
// Renders a printf-style `format` with the arguments in `fmt_args` into a
// std::string. Output longer than the fixed-size buffer is silently
// truncated.
static std::string formatMessage(const char *format, va_list fmt_args) {
  constexpr size_t kErrorBufSize = 1024;
  char buffer[kErrorBufSize];
  vsnprintf(buffer, kErrorBufSize, format, fmt_args);
  // Defensive: guarantee termination even if vsnprintf misbehaved.
  buffer[kErrorBufSize - 1] = 0;
  return std::string(buffer);
}
// Builds an IndexError whose message is the printf-style expansion of
// `format` with the trailing arguments.
IndexError::IndexError(const char *format, ...) {
  va_list args;
  va_start(args, format);
  msg = formatMessage(format, args);
  va_end(args);  // always paired with va_start
}
// Builds a TypeError whose message is the printf-style expansion of
// `format` with the trailing arguments.
TypeError::TypeError(const char *format, ...) {
  va_list args;
  va_start(args, format);
  msg = formatMessage(format, args);
  va_end(args);  // always paired with va_start
}
// Builds a ValueError whose message is the printf-style expansion of
// `format` with the trailing arguments.
ValueError::ValueError(const char *format, ...) {
  va_list args;
  va_start(args, format);
  msg = formatMessage(format, args);
  va_end(args);  // always paired with va_start
}
} // namespace torch