[TensorExpr] Add a class for representing data type. (#33217)

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/33217

Test Plan: Imported from OSS

Differential Revision: D19848380

Pulled By: ZolotukhinM

fbshipit-source-id: d8683f8fc4555d2456cd2a7c827d8e8231915b49
This commit is contained in:
Mikhail Zolotukhin
2020-02-21 13:06:13 -08:00
committed by Facebook Github Bot
parent 089d658153
commit 1a4f997178
12 changed files with 435 additions and 0 deletions

View File

@ -456,6 +456,7 @@ if (NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
${TORCH_SRC_DIR}/csrc/jit/vararg_functions.cpp
${TORCH_SRC_DIR}/csrc/jit/tensorexpr/mem_arena.cpp
${TORCH_SRC_DIR}/csrc/jit/tensorexpr/types.cpp
)
if (NOT INTERN_DISABLE_MOBILE_INTERP)
@ -757,6 +758,7 @@ ENDIF()
if (BUILD_TEST AND NOT MSVC AND NOT USE_ROCM)
add_subdirectory(${TORCH_ROOT}/test/cpp/jit ${CMAKE_BINARY_DIR}/test_jit)
add_subdirectory(${TORCH_ROOT}/test/cpp/tensorexpr ${CMAKE_BINARY_DIR}/test_tensorexpr)
if (USE_DISTRIBUTED)
add_subdirectory(${TORCH_ROOT}/test/cpp/rpc ${CMAKE_BINARY_DIR}/test_cpp_rpc)
endif()

View File

@ -0,0 +1,39 @@
# Build configuration for the TensorExpr C++ test binary (test_tensorexpr).
set(TENSOREXPR_TEST_ROOT ${TORCH_ROOT}/test/cpp/tensorexpr)
# Glob every test_*.cpp so adding a test needs no CMake edit; note that a
# newly added file is only picked up after re-running cmake (see README.md).
file(GLOB TENSOREXPR_TEST_SRCS ${TENSOREXPR_TEST_ROOT}/test_*.cpp)
# Export the discovered source list to the parent scope.
set(TENSOREXPR_TEST_SRCS ${TENSOREXPR_TEST_SRCS} PARENT_SCOPE)
add_executable(test_tensorexpr
${TORCH_ROOT}/test/cpp/common/main.cpp
${TENSOREXPR_TEST_ROOT}/gtest.cpp
${TENSOREXPR_TEST_SRCS})
target_link_libraries(test_tensorexpr PRIVATE torch gtest)
target_include_directories(test_tensorexpr PRIVATE ${ATen_CPU_INCLUDE})
# GPU backends: link the matching runtime libraries and set a compile-time
# flag so CUDA/ROCm-only tests can be conditionally compiled.
if (USE_CUDA)
target_link_libraries(test_tensorexpr PRIVATE
${CUDA_LIBRARIES}
${CUDA_NVRTC_LIB}
${CUDA_CUDA_LIB}
${TORCH_CUDA_LIBRARIES})
target_compile_definitions(test_tensorexpr PRIVATE USE_CUDA)
elseif (USE_ROCM)
target_link_libraries(test_tensorexpr PRIVATE
${ROCM_HIPRTC_LIB}
${PYTORCH_HIP_HCC_LIBRARIES}
${TORCH_CUDA_LIBRARIES})
target_link_libraries(test_tensorexpr PRIVATE caffe2_gpu)
target_compile_definitions(test_tensorexpr PRIVATE USE_ROCM)
endif()
if (INSTALL_TEST)
install(TARGETS test_tensorexpr DESTINATION bin)
# Install PDB files for MSVC builds
if (MSVC AND BUILD_SHARED_LIBS)
install(FILES $<TARGET_PDB_FILE:test_tensorexpr> DESTINATION bin OPTIONAL)
endif()
endif()

View File

@ -0,0 +1,55 @@
# TensorExpr C++ Tests
## How to add a new test
First, create a new test file. Test files should be placed in this
directory, with a name that starts with `test_`, like `test_foo.cpp`.
Here is an example test file you can copy-paste.
```cpp
#include <test/cpp/tensorexpr/test_base.h>
// Tests go in torch::jit
namespace torch {
namespace jit {
// 1. Test cases are void() functions.
// 2. They start with the prefix `test`
void testCaseOne() {
// ...
}
void testCaseTwo() {
// ...
}
}
}
```
Then, register your test in `tests.h`:
```cpp
// Add to TH_FORALL_TESTS_CUDA instead for CUDA-requiring tests
#define TH_FORALL_TESTS(_) \
_(ADFormulas) \
_(Attributes) \
...
_(CaseOne) // note that the `test` prefix is omitted.
_(CaseTwo)
```
We glob all the test files together in `CMakeLists.txt` so that you don't
have to edit it every time you add a test. Unfortunately, this means that in
order to get the build to pick up your new test file, you need to re-run
cmake:
```
python setup.py build --cmake
```
## How do I run the tests?
The following commands assume you are in PyTorch root.
```bash
# (re)build the test binary
ninja build/bin/test_tensorexpr
# run
build/bin/test_tensorexpr --gtest_filter='glob_style_filter*'
```

View File

View File

@ -0,0 +1,16 @@
#include <test/cpp/tensorexpr/tests.h>
#include <gtest/gtest.h>
namespace torch {
namespace jit {
// Bridge between the plain `void testFoo()` functions declared in tests.h
// and the gtest runner: each entry of TH_FORALL_TESTS expands into a
// TEST(TensorExprTest, Foo) case that simply calls testFoo().
#define TENSOREXPR_GTEST(name) \
TEST(TensorExprTest, name) { \
test##name(); \
}
TH_FORALL_TESTS(TENSOREXPR_GTEST)
#undef TENSOREXPR_GTEST
} // namespace jit
} // namespace torch

View File

@ -0,0 +1,31 @@
#pragma once
#include <gtest/gtest.h>
#include <test/cpp/common/support.h>
namespace torch {
namespace jit {
namespace tensorexpr {
// Asserts that `v1` and `v2` have the same length and that corresponding
// elements are within `threshold` of each other. `name` is appended to the
// failure message to identify which tensor/buffer mismatched.
template <typename U, typename V>
void ExpectAllNear(
    const std::vector<U>& v1,
    const std::vector<U>& v2,
    V threshold,
    const std::string& name = "") {
  ASSERT_EQ(v1.size(), v2.size());
  // size_t index avoids the signed/unsigned comparison with vector::size().
  for (size_t i = 0; i < v1.size(); i++) {
    EXPECT_NEAR(v1[i], v2[i], threshold)
        << "element index: " << i << ", name: " << name;
  }
}
// Checks that every element of `vec` compares equal to `val`.
template <typename T>
static void assertAllEqual(const std::vector<T>& vec, const T& val) {
  for (size_t idx = 0; idx < vec.size(); ++idx) {
    ASSERT_EQ(vec[idx], val);
  }
}
} // namespace tensorexpr
} // namespace jit
} // namespace torch

View File

@ -0,0 +1,37 @@
#include "test/cpp/tensorexpr/test_base.h"
#include "torch/csrc/jit/tensorexpr/mem_arena.h"
#include "torch/csrc/jit/tensorexpr/types.h"
namespace torch {
namespace jit {
using namespace torch::jit::tensorexpr;
// Exercises the Dtype class: equality, vector lane counts, the
// type-to-dtype mapping, and binary-op dtype promotion.
void testTypeTest01() {
KernelScope kernel_scope;
{
// A Dtype copied from a constant compares equal to that constant.
Dtype dt1 = kInt32;
EXPECT_EQ(dt1, kInt32);
}
{
// Dtypes are equal only when both scalar type and lane count match.
Dtype dt2_a(kInt32, 8);
Dtype dt2_b(kInt32, 4);
Dtype dt2_c(kInt32, 8);
EXPECT_EQ(dt2_a, dt2_c);
EXPECT_NE(dt2_a, dt2_b);
}
{
// ToDtype<T>() maps C++ scalar types to the corresponding Dtype constants.
EXPECT_EQ(kInt32, ToDtype<int>());
EXPECT_EQ(kFloat32, ToDtype<float>());
}
{
// BinaryOpDtype: mixed int/float operands promote to the float operand's
// dtype; identical operands yield that same dtype.
Dtype int32x8(kInt32, 8);
Dtype float32x8(kFloat32, 8);
EXPECT_NE(int32x8, float32x8);
EXPECT_EQ(float32x8, BinaryOpDtype(int32x8, float32x8));
EXPECT_EQ(float32x8, BinaryOpDtype(float32x8, int32x8));
EXPECT_EQ(int32x8, BinaryOpDtype(int32x8, int32x8));
EXPECT_EQ(float32x8, BinaryOpDtype(float32x8, float32x8));
}
}
} // namespace jit
} // namespace torch

View File

@ -0,0 +1,14 @@
#pragma once
#include <memory>
#include <vector>
#include "test/cpp/tensorexpr/test_base.h"
#include "torch/csrc/jit/testing/file_check.h"
namespace torch {
namespace jit {
using namespace torch::jit::tensorexpr;
} // namespace jit
} // namespace torch

View File

@ -0,0 +1,24 @@
#pragma once

/**
 * See README.md for instructions on how to add a new test.
 */

#include <c10/macros/Export.h>
#include <torch/csrc/WindowsTorchApiMacro.h>

namespace torch {
namespace jit {

// Master list of CPU test cases: each entry `_(Name)` corresponds to a
// `void testName()` function defined in a test_*.cpp file.
// NOTE: the last entry must NOT carry a line-continuation backslash —
// otherwise the next preprocessor directive is absorbed into this macro.
#define TH_FORALL_TESTS(_) \
  _(TypeTest01)

// Tests that require CUDA; only registered when USE_CUDA is defined.
#define TH_FORALL_TESTS_CUDA(_)

// Forward-declare every test function so gtest.cpp can reference them.
#define DECLARE_TENSOREXPR_TEST(name) void test##name();
TH_FORALL_TESTS(DECLARE_TENSOREXPR_TEST)
#ifdef USE_CUDA
TH_FORALL_TESTS_CUDA(DECLARE_TENSOREXPR_TEST)
#endif
#undef DECLARE_TENSOREXPR_TEST

} // namespace jit
} // namespace torch

View File

@ -191,10 +191,12 @@ libtorch_sources = [
"torch/csrc/jit/mobile/interpreter.cpp",
"torch/csrc/jit/mobile/type_parser.cpp",
"torch/csrc/jit/tensorexpr/mem_arena.cpp",
"torch/csrc/jit/tensorexpr/types.cpp",
"torch/csrc/utils/byte_order.cpp",
"torch/csrc/utils/tensor_flatten.cpp",
"torch/csrc/utils/variadic.cpp",
"torch/csrc/jit/tensorexpr/mem_arena.cpp",
"torch/csrc/jit/tensorexpr/types.cpp",
]
libtorch_cuda_sources = [

View File

@ -0,0 +1,100 @@
#include "torch/csrc/jit/tensorexpr/types.h"
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <c10/util/Logging.h>
namespace torch {
namespace jit {
namespace tensorexpr {
// Internal scalar-type tags stored in Dtype::scalar_type_. Kept local to
// this .cpp; the public API exposes them via the kUninitialized/kHandle/
// kInt32/kFloat32 Dtype constants defined below.
enum ScalarType {
kScalarUninitialized,
kScalarHandle,
kScalarInt32,
kScalarFloat32,
};
// Returns the 1-lane Dtype constant for this dtype's element type
// (e.g. the scalar type of an 8-lane int32 dtype is kInt32).
Dtype Dtype::scalar_type() const {
  const ScalarType tag = static_cast<ScalarType>(scalar_type_);
  if (tag == kScalarUninitialized) {
    return kUninitialized;
  }
  if (tag == kScalarHandle) {
    return kHandle;
  }
  if (tag == kScalarInt32) {
    return kInt32;
  }
  if (tag == kScalarFloat32) {
    return kFloat32;
  }
  LOG(FATAL) << "invalid scalar type: " << scalar_type_;
  return kUninitialized;
}
// Definitions of the canonical 1-lane dtype constants declared in types.h.
TORCH_API Dtype kInt32(kScalarInt32, 1);
TORCH_API Dtype kFloat32(kScalarFloat32, 1);
TORCH_API Dtype kHandle(kScalarHandle, 1);
TORCH_API Dtype kUninitialized(kScalarUninitialized, 1);
// Prints a human-readable form of `dtype`, e.g. "int32" or "float32x8"
// (an "x<lanes>" suffix is appended for multi-lane dtypes).
TORCH_API std::ostream& operator<<(std::ostream& stream, const Dtype& dtype) {
  switch (static_cast<ScalarType>(dtype.scalar_type_)) {
    case kScalarUninitialized:
      stream << "uninitialized";
      break;
    case kScalarHandle:
      stream << "handle";
      break;
    case kScalarInt32:
      stream << "int32";
      break;
    case kScalarFloat32:
      stream << "float32";
      break;
    default:
      LOG(FATAL) << "invalid scalar type: " << dtype.scalar_type_;
  }
  if (dtype.lanes() > 1) {
    // (removed a stray empty statement `;` that followed this line)
    stream << "x" << dtype.lanes();
  }
  return stream;
}
int Dtype::byte_size() const {
int scalar_size = -1;
switch (scalar_type_) {
case kScalarInt32:
scalar_size = sizeof(int32);
break;
case kScalarFloat32:
scalar_size = sizeof(float);
break;
default:
throw std::runtime_error(
"invalid scalar type; " + std::to_string(scalar_type_));
}
return scalar_size * lanes();
}
// Maps this dtype's scalar type to the C++ type name used by codegen.
std::string Dtype::ToCppString() const {
  switch (scalar_type_) {
    case kScalarInt32:
      return "int";
    case kScalarFloat32:
      return "float";
    default:
      throw std::runtime_error("Invalid dtype: " + std::to_string(scalar_type_));
  }
}
} // namespace tensorexpr
} // namespace jit
} // namespace torch
namespace std {
// Formats a Dtype via its operator<< (e.g. "int32x8").
// NOTE(review): adding a new overload (as opposed to a specialization) to
// namespace std is technically undefined behavior per the C++ standard;
// consider moving this helper into torch::jit::tensorexpr.
std::string to_string(const Dtype& dtype) {
std::ostringstream oss;
oss << dtype;
return oss.str();
}
} // namespace std

View File

@ -0,0 +1,115 @@
#pragma once
#include <cstdint>
#include <iostream>
#include <c10/util/Logging.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
namespace torch {
namespace jit {
namespace tensorexpr {
using int32 = std::int32_t;
class Dtype;
TORCH_API std::ostream& operator<<(std::ostream& stream, const Dtype& dtype);
// Switch to PT/Aten dtypes
// Data types for scalar and vector elements.
// Represents the data type of a scalar or vector element: a scalar-type
// tag plus a lane count (lanes == 1 for scalars, > 1 for vectors).
class TORCH_API Dtype {
public:
explicit Dtype(int type) : scalar_type_(type), lanes_(1) {}
Dtype(int scalar_type, int lanes)
: scalar_type_(scalar_type), lanes_(lanes) {}
// Builds a vector dtype from a scalar one; `type` must have exactly 1 lane.
Dtype(Dtype type, int lanes)
: scalar_type_(type.scalar_type_), lanes_(lanes) {
CHECK(type.lanes() == 1);
}
// Number of vector lanes (1 for scalar dtypes).
int lanes() const {
return lanes_;
}
// The 1-lane dtype with the same scalar type (defined in types.cpp).
Dtype scalar_type() const;
// Dtypes are equal iff both the scalar type and the lane count match.
bool operator==(const Dtype& other) const {
return scalar_type_ == other.scalar_type_ && lanes_ == other.lanes_;
}
bool operator!=(const Dtype& other) const {
return !(*this == other);
}
// Total size in bytes (element size * lanes); throws for sizeless dtypes.
int byte_size() const;
// C++ type name used by codegen ("int"/"float"); throws for other dtypes.
std::string ToCppString() const;
private:
friend std::ostream& operator<<(std::ostream& stream, const Dtype& dtype);
int scalar_type_;
int lanes_; // number of lanes for a vector type (1 for scalars)
};
// Canonical 1-lane dtype singletons, defined in types.cpp.
extern TORCH_API Dtype kUninitialized;
extern TORCH_API Dtype kInt32;
extern TORCH_API Dtype kFloat32;
extern TORCH_API Dtype kHandle;
// Compile-time mapping from C++ scalar types to Dtype constants; only the
// types the IR supports (int and float) are specialized — using any other
// type is a link-time error.
template <typename T>
Dtype ToDtype();
template <>
inline Dtype ToDtype<int>() {
return kInt32;
}
template <>
inline Dtype ToDtype<float>() {
return kFloat32;
}
// Optional override for the result scalar type of a binary op, used when
// the op is a CompareSelect whose result type may differ from its inputs.
// knone means "no override": the result dtype follows the operands.
enum ReturnType {
knone,
kint32,
kfloat32,
};
// Computes the result dtype of a binary op over `op1_dtype` and
// `op2_dtype`. Identical operand dtypes yield that dtype unless `ret_type`
// overrides it (CompareSelect); mixed int/float operands promote to the
// float operand's dtype. Operand lane counts must match.
inline Dtype BinaryOpDtype(
    Dtype op1_dtype,
    Dtype op2_dtype,
    ReturnType ret_type = ReturnType::knone) {
  if (op1_dtype == op2_dtype) {
    if (ret_type == ReturnType::knone) {
      return op1_dtype;
    } else if (ret_type == ReturnType::kint32) {
      return ToDtype<int>();
    } else if (ret_type == ReturnType::kfloat32) {
      return ToDtype<float>();
    }
    throw std::runtime_error("invalid operator return type");
  }
  CHECK_EQ(op1_dtype.lanes(), op2_dtype.lanes()) << "vector lengths must match";
  const Dtype lhs_scalar = op1_dtype.scalar_type();
  const Dtype rhs_scalar = op2_dtype.scalar_type();
  if (lhs_scalar == kFloat32 && rhs_scalar == kInt32) {
    return op1_dtype;
  }
  if (lhs_scalar == kInt32 && rhs_scalar == kFloat32) {
    return op2_dtype;
  }
  LOG(FATAL) << "Invalid dtypes: " << op1_dtype << ", " << op2_dtype;
  return op1_dtype;
}
} // namespace tensorexpr
} // namespace jit
} // namespace torch
namespace std {
using torch::jit::tensorexpr::Dtype;
// Declared here so generic code can call std::to_string on a Dtype
// (implementation in types.cpp).
// NOTE(review): injecting names/overloads into namespace std is UB per the
// C++ standard; consider relocating this helper.
std::string to_string(const Dtype& dtype);
} // namespace std