Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-22 06:11:27 +08:00
Summary: In TorchScript and C++ extensions we currently advocate a mix of `torch::` and `at::` namespace usage. In the C++ frontend I had instead exported all symbols from `at::` and some from `c10::` into the `torch::` namespace. This is far, far easier for users to understand, and also avoids bugs around creating tensors vs. variables. The same should from now on be true for the TorchScript C++ API (for running and loading models) and all C++ extensions. Note that since we're just talking about typedefs, this change does not break any existing code. Once this lands I will update the code in `pytorch/tutorials` too. zdevito ezyang gchanan Pull Request resolved: https://github.com/pytorch/pytorch/pull/13523 Differential Revision: D12942787 Pulled By: goldsborough fbshipit-source-id: 76058936bd8707b33d9e5bbc2d0705fc3d820763
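As a minimal sketch of what this means for extension code, the helper below (hypothetical, not part of this PR) can be written entirely against the `torch::` namespace instead of mixing in `at::`:

#include <torch/torch.h>

// With the typedefs described above, tensor types and operators that used to
// require at:: (e.g. at::Tensor, at::relu) are available directly under torch::.
torch::Tensor scaled_relu(const torch::Tensor& input, double scale) {
  return torch::relu(input) * scale;
}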
30 lines
736 B
C++
#include <gtest/gtest.h>

#include <torch/jit.h>
#include <torch/types.h>

#include <string>

TEST(TorchScriptTest, CanCompileMultipleFunctions) {
  // Compile several script functions from a single source string and call them by name.
  auto module = torch::jit::compile(R"JIT(
      def test_mul(a, b):
        return a * b
      def test_relu(a, b):
        return torch.relu(a + b)
      def test_while(a, i):
        while bool(i < 10):
          a += a
          i += 1
        return a
    )JIT");
  auto a = torch::ones(1);
  auto b = torch::ones(1);

  ASSERT_EQ(1, module->run_method("test_mul", a, b).toTensor().item<int64_t>());

  ASSERT_EQ(2, module->run_method("test_relu", a, b).toTensor().item<int64_t>());

  // a starts at 1 and doubles while i counts from 1 to 10: 1 << 9 == 0x200.
  ASSERT_TRUE(
      0x200 == module->run_method("test_while", a, b).toTensor().item<int64_t>());
}
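Outside of GTest, the same API can be used directly from a standalone program; the snippet below is a minimal sketch (the script source and the function name `add` are illustrative, not part of the test above):

#include <torch/jit.h>
#include <torch/types.h>

#include <iostream>

int main() {
  // Compile a small script and invoke one of its functions by name.
  auto module = torch::jit::compile(R"JIT(
      def add(a, b):
        return a + b
    )JIT");
  auto result = module->run_method("add", torch::ones(1), torch::ones(1));
  std::cout << result.toTensor() << std::endl;
  return 0;
}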