diff --git a/docs/cpp/source/index.rst b/docs/cpp/source/index.rst
index 6bf375e05129..572ed031b864 100644
--- a/docs/cpp/source/index.rst
+++ b/docs/cpp/source/index.rst
@@ -61,15 +61,15 @@ a taste of this interface:

   #include
   #include

-  at::Tensor a = torch::ones({2, 2}, at::requires_grad());
-  at::Tensor b = torch::randn({2, 2});
+  torch::Tensor a = torch::ones({2, 2}, torch::requires_grad());
+  torch::Tensor b = torch::randn({2, 2});
   auto c = a + b;
   c.backward(); // a.grad() will now hold the gradient of c w.r.t. a.

 The ``at::Tensor`` class in ATen is not differentiable by default. To add the
 differentiability of tensors the autograd API provides, you must use tensor
 factory functions from the `torch::` namespace instead of the `at` namespace.
-For example, while a tensor created with `at::ones` will not be differentiable,
+For example, while a tensor created with `torch::ones` will not be differentiable,
 a tensor created with `torch::ones` will be.

 C++ Frontend
diff --git a/docs/cpp/source/installing.rst b/docs/cpp/source/installing.rst
index a0f74e7e3bb1..9aabfe4aaa8d 100644
--- a/docs/cpp/source/installing.rst
+++ b/docs/cpp/source/installing.rst
@@ -6,7 +6,7 @@ configuration files required to depend on PyTorch. We call this distribution
 *LibTorch*, and you can download ZIP archives containing the latest LibTorch
 distribution on `our website `_. Below is a small example of writing a minimal application that depends on LibTorch
-and uses the ``at::Tensor`` class which comes with the PyTorch C++ API.
+and uses the ``torch::Tensor`` class which comes with the PyTorch C++ API.

 Minimal Example
 ---------------
@@ -37,7 +37,7 @@ this:

   target_link_libraries(example-app "${TORCH_LIBRARIES}")
   set_property(TARGET example-app PROPERTY CXX_STANDARD 11)

-The implementation of our example will simply create a new `at::Tensor` and
+The implementation of our example will simply create a new `torch::Tensor` and
 print it:

 ..
code-block:: cpp @@ -46,7 +46,7 @@ print it: #include int main() { - at::Tensor tensor = torch::rand({2, 3}); + torch::Tensor tensor = torch::rand({2, 3}); std::cout << tensor << std::endl; } diff --git a/test/cpp/api/cursor.cpp b/test/cpp/api/cursor.cpp index e5d50c1fc60d..c66bfca551db 100644 --- a/test/cpp/api/cursor.cpp +++ b/test/cpp/api/cursor.cpp @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include diff --git a/test/cpp/api/dataloader.cpp b/test/cpp/api/dataloader.cpp index 786923698d57..99273b48695a 100644 --- a/test/cpp/api/dataloader.cpp +++ b/test/cpp/api/dataloader.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include diff --git a/test/cpp/api/integration.cpp b/test/cpp/api/integration.cpp index b2d10097b239..7338a5cc8035 100644 --- a/test/cpp/api/integration.cpp +++ b/test/cpp/api/integration.cpp @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include diff --git a/test/cpp/api/jit.cpp b/test/cpp/api/jit.cpp index 9aa6968df71f..77326bc4e566 100644 --- a/test/cpp/api/jit.cpp +++ b/test/cpp/api/jit.cpp @@ -1,7 +1,7 @@ #include #include -#include +#include #include diff --git a/test/cpp/api/misc.cpp b/test/cpp/api/misc.cpp index f3f1cc6f4e3e..169ae9095e17 100644 --- a/test/cpp/api/misc.cpp +++ b/test/cpp/api/misc.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/test/cpp/api/module.cpp b/test/cpp/api/module.cpp index 0789b42da44b..7612deac5cf5 100644 --- a/test/cpp/api/module.cpp +++ b/test/cpp/api/module.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include @@ -54,8 +54,8 @@ TEST_F(ModuleTest, ZeroGrad) { TEST_F(ModuleTest, ZeroGradWithUndefined) { struct TestModule : torch::nn::Module { TestModule() { - x = register_parameter("x", torch::ones(5, at::requires_grad())); - y = register_parameter("y", torch::ones(5, at::requires_grad())); + x = register_parameter("x", torch::ones(5, torch::requires_grad())); + y = register_parameter("y", torch::ones(5, torch::requires_grad())); } torch::Tensor x, y; }; @@ -194,7 +194,7 @@ TEST_F(ModuleTest, Conversion_MultiCUDA) { ASSERT_EQ(parameter->device().type(), torch::Device::Type::CUDA); ASSERT_EQ(parameter->device().index(), 0); } - module->to({at::kCUDA, 1}); + module->to({torch::kCUDA, 1}); for (auto& parameter : module->parameters()) { ASSERT_EQ(parameter->device().type(), torch::Device::Type::CUDA); ASSERT_EQ(parameter->device().index(), 1); diff --git a/test/cpp/api/modules.cpp b/test/cpp/api/modules.cpp index fd9416eb3b9b..3e67a9f26c03 100644 --- a/test/cpp/api/modules.cpp +++ b/test/cpp/api/modules.cpp @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include diff --git a/test/cpp/api/optim.cpp b/test/cpp/api/optim.cpp index 04f3c28411ad..9490960c548e 100644 --- a/test/cpp/api/optim.cpp +++ b/test/cpp/api/optim.cpp @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include diff --git a/test/cpp/api/optim_baseline.h b/test/cpp/api/optim_baseline.h index c1e3482a1609..4e192d2699f0 100644 --- a/test/cpp/api/optim_baseline.h +++ b/test/cpp/api/optim_baseline.h @@ -1,6 +1,6 @@ // @generated from test/cpp/api/optim_baseline.py -#include +#include #include diff --git a/test/cpp/api/optim_baseline.py b/test/cpp/api/optim_baseline.py index bc2e06fd6292..a5820becd957 100644 --- a/test/cpp/api/optim_baseline.py +++ b/test/cpp/api/optim_baseline.py @@ -9,7 +9,7 @@ import torch.optim HEADER = """ -#include +#include #include diff --git 
a/test/cpp/api/parallel.cpp b/test/cpp/api/parallel.cpp index 2b5a772a51b6..d4be5785ed76 100644 --- a/test/cpp/api/parallel.cpp +++ b/test/cpp/api/parallel.cpp @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include diff --git a/test/cpp/api/rnn.cpp b/test/cpp/api/rnn.cpp index e0d511fb0993..c6932bf848de 100644 --- a/test/cpp/api/rnn.cpp +++ b/test/cpp/api/rnn.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/test/cpp/api/sequential.cpp b/test/cpp/api/sequential.cpp index 19aab60d3194..3a042907765a 100644 --- a/test/cpp/api/sequential.cpp +++ b/test/cpp/api/sequential.cpp @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include diff --git a/test/cpp/api/serialize.cpp b/test/cpp/api/serialize.cpp index 858a6d40a17e..982ed40b10fb 100644 --- a/test/cpp/api/serialize.cpp +++ b/test/cpp/api/serialize.cpp @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include diff --git a/test/cpp/api/support.h b/test/cpp/api/support.h index 3d0ccfa21a1d..0e8c7af4f92b 100644 --- a/test/cpp/api/support.h +++ b/test/cpp/api/support.h @@ -5,7 +5,7 @@ #include #include -#include +#include #include #include diff --git a/test/cpp/api/tensor.cpp b/test/cpp/api/tensor.cpp index 1c1a7804312d..5f2ed0257209 100644 --- a/test/cpp/api/tensor.cpp +++ b/test/cpp/api/tensor.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include diff --git a/test/cpp/api/tensor_options.cpp b/test/cpp/api/tensor_options.cpp index 319d2ccccf44..1d0780c86a33 100644 --- a/test/cpp/api/tensor_options.cpp +++ b/test/cpp/api/tensor_options.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include diff --git a/test/cpp_extensions/cuda_extension.cpp b/test/cpp_extensions/cuda_extension.cpp index 9946b4f9cb97..ad7396fe7f45 100644 --- a/test/cpp_extensions/cuda_extension.cpp +++ b/test/cpp_extensions/cuda_extension.cpp @@ -5,10 +5,10 @@ // into one shared library. void sigmoid_add_cuda(const float* x, const float* y, float* output, int size); -at::Tensor sigmoid_add(at::Tensor x, at::Tensor y) { +torch::Tensor sigmoid_add(torch::Tensor x, torch::Tensor y) { AT_CHECK(x.type().is_cuda(), "x must be a CUDA tensor"); AT_CHECK(y.type().is_cuda(), "y must be a CUDA tensor"); - auto output = at::zeros_like(x); + auto output = torch::zeros_like(x); sigmoid_add_cuda( x.data(), y.data(), output.data(), output.numel()); return output; diff --git a/test/cpp_extensions/cudnn_extension.cpp b/test/cpp_extensions/cudnn_extension.cpp index 498e01a116a1..a626c82b5e6e 100644 --- a/test/cpp_extensions/cudnn_extension.cpp +++ b/test/cpp_extensions/cudnn_extension.cpp @@ -1,53 +1,53 @@ /* * CuDNN ReLU extension. Simple function but contains the general structure of * most CuDNN extensions: - * 1) Check arguments. at::check* functions provide a standard way to validate - * input and provide pretty errors. - * 2) Create descriptors. Most CuDNN functions require creating and setting a - * variety of descriptors. - * 3) Apply the CuDNN function. - * 4) Destroy your descriptors. - * 5) Return something (optional). + * 1) Check arguments. torch::check* functions provide a standard way to + * validate input and provide pretty errors. 2) Create descriptors. Most CuDNN + * functions require creating and setting a variety of descriptors. 3) Apply the + * CuDNN function. 4) Destroy your descriptors. 5) Return something (optional). 
*/ #include -#include // for TensorDescriptor #include // for CUDNN_CHECK +#include // for TensorDescriptor #include // for getCudnnHandle // Name of function in python module and name used for error messages by -// at::check* functions. +// torch::check* functions. const char* cudnn_relu_name = "cudnn_relu"; // Check arguments to cudnn_relu -void cudnn_relu_check(const at::Tensor& inputs, const at::Tensor& outputs) { +void cudnn_relu_check( + const torch::Tensor& inputs, + const torch::Tensor& outputs) { // Create TensorArgs. These record the names and positions of each tensor as a // parameter. - at::TensorArg arg_inputs(inputs, "inputs", 0); - at::TensorArg arg_outputs(outputs, "outputs", 1); + torch::TensorArg arg_inputs(inputs, "inputs", 0); + torch::TensorArg arg_outputs(outputs, "outputs", 1); // Check arguments. No need to return anything. These functions with throw an // error if they fail. Messages are populated using information from // TensorArgs. - at::checkContiguous(cudnn_relu_name, arg_inputs); - at::checkScalarType(cudnn_relu_name, arg_inputs, at::kFloat); - at::checkBackend(cudnn_relu_name, arg_inputs.tensor, at::Backend::CUDA); - at::checkContiguous(cudnn_relu_name, arg_outputs); - at::checkScalarType(cudnn_relu_name, arg_outputs, at::kFloat); - at::checkBackend(cudnn_relu_name, arg_outputs.tensor, at::Backend::CUDA); - at::checkSameSize(cudnn_relu_name, arg_inputs, arg_outputs); + torch::checkContiguous(cudnn_relu_name, arg_inputs); + torch::checkScalarType(cudnn_relu_name, arg_inputs, torch::kFloat); + torch::checkBackend(cudnn_relu_name, arg_inputs.tensor, torch::Backend::CUDA); + torch::checkContiguous(cudnn_relu_name, arg_outputs); + torch::checkScalarType(cudnn_relu_name, arg_outputs, torch::kFloat); + torch::checkBackend( + cudnn_relu_name, arg_outputs.tensor, torch::Backend::CUDA); + torch::checkSameSize(cudnn_relu_name, arg_inputs, arg_outputs); } -void cudnn_relu(const at::Tensor& inputs, const at::Tensor& outputs) { +void cudnn_relu(const torch::Tensor& inputs, const torch::Tensor& outputs) { // Most CuDNN extensions will follow a similar pattern. // Step 1: Check inputs. This will throw an error if inputs are invalid, so no // need to check return codes here. cudnn_relu_check(inputs, outputs); // Step 2: Create descriptors - cudnnHandle_t cuDnn = at::native::getCudnnHandle(); + cudnnHandle_t cuDnn = torch::native::getCudnnHandle(); // Note: 4 is minimum dim for a TensorDescriptor. Input and output are same // size and type and contiguous, so one descriptor is sufficient. 
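The ``torch::TensorArg`` and ``torch::check*`` helpers used in this extension are ATen's argument-checking utilities re-exported through the ``torch`` namespace, which is why the rename is a pure spelling change. A minimal CPU-side sketch of the same checking pattern follows; the op name and the float/contiguity requirements are illustrative, not taken from this diff.

.. code-block:: cpp

   #include <torch/torch.h>

   void my_op_check(const torch::Tensor& inputs, const torch::Tensor& outputs) {
     // TensorArgs record the name and position of each argument for error messages.
     torch::TensorArg arg_inputs(inputs, "inputs", 0);
     torch::TensorArg arg_outputs(outputs, "outputs", 1);
     // Each check throws with a descriptive message if it fails.
     torch::checkContiguous("my_op", arg_inputs);
     torch::checkScalarType("my_op", arg_inputs, torch::kFloat);
     torch::checkSameSize("my_op", arg_inputs, arg_outputs);
   }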
- at::native::TensorDescriptor input_tensor_desc(inputs, 4); + torch::native::TensorDescriptor input_tensor_desc(inputs, 4); cudnnActivationDescriptor_t activationDesc; // Note: Always check return value of cudnn functions using CUDNN_CHECK AT_CUDNN_CHECK(cudnnCreateActivationDescriptor(&activationDesc)); diff --git a/test/cpp_extensions/doubler.h b/test/cpp_extensions/doubler.h index afd4a007019d..4edd4de8fd6e 100644 --- a/test/cpp_extensions/doubler.h +++ b/test/cpp_extensions/doubler.h @@ -3,15 +3,15 @@ struct Doubler { Doubler(int A, int B) { tensor_ = - torch::ones({A, B}, torch::dtype(torch::kDouble).requires_grad(true)); + torch::ones({A, B}, torch::dtype(torch::kFloat64).requires_grad(true)); } - at::Tensor forward() { + torch::Tensor forward() { return tensor_ * 2; } - at::Tensor get() const { + torch::Tensor get() const { return tensor_; } private: - at::Tensor tensor_; + torch::Tensor tensor_; }; diff --git a/test/cpp_extensions/extension.cpp b/test/cpp_extensions/extension.cpp index e217b210860f..8aff0dd018a4 100644 --- a/test/cpp_extensions/extension.cpp +++ b/test/cpp_extensions/extension.cpp @@ -1,26 +1,26 @@ #include -at::Tensor sigmoid_add(at::Tensor x, at::Tensor y) { +torch::Tensor sigmoid_add(torch::Tensor x, torch::Tensor y) { return x.sigmoid() + y.sigmoid(); } struct MatrixMultiplier { MatrixMultiplier(int A, int B) { tensor_ = - torch::ones({A, B}, torch::dtype(torch::kDouble).requires_grad(true)); + torch::ones({A, B}, torch::dtype(torch::kFloat64).requires_grad(true)); } - at::Tensor forward(at::Tensor weights) { + torch::Tensor forward(torch::Tensor weights) { return tensor_.mm(weights); } - at::Tensor get() const { + torch::Tensor get() const { return tensor_; } private: - at::Tensor tensor_; + torch::Tensor tensor_; }; -bool function_taking_optional(c10::optional tensor) { +bool function_taking_optional(c10::optional tensor) { return tensor.has_value(); } diff --git a/test/custom_operator/op.cpp b/test/custom_operator/op.cpp index f63742091f18..c26bdb02325e 100644 --- a/test/custom_operator/op.cpp +++ b/test/custom_operator/op.cpp @@ -5,11 +5,11 @@ #include #include -std::vector custom_op( - at::Tensor tensor, +std::vector custom_op( + torch::Tensor tensor, double scalar, int64_t repeat) { - std::vector output; + std::vector output; output.reserve(repeat); for (int64_t i = 0; i < repeat; ++i) { output.push_back(tensor * scalar); diff --git a/test/custom_operator/op.h b/test/custom_operator/op.h index a05798ce52a2..b18a48edc10b 100644 --- a/test/custom_operator/op.h +++ b/test/custom_operator/op.h @@ -15,7 +15,7 @@ # endif // clang-format on -CUSTOM_OP_API std::vector custom_op( - at::Tensor tensor, +CUSTOM_OP_API std::vector custom_op( + torch::Tensor tensor, double scalar, int64_t repeat); diff --git a/test/custom_operator/test_custom_ops.cpp b/test/custom_operator/test_custom_ops.cpp index 645afec5c378..8f35e3d5bd14 100644 --- a/test/custom_operator/test_custom_ops.cpp +++ b/test/custom_operator/test_custom_ops.cpp @@ -33,7 +33,7 @@ void get_operator_from_registry_and_execute() { torch::jit::Stack stack; torch::jit::push(stack, torch::ones(5), 2.0, 3); op->getOperation()(stack); - std::vector output; + std::vector output; torch::jit::pop(stack, output); const auto manual = custom_op(torch::ones(5), 2.0, 3); @@ -99,19 +99,19 @@ void test_move_to_device(const std::string& path_to_exported_script_module) { torch::jit::load(path_to_exported_script_module); AT_ASSERT(module != nullptr); - helpers::check_all_parameters(*module, [](const at::Tensor& tensor) { + 
helpers::check_all_parameters(*module, [](const torch::Tensor& tensor) { return tensor.device().is_cpu(); }); - module->to(at::kCUDA); + module->to(torch::kCUDA); - helpers::check_all_parameters(*module, [](const at::Tensor& tensor) { + helpers::check_all_parameters(*module, [](const torch::Tensor& tensor) { return tensor.device().is_cuda(); }); - module->to(at::kCPU); + module->to(torch::kCPU); - helpers::check_all_parameters(*module, [](const at::Tensor& tensor) { + helpers::check_all_parameters(*module, [](const torch::Tensor& tensor) { return tensor.device().is_cpu(); }); } @@ -121,16 +121,16 @@ void test_move_to_dtype(const std::string& path_to_exported_script_module) { torch::jit::load(path_to_exported_script_module); AT_ASSERT(module != nullptr); - module->to(at::kInt); + module->to(torch::kInt); - helpers::check_all_parameters(*module, [](const at::Tensor& tensor) { - return tensor.dtype() == at::kInt; + helpers::check_all_parameters(*module, [](const torch::Tensor& tensor) { + return tensor.dtype() == torch::kInt; }); - module->to(at::kDouble); + module->to(torch::kDouble); - helpers::check_all_parameters(*module, [](const at::Tensor& tensor) { - return tensor.dtype() == at::kDouble; + helpers::check_all_parameters(*module, [](const torch::Tensor& tensor) { + return tensor.dtype() == torch::kDouble; }); } @@ -147,7 +147,7 @@ int main(int argc, const char* argv[]) { test_argument_checking_for_serialized_modules(path_to_exported_script_module); test_move_to_dtype(path_to_exported_script_module); - if (at::globalContext().getNumGPUs() > 0) { + if (torch::globalContext().getNumGPUs() > 0) { test_move_to_device(path_to_exported_script_module); } diff --git a/test/test_cpp_extensions.py b/test/test_cpp_extensions.py index 185356b04187..0d44cdc35649 100755 --- a/test/test_cpp_extensions.py +++ b/test/test_cpp_extensions.py @@ -149,7 +149,7 @@ class TestCppExtension(common.TestCase): def test_inline_jit_compile_extension_with_functions_as_list(self): cpp_source = ''' - at::Tensor tanh_add(at::Tensor x, at::Tensor y) { + torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) { return x.tanh() + y.tanh(); } ''' @@ -170,7 +170,7 @@ class TestCppExtension(common.TestCase): def test_inline_jit_compile_extension_with_functions_as_dict(self): cpp_source = ''' - at::Tensor tanh_add(at::Tensor x, at::Tensor y) { + torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) { return x.tanh() + y.tanh(); } ''' @@ -186,14 +186,14 @@ class TestCppExtension(common.TestCase): def test_inline_jit_compile_extension_multiple_sources_and_no_functions(self): cpp_source1 = ''' - at::Tensor sin_add(at::Tensor x, at::Tensor y) { + torch::Tensor sin_add(torch::Tensor x, torch::Tensor y) { return x.sin() + y.sin(); } ''' cpp_source2 = ''' #include - at::Tensor sin_add(at::Tensor x, at::Tensor y); + torch::Tensor sin_add(torch::Tensor x, torch::Tensor y); PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("sin_add", &sin_add, "sin(x) + sin(y)"); } @@ -224,8 +224,8 @@ class TestCppExtension(common.TestCase): } } - at::Tensor cos_add(at::Tensor x, at::Tensor y) { - auto output = at::zeros_like(x); + torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) { + auto output = torch::zeros_like(x); const int threads = 1024; const int blocks = (output.numel() + threads - 1) / threads; cos_add_kernel<<>>(x.data(), y.data(), output.data(), output.numel()); @@ -234,7 +234,7 @@ class TestCppExtension(common.TestCase): ''' # Here, the C++ source need only declare the function signature. 
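The ``test_move_to_device`` and ``test_move_to_dtype`` checks above now spell device and dtype constants as ``torch::kCUDA``, ``torch::kCPU``, ``torch::kInt`` and ``torch::kDouble``. Those constants behave identically on plain tensors, which is a quick way to sanity-check the new spellings without a serialized script module. A small sketch with arbitrary tensor values, reusing the same GPU check the test harness above uses:

.. code-block:: cpp

   #include <torch/torch.h>

   int main() {
     torch::Tensor t = torch::ones({2, 2});
     t = t.to(torch::kDouble);
     AT_ASSERT(t.dtype() == torch::kDouble);
     AT_ASSERT(t.device().is_cpu());
     if (torch::globalContext().getNumGPUs() > 0) {
       t = t.to(torch::kCUDA);
       AT_ASSERT(t.device().is_cuda());
     }
   }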
- cpp_source = 'at::Tensor cos_add(at::Tensor x, at::Tensor y);' + cpp_source = 'torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);' module = torch.utils.cpp_extension.load_inline( name='inline_jit_extension_cuda', @@ -258,7 +258,7 @@ class TestCppExtension(common.TestCase): def test_lenient_flag_handling_in_jit_extensions(self): cpp_source = ''' - at::Tensor tanh_add(at::Tensor x, at::Tensor y) { + torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) { return x.tanh() + y.tanh(); } ''' @@ -303,8 +303,8 @@ class TestCppExtension(common.TestCase): } } - at::Tensor half_test(at::Tensor input) { - auto output = at::empty(1, input.options().dtype(at::kFloat)); + torch::Tensor half_test(torch::Tensor input) { + auto output = torch::empty(1, input.options().dtype(torch::kFloat)); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "half_test", [&] { half_test_kernel<<<1, 1>>>( input.data(), @@ -316,7 +316,7 @@ class TestCppExtension(common.TestCase): module = torch.utils.cpp_extension.load_inline( name='half_test_extension', - cpp_sources='at::Tensor half_test(at::Tensor input);', + cpp_sources='torch::Tensor half_test(torch::Tensor input);', cuda_sources=cuda_source, functions=['half_test'], verbose=True) diff --git a/torch/csrc/api/include/torch/all.h b/torch/csrc/api/include/torch/all.h index dcd2367873de..bdce7585682f 100644 --- a/torch/csrc/api/include/torch/all.h +++ b/torch/csrc/api/include/torch/all.h @@ -6,5 +6,5 @@ #include #include #include -#include +#include #include diff --git a/torch/csrc/api/include/torch/data/dataloader.h b/torch/csrc/api/include/torch/data/dataloader.h index 62bcf382ca1a..95a588ec7027 100644 --- a/torch/csrc/api/include/torch/data/dataloader.h +++ b/torch/csrc/api/include/torch/data/dataloader.h @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/data/dataloader_options.h b/torch/csrc/api/include/torch/data/dataloader_options.h index 305ddd040e8b..e16e2a59bbef 100644 --- a/torch/csrc/api/include/torch/data/dataloader_options.h +++ b/torch/csrc/api/include/torch/data/dataloader_options.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/data/datasets/base.h b/torch/csrc/api/include/torch/data/datasets/base.h index b6a486dd705b..e31618ffd49c 100644 --- a/torch/csrc/api/include/torch/data/datasets/base.h +++ b/torch/csrc/api/include/torch/data/datasets/base.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include diff --git a/torch/csrc/api/include/torch/data/datasets/map.h b/torch/csrc/api/include/torch/data/datasets/map.h index 07d0f4400a39..0429ced62a03 100644 --- a/torch/csrc/api/include/torch/data/datasets/map.h +++ b/torch/csrc/api/include/torch/data/datasets/map.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include diff --git a/torch/csrc/api/include/torch/data/datasets/mnist.h b/torch/csrc/api/include/torch/data/datasets/mnist.h index ddca9f463d88..de8c410415d1 100644 --- a/torch/csrc/api/include/torch/data/datasets/mnist.h +++ b/torch/csrc/api/include/torch/data/datasets/mnist.h @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/data/datasets/tensor.h b/torch/csrc/api/include/torch/data/datasets/tensor.h index 8b83ef481e17..3059dfcf8108 100644 --- a/torch/csrc/api/include/torch/data/datasets/tensor.h +++ b/torch/csrc/api/include/torch/data/datasets/tensor.h @@ -2,7 +2,7 @@ #include #include -#include +#include #include 
#include diff --git a/torch/csrc/api/include/torch/data/detail/data_shuttle.h b/torch/csrc/api/include/torch/data/detail/data_shuttle.h index 37a4b3657ff6..4f087a01f433 100644 --- a/torch/csrc/api/include/torch/data/detail/data_shuttle.h +++ b/torch/csrc/api/include/torch/data/detail/data_shuttle.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/data/detail/queue.h b/torch/csrc/api/include/torch/data/detail/queue.h index 9918b7b4fb54..332914bb1405 100644 --- a/torch/csrc/api/include/torch/data/detail/queue.h +++ b/torch/csrc/api/include/torch/data/detail/queue.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include diff --git a/torch/csrc/api/include/torch/data/detail/sequencers.h b/torch/csrc/api/include/torch/data/detail/sequencers.h index 6da98a972d0c..f8c26f7acb1a 100644 --- a/torch/csrc/api/include/torch/data/detail/sequencers.h +++ b/torch/csrc/api/include/torch/data/detail/sequencers.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/data/example.h b/torch/csrc/api/include/torch/data/example.h index dfd1749baf07..b43ef2ca1955 100644 --- a/torch/csrc/api/include/torch/data/example.h +++ b/torch/csrc/api/include/torch/data/example.h @@ -1,6 +1,6 @@ #pragma once -#include +#include namespace torch { namespace data { diff --git a/torch/csrc/api/include/torch/data/iterator.h b/torch/csrc/api/include/torch/data/iterator.h index 7196c89df40d..bf5b7429bf48 100644 --- a/torch/csrc/api/include/torch/data/iterator.h +++ b/torch/csrc/api/include/torch/data/iterator.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include diff --git a/torch/csrc/api/include/torch/data/samplers/base.h b/torch/csrc/api/include/torch/data/samplers/base.h index c74a2acf0d58..2c7d6b4c53de 100644 --- a/torch/csrc/api/include/torch/data/samplers/base.h +++ b/torch/csrc/api/include/torch/data/samplers/base.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/data/samplers/random.h b/torch/csrc/api/include/torch/data/samplers/random.h index 97f47f980fa6..cb0de8890e70 100644 --- a/torch/csrc/api/include/torch/data/samplers/random.h +++ b/torch/csrc/api/include/torch/data/samplers/random.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/data/samplers/sequential.h b/torch/csrc/api/include/torch/data/samplers/sequential.h index e870b39589f8..21dc012d2252 100644 --- a/torch/csrc/api/include/torch/data/samplers/sequential.h +++ b/torch/csrc/api/include/torch/data/samplers/sequential.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/data/samplers/stream.h b/torch/csrc/api/include/torch/data/samplers/stream.h index de3d5fa7ad09..68148a3f9362 100644 --- a/torch/csrc/api/include/torch/data/samplers/stream.h +++ b/torch/csrc/api/include/torch/data/samplers/stream.h @@ -2,7 +2,7 @@ #include #include -#include +#include #include diff --git a/torch/csrc/api/include/torch/data/transforms/base.h b/torch/csrc/api/include/torch/data/transforms/base.h index 19d21b7875fb..0bc1f2ea7b14 100644 --- a/torch/csrc/api/include/torch/data/transforms/base.h +++ b/torch/csrc/api/include/torch/data/transforms/base.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/data/transforms/stack.h b/torch/csrc/api/include/torch/data/transforms/stack.h index 
83beb4ee4594..4be1bd920b71 100644 --- a/torch/csrc/api/include/torch/data/transforms/stack.h +++ b/torch/csrc/api/include/torch/data/transforms/stack.h @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/data/transforms/tensor.h b/torch/csrc/api/include/torch/data/transforms/tensor.h index 8c053ad9e0e3..c1fed2011636 100644 --- a/torch/csrc/api/include/torch/data/transforms/tensor.h +++ b/torch/csrc/api/include/torch/data/transforms/tensor.h @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/detail/static.h b/torch/csrc/api/include/torch/detail/static.h index b5a3e542aaaa..c85fc7fff4b4 100644 --- a/torch/csrc/api/include/torch/detail/static.h +++ b/torch/csrc/api/include/torch/detail/static.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/cloneable.h b/torch/csrc/api/include/torch/nn/cloneable.h index b4840fee8f75..86b3308fe9d7 100644 --- a/torch/csrc/api/include/torch/nn/cloneable.h +++ b/torch/csrc/api/include/torch/nn/cloneable.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/cursor.h b/torch/csrc/api/include/torch/nn/cursor.h index 072c4e20a9ab..0e3989e80c45 100644 --- a/torch/csrc/api/include/torch/nn/cursor.h +++ b/torch/csrc/api/include/torch/nn/cursor.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/init.h b/torch/csrc/api/include/torch/nn/init.h index 0fd68df5bbd0..9bc7b8fe8a25 100644 --- a/torch/csrc/api/include/torch/nn/init.h +++ b/torch/csrc/api/include/torch/nn/init.h @@ -1,6 +1,6 @@ #pragma once -#include +#include namespace torch { namespace nn { diff --git a/torch/csrc/api/include/torch/nn/module.h b/torch/csrc/api/include/torch/nn/module.h index 9fe9eaf19bbf..49b8fe300cdc 100644 --- a/torch/csrc/api/include/torch/nn/module.h +++ b/torch/csrc/api/include/torch/nn/module.h @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include diff --git a/torch/csrc/api/include/torch/nn/modules/any.h b/torch/csrc/api/include/torch/nn/modules/any.h index 4983ad711a02..769b662c9ef4 100644 --- a/torch/csrc/api/include/torch/nn/modules/any.h +++ b/torch/csrc/api/include/torch/nn/modules/any.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/modules/batchnorm.h b/torch/csrc/api/include/torch/nn/modules/batchnorm.h index ff5cebc7ed6b..c0ee0a316ef1 100644 --- a/torch/csrc/api/include/torch/nn/modules/batchnorm.h +++ b/torch/csrc/api/include/torch/nn/modules/batchnorm.h @@ -2,7 +2,7 @@ #include #include -#include +#include #include diff --git a/torch/csrc/api/include/torch/nn/modules/conv.h b/torch/csrc/api/include/torch/nn/modules/conv.h index c6e6a2392dbe..d558ba454e64 100644 --- a/torch/csrc/api/include/torch/nn/modules/conv.h +++ b/torch/csrc/api/include/torch/nn/modules/conv.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/modules/dropout.h b/torch/csrc/api/include/torch/nn/modules/dropout.h index 48b89642864e..06468493a121 100644 --- a/torch/csrc/api/include/torch/nn/modules/dropout.h +++ b/torch/csrc/api/include/torch/nn/modules/dropout.h @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/modules/embedding.h 
b/torch/csrc/api/include/torch/nn/modules/embedding.h index c2a701eb3ede..c1293730842d 100644 --- a/torch/csrc/api/include/torch/nn/modules/embedding.h +++ b/torch/csrc/api/include/torch/nn/modules/embedding.h @@ -2,7 +2,7 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/modules/functional.h b/torch/csrc/api/include/torch/nn/modules/functional.h index 98f72963686f..962bee17db7a 100644 --- a/torch/csrc/api/include/torch/nn/modules/functional.h +++ b/torch/csrc/api/include/torch/nn/modules/functional.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/modules/linear.h b/torch/csrc/api/include/torch/nn/modules/linear.h index 3e8044f57158..99170f025b7a 100644 --- a/torch/csrc/api/include/torch/nn/modules/linear.h +++ b/torch/csrc/api/include/torch/nn/modules/linear.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/modules/rnn.h b/torch/csrc/api/include/torch/nn/modules/rnn.h index ba87afbd0111..96496976068e 100644 --- a/torch/csrc/api/include/torch/nn/modules/rnn.h +++ b/torch/csrc/api/include/torch/nn/modules/rnn.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/modules/sequential.h b/torch/csrc/api/include/torch/nn/modules/sequential.h index a5e3690f7c31..779e32ad2514 100644 --- a/torch/csrc/api/include/torch/nn/modules/sequential.h +++ b/torch/csrc/api/include/torch/nn/modules/sequential.h @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include diff --git a/torch/csrc/api/include/torch/nn/parallel/data_parallel.h b/torch/csrc/api/include/torch/nn/parallel/data_parallel.h index 01b7dfdee3fb..adbab17b2288 100644 --- a/torch/csrc/api/include/torch/nn/parallel/data_parallel.h +++ b/torch/csrc/api/include/torch/nn/parallel/data_parallel.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/nn/pimpl.h b/torch/csrc/api/include/torch/nn/pimpl.h index 41a172f895f6..778ee5e68004 100644 --- a/torch/csrc/api/include/torch/nn/pimpl.h +++ b/torch/csrc/api/include/torch/nn/pimpl.h @@ -2,7 +2,7 @@ #include #include -#include +#include #include diff --git a/torch/csrc/api/include/torch/optim/adagrad.h b/torch/csrc/api/include/torch/optim/adagrad.h index ebfaf9daf41c..87bc29b137a5 100644 --- a/torch/csrc/api/include/torch/optim/adagrad.h +++ b/torch/csrc/api/include/torch/optim/adagrad.h @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/optim/rmsprop.h b/torch/csrc/api/include/torch/optim/rmsprop.h index 6643d045c258..ca2f45bcb8b7 100644 --- a/torch/csrc/api/include/torch/optim/rmsprop.h +++ b/torch/csrc/api/include/torch/optim/rmsprop.h @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/optim/serialize.h b/torch/csrc/api/include/torch/optim/serialize.h index 1c85fa74e006..0aec75c610dd 100644 --- a/torch/csrc/api/include/torch/optim/serialize.h +++ b/torch/csrc/api/include/torch/optim/serialize.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/optim/sgd.h b/torch/csrc/api/include/torch/optim/sgd.h index e5b8a4fc9b90..7988586b02cd 100644 --- a/torch/csrc/api/include/torch/optim/sgd.h +++ b/torch/csrc/api/include/torch/optim/sgd.h 
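The ``torch::nn`` and ``torch::optim`` headers touched above (``linear.h``, ``rnn.h``, ``sequential.h``, ``adagrad.h``, ``rmsprop.h``, ``sgd.h``, ``serialize.h``) only swap ``torch/tensor.h`` for ``torch/types.h``; the training API itself is untouched. For orientation, a training step with ``torch::optim::SGD`` looks roughly like this; the layer sizes, learning rate and loss are placeholders:

.. code-block:: cpp

   #include <torch/torch.h>

   int main() {
     torch::nn::Linear net(4, 3);
     torch::optim::SGD optimizer(
         net->parameters(), torch::optim::SGDOptions(/*lr=*/0.01).momentum(0.9));

     torch::Tensor input = torch::randn({8, 4});
     torch::Tensor target = torch::randn({8, 3});

     for (int step = 0; step < 10; ++step) {
       optimizer.zero_grad();
       torch::Tensor loss = torch::mse_loss(net->forward(input), target);
       loss.backward();
       optimizer.step();
     }
   }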
@@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/python.h b/torch/csrc/api/include/torch/python.h index e35db625b40b..892f1ad7451d 100644 --- a/torch/csrc/api/include/torch/python.h +++ b/torch/csrc/api/include/torch/python.h @@ -1,11 +1,11 @@ #pragma once #include -#include +#include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/include/torch/tensor.h b/torch/csrc/api/include/torch/types.h similarity index 95% rename from torch/csrc/api/include/torch/tensor.h rename to torch/csrc/api/include/torch/types.h index 89e6ddde8243..185989619b6c 100644 --- a/torch/csrc/api/include/torch/tensor.h +++ b/torch/csrc/api/include/torch/types.h @@ -13,9 +13,6 @@ using namespace at; // NOLINT using c10::optional; using c10::nullopt; -using c10::optional; -using c10::nullopt; - using Dtype = at::ScalarType; /// Fixed width dtypes. diff --git a/torch/csrc/api/src/data/datasets/mnist.cpp b/torch/csrc/api/src/data/datasets/mnist.cpp index d2c1c5b27999..0ef3fbcb2d1c 100644 --- a/torch/csrc/api/src/data/datasets/mnist.cpp +++ b/torch/csrc/api/src/data/datasets/mnist.cpp @@ -1,7 +1,7 @@ #include #include -#include +#include #include diff --git a/torch/csrc/api/src/data/samplers/random.cpp b/torch/csrc/api/src/data/samplers/random.cpp index 5cc16d987cbf..4ea975b035be 100644 --- a/torch/csrc/api/src/data/samplers/random.cpp +++ b/torch/csrc/api/src/data/samplers/random.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/src/data/samplers/sequential.cpp b/torch/csrc/api/src/data/samplers/sequential.cpp index 83260bf98df4..3072346115c0 100644 --- a/torch/csrc/api/src/data/samplers/sequential.cpp +++ b/torch/csrc/api/src/data/samplers/sequential.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/src/data/samplers/stream.cpp b/torch/csrc/api/src/data/samplers/stream.cpp index 5e41fb5d0ad6..2ac175541384 100644 --- a/torch/csrc/api/src/data/samplers/stream.cpp +++ b/torch/csrc/api/src/data/samplers/stream.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include diff --git a/torch/csrc/api/src/nn/cursor.cpp b/torch/csrc/api/src/nn/cursor.cpp index 4cf65145ed97..c3fe075456fe 100644 --- a/torch/csrc/api/src/nn/cursor.cpp +++ b/torch/csrc/api/src/nn/cursor.cpp @@ -1,7 +1,7 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/src/nn/init.cpp b/torch/csrc/api/src/nn/init.cpp index f6f977fe4104..a2f49bb98c35 100644 --- a/torch/csrc/api/src/nn/init.cpp +++ b/torch/csrc/api/src/nn/init.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include diff --git a/torch/csrc/api/src/nn/modules/batchnorm.cpp b/torch/csrc/api/src/nn/modules/batchnorm.cpp index 4896dcfb6a98..a8aa886f4a1d 100644 --- a/torch/csrc/api/src/nn/modules/batchnorm.cpp +++ b/torch/csrc/api/src/nn/modules/batchnorm.cpp @@ -1,7 +1,7 @@ #include #include -#include +#include #include diff --git a/torch/csrc/api/src/nn/modules/conv.cpp b/torch/csrc/api/src/nn/modules/conv.cpp index e4c2ff35050a..14c804e269c9 100644 --- a/torch/csrc/api/src/nn/modules/conv.cpp +++ b/torch/csrc/api/src/nn/modules/conv.cpp @@ -1,7 +1,7 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/src/nn/modules/dropout.cpp b/torch/csrc/api/src/nn/modules/dropout.cpp index f980a4c7e058..b7887f4f04a8 100644 --- a/torch/csrc/api/src/nn/modules/dropout.cpp +++ b/torch/csrc/api/src/nn/modules/dropout.cpp 
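The central change in this section is the rename of ``torch/csrc/api/include/torch/tensor.h`` to ``torch/types.h``, which also drops a duplicated pair of ``using c10::optional``/``using c10::nullopt`` declarations. Code that included ``torch/tensor.h`` directly should now include ``torch/types.h``; code that goes through ``torch/torch.h`` is unaffected. A sketch of the user-side view, with a purely illustrative helper function:

.. code-block:: cpp

   // Previously: #include <torch/tensor.h>
   #include <torch/types.h>

   torch::Tensor make_mask(int64_t n) {
     // The torch::Dtype alias (for at::ScalarType) and the fixed-width dtype
     // constants such as torch::kFloat64 come from this header.
     torch::Tensor mask = torch::zeros({n}, torch::dtype(torch::kFloat64));
     AT_ASSERT(mask.scalar_type() == torch::kFloat64);
     return mask;
   }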
@@ -1,6 +1,6 @@ #include -#include +#include #include diff --git a/torch/csrc/api/src/nn/modules/embedding.cpp b/torch/csrc/api/src/nn/modules/embedding.cpp index 3911da67cbe4..d09afe057b84 100644 --- a/torch/csrc/api/src/nn/modules/embedding.cpp +++ b/torch/csrc/api/src/nn/modules/embedding.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include diff --git a/torch/csrc/api/src/nn/modules/functional.cpp b/torch/csrc/api/src/nn/modules/functional.cpp index 1820e8f4df23..e35a2e60d2af 100644 --- a/torch/csrc/api/src/nn/modules/functional.cpp +++ b/torch/csrc/api/src/nn/modules/functional.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include diff --git a/torch/csrc/api/src/nn/modules/linear.cpp b/torch/csrc/api/src/nn/modules/linear.cpp index d95d6b3e90bf..311558311a2d 100644 --- a/torch/csrc/api/src/nn/modules/linear.cpp +++ b/torch/csrc/api/src/nn/modules/linear.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include diff --git a/torch/csrc/api/src/nn/modules/rnn.cpp b/torch/csrc/api/src/nn/modules/rnn.cpp index c828c49f6866..a8b49eae2aab 100644 --- a/torch/csrc/api/src/nn/modules/rnn.cpp +++ b/torch/csrc/api/src/nn/modules/rnn.cpp @@ -1,7 +1,7 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/src/optim/optimizer.cpp b/torch/csrc/api/src/optim/optimizer.cpp index 297a1341b87f..fe89c75690ac 100644 --- a/torch/csrc/api/src/optim/optimizer.cpp +++ b/torch/csrc/api/src/optim/optimizer.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/src/optim/serialize.cpp b/torch/csrc/api/src/optim/serialize.cpp index 24f9096c6ac3..808b604a2484 100644 --- a/torch/csrc/api/src/optim/serialize.cpp +++ b/torch/csrc/api/src/optim/serialize.cpp @@ -1,7 +1,7 @@ #include #include -#include +#include #include #include diff --git a/torch/csrc/api/src/optim/sgd.cpp b/torch/csrc/api/src/optim/sgd.cpp index 8d03872b0d18..0a927b78a13e 100644 --- a/torch/csrc/api/src/optim/sgd.cpp +++ b/torch/csrc/api/src/optim/sgd.cpp @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include diff --git a/torch/csrc/api/src/serialize/input-archive.cpp b/torch/csrc/api/src/serialize/input-archive.cpp index 9d7b83c8168c..1e55182383f8 100644 --- a/torch/csrc/api/src/serialize/input-archive.cpp +++ b/torch/csrc/api/src/serialize/input-archive.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include diff --git a/torch/csrc/api/src/serialize/output-archive.cpp b/torch/csrc/api/src/serialize/output-archive.cpp index 7b516265c870..5dd8cd644eb2 100644 --- a/torch/csrc/api/src/serialize/output-archive.cpp +++ b/torch/csrc/api/src/serialize/output-archive.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include diff --git a/torch/csrc/api/src/serialize/tensor.cpp b/torch/csrc/api/src/serialize/tensor.cpp index 9fc92cf779e0..7b67d7f07261 100644 --- a/torch/csrc/api/src/serialize/tensor.cpp +++ b/torch/csrc/api/src/serialize/tensor.cpp @@ -1,4 +1,4 @@ -#include +#include #include namespace torch { diff --git a/torch/csrc/jit/type.h b/torch/csrc/jit/type.h index a236e0aa985f..0d6f728690d9 100644 --- a/torch/csrc/jit/type.h +++ b/torch/csrc/jit/type.h @@ -423,7 +423,6 @@ private: template struct SingleElementType : public Type { static const TypeKind Kind = K; - static constexpr bool is_singleton = true; TypePtr getElementType() const { return elem; } @@ -488,9 +487,6 @@ struct FutureType; using FutureTypePtr = std::shared_ptr; struct TORCH_API FutureType : public Type { - // It's 
not exactly a singleton, but there should be exactly once instance of - // Future[T] for every T - static constexpr bool is_singleton = true; friend struct Type; template static FutureTypePtr create(TypePtr elem) { diff --git a/torch/script.h b/torch/script.h index 982d3c9170c9..8c8cc5f563f6 100644 --- a/torch/script.h +++ b/torch/script.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py index 28f0d6c08902..0b715d1010ad 100644 --- a/torch/utils/cpp_extension.py +++ b/torch/utils/cpp_extension.py @@ -633,13 +633,13 @@ def load_inline(name, as its docstring. The sources in ``cuda_sources`` are concatenated into a separate ``.cu`` - file and prepended with ``ATen/ATen.h``, ``cuda.h`` and ``cuda_runtime.h`` - includes. The ``.cpp`` and ``.cu`` files are compiled separately, but - ultimately linked into a single library. Note that no bindings are - generated for functions in ``cuda_sources`` per se. To bind to a CUDA - kernel, you must create a C++ function that calls it, and either declare or - define this C++ function in one of the ``cpp_sources`` (and include its - name in ``functions``). + file and prepended with ``torch/types.h``, ``cuda.h`` and + ``cuda_runtime.h`` includes. The ``.cpp`` and ``.cu`` files are compiled + separately, but ultimately linked into a single library. Note that no + bindings are generated for functions in ``cuda_sources`` per se. To bind + to a CUDA kernel, you must create a C++ function that calls it, and either + declare or define this C++ function in one of the ``cpp_sources`` (and + include its name in ``functions``). See :func:`load` for a description of arguments omitted below. @@ -702,7 +702,7 @@ def load_inline(name, sources = [cpp_source_path] if cuda_sources: - cuda_sources.insert(0, '#include ') + cuda_sources.insert(0, '#include ') cuda_sources.insert(1, '#include ') cuda_sources.insert(2, '#include ')
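After this change, the ``.cu`` file that ``load_inline`` concatenates from ``cuda_sources`` is prepended with a ``torch/types.h`` include (plus ``cuda.h`` and ``cuda_runtime.h``) instead of ``ATen/ATen.h``, so CUDA sources can spell their wrappers with ``torch::Tensor`` directly, while the C++ side only needs the wrapper's declaration. A sketch of such a pair of sources; the wrapper name mirrors the test above and the kernel launch is elided:

.. code-block:: cpp

   // Effective prelude of the generated .cu file:
   //   #include <torch/types.h>
   //   #include <cuda.h>
   //   #include <cuda_runtime.h>

   // Host-side wrapper defined in cuda_sources: allocate the output and launch
   // the kernel declared elsewhere in the same source.
   torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) {
     auto output = torch::zeros_like(x);
     const int threads = 1024;
     const int blocks = (output.numel() + threads - 1) / threads;
     // cos_add_kernel<<<blocks, threads>>>(...);  // CUDA launch, omitted here
     return output;
   }

   // cpp_sources only needs the matching declaration; load_inline generates the
   // Python binding when "cos_add" is listed in `functions`.
   torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);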