Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
Revert "[12/N] Apply clang-tidy and fix warnings in headers of torch/csrc (#116486)"
This reverts commit 5aa258eb09d5ecd62aea4d2bd02bbfa5eda0d554. Reverted https://github.com/pytorch/pytorch/pull/116486 on behalf of https://github.com/izaitsevfb due to Reverting, as it depends on https://github.com/pytorch/pytorch/pull/116353, which has to be reverted ([comment](https://github.com/pytorch/pytorch/pull/116486#issuecomment-1876042948))
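Most of the hunks below undo one mechanical change: C++17 nested namespace definitions (the form produced by clang-tidy's modernize-concat-nested-namespaces check) are expanded back into the pre-C++17 nested-block style. The two spellings declare exactly the same entities; a minimal illustration, not taken from the diff itself:

// C++17 concatenated form (the style being reverted away from):
namespace torch::autograd::impl {
void f();
} // namespace torch::autograd::impl

// Pre-C++17 equivalent (the style this revert restores):
namespace torch {
namespace autograd {
namespace impl {
void f();
} // namespace impl
} // namespace autograd
} // namespace torch

Either way the declared name is torch::autograd::impl::f; only the language level required to compile it differs.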
@@ -145,7 +145,7 @@ extern PyObject *THPException_FatalError, *THPException_LinAlgError,
 // Throwing this exception means that the python error flags have been already
 // set and control should be immediately returned to the interpreter.
 struct python_error : public std::exception {
-  python_error() {}
+  python_error() : type(nullptr), value(nullptr), traceback(nullptr) {}
 
   python_error(const python_error& other)
       : type(other.type),
@@ -244,9 +244,9 @@ struct python_error : public std::exception {
     PyErr_Restore(type, value, traceback);
   }
 
-  PyObject* type{nullptr};
-  PyObject* value{nullptr};
-  PyObject* traceback{nullptr};
+  PyObject* type;
+  PyObject* value;
+  PyObject* traceback;
 
   // Message to return to the user when 'what()' is invoked.
   std::string message;
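The two hunks above trade C++11 in-class default member initializers for an explicit constructor initializer list. Both forms leave the three PyObject* members null on default construction; a self-contained sketch with hypothetical types, not from the PyTorch sources:

// In-class default member initializers (the form being removed):
struct example_a {
  example_a() {}
  void* p{nullptr};
  int n{0};
};

// Constructor initializer list (the form being restored):
struct example_b {
  example_b() : p(nullptr), n(0) {}
  void* p;
  int n;
};

Both default-construct to the same state; the in-class form additionally covers any constructor that forgets to mention a member.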
@@ -12,7 +12,10 @@
 #include <ATen/ATen.h>
 #include <torch/csrc/autograd/generated/Functions.h>
 
-namespace torch::autograd::generated::details {
+namespace torch {
+namespace autograd {
+namespace generated {
+namespace details {
 
 extern const char* kCudnnDoubleBackwardMsg;
 
@@ -1098,4 +1101,7 @@ mkldnn_rnn_layer_differentiable_backward(
 
 Tensor values_backward(const Tensor& grad, const Tensor& self);
 
-} // namespace torch::autograd::generated::details
+} // namespace details
+} // namespace generated
+} // namespace autograd
+} // namespace torch
@@ -3,8 +3,10 @@
 #include <c10/core/InferenceMode.h>
 #include <torch/csrc/Export.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 using InferenceMode = c10::InferenceMode;
 
 }
+} // namespace torch
@@ -17,9 +17,14 @@
 #include <torch/csrc/autograd/jit_decomp_interface.h>
 #include <torch/csrc/utils/variadic.h>
 
 #include <array>
 #include <cstddef>
 #include <functional>
 #include <initializer_list>
 #include <memory>
 #include <stdexcept>
 #include <string>
 #include <tuple>
 #include <utility>
 #include <vector>
 
@@ -112,8 +117,8 @@ inline void rebase_history(Variable& var, std::shared_ptr<Node> grad_fn) {
 }
 
 inline void rebase_history(
-    const std::vector<Variable>& vars,
-    const std::shared_ptr<Node>& grad_fn) {
+    std::vector<Variable>&& vars,
+    std::shared_ptr<Node> grad_fn) {
   if (grad_fn) {
     for (auto& var : vars) {
       if (var.defined()) {
@@ -132,7 +137,6 @@ inline void increment_version(const at::Tensor& t) {
 
 struct Flatten : IterArgs<Flatten> {
   Flatten(variable_list& out) : out(out) {}
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   variable_list& out;
   void operator()(const at::Tensor& x) {
     out.emplace_back(x);
@@ -4,7 +4,8 @@
 #include <memory>
 #include <string>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 // forward declaration of Node from function.h
 struct Node;
@@ -68,4 +69,5 @@ struct TORCH_API AnomalyMetadata {
   std::shared_ptr<Node> parent_;
 };
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -2,7 +2,8 @@
 
 #include <torch/csrc/autograd/variable.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 /// Computes the sum of gradients of given tensors with respect to graph leaves.
 ///
@@ -101,4 +102,5 @@ TORCH_API uint64_t enter_dual_level();
 TORCH_API void exit_dual_level(uint64_t level);
 
 } // namespace forward_ad
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -2,7 +2,8 @@
 
 #include <torch/library.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 // Default DispatchKey::Autograd fallback for built-in operators.
 // Can be registered for custom operators.
@@ -29,4 +30,5 @@ enum class AutogradFallbackMode {
 TORCH_API void setAutogradFallbackMode(AutogradFallbackMode mode);
 TORCH_API AutogradFallbackMode getAutogradFallbackMode();
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -3,7 +3,8 @@
 #include <functional>
 #include <memory>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 using hooks_list =
     std::vector<std::function<at::TensorBase(const at::TensorBase&)>>;
@@ -26,4 +27,5 @@ struct CppFunctionSingleTensorPreHook : public FunctionPreHook {
   size_t value_idx_;
 };
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -8,7 +8,8 @@
 #include <torch/csrc/autograd/variable.h>
 #include <vector>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 using optional_variable_list = std::vector<c10::optional<Variable>>;
 using _jvp_fn_t = std::function<variable_list(variable_list, variable_list)>;
@@ -96,7 +97,7 @@ struct TORCH_API Function {
   // the parameter X.
   template <typename X = T, typename... Args>
   static auto apply(Args&&... args)
-      -> std::enable_if_t<std::is_same_v<X, T>, forward_t<X, Args...>>;
+      -> std::enable_if_t<std::is_same<X, T>::value, forward_t<X, Args...>>;
 };
 
 /// Context to save information during `forward` that can be accessed in
@@ -227,8 +228,8 @@ inline void extract_vars(
 }
 
 template <typename T>
-std::enable_if_t<std::is_same_v<T, variable_list>, T> to_output_type(
-    std::vector<c10::optional<Variable>>& output_list) {
+typename std::enable_if<std::is_same<T, variable_list>::value, T>::type
+to_output_type(std::vector<c10::optional<Variable>>& output_list) {
   // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   variable_list result;
   std::transform(
@@ -240,8 +241,8 @@ std::enable_if_t<std::is_same_v<T, variable_list>, T> to_output_type(
 }
 
 template <typename T>
-std::enable_if_t<std::is_same_v<T, Variable>, T> to_output_type(
-    std::vector<c10::optional<Variable>>& output_list) {
+typename std::enable_if<std::is_same<T, Variable>::value, T>::type
+to_output_type(std::vector<c10::optional<Variable>>& output_list) {
   return *output_list[0];
 }
 
@@ -263,7 +264,7 @@ inline std::vector<c10::optional<Variable>> to_optional(variable_list& output) {
 template <class T>
 template <typename X, typename... Args>
 auto Function<T>::apply(Args&&... args)
-    -> std::enable_if_t<std::is_same_v<X, T>, forward_t<X, Args...>> {
+    -> std::enable_if_t<std::is_same<X, T>::value, forward_t<X, Args...>> {
   const auto& functorch_tls = at::functorch::functorchTLSAccessor();
   if (functorch_tls) {
     // Function support for functorch is handled in Python.
@@ -433,4 +434,5 @@ void CppNode<T>::set_ctx_grad_fn(const std::shared_ptr<Node>& node) {
   ctx_.grad_fn_ = node;
 }
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
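The custom_function.h hunks above rewrite C++17 trait shortcuts into their C++11 spellings: std::is_same_v<X, T> becomes std::is_same<X, T>::value, and the std::enable_if_t alias in a return type becomes typename std::enable_if<...>::type. All of these name the same compile-time constants and types; a compilable sketch, illustrative only:

#include <type_traits>

// C++17 spelling: variable template plus alias template.
template <typename T>
std::enable_if_t<std::is_same_v<T, int>, T> twice_new(T v) {
  return v + v; // participates in overload resolution only for T == int
}

// C++11 spelling of the very same SFINAE constraint.
template <typename T>
typename std::enable_if<std::is_same<T, int>::value, T>::type twice_old(T v) {
  return v + v;
}

static_assert(
    std::is_same_v<int, int> == std::is_same<int, int>::value,
    "the variable template and ::value denote the same constant");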
@@ -6,7 +6,8 @@
 
 #include <c10/util/hash.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 struct Node;
 
@@ -37,7 +38,8 @@ struct Edge {
   /// The identifier of a particular input to the function.
   uint32_t input_nr;
 };
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
 
 // The idiomatic way of enabling use of a custom type as the key of hash
 // containers in C++11. This method removes the requirement of having to pass
@@ -27,11 +27,14 @@
 #include <utility>
 #include <vector>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 struct ReadyQueue;
 }
+} // namespace torch
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 // Maximum reentrant backward depth before switching to a new thread
 // This limit is based on the TSAN's deadlock detector, where it will
@@ -288,4 +291,5 @@ struct TORCH_API Engine {
 using EngineStub = Engine& (*)();
 TORCH_API void set_default_engine_stub(EngineStub stub);
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -3,7 +3,8 @@
 #include <ATen/core/Tensor.h>
 #include <unordered_set>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 // [ Using ForwardGrad ]
 // ForwardGrad needs to be a shared_ptr to satisfy constraints of its inner
@@ -207,4 +208,5 @@ struct TORCH_API ForwardGrad : std::enable_shared_from_this<ForwardGrad> {
   mutable std::mutex mutex_;
 };
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -24,7 +24,8 @@
 #include <utility>
 #include <vector>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 struct Edge;
 struct FunctionPostHook;
@@ -756,4 +757,5 @@ struct TypeAndSize {
   at::TensorOptions options;
 };
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -12,7 +12,8 @@ class SwapSavedVariables;
 
 // A hook that's called on gradients
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 using Variable = at::Tensor;
 using variable_list = std::vector<Variable>;
@@ -61,4 +62,5 @@ struct TORCH_API PostAccumulateGradHook {
   }
 };
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -3,9 +3,11 @@
 #include <ATen/core/grad_mode.h>
 #include <torch/csrc/Export.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 using GradMode = at::GradMode;
 using AutoGradMode = at::AutoGradMode;
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -6,7 +6,8 @@
 #include <torch/csrc/autograd/utils/warnings.h>
 #include <vector>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 using edge_list = std::vector<Edge>;
 struct ReadyQueue;
@@ -238,4 +239,5 @@ TORCH_API std::vector<Node*> get_current_graph_task_execution_order();
 TORCH_API int get_current_graph_task_id();
 void add_node_to_current_graph_task_exec_info(Node* fn);
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -13,7 +13,8 @@
 #include <c10/util/Optional.h>
 #include <torch/csrc/autograd/variable.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 struct InputBuffer {
   explicit InputBuffer(size_t size) : buffer(size) {}
@@ -43,4 +44,5 @@ struct InputBuffer {
   std::vector<Variable> buffer;
 };
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -22,7 +22,8 @@
 #include <cstdint>
 #include <utility>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 using SymIntSmallVec = c10::SmallVector<c10::SymInt, c10::kDimVectorStaticSize>;
 using MetadataShape = std::variant<SymIntSmallVec, at::Tensor>;
@@ -108,4 +109,5 @@ struct TORCH_API InputMetadata {
   bool is_nested_ = false;
   bool was_default_constructed_ = true;
 };
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -27,7 +27,9 @@
 // For extra context, see VariableHooksInterface.h, where a similar technique
 // is used
 
-namespace torch::autograd::impl {
+namespace torch {
+namespace autograd {
+namespace impl {
 
 struct TORCH_API JitDecompInterface {
   virtual ~JitDecompInterface() = default;
@@ -47,4 +49,6 @@ struct TORCH_API JitDecompRegisterer {
   }
 };
 
-} // namespace torch::autograd::impl
+} // namespace impl
+} // namespace autograd
+} // namespace torch
@@ -9,15 +9,16 @@
 #include <torch/csrc/profiler/util.h>
 
 namespace torch {
-
-namespace profiler::impl {
+namespace profiler {
+namespace impl {
 struct Result;
 namespace kineto {
 struct ActivityTraceWrapper;
 } // namespace kineto
-} // namespace profiler::impl
-
-namespace autograd::profiler {
+} // namespace impl
+} // namespace profiler
+namespace autograd {
+namespace profiler {
 using experimental_event_t = std::shared_ptr<torch::profiler::impl::Result>;
 using extra_meta_t = std::unordered_map<std::string, std::string>;
 
@@ -176,13 +177,16 @@ TORCH_API void prepareProfiler(
     const torch::profiler::impl::ProfilerConfig& config,
     const std::set<torch::profiler::impl::ActivityType>& activities);
 
-} // namespace autograd::profiler
+} // namespace profiler
+} // namespace autograd
 
-namespace profiler::impl {
+namespace profiler {
+namespace impl {
 
 // Experimental.
 TORCH_API void _reportVulkanEventToProfiler(vulkan_id_t id);
 
-} // namespace profiler::impl
+} // namespace impl
+} // namespace profiler
 
 } // namespace torch
@@ -15,7 +15,8 @@
 #include <torch/csrc/profiler/stubs/base.h>
 #include <torch/csrc/profiler/util.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 struct Node;
 
@@ -412,4 +413,5 @@ struct TORCH_API TLSLegacyProfilerGuard {
 };
 
 } // namespace profiler
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -1,7 +1,13 @@
 #pragma once
 
-namespace torch::autograd::profiler::python_tracer {
+namespace torch {
+namespace autograd {
+namespace profiler {
+namespace python_tracer {
 
 void init();
 
 }
+} // namespace profiler
+} // namespace autograd
+} // namespace torch
@@ -4,11 +4,13 @@
 PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused);
 void THPAutograd_initFunctions();
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 PyMethodDef* python_functions();
 
 }
+} // namespace torch
 
 #include <torch/csrc/autograd/python_engine.h>
 #include <torch/csrc/autograd/python_function.h>
@@ -8,7 +8,8 @@
 #include <torch/csrc/autograd/function.h>
 #include <torch/csrc/utils/object_ptr.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 struct THPCppFunction {
   PyObject_HEAD std::shared_ptr<Node> cdata;
@@ -102,4 +103,5 @@ PyObject* functionToPyObject(const std::shared_ptr<Node>& cdata);
 
 bool THPCppFunction_Check(PyObject* obj);
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -7,7 +7,9 @@
 
 bool THPEngine_initModule(PyObject* module);
 
-namespace torch::autograd::python {
+namespace torch {
+namespace autograd {
+namespace python {
 
 struct PythonEngine : public Engine {
   static Engine& get_python_engine();
@@ -41,4 +43,6 @@ struct PythonEngine : public Engine {
   PythonEngine();
 };
 
-} // namespace torch::autograd::python
+} // namespace python
+} // namespace autograd
+} // namespace torch
@@ -2,6 +2,8 @@
 
 #include <torch/csrc/python_headers.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 void initEnumTag(PyObject* module);
 }
+} // namespace torch
@@ -1,7 +1,9 @@
 #pragma once
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 void initFFTFunctions(PyObject* module);
 
 }
+} // namespace torch
@@ -16,11 +16,13 @@
 #include <utility>
 #include <vector>
 
-namespace torch::jit {
+namespace torch {
+namespace jit {
 struct Graph;
 }
+} // namespace torch
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 // A Function which is implemented by a Python object (i.e., a THPFunction).
 // Calls to 'apply' are forwarded to the Python method implementation.
@@ -69,7 +71,8 @@ inline bool ensure_tuple(THPObjectPtr& obj) {
   return true;
 }
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
 
 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct THPFunction {
@@ -8,7 +8,8 @@ namespace torch::dynamo::autograd {
 class SwapSavedVariables;
 } // namespace torch::dynamo::autograd
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 struct PyFunctionTensorPreHook : public FunctionPreHook {
   PyFunctionTensorPreHook(PyObject* dict, size_t value_idx);
@@ -52,4 +53,5 @@ struct PyFunctionTensorPostAccGradHooks : public PostAccumulateGradHook {
   PyObject* dict;
 };
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -5,8 +5,10 @@
 
 #include <torch/csrc/python_headers.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 void init_legacy_variable(PyObject* module);
 
 }
+} // namespace torch
@@ -1,7 +1,9 @@
 #pragma once
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 void initLinalgFunctions(PyObject* module);
 
 }
+} // namespace torch
@@ -1,9 +1,11 @@
 #pragma once
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 PyMethodDef* get_nested_functions_manual();
 
 void initNestedFunctions(PyObject* module);
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -1,7 +1,9 @@
 #pragma once
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 void initNNFunctions(PyObject* module);
 
 }
+} // namespace torch
@@ -10,7 +10,8 @@
 
 namespace py = pybind11;
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 struct PySavedVariableHooks : public SavedVariableHooks {
   PySavedVariableHooks(py::function& pack_hook, py::function& unpack_hook);
@@ -30,4 +31,5 @@ struct PyDefaultSavedVariableHooks {
   static std::unique_ptr<SavedVariableHooks> get_hooks();
 };
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -1,7 +1,9 @@
 #pragma once
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 void initSparseFunctions(PyObject* module);
 
 }
+} // namespace torch
@@ -1,7 +1,9 @@
 #pragma once
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 void initSpecialFunctions(PyObject* module);
 
 }
+} // namespace torch
@@ -2,7 +2,8 @@
 
 #include <vector>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 extern PyObject* THPVariableFunctionsModule;
 
@@ -24,4 +25,5 @@ inline PyObject* TypeError_to_NotImplemented_(
 
 void initTorchFunctions();
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -6,7 +6,8 @@
 #include <torch/csrc/utils/pybind.h>
 #include <torch/csrc/utils/python_symnode.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 struct UnpackedSlice {
   c10::SymInt start;
@@ -99,4 +100,5 @@ Variable valueToTensor(
     PyObject* value,
     const at::Device& device);
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -3,7 +3,9 @@
 #include <c10/util/Optional.h>
 #include <torch/custom_class.h>
 
-namespace torch::autograd::profiler {
+namespace torch {
+namespace autograd {
+namespace profiler {
 
 struct PythonRecordFunction : public torch::CustomClassHolder {
   at::RecordFunction record;
@@ -24,4 +26,6 @@ TORCH_API c10::intrusive_ptr<c10::ivalue::Future> _call_end_callbacks_on_fut_new
     const c10::intrusive_ptr<PythonRecordFunction>& record,
     const c10::intrusive_ptr<c10::ivalue::Future>& fut);
 
-} // namespace torch::autograd::profiler
+} // namespace profiler
+} // namespace autograd
+} // namespace torch
@@ -9,7 +9,8 @@
 #include <cstdint>
 #include <memory>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 using Variable = at::Tensor;
 struct Node;
@@ -118,4 +119,5 @@ class TORCH_API SavedVariable {
       std::unique_ptr<SavedVariableHooks>&& hooks,
       const Variable& data);
 };
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -2,7 +2,8 @@
 
 #include <ATen/core/Tensor.h>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 struct TORCH_API SavedVariableHooks {
   virtual void call_pack_hook(const at::Tensor& tensor) = 0;
@@ -10,4 +11,5 @@ struct TORCH_API SavedVariableHooks {
   virtual ~SavedVariableHooks() = default;
 };
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -4,7 +4,8 @@
 #include <torch/csrc/onnx/onnx.h>
 #include <vector>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 struct SymbolicContext {
   jit::Block* block;
@@ -14,4 +15,5 @@ struct symbolic_unconvertible : public std::runtime_error {
   using std::runtime_error::runtime_error;
 };
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
@@ -21,7 +21,8 @@
 #include <utility>
 #include <vector>
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 /// `Variable` is exactly the same as `Tensor` (i.e. we have `using Variable =
 /// at::Tensor`). This means you can perform all the usual mathematical and
@@ -32,7 +33,8 @@ namespace torch::autograd {
 /// is to eliminate the `Variable` class in the near future.
 using Variable = at::Tensor;
 
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
 
 // The following are all internal APIs and should not be shown in libtorch docs.
 // Therefore, we wrap the following code with `#ifndef DOXYGEN_SHOULD_SKIP_THIS
@@ -40,7 +42,8 @@ using Variable = at::Tensor;
 
 #ifndef DOXYGEN_SHOULD_SKIP_THIS
 
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
 
 /// Check if this type is supported by the autograd engine.
 /// If you change this, update the doc at the top of the
@@ -841,6 +844,7 @@ namespace utils {
 TORCH_API bool has_same_meta(const Variable& base, const Variable& other);
 
 } // namespace utils
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
 
 #endif /* DOXYGEN_SHOULD_SKIP_THIS */
@@ -1,7 +1,12 @@
 #include <Python.h>
 #include <pybind11/pybind11.h>
 #include <torch/csrc/utils/pybind.h>
 
-namespace torch::functorch::impl {
+namespace torch {
+namespace functorch {
+namespace impl {
 
 void initFuncTorchBindings(PyObject* module);
 
 }
+} // namespace functorch
+} // namespace torch
@@ -11,7 +11,8 @@
 #include <sstream>
 #include <unordered_map>
 
-namespace torch::jit {
+namespace torch {
+namespace jit {
 
 class SourceRangeUnpickler;
 struct SourceRange;
@@ -443,7 +444,8 @@ using SourceRangeRecords = std::vector<TaggedRange>;
 using SourceRangeTagMap =
     std::unordered_map<SourceRange, int64_t, SourceRangeHasher>;
 
-} // namespace torch::jit
+} // namespace jit
+} // namespace torch
 
 namespace std {
 template <>
@@ -17,7 +17,8 @@
 #include <unordered_map>
 #include <vector>
 
-namespace torch::jit {
+namespace torch {
+namespace jit {
 struct Node;
 struct Value;
 struct Graph;
@@ -381,13 +382,13 @@ TORCH_API void ensureUniqueIfOutOfPlaced(
 
 template <
     typename T,
-    typename = torch::enable_if_t<
-        (!std::is_convertible_v<torch::decay_t<T>, at::TensorList> &&
-         !std::is_convertible_v<torch::decay_t<T>, c10::List<at::Tensor>> &&
-         !std::is_convertible_v<torch::decay_t<T>, at::Tensor> &&
-         !std::is_convertible_v<
-             torch::decay_t<T>,
-             c10::intrusive_ptr<c10::ivalue::Object>>)>>
+    typename = torch::enable_if_t<(
+        !std::is_convertible<torch::decay_t<T>, at::TensorList>::value &&
+        !std::is_convertible<torch::decay_t<T>, c10::List<at::Tensor>>::value &&
+        !std::is_convertible<torch::decay_t<T>, at::Tensor>::value &&
+        !std::is_convertible<
+            torch::decay_t<T>,
+            c10::intrusive_ptr<c10::ivalue::Object>>::value)>>
 void addOutput(Node* node, T&&) {
   AT_ERROR(
       "Found an unsupported argument type ",
@@ -409,4 +410,5 @@ TORCH_API autograd::Variable getSizeOf(
 TORCH_API autograd::Variable getNumelOf(const autograd::Variable& var);
 
 } // namespace tracer
-} // namespace torch::jit
+} // namespace jit
+} // namespace torch
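The tracer.h hunk above only respells the traits, but the guard itself is worth reading: the unnamed template parameter enables this addOutput overload (the one that raises AT_ERROR) only for types that are not tensor-like, so supported types select the real overloads instead. The same SFINAE pattern in miniature, with hypothetical names rather than the PyTorch API:

#include <string>
#include <type_traits>

// This overload is enabled only when T is NOT convertible to std::string,
// mirroring the "catch unsupported argument types" guard in the hunk above.
template <
    typename T,
    typename = typename std::enable_if<
        !std::is_convertible<T, std::string>::value>::type>
void reject_output(T&&) {
  // diagnostic path: only instantiable for unsupported (non-string) types
}

int main() {
  reject_output(42); // OK: int is not convertible to std::string
  // reject_output("hi"); // ill-formed: the overload is removed by SFINAE
}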
@@ -28,7 +28,8 @@ PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr<T>, true);
 PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr<T>);
 PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr<T>, true);
 
-namespace pybind11::detail {
+namespace pybind11 {
+namespace detail {
 
 // torch.Tensor <-> at::Tensor conversions (without unwrapping)
 template <>
@@ -323,9 +324,11 @@ struct type_caster<c10::complex<T>> {
   }
 };
 
-} // namespace pybind11::detail
+} // namespace detail
+} // namespace pybind11
 
-namespace torch::impl {
+namespace torch {
+namespace impl {
 
 // Use this function if you have a C++ object that is used from both C++
 // and Python contexts, and you need its GIL to be released when you
@@ -381,4 +384,5 @@ inline void destroy_without_gil(T* ptr) {
 }
 }
 
-} // namespace torch::impl
+} // namespace impl
+} // namespace torch
@@ -4,9 +4,11 @@
 #include <torch/csrc/Export.h>
 #include <torch/csrc/utils/python_stub.h>
 
-namespace torch::utils {
+namespace torch {
+namespace utils {
 
 void initializeMemoryFormats();
 TORCH_PYTHON_API PyObject* getTHPMemoryFormat(c10::MemoryFormat);
 
-} // namespace torch::utils
+} // namespace utils
+} // namespace torch
@@ -3,7 +3,8 @@
 #include <ATen/core/Tensor.h>
 #include <torch/csrc/python_headers.h>
 
-namespace torch::utils {
+namespace torch {
+namespace utils {
 
 PyObject* tensor_to_numpy(const at::Tensor& tensor, bool force = false);
 at::Tensor tensor_from_numpy(PyObject* obj, bool warn_if_not_writeable = true);
@@ -22,4 +23,5 @@ at::Tensor tensor_from_cuda_array_interface(PyObject* obj);
 void validate_numpy_for_dlpack_deleter_bug();
 bool is_numpy_dlpack_deleter_bugged();
 
-} // namespace torch::utils
+} // namespace utils
+} // namespace torch
@@ -73,13 +73,13 @@ struct MakeIndices<0, Is...> {
 //===----------------------------------------------------------------------===//
 
 template <bool value, typename T = void>
-using enable_if_t = std::enable_if_t<value, T>;
+using enable_if_t = typename std::enable_if<value, T>::type;
 
 template <bool value, typename T = void>
 using disable_if_t = enable_if_t<!value, T>;
 
 template <typename T>
-using decay_t = std::decay_t<T>;
+using decay_t = typename std::decay<T>::type;
 
 namespace detail {
 template <bool...>
@@ -112,7 +112,7 @@ using enable_if_all_of_t = enable_if_t<all_of<values...>::value>;
 
 template <typename T, typename... Ts>
 using disable_if_contains_t =
-    enable_if_all_of_t<(!std::is_same_v<T, decay_t<Ts>>)...>;
+    enable_if_all_of_t<(!std::is_same<T, decay_t<Ts>>::value)...>;
 
 template <typename Function, typename... Ts>
 void apply(Function function, Ts&&... ts) {