mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
[2/N] Fix clang-tidy warnings in torch/csrc/autograd (#133295)
Follows #133180.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/133295
Approved by: https://github.com/Skylion007
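The hunks below apply a handful of mechanical clang-tidy modernizations that recur throughout the patch: C++17 nested namespace definitions in place of `namespace torch { namespace autograd { ... } }`, `using` aliases in place of `typedef`, `std::make_unique` in place of an owning raw `new`, and giving locals an initializer so the `NOLINTNEXTLINE(cppcoreguidelines-init-variables)` suppressions can be dropped. A minimal self-contained sketch of those patterns follows; the names `callback_fn`, `example_index`, and `open_log` are invented for illustration and are not part of the patch.

// Illustrative sketch only; assumes nothing beyond the standard library.
#include <fstream>
#include <memory>
#include <string>

// C++17 nested namespace definition, replacing the nested
// `namespace torch { namespace autograd { ... } }` spelling.
namespace torch::autograd {

// `using` alias instead of a `typedef` for a function-pointer type.
using callback_fn = void* (*)(void*, void*);

// A local flagged by cppcoreguidelines-init-variables gets an initializer,
// so the NOLINTNEXTLINE suppression above it is no longer needed.
inline int example_index() {
  int step = 0; // previously `int step;` plus a NOLINT comment
  return step;
}

// An owning raw `new` in an initializer becomes std::make_unique.
inline std::unique_ptr<std::ofstream> open_log(const std::string& filename) {
  return std::make_unique<std::ofstream>(filename);
}

} // namespace torch::autograd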
@@ -20,8 +20,8 @@ using torch::autograd::as_view;
 using torch::autograd::CreationMeta;
 
 namespace torch {
-namespace autograd {
-namespace VariableType {
+namespace autograd::VariableType {
 
 static std::vector<at::DeprecatedTypeProperties*> allTypesForBackends(
     at::ArrayRef<at::Backend> backends) {
@@ -372,8 +372,7 @@ TORCH_LIBRARY_IMPL(aten, Autograd, m) {
 }
 
 } // namespace
-} // namespace VariableType
-} // namespace autograd
+} // namespace autograd::VariableType
 
 namespace ADInplaceOrView {
 #define CREATION_META_DEFINITION \
@@ -29,8 +29,7 @@
 #endif
 #endif
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 enum class can_mutate_inplace_result {
   success,
   non_default_backward_view,
@@ -438,5 +437,4 @@ Return run_jit_decomposition_with_args_for_jvp(
 
 } // namespace impl
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -4,8 +4,7 @@
 #include <torch/csrc/autograd/function.h>
 #include <mutex>
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 bool AnomalyMode::_enabled = false;
 bool AnomalyMode::_check_nan = true;
@@ -75,5 +74,4 @@ void AnomalyMetadata::assign_parent(const std::shared_ptr<Node>& parent_node) {
   parent_ = parent_node;
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -14,8 +14,7 @@
 
 #include <c10/util/irange.h>
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 // NB: This code duplicates existing logic at torch/autograd/__init__.py and
 // torch._C._EngineBase.run_backward in torch/csrc/autograd/python_engine.cpp
@@ -215,5 +214,4 @@ void exit_dual_level(uint64_t level) {
 
 } // namespace forward_ad
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -10,8 +10,7 @@
 #include <ATen/ops/zeros.h>
 #endif
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 using at::Tensor;
 
@@ -315,5 +314,4 @@ const Variable& AutogradMeta::fw_grad(
   return direct_fw_grad;
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -17,8 +17,7 @@
 #include <utility>
 #include <vector>
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 namespace {
 
@@ -630,5 +629,4 @@ torch::CppFunction autogradNotImplementedInplaceOrViewFallback() {
       &autogradNotImplementedInplaceOrViewFallbackImpl>();
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -19,8 +19,7 @@ void check_single_result(
 }
 } // namespace
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 CppFunctionTensorPreHook::CppFunctionTensorPreHook(
     std::shared_ptr<hooks_list> hooks,
@@ -65,5 +64,4 @@ variable_list CppFunctionSingleTensorPreHook::operator()(
   return results;
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -97,7 +97,6 @@ static void _process_forward_mode_AD(
     forward_grads = jvp_user_function(inputs, std::move(input_grads));
   }
 
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   const auto num_forward_grads = forward_grads.size();
   // contrary to backward mode, we don't allow returning too many gradients
   TORCH_CHECK(
@@ -124,7 +124,6 @@ struct TORCH_API AutogradContext {
   AutogradContext& operator=(const AutogradContext& other) = delete;
 
   /// Can be used to save non-variable data for `backward`.
-  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   ska::flat_hash_map<std::string, at::IValue> saved_data;
 
   /// Saves the list of variables for a future call to `backward`. This
@@ -248,7 +247,6 @@ struct ExtractVariables : IterArgs<ExtractVariables> {
   ExtractVariables(std::vector<bool>& is_var, variable_list& list)
       : is_var_(is_var), list_(list) {}
   void operator()(const std::optional<at::Tensor>& x) {
-    // NOLINTNEXTLINE(bugprone-branch-clone)
     if (x.has_value() && x.value().defined()) {
       is_var_.push_back(true);
       list_.emplace_back(x.value());
@@ -283,7 +281,6 @@ inline void extract_vars(
 template <typename T>
 std::enable_if_t<std::is_same_v<T, variable_list>, T> to_output_type(
     std::vector<std::optional<Variable>>& output_list) {
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   variable_list result;
   std::transform(
       output_list.begin(),
@@ -304,7 +301,6 @@ inline std::vector<std::optional<Variable>> to_optional(Variable& output) {
 }
 
 inline std::vector<std::optional<Variable>> to_optional(variable_list& output) {
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   std::vector<std::optional<Variable>> result;
   std::transform(
       output.begin(),
@@ -327,7 +323,6 @@ auto Function<T>::apply(Args&&... args)
   }
 
   std::shared_ptr<CppNode<T>> node(new CppNode<T>(), deleteNode);
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   variable_list input_vars;
 
   const size_t num_inputs = sizeof...(Args);
@@ -336,7 +331,6 @@ auto Function<T>::apply(Args&&... args)
   // TODO Add tracing here
   extract_vars(node->is_variable_input_, input_vars, args...);
 
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   bool is_executable =
       GradMode::is_enabled() && any_variable_requires_grad(input_vars);
   auto next_edges =
@@ -351,7 +345,6 @@ auto Function<T>::apply(Args&&... args)
   }
 
   using forward_return_t = forward_t<X, Args...>;
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   forward_return_t outputs;
   {
     AutoGradMode grad_mode(false);
@@ -406,7 +399,6 @@ variable_list CppNode<T>::apply(variable_list&& inputs) {
   at::OptionalDeviceGuard _device_guard;
 
   auto num_inputs = inputs.size();
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   variable_list backward_inputs;
   backward_inputs.reserve(num_inputs);
   for (const auto i : c10::irange(num_inputs)) {
@@ -449,7 +441,6 @@ variable_list CppNode<T>::apply(variable_list&& inputs) {
     throw std::runtime_error(msg);
   }
 
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   variable_list results;
   results.reserve(num_outputs);
   for (const auto i : c10::irange(num_outputs)) {
@@ -1,7 +1,6 @@
 #include <torch/csrc/autograd/forward_grad.h>
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 namespace {
 // See discussion in forward_grad.h for why these are global variables and not
@@ -76,5 +75,4 @@ const at::Tensor& ForwardGrad::undef_grad() {
   return singleton_undefined_tensor;
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -17,8 +17,7 @@
 #include <ATen/ops/zeros.h>
 #endif
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 // The current evaluating node. This is useful to assign the current node as a
 // parent of new nodes created during the evaluation of this node in anomaly
@@ -110,5 +109,4 @@ at::Tensor TypeAndSize::zeros() {
   return at::zeros_symint(sym_sizes, options);
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -1280,7 +1280,7 @@ PyObject* THPModule_increment_version(
 }
 
 // autograd methods on torch._C
-static PyMethodDef methods[] = { // NOLINT
+static PyMethodDef methods[] = {
     {"_set_grad_enabled",
      castPyCFunctionWithKeywords(set_grad_enabled),
      METH_VARARGS | METH_KEYWORDS,
@@ -17,8 +17,7 @@
 #include <utility>
 #include <vector>
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 namespace {
 // look what you made me do >.<
@@ -246,5 +245,4 @@ auto InputBuffer::variables(InputBuffer&& g) -> std::vector<Variable> {
   return result;
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -3,8 +3,7 @@
 // TODO: we may be able to move some imports from input_metadata.h to here, but
 // it seems that function.h transitively depends on some of them.
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 namespace {
 
@@ -158,7 +157,6 @@ std::stringstream InputMetadata::incompatible_shape_error_message(
 }
 
 bool InputMetadata::is_cpp_nested_tensor() const {
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   bool ret = std::holds_alternative<at::Tensor>(shape_);
   TORCH_INTERNAL_ASSERT(ret == (is_nested_ && !is_tensor_subclass_))
   return ret;
@@ -203,5 +201,4 @@ bool InputMetadata::maybe_expandable_to(const at::Tensor& grad) const {
 }
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -1,8 +1,6 @@
 #include <torch/csrc/autograd/jit_decomp_interface.h>
 
-namespace torch {
-namespace autograd {
-namespace impl {
+namespace torch::autograd::impl {
 
 namespace {
 JitDecompInterface* impl = nullptr;
@@ -16,6 +14,4 @@ JitDecompInterface* getJitDecompImpl() {
   return impl;
 }
 
-} // namespace impl
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd::impl
@@ -645,7 +645,7 @@ RecordProfile::RecordProfile(std::ostream& out) : out_(out) {
 }
 
 RecordProfile::RecordProfile(const std::string& filename)
-    : file_(new std::ofstream(filename)), out_(*file_) {
+    : file_(std::make_unique<std::ofstream>(filename)), out_(*file_) {
   init();
 }
 
@@ -12,11 +12,7 @@
 #include <torch/csrc/profiler/stubs/base.h>
 #include <torch/csrc/profiler/util.h>
 
-namespace torch::autograd {
-
-struct Node;
-
-namespace profiler {
+namespace torch::autograd::profiler {
 
 enum class C10_API_ENUM EventKind : uint16_t {
   Mark,
@@ -402,5 +398,4 @@ struct TORCH_API TLSLegacyProfilerGuard {
   const std::optional<ProfilerDisableOptions> profilerDisableOptions_;
 };
 
-} // namespace profiler
-} // namespace torch::autograd
+} // namespace torch::autograd::profiler
@@ -8,8 +8,7 @@
 #include <torch/csrc/utils/pybind.h>
 #include <torch/csrc/utils/python_strings.h>
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 void PyAnomalyMetadata::store_stack() {
   pybind11::gil_scoped_acquire gil;
@@ -125,5 +124,4 @@ void _print_stack(
 }
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -5,8 +5,7 @@
 #include <torch/csrc/python_headers.h>
 #include <torch/csrc/utils/pybind.h>
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 struct PyAnomalyMetadata : public AnomalyMetadata {
   static constexpr const char* ANOMALY_TRACE_KEY = "traceback_";
@@ -40,5 +39,4 @@ void _print_stack(
     const std::string& current_node_name,
     bool is_parent);
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -297,8 +297,7 @@ PyObject* functionToPyObject(const std::shared_ptr<Node>& cdata) {
   } else {
     auto& fn = *cdata;
     auto it = cpp_function_types_map.find(std::type_index(typeid(fn)));
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    PyTypeObject* type;
+    PyTypeObject* type = nullptr;
     if (it == cpp_function_types_map.end()) {
       type = get_default_type();
     } else {
@@ -413,8 +413,7 @@ PyObject* PyNode::to_py_args(
     throw_python_error();
   auto& output_info = py_fn->output_info;
   for (const auto i : c10::irange(num_inputs)) {
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    PyObject* input;
+    PyObject* input = nullptr;
     if (inputs[i].defined() || !py_fn->materialize_grads ||
         (input_metadata(i).was_default_constructed() &&
          !py_fn->materialize_non_diff_grads)) {
@@ -1648,8 +1647,8 @@ PyObject* THPFunction_metadata(THPFunction* self, void* _unused) {
   END_HANDLE_TH_ERRORS
 }
 
-typedef PyObject* (*getter)(PyObject*, void*);
-typedef int (*setter)(PyObject*, PyObject*, void*);
+using getter = PyObject* (*)(PyObject*, void*);
+using setter = int (*)(PyObject*, PyObject*, void*);
 
 namespace {
 
@@ -26,8 +26,7 @@ static void check_single_result(
     PyObject* result,
     PyObject* hook);
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 namespace {
 
@@ -253,8 +252,7 @@ void PyFunctionTensorPostAccGradHooks::apply_with_saved(
   }
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
 
 static PyObject* wrap_variables(const variable_list& c_variables) {
   size_t num_vars = c_variables.size();
@@ -10,8 +10,7 @@
 
 using namespace at;
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 static PyObject* THPVariable_pynew(
     PyTypeObject* type,
@@ -163,5 +162,4 @@ void init_legacy_variable(PyObject* module) {
 }
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -3,8 +3,7 @@
 #include <torch/csrc/utils/python_arg_parser.h>
 #include <torch/torch.h>
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 static PyObject* THPVariable_nested_tensor(
     PyObject* /*self*/,
@@ -40,5 +39,4 @@ PyMethodDef* get_nested_functions_manual() {
   return nested_functions_manual;
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -41,8 +41,7 @@ using at::TensorOptions;
 using torch::utils::check_out_type_matches;
 using namespace torch::autograd::utils;
 
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 PyObject* THPVariableFunctionsModule = nullptr;
@@ -796,5 +795,4 @@ void initTorchFunctions(PyObject* module) {
   });
 }
 
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
@@ -43,8 +43,7 @@ inline UnpackedSlice __PySlice_Unpack(PyObject* _r) {
   if (torch::is_symint(r->step)) {
     step_sym = py::handle(r->step).cast<c10::SymInt>();
   } else {
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    Py_ssize_t step;
+    Py_ssize_t step = 0;
     if (!_PyEval_SliceIndex(r->step, &step)) {
       throw python_error();
     }
@@ -62,8 +61,7 @@ inline UnpackedSlice __PySlice_Unpack(PyObject* _r) {
   } else if (r->start == Py_None) {
     start_sym = c10::SymInt(step_sym < 0 ? PY_SSIZE_T_MAX : 0);
   } else {
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    Py_ssize_t start;
+    Py_ssize_t start = 0;
    if (!_PyEval_SliceIndex(r->start, &start)) {
       throw python_error();
     }
@@ -77,8 +75,7 @@ inline UnpackedSlice __PySlice_Unpack(PyObject* _r) {
     stop_sym = c10::SymInt(
         step_sym < 0 ? c10::SymInt::min_representable_int() : PY_SSIZE_T_MAX);
   } else {
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    Py_ssize_t stop;
+    Py_ssize_t stop = 0;
     if (!_PyEval_SliceIndex(r->stop, &stop)) {
       throw python_error();
     }
@@ -12,9 +12,7 @@ namespace caffe2 {
 CAFFE_KNOWN_TYPE(at::RecordFunction);
 } // namespace caffe2
 
-namespace torch {
-namespace autograd {
-namespace profiler {
+namespace torch::autograd::profiler {
 
 // Creates a new profiling scope using RecordFunction and invokes its starting
 // callbacks.
@@ -165,6 +163,4 @@ TORCH_LIBRARY_FRAGMENT(profiler, m) {
           c10::AliasAnalysisKind::FROM_SCHEMA));
 }
 
-} // namespace profiler
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd::profiler
@@ -2,9 +2,7 @@
 
 #include <sstream>
 
-namespace torch {
-namespace autograd {
-namespace utils {
+namespace torch::autograd::utils {
 
 inline std::string requires_grad_leaf_error(bool requires_grad) {
   std::ostringstream oss;
@@ -17,6 +15,4 @@ inline std::string requires_grad_leaf_error(bool requires_grad) {
   return oss.str();
 }
 
-} // namespace utils
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd::utils
@@ -2,9 +2,7 @@
 
 #include <ATen/Tensor.h>
 
-namespace torch {
-namespace autograd {
-namespace utils {
+namespace torch::autograd::utils {
 
 // Helper functions to enforce the "Gradient Layout Contract" described in
 // torch/csrc/autograd/functions/accumulate_grad.h.
@@ -75,6 +73,4 @@ inline at::Tensor clone_obey_contract(
 }
 }
 
-} // namespace utils
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd::utils
@@ -5,9 +5,7 @@
 
 #include <torch/csrc/utils/python_arg_parser.h>
 
-namespace torch {
-namespace autograd {
-namespace utils {
+namespace torch::autograd::utils {
 
 // The parameter allow_copy is to accept copy for Tensor.to (and by proxy
 // PackedSequences.to) but not nn.Module.to.
@@ -48,6 +46,4 @@ parse_to_conversion(PythonArgs& r, bool allow_copy) {
       r.memoryformatOptional(3));
 }
 }
-} // namespace utils
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd::utils
@@ -1,8 +1,6 @@
 #include <torch/csrc/autograd/utils/warnings.h>
 
-namespace torch {
-namespace autograd {
-namespace utils {
+namespace torch::autograd::utils {
 
 void DelayWarningHandler::process(const c10::Warning& warning) {
   std::lock_guard<std::mutex> lock(mutex_);
@@ -19,6 +17,4 @@ void DelayWarningHandler::replay_warnings() {
 }
 }
 
-} // namespace utils
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd::utils
@@ -4,9 +4,7 @@
 #include <mutex>
 #include <vector>
 
-namespace torch {
-namespace autograd {
-namespace utils {
+namespace torch::autograd::utils {
 
 // Warning handler for multi-threaded contexts. Gather warnings from
 // all threads into a single queue, then process together at the end
@@ -23,6 +21,4 @@ class DelayWarningHandler : public at::WarningHandler {
   std::mutex mutex_;
 };
 
-} // namespace utils
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd::utils
@@ -18,9 +18,7 @@
 #include <torch/csrc/utils/python_numbers.h>
 #include <torch/csrc/utils/tensor_qschemes.h>
 
-namespace torch {
-namespace autograd {
-namespace utils {
+namespace torch::autograd::utils {
 
 inline PyObject* wrap(bool value) {
   if (value) {
@@ -148,6 +146,4 @@ PyObject* wrap(PyTypeObject* type, std::tuple<Ts...> values) {
   return r.release();
 }
 
-} // namespace utils
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd::utils