Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[2/N] apply clang-tidy in torch/csrc/autograd (#109277)
This PR follows the work of PR #109032.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/109277
Approved by: https://github.com/albanD
@@ -15,9 +15,9 @@ constexpr size_t dim_bitset_size = 64;
 
 static inline std::bitset<dim_bitset_size> dim_list_to_bitset(
     OptionalIntArrayRef opt_dims,
-    int64_t ndims) {
+    size_t ndims) {
   TORCH_CHECK(
-      ndims <= (int64_t)dim_bitset_size,
+      ndims <= dim_bitset_size,
       "only tensors with up to ",
       dim_bitset_size,
       " dims are supported");
@@ -25,7 +25,7 @@ static inline std::bitset<dim_bitset_size> dim_list_to_bitset(
   if (opt_dims.has_value()) {
     auto dims = opt_dims.value();
     for (const auto i : c10::irange(dims.size())) {
-      size_t dim = maybe_wrap_dim(dims[i], ndims);
+      size_t dim = maybe_wrap_dim(dims[i], static_cast<int64_t>(ndims));
       TORCH_CHECK(
           !seen[dim],
           "dim ",
@@ -34,7 +34,7 @@ static inline std::bitset<dim_bitset_size> dim_list_to_bitset(
       seen[dim] = true;
     }
   } else {
-    for (int64_t dim = 0; dim < ndims; dim++) {
+    for (size_t dim = 0; dim < ndims; dim++) {
      seen[dim] = true;
     }
   }
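
The following is a minimal standalone sketch (not the PyTorch source) of the signedness cleanup these hunks apply: once the dimension count is a std::size_t, the bound check against dim_bitset_size needs no cast, and the fill loop can use an unsigned induction variable without a sign-compare warning.

#include <bitset>
#include <cstddef>
#include <stdexcept>

constexpr std::size_t dim_bitset_size = 64;

// Hypothetical helper: mark every dimension of an ndims-dimensional tensor.
std::bitset<dim_bitset_size> all_dims_bitset(std::size_t ndims) {
  if (ndims > dim_bitset_size) {
    throw std::runtime_error("only tensors with up to 64 dims are supported");
  }
  std::bitset<dim_bitset_size> seen;
  for (std::size_t dim = 0; dim < ndims; dim++) {
    seen[dim] = true;
  }
  return seen;
}
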
@@ -13,6 +13,7 @@
 #include <torch/csrc/autograd/functions/basic_ops.h>
 #include <torch/csrc/autograd/functions/utils.h>
 
+#include <optional>
 #include <utility>
 #include <vector>
 
@@ -74,19 +75,19 @@ static void warnAutogradNotImplemented(const std::string& op_name) {
 struct WarnNotImplemented : public Node {
   WarnNotImplemented(
       std::string op_name,
-      int64_t num_outputs,
+      size_t num_outputs,
       edge_list&& next_edges)
       : Node(std::move(next_edges)),
         op_name(std::move(op_name)),
         num_outputs(num_outputs) {}
 
-  WarnNotImplemented(std::string op_name, int64_t num_outputs)
+  WarnNotImplemented(std::string op_name, size_t num_outputs)
       : op_name(std::move(op_name)), num_outputs(num_outputs) {}
 
   variable_list apply(variable_list&& inputs) override;
 
   std::string op_name;
-  int64_t num_outputs;
+  size_t num_outputs;
 };
 
 auto WarnNotImplemented::apply(variable_list&& inputs) -> variable_list {
@@ -250,7 +251,7 @@ static void autogradNotImplementedFallbackImpl(
   std::vector<bool> is_inplace_output(num_returns, false);
   bool any_is_inplace_output = false;
   std::vector<bool> is_aliased_output(num_returns, false);
-  int aliased_output_idx = -1;
+  std::optional<size_t> aliased_output_idx;
 
   for (const auto i : c10::irange(num_returns)) {
     if (schema.is_aliasing({c10::SchemaArgType::output, i})) {
@@ -259,7 +260,7 @@ static void autogradNotImplementedFallbackImpl(
         any_is_inplace_output = true;
       } else {
         TORCH_CHECK(
-            aliased_output_idx == -1,
+            !aliased_output_idx.has_value(),
            "Expected only a single output in the operator schema to have a non-write alias annotation (i.e., 'Tensor(a)'). "
            "Non-composite functions where multiple outputs are aliased with inputs aren't supported."
            "Please rewrite your function as a composite function.");
@@ -385,10 +386,10 @@ static void autogradNotImplementedFallbackImpl(
       num_returns);
   // There should be only a single base-view pair, make sure their storage is
   // aliased.
-  if (aliased_input_idx != -1 && aliased_output_idx != -1) {
+  if (aliased_input_idx != -1 && aliased_output_idx.has_value()) {
     const c10::IValue& aliased_input_iv = stack_args_copy[aliased_input_idx];
     const c10::IValue& aliased_output_iv =
-        (*stack)[stack->size() - num_returns + aliased_output_idx];
+        (*stack)[stack->size() - num_returns + *aliased_output_idx];
     TORCH_INTERNAL_ASSERT(aliased_input_iv.isTensor(), op_name);
     TORCH_INTERNAL_ASSERT(
         aliased_output_iv.isTensor() || aliased_output_iv.isTensorList(),
@@ -482,12 +483,12 @@ static void autogradNotImplementedInplaceOrViewFallbackImpl(
     }
   }
 
-  int64_t aliased_input_idx = -1;
+  std::optional<size_t> aliased_input_idx;
   for (const auto i : c10::irange(num_arguments)) {
     if (schema.is_aliasing({c10::SchemaArgType::input, i}) &&
         !schema.is_mutable({c10::SchemaArgType::input, i})) {
       TORCH_CHECK(
-          aliased_input_idx == -1,
+          !aliased_input_idx.has_value(),
          "Fallback ADInplaceOrView kernel expects only a single input in the operator schema to have a "
          "non-write alias annotation (i.e., 'Tensor(a)'). "
          "Non-composite functions where multiple inputs are aliased with outputs aren't supported. "
@@ -504,12 +505,13 @@ static void autogradNotImplementedInplaceOrViewFallbackImpl(
   }
   // See NOTE [ Limitations of ADInplaceOrView boxed kernel ] above
   TORCH_CHECK(
-      (aliased_input_idx == -1 && aliased_output_idx == -1) ||
-          (aliased_input_idx == 0 && aliased_output_idx == 0),
+      (!aliased_input_idx.has_value() && aliased_output_idx == -1) ||
+          (aliased_input_idx.has_value() && aliased_input_idx.value() == 0 &&
+           aliased_output_idx == 0),
       "Fallback ADInplaceOrView kernel can only create view relationships between the first "
       "input and the first output (the output can be a vector of tensors). Please change the "
       "order of your operator's parameters so that this is the case.");
-  const bool is_view = aliased_input_idx != -1;
+  const bool is_view = aliased_input_idx.has_value();
 
   {
     at::AutoDispatchBelowADInplaceOrView guard;
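
A minimal sketch (illustrative, not the file above) of the sentinel-to-optional rewrite used for aliased_output_idx and aliased_input_idx: instead of encoding "not found" as -1 in a signed index, an empty std::optional<std::size_t> carries the same information without mixing signed and unsigned arithmetic.

#include <cstddef>
#include <optional>
#include <vector>

// Hypothetical helper: index of the first negative entry, if any.
std::optional<std::size_t> first_negative(const std::vector<int>& values) {
  std::optional<std::size_t> found;
  for (std::size_t i = 0; i < values.size(); ++i) {
    if (values[i] < 0) {
      found = i;
      break;
    }
  }
  return found; // found.has_value() replaces the old `idx != -1` check
}
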
@@ -10,12 +10,12 @@ using torch::autograd::Variable;
 void check_single_result(
     const at::TensorBase& value,
     const at::TensorBase& result,
-    std::string hook_name) {
+    const std::string& hook_name) {
   if (!value.defined()) {
     throw std::runtime_error(
         "can't replace a empty gradient with a non-empty value");
   }
-  torch::autograd::check_variable_result(value, result, std::move(hook_name));
+  torch::autograd::check_variable_result(value, result, hook_name);
 }
 } // namespace
 
@@ -24,7 +24,7 @@ namespace autograd {
 
 CppFunctionTensorPreHook::CppFunctionTensorPreHook(
     std::shared_ptr<hooks_list> hooks,
-    int value_idx)
+    size_t value_idx)
     : hooks_(std::move(hooks)), value_idx_(value_idx) {}
 
 variable_list CppFunctionTensorPreHook::operator()(
@@ -51,7 +51,7 @@ variable_list CppFunctionTensorPreHook::operator()(
 
 CppFunctionSingleTensorPreHook::CppFunctionSingleTensorPreHook(
     std::function<at::TensorBase(const at::TensorBase&)> hook,
-    int value_idx)
+    size_t value_idx)
     : hook_(std::move(hook)), value_idx_(value_idx) {}
 
 variable_list CppFunctionSingleTensorPreHook::operator()(
@@ -10,21 +10,21 @@ using hooks_list =
     std::vector<std::function<at::TensorBase(const at::TensorBase&)>>;
 
 struct CppFunctionTensorPreHook : public FunctionPreHook {
-  CppFunctionTensorPreHook(std::shared_ptr<hooks_list> hooks, int value_idx);
+  CppFunctionTensorPreHook(std::shared_ptr<hooks_list> hooks, size_t value_idx);
   variable_list operator()(const variable_list& values) override;
 
   std::shared_ptr<hooks_list> hooks_;
-  int value_idx_;
+  size_t value_idx_;
 };
 
 struct CppFunctionSingleTensorPreHook : public FunctionPreHook {
   CppFunctionSingleTensorPreHook(
       std::function<at::TensorBase(const at::TensorBase&)> hook,
-      int value_idx);
+      size_t value_idx);
   variable_list operator()(const variable_list& values) override;
 
   std::function<at::TensorBase(const at::TensorBase&)> hook_;
-  int value_idx_;
+  size_t value_idx_;
 };
 
 } // namespace autograd
@@ -275,7 +275,7 @@ static optional_variable_list _process_backward_mode_ad(
     const at::ArrayRef<c10::optional<Variable>> raw_outputs,
     const std::shared_ptr<Node>& cdata,
     const std::unordered_set<at::TensorImpl*>& to_save_if_setup_context) {
-  int num_outputs = raw_outputs.size();
+  auto num_outputs = raw_outputs.size();
 
   const char* error_msg_input_returned_as_is =
       "A input that has been returned as-is as output is being saved for backward. "
@@ -368,7 +368,7 @@ static optional_variable_list _process_backward_mode_ad(
     if (!raw_outputs[i].has_value()) {
       if (cdata) {
         auto output_nr = cdata->add_input_metadata(Node::undefined_input());
-        AT_ASSERT(i == (int)output_nr);
+        AT_ASSERT(i == output_nr);
       }
       outputs.emplace_back();
       continue;
@@ -386,13 +386,13 @@ static optional_variable_list _process_backward_mode_ad(
         to_save_if_setup_context.count(out_tensor_impl) > 0;
 
     if (cdata) {
-      auto output_nr = -1;
+      uint32_t output_nr = 0;
       if (!is_differentiable) {
         output_nr = cdata->add_input_metadata(Node::undefined_input());
       } else {
         output_nr = cdata->add_input_metadata(var);
       }
-      AT_ASSERT(i == (int)output_nr);
+      AT_ASSERT(i == output_nr);
     }
     set_history(
         var,
@@ -452,7 +452,7 @@ optional_variable_list _wrap_outputs(
     const std::unordered_set<at::TensorImpl*>& dirty_inputs,
     const at::ArrayRef<c10::optional<Variable>> raw_outputs,
     const std::shared_ptr<Node>& cdata,
-    _jvp_fn_t jvp_user_function,
+    const _jvp_fn_t& jvp_user_function,
     const std::unordered_set<at::TensorImpl*>& to_save_if_setup_context) {
   std::unordered_map<at::TensorImpl*, size_t> inputs_mapping;
   inputs_mapping.reserve(input_vars.size());
@@ -477,7 +477,7 @@ optional_variable_list _wrap_outputs(
       outputs,
       non_differentiable,
       dirty_inputs,
-      std::move(jvp_user_function));
+      jvp_user_function);
 
   return outputs;
 }
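
A minimal sketch, under the assumption (consistent with the hunks above) that _jvp_fn_t is a std::function that is only called, never stored: passing it by const reference lets every layer share one object, so the std::move calls that previously forwarded it can be dropped.

#include <functional>
#include <string>

// Hypothetical callback type standing in for _jvp_fn_t.
using callback_t = std::function<std::string(const std::string&)>;

// The callee only invokes the callback, so a const reference is enough.
std::string run_once(const callback_t& cb, const std::string& input) {
  return cb(input); // no ownership transfer, hence no std::move
}
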
@@ -20,7 +20,7 @@ TORCH_API std::vector<c10::optional<Variable>> _wrap_outputs(
     const std::unordered_set<at::TensorImpl*>& dirty_inputs,
     const at::ArrayRef<c10::optional<Variable>> raw_outputs,
     const std::shared_ptr<Node>& cdata,
-    _jvp_fn_t jvp_user_function,
+    const _jvp_fn_t& jvp_user_function,
     const std::unordered_set<at::TensorImpl*>& to_save_if_setup_context);
 
 TORCH_API void check_variable_result(
@@ -538,7 +538,8 @@ auto Engine::thread_main(const std::shared_ptr<GraphTask>& graph_task) -> void {
         break;
       }
 
-      if (!(local_graph_task = task.base_.lock())) {
+      local_graph_task = task.base_.lock();
+      if (!local_graph_task) {
         // GraphTask for function is no longer valid, skipping further
         // execution.
         continue;
@@ -622,8 +623,8 @@ void Engine::reentrant_thread_init() {
     auto task = tp_shared->graphtasks_queue_.front();
     tp_shared->graphtasks_queue_.pop();
     lk.unlock();
-    std::shared_ptr<GraphTask> graph_task;
-    if (!(graph_task = task.lock())) {
+    std::shared_ptr<GraphTask> graph_task = task.lock();
+    if (!graph_task) {
       LOG(INFO) << "GraphTask has expired, skipping reentrant execution";
       continue;
     }
@@ -808,7 +809,9 @@ void set_device(int device) {
             c10::DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES))) {
       auto* impl = c10::impl::device_guard_impl_registry[i].load();
       if (impl && device < impl->deviceCount()) {
-        impl->setDevice(at::Device(static_cast<c10::DeviceType>(i), device));
+        impl->setDevice(at::Device(
+            static_cast<c10::DeviceType>(i),
+            static_cast<c10::DeviceIndex>(device)));
       }
     }
   }
@@ -1012,7 +1015,7 @@ void Engine::evaluate_function(
     fn.release_variables();
   }
 
-  int num_outputs = outputs.size();
+  auto num_outputs = outputs.size();
   if (num_outputs == 0) { // Note: doesn't acquire the mutex
     // Records leaf stream (if applicable)
     // See Note [Streaming backwards]
@@ -1161,9 +1164,9 @@ auto Engine::execute(
     bool create_graph,
     bool accumulate_grad,
     const edge_list& outputs) -> variable_list {
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
   validate_outputs(
       root_edges,
+      // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
       const_cast<variable_list&>(inputs),
       [](const std::string& msg) { return msg; });
   if (accumulate_grad && create_graph) {
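
A minimal sketch (hypothetical code, not Engine itself) of the readability fix in thread_main and reentrant_thread_init: hoisting the weak_ptr lock out of the if-condition separates the assignment from the null check.

#include <memory>

// Returns false when the owner has already released the object.
bool try_increment(const std::weak_ptr<int>& weak) {
  std::shared_ptr<int> strong = weak.lock();
  if (!strong) {
    return false;
  }
  ++(*strong);
  return true;
}
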
@@ -501,11 +501,11 @@ struct TORCH_API Node : std::enable_shared_from_this<Node> {
 
   void add_retains_grad_hook(
       std::unique_ptr<FunctionPreHook>&& pre_hook,
-      int output_idx) {
+      size_t output_idx) {
     retains_grad_hooks_[output_idx] = std::move(pre_hook);
   }
 
-  std::unique_ptr<FunctionPreHook> pop_retains_grad_hook(int output_idx) {
+  std::unique_ptr<FunctionPreHook> pop_retains_grad_hook(size_t output_idx) {
     auto ret = std::move(retains_grad_hooks_[output_idx]);
     retains_grad_hooks_.erase(output_idx);
     return ret;
@@ -531,7 +531,7 @@ struct TORCH_API Node : std::enable_shared_from_this<Node> {
     return empty;
   }
 
-  std::unordered_map<int, std::unique_ptr<FunctionPreHook>>&
+  std::unordered_map<size_t, std::unique_ptr<FunctionPreHook>>&
   retains_grad_hooks() noexcept {
     return retains_grad_hooks_;
   }
@@ -680,8 +680,8 @@ struct TORCH_API Node : std::enable_shared_from_this<Node> {
   std::vector<std::unique_ptr<FunctionPreHook>> pre_hooks_;
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   std::vector<std::unique_ptr<FunctionPreHook>> tensor_pre_hooks_;
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
-  std::unordered_map<int, std::unique_ptr<FunctionPreHook>> retains_grad_hooks_;
+  std::unordered_map<size_t, std::unique_ptr<FunctionPreHook>>
+      retains_grad_hooks_;
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
   std::vector<std::unique_ptr<FunctionPostHook>> post_hooks_;
   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
@@ -146,6 +146,7 @@ struct GraphTask : std::enable_shared_from_this<GraphTask> {
   // Safe to read owner_ and reentrant_depth_ without synchronization
   int owner_;
   // The number of parent graph tasks for this graph task
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   const int reentrant_depth_;
 
   bool can_checkpoint() const {
@@ -61,7 +61,7 @@ struct InputMetadata {
         t.unsafeGetTensorImpl()->is_python_dispatch(),
         t.is_nested()) {}
 
-  const at::TensorOptions options() const {
+  const at::TensorOptions& options() const {
     return options_;
   }
 
@@ -184,6 +184,7 @@ struct InputMetadata {
     return c10::get<at::Tensor>(shape_);
   }
 
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   const at::TensorOptions options_;
   MetadataShape shape_;
   c10::Stream stream_ = c10::Stream(c10::Stream::Default::DEFAULT, device());
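
A minimal sketch (hypothetical type, not InputMetadata) of the accessor change above: returning a const reference to the stored options avoids copying the object on every call, whereas a const by-value return still copies.

#include <string>

struct OptionsHolder {
  // was: const std::string options() const { return options_; }
  const std::string& options() const {
    return options_;
  }

  std::string options_;
};
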
@@ -692,7 +692,7 @@ class PythonTracer final : public python_tracer::PythonTracerBase {
 
   struct StartFrame {
     TraceKey trace_key_;
-    approx_time_t start_time;
+    approx_time_t start_time{};
   };
 
  private:
@@ -960,7 +960,7 @@ class PostProcess {
     using stack_t = std::vector<std::shared_ptr<Result>>;
     const auto initial_size = out.size();
     auto pop = [](stack_t& stack, time_t t) {
-      TORCH_INTERNAL_ASSERT(stack.size(), "Python replay stack is empty.");
+      TORCH_INTERNAL_ASSERT(!stack.empty(), "Python replay stack is empty.");
      c10::get<ExtraFields<E>>(stack.back()->extra_fields_).end_time_ns_ = t;
       stack.pop_back();
     };
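
A minimal sketch (hypothetical types) of the two small fixes above: a default member initializer ({}) removes the uninitialized-field finding for start_time, and !stack.empty() states the emptiness check directly instead of relying on size() converting to bool.

#include <cassert>
#include <vector>

struct Frame {
  long start_time{}; // value-initialized to 0 instead of left indeterminate
};

void pop_checked(std::vector<Frame>& stack) {
  assert(!stack.empty() && "replay stack is empty");
  stack.pop_back();
}
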
@@ -33,8 +33,8 @@ PyObject* THPCppFunction_call(
     return PyErr_Format(PyExc_TypeError, "keyword arguments are not supported");
   }
 
-  int num_inputs = PyTuple_GET_SIZE(args);
-  int num_inputs_required = ((THPCppFunction*)self)->cdata->num_inputs();
+  auto num_inputs = PyTuple_GET_SIZE(args);
+  auto num_inputs_required = ((THPCppFunction*)self)->cdata->num_inputs();
   if (num_inputs != num_inputs_required) {
     return PyErr_Format(
         PyExc_TypeError,
@@ -62,14 +62,14 @@ PyObject* THPCppFunction_call(
   }
   END_HANDLE_TH_ERRORS
 
-  int num_outputs = output.size();
+  auto num_outputs = output.size();
   if (num_outputs == 1) {
     // assume we want to unpack one element tuples for now
     return THPVariable_Wrap(output[0]);
   }
 
-  THPObjectPtr tuple(PyTuple_New(num_outputs));
-  for (int i = 0; i != num_outputs; ++i) {
+  THPObjectPtr tuple(PyTuple_New(static_cast<Py_ssize_t>(num_outputs)));
+  for (size_t i = 0; i != num_outputs; ++i) {
     PyTuple_SET_ITEM(tuple.get(), i, THPVariable_Wrap(output[i]));
   }
   return tuple.release();
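
A minimal sketch (hypothetical helper, plain CPython API) of the size-type pattern in these hunks: tuple sizes and indices in CPython are Py_ssize_t, so a container's size_t length is cast explicitly once rather than narrowing implicitly at each call.

#include <Python.h>
#include <cstddef>

// Builds a tuple of Python ints from a C array of sizes.
static PyObject* sizes_to_tuple(const std::size_t* values, std::size_t count) {
  PyObject* tuple = PyTuple_New(static_cast<Py_ssize_t>(count));
  if (!tuple) {
    return nullptr;
  }
  for (std::size_t i = 0; i < count; ++i) {
    // PyTuple_SET_ITEM steals the reference returned by PyLong_FromSize_t.
    PyTuple_SET_ITEM(
        tuple, static_cast<Py_ssize_t>(i), PyLong_FromSize_t(values[i]));
  }
  return tuple;
}
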
@@ -92,11 +92,11 @@ void PythonEngine::thread_init(
   // runtime is finalizing
   if (!Py_IsInitialized()) {
     no_gil.disarm();
-    // TODO: call disarm rather than leak gil_scoped_acquired once
-    // PyThreadState_Clear can safely be called from finalize NOTE: deploy.cpp
-    // calls `PyInterpreterState_Delete` to destruct PyThreadState, so avoid
-    // use-after-free here.
-    gil.release();
+    // TODO: call disarm once PyThreadState_Clear can safely be called from
+    // finalize NOTE: deploy.cpp calls `PyInterpreterState_Delete` to destruct
+    // PyThreadState, so avoid use-after-free here.
+    auto ptr = gil.release();
+    operator delete(ptr);
   }
 #endif
 }
@@ -192,6 +192,7 @@ PyObject* THPEngine_run_backward(
           args,
           kwargs,
           "OObb|Obb",
+          // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast,-warnings-as-errors)
           const_cast<char**>(accepted_kwargs),
           &tensors,
           &grad_tensors,
@@ -86,7 +86,7 @@ auto PyNode::apply(variable_list&& inputs) -> variable_list {
 
   // Massage a C++ variable_list into a Python arguments tuple
   auto num_inputs = inputs.size();
-  THPObjectPtr pyInputs(PyTuple_New(num_inputs));
+  THPObjectPtr pyInputs(PyTuple_New(static_cast<Py_ssize_t>(num_inputs)));
   if (!pyInputs)
     throw_python_error();
   auto& output_info = py_fn->output_info;
@@ -114,8 +114,8 @@ auto PyNode::apply(variable_list&& inputs) -> variable_list {
   ensure_tuple(r);
 
   auto& is_variable_input = py_fn->is_variable_input;
-  int num_outputs = PyTuple_GET_SIZE(r.get());
-  int num_forward_inputs = is_variable_input.size();
+  auto num_outputs = PyTuple_GET_SIZE(r.get());
+  auto num_forward_inputs = static_cast<Py_ssize_t>(is_variable_input.size());
   // Returning too many results is ok, but only as long as they're all None.
   // Truncate the result tuple in that case.
   if (num_outputs > num_forward_inputs) {
@@ -443,7 +443,7 @@ static void _wrap_outputs(
   // Massage a C++ variable_list into a Python arguments tuple
   // Making sure to introduce the proper None for non-Tensor inputs
   auto num_inputs = self->is_variable_input.size();
-  THPObjectPtr pyInputs(PyTuple_New(num_inputs));
+  THPObjectPtr pyInputs(PyTuple_New(static_cast<Py_ssize_t>(num_inputs)));
   if (!pyInputs)
     throw_python_error();
   int64_t variable_idx = 0;
@@ -508,7 +508,7 @@ static void _wrap_outputs(
       dirty_inputs,
       raw_output_vars,
       cdata_if_executable,
-      std::move(jvp_user_function),
+      jvp_user_function,
       to_save_if_setup_context);
 
   for (const auto i : c10::irange(num_outputs)) {
@@ -1021,7 +1021,7 @@ PyObject* THPFunction_apply(PyObject* cls, PyObject* inputs) {
   HANDLE_TH_ERRORS
 
   // save a local copy of seq_id before it gets incremented
-  int seq_id = at::sequence_number::peek();
+  auto seq_id = at::sequence_number::peek();
   auto info_pair = unpack_input<false>(inputs);
   UnpackedInput& unpacked_input = info_pair.first;
   InputFlags& input_info = info_pair.second;
@@ -1241,8 +1241,8 @@ static PyObject* unpack_saved_variables(
   if (saved_variables.empty())
     return PyTuple_New(0);
 
-  int num_saved = saved_variables.size();
-  THPObjectPtr saved(PyTuple_New(num_saved));
+  auto num_saved = saved_variables.size();
+  THPObjectPtr saved(PyTuple_New(static_cast<Py_ssize_t>(num_saved)));
   if (!saved)
     return nullptr;
   auto saved_for = self->cdata.lock();
@@ -1311,7 +1311,7 @@ PyObject* THPFunction_get_compiled_autograd_symints(
   HANDLE_TH_ERRORS
   auto self = (THPFunction*)_self;
   auto size = self->compiled_autograd_symints.size();
-  PyObject* result = PyTuple_New(size);
+  PyObject* result = PyTuple_New(static_cast<Py_ssize_t>(size));
   if (!result) {
     throw python_error();
   }
@@ -1333,7 +1333,7 @@ PyObject* THPFunction_raw_saved_tensors(THPFunction* self, void* _unused) {
   if (saved_variables.empty())
     return PyTuple_New(0);
   size_t num_saved = saved_variables.size();
-  THPObjectPtr saved(PyTuple_New(num_saved));
+  THPObjectPtr saved(PyTuple_New(static_cast<Py_ssize_t>(num_saved)));
   if (!saved) {
     return nullptr;
   }
@@ -92,7 +92,9 @@ bool _call_hooks(PyObject* dict, PyObject* args) {
 
 } // namespace
 
-PyFunctionTensorPreHook::PyFunctionTensorPreHook(PyObject* dict, int value_idx)
+PyFunctionTensorPreHook::PyFunctionTensorPreHook(
+    PyObject* dict,
+    size_t value_idx)
     : dict(dict), value_idx(value_idx) {
   Py_INCREF(dict);
 }
@@ -174,7 +176,8 @@ void PyFunctionTensorPreHook::compiled_args(CompiledNodeArgs& args) {
   while (PyDict_Next(dict, &pos, &key, &value)) {
     Py_INCREF(value);
     args.add_tensor_pre_hook(
-        c10::SafePyObject(value, getPyInterpreter()), value_idx);
+        c10::SafePyObject(value, getPyInterpreter()),
+        static_cast<int>(value_idx));
   }
 }
 
@@ -225,7 +228,7 @@ auto PyFunctionTensorPostAccGradHooks::operator()(const Variable& tensor)
 
 static PyObject* wrap_variables(const variable_list& c_variables) {
   size_t num_vars = c_variables.size();
-  THPObjectPtr tuple(PyTuple_New(num_vars));
+  THPObjectPtr tuple(PyTuple_New(static_cast<Py_ssize_t>(num_vars)));
   if (!tuple)
     throw python_error();
   for (const auto i : c10::irange(num_vars)) {
@@ -8,12 +8,12 @@ namespace torch {
 namespace autograd {
 
 struct PyFunctionTensorPreHook : public FunctionPreHook {
-  PyFunctionTensorPreHook(PyObject* dict, int value_idx);
+  PyFunctionTensorPreHook(PyObject* dict, size_t value_idx);
   ~PyFunctionTensorPreHook() override;
   variable_list operator()(const variable_list& values) override;
   void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) override;
   PyObject* dict;
-  int value_idx;
+  size_t value_idx;
 };
 
 struct PyFunctionPreHook : public FunctionPreHook {
@@ -32,6 +32,7 @@ static PyObject* THPVariable_pynew(
           args,
           kwds,
           "|ObbOz",
+          // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
           const_cast<char**>(accepted_args),
           &data,
           &requires_grad,
@@ -70,7 +70,7 @@ std::pair<py::object, py::dict> parseIValuesToPyArgsKwargs(
   // right (but ideally, this would just be precomputed in FunctionSchema
   // itself). (NB: minus one in the loop is because we're testing if the
   // *next* argument is kwarg-only before we advance the starting index)
-  int64_t kwarg_only_start = arguments.size();
+  int64_t kwarg_only_start = static_cast<int64_t>(arguments.size());
   for (; kwarg_only_start > 0; kwarg_only_start--) {
     const auto& arg = schema.arguments()[kwarg_only_start - 1];
     if (!arg.kwarg_only()) {
@@ -79,7 +79,7 @@ std::pair<py::object, py::dict> parseIValuesToPyArgsKwargs(
   }
 
   // Find the first positional argument that isn't defaulted
-  auto is_default = [&](int64_t idx) -> bool {
+  auto is_default = [&](size_t idx) -> bool {
     const auto& arg = schema.arguments()[idx];
     if (!arg.default_value().has_value()) {
       return false;
@@ -102,7 +102,7 @@ std::pair<py::object, py::dict> parseIValuesToPyArgsKwargs(
   auto args =
       py::reinterpret_steal<py::object>(PyTuple_New(positional_default_start));
 
-  auto schemaAwareToPyObject = [&](int64_t idx) -> py::object {
+  auto schemaAwareToPyObject = [&](size_t idx) -> py::object {
     const auto& arg = schema.arguments()[idx];
     auto match = [&](c10::TypeKind kind) {
       const auto& t = arg.real_type();
@@ -271,7 +271,7 @@ PyObject* THPVariable_Wrap(at::TensorBase var) {
   c10::optional<PyObject*> mb_obj =
       var.unsafeGetTensorImpl()->pyobj_slot()->check_pyobj(
          getPyInterpreter(), /*ignore_hermetic_tls=*/false);
-  c10::impl::PyInterpreterStatus status;
+  c10::impl::PyInterpreterStatus status{};
   if (mb_obj.has_value()) {
     auto obj = *mb_obj;
     if (obj) {
@@ -1064,15 +1064,14 @@ PyObject* THPVariable_get_names(PyObject* self, void* unused) {
   // The long-term plan is to return a list of (python) torch.Dimname.
   // However, for now, return a list of string.
   const auto& tensor = THPVariable_Unpack(self);
-  size_t size = tensor.dim();
+  auto size = tensor.dim();
   THPObjectPtr tuple(PyTuple_New(size));
   if (!tuple)
     throw python_error();
 
   const auto dimnames = tensor.names();
   for (const auto i : c10::irange(size)) {
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    PyObject* str;
+    PyObject* str = nullptr;
     if (dimnames[i].type() == at::NameType::WILDCARD) {
       // PyTuple_SET_ITEM steals a reference to the object. When the tuple is
       // deallocated, it'll decrement the refcount on Py_None, which is bad.
@@ -226,7 +226,6 @@ void create_cpp_hook(const at::TensorBase& self, bool is_retains_grad_hook) {
   const auto& fn = self.grad_fn();
   std::shared_ptr<hooks_list>& list =
       materialize_autograd_meta(self)->cpp_hooks_list_;
-  // NOLINTNEXTLINE(modernize-make-shared)
   list.reset(new hooks_list());
   std::unique_ptr<FunctionPreHook> hook_ptr{
       new CppFunctionTensorPreHook(list, self.output_nr())};
@@ -732,7 +732,7 @@ inline Variable make_variable_differentiable_view(
 // See NOTE [ Autograd View Variables ] for details.
 // Non-differentiable view. Just share version counter.
 inline Variable make_variable_non_differentiable_view(
-    Variable base,
+    const Variable& base,
     const at::Tensor& data,
     bool allow_tensor_metadata_change = true) {
   if (data.defined()) {
@@ -795,7 +795,7 @@ inline Variable make_variable(
 /// specifying the function in the autograd graph, and what particular input of
 /// that function, this variable is connected to.
 inline Variable make_variable(
-    at::Tensor data,
+    const at::Tensor& data,
     Edge gradient_edge,
     bool allow_tensor_metadata_change = true) {
   if (data.defined()) {
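
A minimal sketch (hypothetical function) of the parameter change in make_variable and make_variable_non_differentiable_view: an argument that is only read, and never moved from, is cheaper to take by const reference than by value.

#include <cstddef>
#include <string>

// was: std::size_t name_length(std::string name)
std::size_t name_length(const std::string& name) {
  return name.size();
}
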
@@ -22,7 +22,7 @@ struct StashTorchDispatchModeGuard {
   ~StashTorchDispatchModeGuard() {
     if (saved_mode_key_ != c10::nullopt) {
       c10::impl::TorchDispatchModeTLS::set_mode(
-          std::move(saved_mode_), saved_mode_key_.value());
+          saved_mode_, saved_mode_key_.value());
     } else {
       c10::impl::TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
           std::move(saved_mode_));