Revert "[Distributed] [2/N] Fix clang-tidy warnings in torch/csrc/distributed/c10d (#122892)"

This reverts commit 0ba16ffd35af3eb56da4892cc5387c5e8ac864bb.

Reverted https://github.com/pytorch/pytorch/pull/122892 on behalf of https://github.com/atalman due to broke cuda tests ([comment](https://github.com/pytorch/pytorch/pull/122892#issuecomment-2037207036))
PyTorch MergeBot
2024-04-04 13:22:22 +00:00
parent 6890333e3d
commit 54801e6fd6
3 changed files with 34 additions and 37 deletions


@@ -51,7 +51,7 @@ class TORCH_API Reducer {
   explicit Reducer(
       std::vector<at::Tensor> params,
       std::vector<std::vector<size_t>> bucket_indices,
-      const std::vector<size_t>& per_bucket_size_limits,
+      std::vector<size_t> per_bucket_size_limits,
       c10::intrusive_ptr<c10d::ProcessGroup> process_group,
       std::vector<bool> expect_sparse_gradients,
       int64_t bucket_bytes_cap,
@@ -303,9 +303,11 @@ class TORCH_API Reducer {
   using GradCallback = std::function<bool(at::Tensor&)>;
 #ifndef _WIN32
   static_assert(
-      std::is_same_v<
+      std::is_same<
           GradCallback,
-          torch::distributed::autograd::DistAutogradContext::GradCallback>);
+          torch::distributed::autograd::DistAutogradContext::GradCallback>::
+          value,
+      "");
 #endif
   void runGradCallbackForVariable(at::Tensor& variable, GradCallback&& cb);