Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
Remove some NOLINT (#146610)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/146610
Approved by: https://github.com/Skylion007, https://github.com/malfet
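As background, a minimal sketch of the clang-tidy suppression comments this commit removes: a trailing // NOLINT silences every check on that line, // NOLINT(check-name) silences one named check, and // NOLINTNEXTLINE(check-name) silences it on the following line instead. The snippet below is illustrative and not taken from the PR.

#include <vector>

int main() {
  // Without the comment, cppcoreguidelines-avoid-magic-numbers would flag 42.
  int magic = 42; // NOLINT(cppcoreguidelines-avoid-magic-numbers)

  // A bare NOLINT suppresses all checks on this line.
  std::vector<int> sizes = {2, 3, 5}; // NOLINT

  // NOLINTNEXTLINE moves the suppression to the next line.
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
  sizes.back() = 7;

  return magic - 42 + sizes.back() - 7; // always 0
}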
@@ -100,7 +100,7 @@ at::Tensor PackedLinearWeight::apply_impl(
   // 2. If the input tensor is {x, batch_size, K}, the output tensor is {x,
   // batch_size, out_channels}.
   std::vector<int64_t> out_sizes = input.sizes().vec();
-  out_sizes.back() = out_channels; // NOLINT
+  out_sizes.back() = out_channels;
   // Allocate output Tensor and a buffer for fbgemmPacked to use
   auto output_tr = at::_empty_affine_quantized(
       out_sizes,
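A quick illustration of the shape logic in this hunk, with made-up values: the output keeps every input dimension except the last, which is overwritten with out_channels.

#include <cstdint>
#include <vector>

int main() {
  // Stand-in for input.sizes().vec() on a {x, batch_size, K} input.
  std::vector<int64_t> out_sizes = {4, 8, 16};
  const int64_t out_channels = 32;
  out_sizes.back() = out_channels; // out_sizes is now {4, 8, 32}
  return 0;
}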
@@ -740,7 +740,7 @@ TEST(VmapTest, TestBatchedTensorExpand) {
 TEST(VmapTest, TestBatchedTensorUnsqueeze) {
   {
     // Basic test
-    auto tensor = at::randn({2, 3, 5}); // NOLINT
+    auto tensor = at::randn({2, 3, 5});
     auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}});

     auto batched_out = batched.unsqueeze(0);
@@ -750,7 +750,7 @@ TEST(VmapTest, TestBatchedTensorUnsqueeze) {
   }
   {
     // Test with multiple levels
-    auto tensor = at::randn({2, 3, 5}); // NOLINT
+    auto tensor = at::randn({2, 3, 5});
     auto batched = makeBatched(tensor, {{0, 0}, {1, 1}});

     auto batched_out = batched.unsqueeze(0);
@@ -760,7 +760,7 @@ TEST(VmapTest, TestBatchedTensorUnsqueeze) {
   }
   {
     // Negative dim
-    auto tensor = at::randn({2, 3, 5}); // NOLINT
+    auto tensor = at::randn({2, 3, 5});
     auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}});

     auto batched_out = batched.unsqueeze(-1);
@@ -773,7 +773,7 @@ TEST(VmapTest, TestBatchedTensorUnsqueeze) {
 TEST(VmapTest, TestBatchedTensorSqueeze) {
   {
     // Basic test
-    auto tensor = at::randn({2, 1, 5}); // NOLINT
+    auto tensor = at::randn({2, 1, 5});
     auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}});

     auto batched_out = batched.squeeze(0);
@@ -783,7 +783,7 @@ TEST(VmapTest, TestBatchedTensorSqueeze) {
   }
   {
     // Test with multiple levels
-    auto tensor = at::randn({2, 3, 1}); // NOLINT
+    auto tensor = at::randn({2, 3, 1});
     auto batched = makeBatched(tensor, {{0, 0}, {1, 1}});

     auto batched_out = batched.squeeze(0);
@@ -793,7 +793,7 @@ TEST(VmapTest, TestBatchedTensorSqueeze) {
   }
   {
     // Negative dim
-    auto tensor = at::randn({2, 3, 1}); // NOLINT
+    auto tensor = at::randn({2, 3, 1});
     auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}});

     auto batched_out = batched.squeeze(-1);
@@ -806,7 +806,7 @@ TEST(VmapTest, TestBatchedTensorSqueeze) {
 TEST(VmapTest, TestBatchedTensorTranspose) {
   {
     // Basic test
-    auto tensor = at::randn({2, 3, 5}); // NOLINT
+    auto tensor = at::randn({2, 3, 5});
     auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}});

     auto batched_out = batched.transpose(0, 1);
@@ -816,7 +816,7 @@ TEST(VmapTest, TestBatchedTensorTranspose) {
   }
   {
     // Test with multiple levels
-    auto tensor = at::randn({2, 3, 5, 7, 11}); // NOLINT
+    auto tensor = at::randn({2, 3, 5, 7, 11});
     auto batched = makeBatched(tensor, {{0, 0}, {1, 1}});

     auto batched_out = batched.transpose(0, 2);
@@ -826,7 +826,7 @@ TEST(VmapTest, TestBatchedTensorTranspose) {
   }
   {
     // Negative dims
-    auto tensor = at::randn({2, 3, 5, 7}); // NOLINT
+    auto tensor = at::randn({2, 3, 5, 7});
     auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}});

     auto batched_out = batched.mT();
@@ -840,7 +840,7 @@ TEST(VmapTest, TestBatchedTensorTranspose) {
 TEST(VmapTest, TestBatchedTensorPermute) {
   {
     // Basic test
-    auto tensor = at::randn({2, 3, 5}); // NOLINT
+    auto tensor = at::randn({2, 3, 5});
     auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}});

     auto batched_out = batched.permute({1, 0});
@@ -850,7 +850,7 @@ TEST(VmapTest, TestBatchedTensorPermute) {
   }
   {
     // Test with multiple levels
-    auto tensor = at::randn({2, 3, 5, 7, 11}); // NOLINT
+    auto tensor = at::randn({2, 3, 5, 7, 11});
     auto batched = makeBatched(tensor, {{0, 0}, {1, 1}});

     auto batched_out = batched.permute({2, 1, 0});
@@ -860,7 +860,7 @@ TEST(VmapTest, TestBatchedTensorPermute) {
   }
   {
     // Negative dims
-    auto tensor = at::randn({2, 3, 5, 7}); // NOLINT
+    auto tensor = at::randn({2, 3, 5, 7});
     auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}});

     auto batched_out = batched.permute({-1, -2, -3});
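Every test hunk above is the same mechanical change: dropping a bare // NOLINT that suppressed warnings on literal tensor shapes such as {2, 3, 5}. For contrast, a hedged sketch of the lint-clean alternative of naming the dimensions (illustrative only; the PR simply removes suppressions it considers unnecessary):

#include <ATen/ATen.h>

void shape_example() {
  // Named constants keep cppcoreguidelines-avoid-magic-numbers quiet
  // without any suppression comment.
  constexpr int64_t kBatch = 2;
  constexpr int64_t kRows = 3;
  constexpr int64_t kCols = 5;
  auto tensor = at::randn({kBatch, kRows, kCols});
  (void)tensor; // unused in this sketch
}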
@@ -194,15 +194,15 @@ PyObject* THPEngine_run_backward(
   unsigned char allow_unreachable = 0;
   unsigned char accumulate_grad =
       0; // Indicate whether to accumulate grad into leaf Tensors or capture
-  constexpr const char* accepted_kwargs[] = {// NOLINT
+  constexpr const char* accepted_kwargs[] = {
       "tensors",
       "grad_tensors",
       "keep_graph",
       "create_graph",
       "inputs",
       "allow_unreachable",
       "accumulate_grad",
       nullptr};
   if (!PyArg_ParseTupleAndKeywords(
           args,
           kwargs,
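For readers unfamiliar with the surrounding call: PyArg_ParseTupleAndKeywords takes a nullptr-terminated array of keyword names like accepted_kwargs above. A minimal, self-contained sketch with a hypothetical greet(name, times=1) extension function (not PyTorch code); the const_cast reflects that older CPython headers declare the parameter as char*[]:

#include <Python.h>

static PyObject* greet(PyObject* /*self*/, PyObject* args, PyObject* kwargs) {
  const char* name = nullptr;
  int times = 1;
  // The keyword-name array must end with nullptr, mirroring accepted_kwargs.
  static const char* kwlist[] = {"name", "times", nullptr};
  if (!PyArg_ParseTupleAndKeywords(
          args,
          kwargs,
          "s|i", // one required string, one optional int
          const_cast<char**>(kwlist),
          &name,
          &times)) {
    return nullptr; // CPython has already set the exception
  }
  for (int i = 0; i < times; ++i) {
    PySys_WriteStdout("hello, %s\n", name);
  }
  Py_RETURN_NONE;
}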
@@ -10,12 +10,10 @@ namespace {
 class GroupRegistry {
  public:
   void register_group(
-      // NOLINTNEXTLINE(performance-unnecessary-value-param)
-      std::string group_name,
+      const std::string& group_name,
       c10::intrusive_ptr<c10d::ProcessGroup> group) {
     std::unique_lock write_lock(lock_);
-    auto [_, inserted] =
-        registry_.try_emplace(std::move(group_name), std::move(group));
+    auto [_, inserted] = registry_.try_emplace(group_name, std::move(group));
     TORCH_CHECK(
         inserted,
         "A process group is already registered under the name",
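The check un-suppressed here, performance-unnecessary-value-param, flags by-value parameters that are never moved or mutated. A hedged sketch of the trade-off using plain standard-library types rather than the c10d ones:

#include <string>
#include <unordered_map>
#include <utility>

static std::unordered_map<std::string, int> registry;

// By value + std::move: cheap for rvalue callers, and moving the parameter
// is what keeps the clang-tidy check quiet, but the name is gone afterwards.
void register_by_value(std::string name, int id) {
  registry.try_emplace(std::move(name), id);
}

// Const reference: try_emplace copies the key internally, the signature never
// triggers the check, and name remains usable after the call, e.g. for the
// TORCH_CHECK error message in the hunk above.
void register_by_ref(const std::string& name, int id) {
  registry.try_emplace(name, id);
}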
@@ -38,7 +38,7 @@ constexpr int kUnsetDivFactor = -1;

 } // namespace

-C10_DEFINE_TYPED_REGISTRY( // NOLINT
+C10_DEFINE_TYPED_REGISTRY(
     TimerRegistry,
     c10::DeviceType,
     Timer,
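C10_DEFINE_TYPED_REGISTRY instantiates a registry mapping a key type (here c10::DeviceType) to factories for a product type (here Timer). As an illustration of the pattern only, not the actual c10 macro expansion, a minimal typed registry might look like this:

#include <functional>
#include <map>
#include <memory>

struct Timer { virtual ~Timer() = default; }; // stand-in product type
struct CpuTimer : Timer {};

// Maps keys to factory functions producing the product type.
template <typename Key, typename T>
class TypedRegistry {
 public:
  using Factory = std::function<std::unique_ptr<T>()>;
  void add(Key key, Factory f) { factories_[key] = std::move(f); }
  std::unique_ptr<T> create(Key key) const {
    auto it = factories_.find(key);
    if (it == factories_.end()) {
      return nullptr;
    }
    return it->second();
  }

 private:
  std::map<Key, Factory> factories_;
};

enum class DeviceType { CPU, CUDA }; // stand-in for c10::DeviceType

int main() {
  TypedRegistry<DeviceType, Timer> timer_registry;
  timer_registry.add(DeviceType::CPU, [] { return std::make_unique<CpuTimer>(); });
  auto timer = timer_registry.create(DeviceType::CPU); // non-null
  return timer ? 0 : 1;
}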