Fix ouput typos (#120870)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/120870
Approved by: https://github.com/clee2000
Sergii Dymchenko
2024-02-29 08:29:10 +00:00
committed by PyTorch MergeBot
parent 14c5ebc8a1
commit 09aefe1502
17 changed files with 31 additions and 31 deletions


@@ -149,7 +149,7 @@ bool SelectSmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(1);
auto& L = Input(2);
auto& S = Input(3);
-// Below is gradient of net w.r.t. avg_loss ("gradOuput"), should be all 1's
+// Below is gradient of net w.r.t. avg_loss ("gradOutput"), should be all 1's
auto& d_avg_loss = Input(4);
auto* d_Y_hat = Output(0, Y_hat.sizes(), at::dtype<float>()); // gradient of net w.r.t. Y_hat ("gradInput")
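
For context on the comment being corrected: these ops take d_avg_loss as the upstream gradient ("gradOutput"), and when avg_loss is the scalar the backward pass starts from, that seed gradient is simply a tensor of ones. A minimal sketch of the math, assuming the beta-parameterized Detectron-style SmoothL1 definition on the residual d = Y_hat - Y (the definition is not part of this diff):

\ell_\beta(d) =
  \begin{cases}
    d^2 / (2\beta)          & \text{if } |d| < \beta \\
    |d| - \beta/2           & \text{otherwise}
  \end{cases}
\qquad
\frac{\partial \ell_\beta}{\partial d} =
  \begin{cases}
    d / \beta               & \text{if } |d| < \beta \\
    \operatorname{sign}(d)  & \text{otherwise}
  \end{cases}

The chain rule then gives the "gradInput" written to Output(0): d_Y_hat = d_avg_loss * \partial\ell_\beta/\partial d, scaled by whatever averaging or normalization the op applies.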


@@ -128,7 +128,7 @@ bool SmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(1);
auto& alpha_in = Input(2);
auto& alpha_out = Input(3);
-auto& d_avg_loss = Input(4); // gradient of net w.r.t. avg_loss ("gradOuput")
+auto& d_avg_loss = Input(4); // gradient of net w.r.t. avg_loss ("gradOutput")
// We intentionally don't compute gradients for Y, alpha_{in,out} since they
// are not needed (can change in the future if desired)
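
The same "gradOutput" comment appears in SmoothL1LossGradientOp. As a rough illustration of the gradient chain the comments describe (a host-side sketch, not the Caffe2 CUDA kernel; smooth_l1_grad, beta, and the division by N are assumptions about the op's conventions, not taken from this diff):

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical helper: derivative of SmoothL1 w.r.t. the residual diff.
float smooth_l1_grad(float diff, float beta) {
  if (std::fabs(diff) < beta) return diff / beta;  // quadratic region
  return diff > 0.0f ? 1.0f : -1.0f;               // linear region
}

int main() {
  std::vector<float> Y_hat = {0.2f, 1.5f, -0.3f};
  std::vector<float> Y     = {0.0f, 1.0f,  0.0f};
  const float beta = 1.0f;
  // Upstream gradient w.r.t. avg_loss ("gradOutput"): all ones when
  // avg_loss is the final scalar the backward pass starts from.
  const float d_avg_loss = 1.0f;
  const float N = static_cast<float>(Y_hat.size());
  for (std::size_t i = 0; i < Y_hat.size(); ++i) {
    // Chain rule: "gradInput" = gradOutput * dLoss/dY_hat (averaged over N).
    const float d_Y_hat = d_avg_loss * smooth_l1_grad(Y_hat[i] - Y[i], beta) / N;
    std::printf("d_Y_hat[%zu] = %f\n", i, d_Y_hat);
  }
  return 0;
}

Compiling and running this prints the three per-element gradients; with d_avg_loss = 1 they are just the SmoothL1 derivatives divided by N.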