Fix ouput typos (#120870)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/120870
Approved by: https://github.com/clee2000
commit 09aefe1502
parent 14c5ebc8a1
committed by PyTorch MergeBot
@@ -149,7 +149,7 @@ bool SelectSmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
   auto& Y = Input(1);
   auto& L = Input(2);
   auto& S = Input(3);
-  // Below is gradient of net w.r.t. avg_loss ("gradOuput"), should be all 1's
+  // Below is gradient of net w.r.t. avg_loss ("gradOutput"), should be all 1's
   auto& d_avg_loss = Input(4);

   auto* d_Y_hat = Output(0, Y_hat.sizes(), at::dtype<float>()); // gradient of net w.r.t. Y_hat ("gradInput")
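For context, the comment's "should be all 1's" reflects the usual backprop seeding convention: the gradient of the network's scalar output with respect to itself is 1, so the "gradOutput" tensor handed to the loss op's gradient kernel is filled with ones. A minimal standalone C++ sketch of that convention (illustrative names only, not the Caffe2 API):

#include <cstdio>
#include <vector>

int main() {
  // Per-element losses; avg_loss = sum(elem_loss) / N.
  std::vector<float> elem_loss = {0.5f, 1.5f, 2.0f, 4.0f};
  const float N = static_cast<float>(elem_loss.size());

  // Seed: d(avg_loss)/d(avg_loss) == 1, i.e. the "gradOutput" is all 1's.
  const float d_avg_loss = 1.0f;

  // Chain rule through the mean: each per-element gradient is 1/N.
  for (std::size_t i = 0; i < elem_loss.size(); ++i) {
    std::printf("d_elem[%zu] = %f\n", i, d_avg_loss / N); // 0.25 each
  }
  return 0;
}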
@@ -128,7 +128,7 @@ bool SmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
   auto& Y = Input(1);
   auto& alpha_in = Input(2);
   auto& alpha_out = Input(3);
-  auto& d_avg_loss = Input(4); // gradient of net w.r.t. avg_loss ("gradOuput")
+  auto& d_avg_loss = Input(4); // gradient of net w.r.t. avg_loss ("gradOutput")
   // We intentially don't compute gradients for Y, alpha_{in,out} since they
   // are not needed (can change in the future if desired)

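As a rough illustration of what the gradient op computes from that "gradOutput": below is a plain C++ CPU sketch of the standard smooth-L1 derivative chained with d_avg_loss and the 1/N mean factor. This is a simplified stand-in, not the actual CUDA kernel; Detectron's op also applies alpha_in/alpha_out scaling, which is omitted here.

#include <cmath>
#include <cstddef>
#include <vector>

// Standard smooth-L1: f(x) = 0.5*x^2 for |x| < 1, |x| - 0.5 otherwise.
// Its derivative is x inside the quadratic region and sign(x) outside.
// 'diff' plays the role of Y_hat - Y; 'd_avg_loss' is the incoming
// "gradOutput" (1.0 when the net output is the loss itself).
std::vector<float> smooth_l1_grad(const std::vector<float>& diff,
                                  float d_avg_loss) {
  const float n = static_cast<float>(diff.size());
  std::vector<float> d_Y_hat(diff.size());
  for (std::size_t i = 0; i < diff.size(); ++i) {
    const float x = diff[i];
    const float g = std::fabs(x) < 1.0f ? x : (x > 0.0f ? 1.0f : -1.0f);
    d_Y_hat[i] = g * d_avg_loss / n; // chain rule through the mean
  }
  return d_Y_hat;
}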