Fix non-existing parameters in docstrings in benchmarks (#91115)

This is a continuation of https://github.com/pytorch/pytorch/pull/90505
Pull Request resolved: https://github.com/pytorch/pytorch/pull/91115
Approved by: https://github.com/clee2000
This commit is contained in:
Sergii Dymchenko
2022-12-20 02:07:28 +00:00
committed by PyTorch MergeBot
parent 99bd8d12e1
commit 30edd39bdc
3 changed files with 3 additions and 3 deletions

View File

@@ -117,7 +117,7 @@ def run_trainer(
extra_args (dict): configurations added by the user
data (list): training samples
rank (int): process number in the world
-        server_rrefs (dict): a dictionary containing server RRefs
+        server_rref (dict): a dictionary containing server RRefs
"""
trainer_class = trainer_map[args.trainer]
if extra_args is not None:

View File

@@ -11,7 +11,7 @@ def basic_ddp_model(self, rank, model, process_group, hook_state, hook):
rank (int): worker rank
model (nn.Module): neural network model
process_group (ProcessGroup): distributed process group
-        HookState (class): class that will be used to keep track of state
+        hook_state (class): class that will be used to keep track of state
during training.
hook (function): ddp communication hook
"""

View File

@@ -47,7 +47,7 @@ def _create_test(bench_op_obj, orig_test_attrs, tags, OperatorTestCase, run_back
bench_op_obj: an object which instantiated from a subclass of
Caffe2BenchmarkBase/TorchBenchmarkBase which includes tensor
creation and operator execution.
-        test_attrs: a dictionary includes test configs.
+        orig_test_attrs: a dictionary includes test configs.
tags: a attribute in test config to filter inputs
OperatorTestCase: a named tuple to save the metadata of an test
run_backward: a bool parameter indicating backward path