UFMT formatting on test/autograd test/ao test/cpp test/backends (#123369)
Partially addresses #123062

Ran lintrunner on:
- test/_test_bazel.py
- test/ao
- test/autograd
- test/backends
- test/benchmark_utils
- test/conftest.py
- test/bottleneck_test
- test/cpp

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123369
Approved by: https://github.com/huydhn
Commit: f71e368969
Parent: de7edeea25
Committed by: PyTorch MergeBot
@@ -1,6 +1,7 @@
 """Script to generate baseline values from PyTorch initialization algorithms"""
+
 import sys
 
 import torch
 
 HEADER = """
@@ -19,13 +20,13 @@ INITIALIZERS = {
     "Xavier_Uniform": lambda w: torch.nn.init.xavier_uniform(w),
     "Xavier_Normal": lambda w: torch.nn.init.xavier_normal(w),
     "Kaiming_Normal": lambda w: torch.nn.init.kaiming_normal(w),
-    "Kaiming_Uniform": lambda w: torch.nn.init.kaiming_uniform(w)
+    "Kaiming_Uniform": lambda w: torch.nn.init.kaiming_uniform(w),
 }
 
 
 def emit(initializer_parameter_map):
     # Don't write generated with an @ in front, else this file is recognized as generated.
-    print("// @{} from {}".format('generated', __file__))
+    print("// @{} from {}".format("generated", __file__))
     print(HEADER)
     for initializer_name, weights in initializer_parameter_map.items():
         print(PARAMETERS.format(initializer_name))
@@ -63,10 +64,11 @@ def run(initializer):
 def main():
     initializer_parameter_map = {}
     for initializer in INITIALIZERS.keys():
-        sys.stderr.write(f'Evaluating {initializer} ...\n')
+        sys.stderr.write(f"Evaluating {initializer} ...\n")
         initializer_parameter_map[initializer] = run(initializer)
 
     emit(initializer_parameter_map)
 
+
 if __name__ == "__main__":
     main()
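The file above follows a simple table-driven pattern: a registry of named initializer lambdas is evaluated to produce reference values, which are then emitted as a C++ header. A rough, self-contained sketch of that pattern follows; the seed, tensor shape, and `run` body are illustrative assumptions, not the script's actual values. (Note that modern PyTorch prefers the in-place `xavier_uniform_`-style names over the deprecated aliases the script uses.)

import sys

import torch

INITIALIZERS = {
    "Xavier_Uniform": lambda w: torch.nn.init.xavier_uniform_(w),
    "Kaiming_Uniform": lambda w: torch.nn.init.kaiming_uniform_(w),
}


def run(initializer):
    torch.manual_seed(0)        # assumed: fixed seed so baselines are reproducible
    weight = torch.empty(3, 3)  # assumed shape; the real script initializes module weights
    INITIALIZERS[initializer](weight)
    return weight.flatten().tolist()


def main():
    for name in INITIALIZERS.keys():
        sys.stderr.write(f"Evaluating {name} ...\n")
        print(name, run(name))


if __name__ == "__main__":
    main()

The diff below applies the same UFMT pass to a second generator script, which produces analogous baselines for optimizers.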
@@ -21,27 +21,43 @@ FOOTER = "} // namespace expected_parameters"
 PARAMETERS = "inline std::vector<std::vector<torch::Tensor>> {}() {{"
 
 OPTIMIZERS = {
-    "LBFGS" : lambda p: torch.optim.LBFGS(p, 1.0),
-    "LBFGS_with_line_search" : lambda p: torch.optim.LBFGS(p, 1.0, line_search_fn="strong_wolfe"),
+    "LBFGS": lambda p: torch.optim.LBFGS(p, 1.0),
+    "LBFGS_with_line_search": lambda p: torch.optim.LBFGS(
+        p, 1.0, line_search_fn="strong_wolfe"
+    ),
     "Adam": lambda p: torch.optim.Adam(p, 1.0),
     "Adam_with_weight_decay": lambda p: torch.optim.Adam(p, 1.0, weight_decay=1e-2),
-    "Adam_with_weight_decay_and_amsgrad": lambda p: torch.optim.Adam(p, 1.0, weight_decay=1e-6, amsgrad=True),
+    "Adam_with_weight_decay_and_amsgrad": lambda p: torch.optim.Adam(
+        p, 1.0, weight_decay=1e-6, amsgrad=True
+    ),
     "AdamW": lambda p: torch.optim.AdamW(p, 1.0),
     "AdamW_without_weight_decay": lambda p: torch.optim.AdamW(p, 1.0, weight_decay=0),
     "AdamW_with_amsgrad": lambda p: torch.optim.AdamW(p, 1.0, amsgrad=True),
     "Adagrad": lambda p: torch.optim.Adagrad(p, 1.0),
-    "Adagrad_with_weight_decay": lambda p: torch.optim.Adagrad(p, 1.0, weight_decay=1e-2),
-    "Adagrad_with_weight_decay_and_lr_decay": lambda p: torch.optim.Adagrad(p, 1.0, weight_decay=1e-6, lr_decay=1e-3),
+    "Adagrad_with_weight_decay": lambda p: torch.optim.Adagrad(
+        p, 1.0, weight_decay=1e-2
+    ),
+    "Adagrad_with_weight_decay_and_lr_decay": lambda p: torch.optim.Adagrad(
+        p, 1.0, weight_decay=1e-6, lr_decay=1e-3
+    ),
     "RMSprop": lambda p: torch.optim.RMSprop(p, 0.1),
-    "RMSprop_with_weight_decay": lambda p: torch.optim.RMSprop(p, 0.1, weight_decay=1e-2),
-    "RMSprop_with_weight_decay_and_centered": lambda p: torch.optim.RMSprop(p, 0.1, weight_decay=1e-6, centered=True),
-    "RMSprop_with_weight_decay_and_centered_and_momentum":
-        lambda p: torch.optim.RMSprop(p, 0.1, weight_decay=1e-6, centered=True, momentum=0.9),
+    "RMSprop_with_weight_decay": lambda p: torch.optim.RMSprop(
+        p, 0.1, weight_decay=1e-2
+    ),
+    "RMSprop_with_weight_decay_and_centered": lambda p: torch.optim.RMSprop(
+        p, 0.1, weight_decay=1e-6, centered=True
+    ),
+    "RMSprop_with_weight_decay_and_centered_and_momentum": lambda p: torch.optim.RMSprop(
+        p, 0.1, weight_decay=1e-6, centered=True, momentum=0.9
+    ),
     "SGD": lambda p: torch.optim.SGD(p, 0.1),
     "SGD_with_weight_decay": lambda p: torch.optim.SGD(p, 0.1, weight_decay=1e-2),
-    "SGD_with_weight_decay_and_momentum": lambda p: torch.optim.SGD(p, 0.1, momentum=0.9, weight_decay=1e-2),
-    "SGD_with_weight_decay_and_nesterov_momentum":
-        lambda p: torch.optim.SGD(p, 0.1, momentum=0.9, weight_decay=1e-6, nesterov=True),
+    "SGD_with_weight_decay_and_momentum": lambda p: torch.optim.SGD(
+        p, 0.1, momentum=0.9, weight_decay=1e-2
+    ),
+    "SGD_with_weight_decay_and_nesterov_momentum": lambda p: torch.optim.SGD(
+        p, 0.1, momentum=0.9, weight_decay=1e-6, nesterov=True
+    ),
 }
 
 
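These changes are mechanical: black splits any call that would overflow its line limit (88 characters by default) into one argument block per line with a magic trailing comma, and normalizes the spacing in `"key" :` to `"key":`. Each map entry is a factory that builds an optimizer over a parameter list; a minimal usage sketch, with an assumed stand-in model:

import torch

model = torch.nn.Linear(4, 4)  # assumed stand-in for the test model
factory = lambda p: torch.optim.SGD(p, 0.1, momentum=0.9, weight_decay=1e-2)
optimizer = factory(model.parameters())
print(type(optimizer).__name__)  # -> SGD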
@@ -75,11 +91,11 @@ def run(optimizer_name, iterations, sample_every):
         loss.backward()
 
         def closure():
-            return torch.tensor([10.])
+            return torch.tensor([10.0])
 
         optimizer.step(closure)
 
         if i % sample_every == 0:
             values.append(
                 [p.clone().flatten().data.numpy() for p in model.parameters()]
             )
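Aside: `optimizer.step(closure)` is the re-evaluation protocol that optimizers such as LBFGS rely on. The closure is expected to recompute and return the loss (stubbed as a constant in the test above). A minimal self-contained sketch of real usage, with an assumed toy model and data:

import torch

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.LBFGS(model.parameters(), lr=1.0)
x, y = torch.randn(8, 1), torch.randn(8, 1)


def closure():
    # LBFGS may evaluate the objective several times per step,
    # so the closure must clear gradients and recompute the loss.
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss


optimizer.step(closure)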
@@ -89,7 +105,7 @@ def run(optimizer_name, iterations, sample_every):
 
 def emit(optimizer_parameter_map):
     # Don't write generated with an @ in front, else this file is recognized as generated.
-    print("// @{} from {}".format('generated', __file__))
+    print("// @{} from {}".format("generated", __file__))
     print(HEADER)
     for optimizer_name, parameters in optimizer_parameter_map.items():
         print(PARAMETERS.format(optimizer_name))
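The odd-looking `.format` call is deliberate, as the comment in the hunk says: tooling treats a file containing the literal `@generated` marker as machine-generated, so the script assembles the marker at runtime to keep its own source from being flagged. A minimal sketch of the idea; the `is_generated` check is an illustrative assumption, not PyTorch's actual tooling:

MARKER = "@" + "generated"  # assembled at runtime, so this file never contains the literal token


def is_generated(source_text):
    # Hypothetical check of the kind linters and review tools apply.
    return MARKER in source_text


print("// {} from {}".format(MARKER, __file__))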
@@ -115,7 +131,7 @@ def main():
 
     optimizer_parameter_map = {}
     for optimizer in OPTIMIZERS.keys():
-        sys.stderr.write(f'Evaluating {optimizer} ...\n')
+        sys.stderr.write(f"Evaluating {optimizer} ...\n")
         optimizer_parameter_map[optimizer] = run(
             optimizer, options.iterations, options.sample_every
         )