Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
Enable UFMT on test/onnx_caffe2, test/optim, test/package and test/profiler (#123901)

Part of: #123062

Ran lintrunner on:
- `test/onnx_caffe2`
- `test/optim`
- `test/package`
- `test/profiler`

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123901
Approved by: https://github.com/ezyang
committed by PyTorch MergeBot
parent 63dcb5b0f2
commit 8ce29f1416
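For context, UFMT is PyTorch's combined usort (import sorting) + black (formatting) linter, driven by lintrunner. A commit like this one is typically regenerated rather than hand-edited; a minimal sketch of the workflow, assuming a pytorch checkout (exact flags may vary by lintrunner version, and the path arguments here are illustrative):

```sh
# One-time setup: install lintrunner and let it fetch the linters
# declared in .lintrunner.toml (including UFMT).
pip install lintrunner
lintrunner init

# Run only the UFMT linter; -a applies the suggested patches in place.
lintrunner -a --take UFMT test/optim/test_lrscheduler.py test/optim/test_optim.py
```

The formatter can also be invoked directly, e.g. `ufmt format test/optim`, which sorts imports with usort and then formats with black; that is why most hunks below are import reorderings and line-length reflows with no behavior change.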
.lintrunner.toml:
@@ -1256,65 +1256,6 @@ exclude_patterns = [
    'test/nn/test_parametrization.py',
    'test/nn/test_pooling.py',
    'test/nn/test_pruning.py',
-    'test/onnx_caffe2/export_onnx_tests_filter.py',
-    'test/onnx_caffe2/export_onnx_tests_generator.py',
-    'test/onnx_caffe2/test_caffe2_common.py',
-    'test/onnx_caffe2/test_custom_ops.py',
-    'test/onnx_caffe2/test_pytorch_helper.py',
-    'test/onnx_caffe2/test_pytorch_onnx_caffe2.py',
-    'test/onnx_caffe2/test_pytorch_onnx_caffe2_quantized.py',
-    'test/onnx_caffe2/test_verify.py',
-    'test/optim/test_lrscheduler.py',
-    'test/optim/test_optim.py',
-    'test/optim/test_swa_utils.py',
-    'test/package/__init__.py',
-    'test/package/common.py',
-    'test/package/generate_bc_packages.py',
-    'test/package/module_a.py',
-    'test/package/module_a_remapped_path.py',
-    'test/package/package_a/__init__.py',
-    'test/package/package_a/fake_interface.py',
-    'test/package/package_a/fake_script_class.py',
-    'test/package/package_a/long_name.py',
-    'test/package/package_a/std_sys_module_hacks.py',
-    'test/package/package_a/subpackage.py',
-    'test/package/package_a/test_all_leaf_modules_tracer.py',
-    'test/package/package_a/test_module.py',
-    'test/package/package_a/test_nn_module.py',
-    'test/package/package_a/use_dunder_package.py',
-    'test/package/package_a/use_torch_package_importer.py',
-    'test/package/package_b/__init__.py',
-    'test/package/package_b/subpackage_0/__init__.py',
-    'test/package/package_b/subpackage_0/subsubpackage_0/__init__.py',
-    'test/package/package_b/subpackage_1.py',
-    'test/package/package_b/subpackage_2.py',
-    'test/package/package_c/__init__.py',
-    'test/package/package_c/test_module.py',
-    'test/package/package_d/__init__.py',
-    'test/package/package_d/imports_directly.py',
-    'test/package/package_d/imports_indirectly.py',
-    'test/package/package_d/subpackage_0/__init__.py',
-    'test/package/package_d/subpackage_0/subsubpackage_0/__init__.py',
-    'test/package/test_analyze.py',
-    'test/package/test_dependency_api.py',
-    'test/package/test_dependency_hooks.py',
-    'test/package/test_digraph.py',
-    'test/package/test_directory_reader.py',
-    'test/package/test_glob_group.py',
-    'test/package/test_importer.py',
-    'test/package/test_load_bc_packages.py',
-    'test/package/test_mangling.py',
-    'test/package/test_misc.py',
-    'test/package/test_model.py',
-    'test/package/test_package_fx.py',
-    'test/package/test_package_script.py',
-    'test/package/test_repackage.py',
-    'test/package/test_resources.py',
-    'test/package/test_save_load.py',
-    'test/package/test_trace_dep/__init__.py',
-    'test/profiler/test_memory_profiler.py',
-    'test/profiler/test_profiler.py',
-    'test/profiler/test_profiler_tree.py',
    'test/quantization/__init__.py',
    'test/quantization/core/__init__.py',
    'test/quantization/core/experimental/apot_fx_graph_mode_ptq.py',
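This first hunk is what makes the rest of the commit take effect: the UFMT linter matches every Python file and skips anything listed under `exclude_patterns`, so deleting an entry opts that file into formatting. A rough, abbreviated sketch of the surrounding block in .lintrunner.toml (the real config lists more patterns and the full command lines):

```toml
[[linter]]
code = 'UFMT'
include_patterns = ['**/*.py']
exclude_patterns = [
    # Files not yet reformatted stay listed here.
    'test/nn/test_pooling.py',
]
command = ['python3', 'tools/linter/adapters/ufmt_linter.py', '--', '@{{PATHSFILE}}']
is_formatter = true
```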
test/onnx_caffe2/test_pytorch_onnx_caffe2.py:
@@ -3071,7 +3071,6 @@ def setup_rnn_tests():
        variable_length_opts,
        dropout_opts,
    ):
-
        for base, name, extra_kwargs in (
            ("elman", "elman_relu", {"nonlinearity": "relu"}),
            ("elman", "elman_tanh", {"nonlinearity": "tanh"}),
test/optim/test_lrscheduler.py:
@@ -1,40 +1,40 @@
# Owner(s): ["module: optimizer", "module: LrScheduler" ]
-import types
-import warnings
import math
import pickle
+import types
+import warnings
from functools import partial

import torch
import torch.nn.functional as F
from torch.nn import Parameter
-from torch.optim import Adam, SGD, Rprop
+from torch.optim import Adam, Rprop, SGD
from torch.optim.lr_scheduler import (
+    ChainedScheduler,
+    ConstantLR,
+    CosineAnnealingLR,
+    CosineAnnealingWarmRestarts,
+    CyclicLR,
+    EPOCH_DEPRECATION_WARNING,
+    ExponentialLR,
    LambdaLR,
+    LinearLR,
+    LRScheduler,
    MultiplicativeLR,
+    MultiStepLR,
+    OneCycleLR,
+    PolynomialLR,
+    ReduceLROnPlateau,
    SequentialLR,
    StepLR,
-    MultiStepLR,
-    ConstantLR,
-    LinearLR,
-    ExponentialLR,
-    CosineAnnealingLR,
-    ReduceLROnPlateau,
-    LRScheduler,
-    CyclicLR,
-    CosineAnnealingWarmRestarts,
-    OneCycleLR,
-    ChainedScheduler,
-    PolynomialLR,
-    EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import SWALR
from torch.testing._internal.common_utils import (
-    TestCase,
+    instantiate_parametrized_tests,
    load_tests,
    parametrize,
-    instantiate_parametrized_tests,
-    skipIfTorchDynamo
+    skipIfTorchDynamo,
+    TestCase,
)

# load_tests from common_utils is used to automatically filter tests for

@@ -52,7 +52,6 @@ class TestLRScheduler(TestCase):
    def forward(self, x):
        return self.conv2(F.relu(self.conv1(x)))

-
class LambdaLRTestObject:
    def __init__(self, value):
        self.value = value

@@ -65,6 +64,7 @@ class TestLRScheduler(TestCase):
            return self.__dict__ == other.__dict__
        else:
            return False
+
    exact_dtype = True

    def setUp(self):

@@ -112,7 +112,9 @@ class TestLRScheduler(TestCase):
        with self.assertRaises(TypeError):
            scheduler = MultiStepLR(optimizer, gamma=1, milestones=[10, 20])

-    @skipIfTorchDynamo("Torchdynamo keeps references to optim in the guards and the stack of the graph break frames")
+    @skipIfTorchDynamo(
+        "Torchdynamo keeps references to optim in the guards and the stack of the graph break frames"
+    )
    def test_no_cyclic_references(self):
        import gc

@@ -132,7 +134,9 @@ class TestLRScheduler(TestCase):
            gc.collect(), 0, msg="Optimizer should be garbage-collected on __del__"
        )

-    @skipIfTorchDynamo("Torchdynamo keeps references to optim in the guards and the stack of the graph break frames")
+    @skipIfTorchDynamo(
+        "Torchdynamo keeps references to optim in the guards and the stack of the graph break frames"
+    )
    def test_no_cyclic_references_in_step(self):
        import gc
        import weakref

@@ -347,9 +351,7 @@ class TestLRScheduler(TestCase):
        from torch.nn import Parameter

        epochs = 10
-        optimizer = SGD(
-            [Parameter(torch.randn(2, 2, requires_grad=True))], 0.1
-        )
+        optimizer = SGD([Parameter(torch.randn(2, 2, requires_grad=True))], 0.1)
        targets = [[0.1] * 3 + [0.01] * 3 + [0.001] * 3 + [0.0001]]
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 3, gamma=0.1)
        self._test_get_last_lr(scheduler, targets, epochs)

@@ -692,7 +694,9 @@ class TestLRScheduler(TestCase):
        scheduler = ReduceLROnPlateau(
            self.opt,
        )
-        self.assertEqual(scheduler.get_last_lr(), [0.5 for param_group in self.opt.param_groups])
+        self.assertEqual(
+            scheduler.get_last_lr(), [0.5 for param_group in self.opt.param_groups]
+        )

    def test_sequentiallr1(self):
        epochs = 19

@@ -1555,7 +1559,9 @@ class TestLRScheduler(TestCase):
        def scale_fn(_):
            return 0.5

-        scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
+        scheduler = CyclicLR(
+            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
+        )
        state = scheduler.state_dict()
        self.assertNotIn("_scale_fn_ref", state)
        self.assertIs(state["_scale_fn_custom"], None)

@@ -1571,7 +1577,9 @@ class TestLRScheduler(TestCase):

        scale_fn = ScaleFn()

-        scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
+        scheduler = CyclicLR(
+            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
+        )
        state = scheduler.state_dict()
        self.assertNotIn("_scale_fn_ref", state)
        self.assertEqual(state["_scale_fn_custom"], scale_fn.__dict__)

@@ -1581,11 +1589,17 @@ class TestLRScheduler(TestCase):
        adam_opt = Adam(self.net.parameters())

        # Case 1: Built-in mode
-        scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, mode="triangular2")
-        restored_scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
+        scheduler = CyclicLR(
+            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, mode="triangular2"
+        )
+        restored_scheduler = CyclicLR(
+            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False
+        )
        restored_scheduler.load_state_dict(scheduler.state_dict())
        self.assertTrue(restored_scheduler.mode == scheduler.mode == "triangular2")
-        self.assertIsNotNone(restored_scheduler._scale_fn_ref) and self.assertIsNotNone(scheduler._scale_fn_ref)
+        self.assertIsNotNone(restored_scheduler._scale_fn_ref) and self.assertIsNotNone(
+            scheduler._scale_fn_ref
+        )
        self.assertIs(restored_scheduler._scale_fn_custom, None)
        self.assertIs(scheduler._scale_fn_custom, None)

@@ -1593,8 +1607,12 @@ class TestLRScheduler(TestCase):
        def scale_fn(_):
            return 0.5

-        scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
-        restored_scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
+        scheduler = CyclicLR(
+            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
+        )
+        restored_scheduler = CyclicLR(
+            adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn
+        )
        restored_scheduler.load_state_dict(scheduler.state_dict())
        self.assertIs(scheduler._scale_fn_custom, scale_fn)
        self.assertIs(restored_scheduler._scale_fn_custom, scale_fn)

@@ -2253,30 +2271,40 @@ class TestLRScheduler(TestCase):

            self.assertLessEqual(last_lr, max_lr)


-    @parametrize("LRClass", [
-        partial(LambdaLR, lr_lambda=lambda e: e // 10),
-        partial(MultiplicativeLR, lr_lambda=lambda: 0.95),
-        partial(StepLR, step_size=30),
-        partial(MultiStepLR, milestones=[30, 80]),
-        ConstantLR,
-        LinearLR,
-        partial(ExponentialLR, gamma=0.9),
-        lambda opt, **kwargs: SequentialLR(
-            opt, schedulers=[ConstantLR(opt), ConstantLR(opt)], milestones=[2], **kwargs),
-        PolynomialLR,
-        partial(CosineAnnealingLR, T_max=10),
-        ReduceLROnPlateau,
-        partial(CyclicLR, base_lr=0.01, max_lr=0.1),
-        partial(CosineAnnealingWarmRestarts, T_0=20),
-        partial(OneCycleLR, max_lr=0.01, total_steps=10),
-    ])
+    @parametrize(
+        "LRClass",
+        [
+            partial(LambdaLR, lr_lambda=lambda e: e // 10),
+            partial(MultiplicativeLR, lr_lambda=lambda: 0.95),
+            partial(StepLR, step_size=30),
+            partial(MultiStepLR, milestones=[30, 80]),
+            ConstantLR,
+            LinearLR,
+            partial(ExponentialLR, gamma=0.9),
+            lambda opt, **kwargs: SequentialLR(
+                opt,
+                schedulers=[ConstantLR(opt), ConstantLR(opt)],
+                milestones=[2],
+                **kwargs,
+            ),
+            PolynomialLR,
+            partial(CosineAnnealingLR, T_max=10),
+            ReduceLROnPlateau,
+            partial(CyclicLR, base_lr=0.01, max_lr=0.1),
+            partial(CosineAnnealingWarmRestarts, T_0=20),
+            partial(OneCycleLR, max_lr=0.01, total_steps=10),
+        ],
+    )
    def test_lr_scheduler_verbose_deprecation_warning(self, LRClass):
        """Check that a deprecating warning with verbose parameter."""
-        with self.assertWarnsOnceRegex(UserWarning, "The verbose parameter is deprecated"):
+        with self.assertWarnsOnceRegex(
+            UserWarning, "The verbose parameter is deprecated"
+        ):
            LRClass(self.opt, verbose=True)

-        with self.assertWarnsOnceRegex(UserWarning, "The verbose parameter is deprecated"):
+        with self.assertWarnsOnceRegex(
+            UserWarning, "The verbose parameter is deprecated"
+        ):
            LRClass(self.opt, verbose=False)

        # No warning is raised when verbose is the default value.
test/optim/test_optim.py:
@@ -2,17 +2,26 @@

import torch
from torch.optim import (
-    Adadelta, Adagrad, Adam, Adamax, AdamW, ASGD, NAdam, RAdam, RMSprop, Rprop, SGD
+    Adadelta,
+    Adagrad,
+    Adam,
+    Adamax,
+    AdamW,
+    ASGD,
+    NAdam,
+    RAdam,
+    RMSprop,
+    Rprop,
+    SGD,
)
from torch.testing._internal.common_utils import (
-    TestCase,
-    load_tests,
    gradcheck,
-    skipIfTorchDynamo
+    load_tests,
+    skipIfTorchDynamo,
+    TestCase,
)


-
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

@@ -41,7 +50,6 @@ def _diff_fn(p, grad, opt_differentiable_state, opt_class, kwargs, *ignored):

@skipIfTorchDynamo("Differentiable optimizers not supported")
class TestDifferentiableOptimizer(TestCase):
-
    def test_sgd(self):
        p = torch.rand(10, requires_grad=True, dtype=torch.float64)
        grad = torch.rand(10, requires_grad=True, dtype=torch.float64)

@@ -59,7 +67,6 @@ class TestDifferentiableOptimizer(TestCase):
            ),
        )

-
    def test_adam(self):
        state = {}
        p = torch.rand(10, requires_grad=True, dtype=torch.float64)

@@ -85,7 +92,6 @@ class TestDifferentiableOptimizer(TestCase):
            ),
        )

-
    def test_rmsprop(self):
        state = {}
        p = torch.rand(10, requires_grad=True, dtype=torch.float64)

@@ -118,7 +124,6 @@ class TestDifferentiableOptimizer(TestCase):
            ),
        )

-
    def test_adadelta(self):
        state = {}
        p = torch.rand(10, requires_grad=True, dtype=torch.float64)

@@ -140,7 +145,6 @@ class TestDifferentiableOptimizer(TestCase):
            ),
        )

-
    def test_adagrad(self):
        state = {}
        p = torch.rand(10, requires_grad=True, dtype=torch.float64)

@@ -161,7 +165,6 @@ class TestDifferentiableOptimizer(TestCase):
            ),
        )

-
    def test_adamax(self):
        state = {}
        p = torch.rand(10, requires_grad=True, dtype=torch.float64)

@@ -183,9 +186,10 @@ class TestDifferentiableOptimizer(TestCase):
            ),
        )

-
-    @skipIfTorchDynamo("The inplace mu update fails with dynamo, "
-                       "since this is only happening when differentiable is enabled, skipping for now")
+    @skipIfTorchDynamo(
+        "The inplace mu update fails with dynamo, "
+        "since this is only happening when differentiable is enabled, skipping for now"
+    )
    def test_asgd(self):
        state = {}
        p = torch.rand(10, requires_grad=True, dtype=torch.float64)

@@ -319,7 +323,12 @@ class TestDifferentiableOptimizer(TestCase):
                grad,
                state,
                RAdam,
-                {"lr": 0.9, "weight_decay": 0.1, "decoupled_weight_decay": True, "differentiable": True},
+                {
+                    "lr": 0.9,
+                    "weight_decay": 0.1,
+                    "decoupled_weight_decay": True,
+                    "differentiable": True,
+                },
                *state.values(),
            ),
        )
test/optim/test_swa_utils.py:
@@ -4,12 +4,17 @@ import itertools
import pickle

import torch
-from torch.optim.swa_utils import AveragedModel, update_bn, get_swa_multi_avg_fn, get_ema_multi_avg_fn
+from torch.optim.swa_utils import (
+    AveragedModel,
+    get_ema_multi_avg_fn,
+    get_swa_multi_avg_fn,
+    update_bn,
+)
from torch.testing._internal.common_utils import (
-    TestCase,
+    instantiate_parametrized_tests,
    load_tests,
    parametrize,
-    instantiate_parametrized_tests,
+    TestCase,
)

# load_tests from common_utils is used to automatically filter tests for

@@ -75,9 +80,13 @@ class TestSWAUtils(TestCase):
    def _run_averaged_steps(self, dnn, swa_device, ema):
        ema_decay = 0.999
        if ema:
-            averaged_dnn = AveragedModel(dnn, device=swa_device, multi_avg_fn=get_ema_multi_avg_fn(ema_decay))
+            averaged_dnn = AveragedModel(
+                dnn, device=swa_device, multi_avg_fn=get_ema_multi_avg_fn(ema_decay)
+            )
        else:
-            averaged_dnn = AveragedModel(dnn, device=swa_device, multi_avg_fn=get_swa_multi_avg_fn())
+            averaged_dnn = AveragedModel(
+                dnn, device=swa_device, multi_avg_fn=get_swa_multi_avg_fn()
+            )

        averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]

@@ -86,7 +95,11 @@ class TestSWAUtils(TestCase):
        for p, p_avg in zip(dnn.parameters(), averaged_params):
            p.detach().add_(torch.randn_like(p))
            if ema:
-                p_avg += p.detach() * ema_decay ** (n_updates - i - 1) * ((1 - ema_decay) if i > 0 else 1.0)
+                p_avg += (
+                    p.detach()
+                    * ema_decay ** (n_updates - i - 1)
+                    * ((1 - ema_decay) if i > 0 else 1.0)
+                )
            else:
                p_avg += p.detach() / n_updates
        averaged_dnn.update_parameters(dnn)

@@ -157,8 +170,11 @@ class TestSWAUtils(TestCase):
        decay = 0.9

        if use_multi_avg_fn:
-            averaged_dnn = AveragedModel(dnn, multi_avg_fn=get_ema_multi_avg_fn(decay), use_buffers=use_buffers)
+            averaged_dnn = AveragedModel(
+                dnn, multi_avg_fn=get_ema_multi_avg_fn(decay), use_buffers=use_buffers
+            )
        else:

            def avg_fn(p_avg, p, n_avg):
                return decay * p_avg + (1 - decay) * p

@@ -206,7 +222,6 @@ class TestSWAUtils(TestCase):
        self.assertEqual(b_avg, b_swa)

    def _test_update_bn(self, dnn, dl_x, dl_xy, cuda):
-
        preactivation_sum = torch.zeros(dnn.n_features)
        preactivation_squared_sum = torch.zeros(dnn.n_features)
        if cuda:
test/package/package_a/long_name.py:
@@ -1,6 +1,9 @@
def add_function(d):
-    d.append(function_with_a_long_name_256charsplus_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
+    # noqa: B950
+    d.append(
+        function_with_a_long_name_256charsplus_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+    )


def function_with_a_long_name_256charsplus_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(): # noqa: B950
    return 1337
test/package/package_d/imports_directly.py:
@@ -4,7 +4,6 @@ from .subpackage_0.subsubpackage_0 import important_string


class ImportsDirectlyFromSubSubPackage(torch.nn.Module):
-
    key = important_string

    def forward(self, inp):
test/package/package_d/imports_indirectly.py:
@@ -4,7 +4,6 @@ from .subpackage_0 import important_string


class ImportsIndirectlyFromSubPackage(torch.nn.Module):
-
    key = important_string

    def forward(self, inp):
test/package/test_directory_reader.py:
@@ -44,7 +44,10 @@ class DirectoryReaderTest(PackageTestCase):
    """Tests use of DirectoryReader as accessor for opened packages."""

    @skipIfNoTorchVision
-    @skipIf(True, "Does not work with latest TorchVision, see https://github.com/pytorch/pytorch/issues/81115")
+    @skipIf(
+        True,
+        "Does not work with latest TorchVision, see https://github.com/pytorch/pytorch/issues/81115",
+    )
    def test_loading_pickle(self):
        """
        Test basic saving and loading of modules and pickles from a DirectoryReader.
test/package/test_load_bc_packages.py:
@@ -32,7 +32,6 @@ class TestLoadBCPackages(PackageTestCase):
        "Tests that use temporary files are disabled in fbcode",
    )
    def test_load_bc_packages_torchscript_module(self):
-
        """Tests for backwards compatible torchscript module"""
        importer2 = PackageImporter(f"{packaging_directory}/test_torchscript_module.pt")
        loaded2 = importer2.load_pickle("torchscript_module", "torchscript_module.pkl")
test/package/test_misc.py:
@@ -11,7 +11,12 @@ from unittest import skipIf

from torch.package import is_from_package, PackageExporter, PackageImporter
from torch.package.package_exporter import PackagingError
-from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE, run_tests, skipIfTorchDynamo
+from torch.testing._internal.common_utils import (
+    IS_FBCODE,
+    IS_SANDCASTLE,
+    run_tests,
+    skipIfTorchDynamo,
+)

try:
    from .common import PackageTestCase

@@ -118,7 +123,9 @@ class TestMisc(PackageTestCase):
            def get_filename(self, name):
                result = super().get_filename(name)
                if name == "module_a":
-                    return os.path.join(os.path.dirname(result), "module_a_remapped_path.py")
+                    return os.path.join(
+                        os.path.dirname(result), "module_a_remapped_path.py"
+                    )
                else:
                    return result

@@ -139,7 +146,9 @@ class TestMisc(PackageTestCase):
                if spec is not None:
                    break
                assert spec is not None and isinstance(spec.loader, SourceFileLoader)
-                spec.loader = LoaderThatRemapsModuleA(spec.loader.name, spec.loader.path)
+                spec.loader = LoaderThatRemapsModuleA(
+                    spec.loader.name, spec.loader.path
+                )
                return spec

        sys.meta_path.insert(0, FinderThatRemapsModuleA())

@@ -154,7 +163,6 @@ class TestMisc(PackageTestCase):
            he.intern("**")
            he.save_module(module_a.__name__)

-
        buffer.seek(0)
        hi = PackageImporter(buffer)
        self.assertTrue("remapped_path" in hi.get_source("module_a"))
test/package/test_model.py:
@@ -23,7 +23,10 @@ except ImportError:
    from common import PackageTestCase


-@skipIf(True, "Does not work with recent torchvision, see https://github.com/pytorch/pytorch/issues/81115")
+@skipIf(
+    True,
+    "Does not work with recent torchvision, see https://github.com/pytorch/pytorch/issues/81115",
+)
@skipIfNoTorchVision
class ModelTest(PackageTestCase):
    """End-to-end tests packaging an entire model."""

@@ -88,7 +91,6 @@ class ModelTest(PackageTestCase):

    @skipIfNoTorchVision
    def test_model_save(self):
-
        # This example shows how you might package a model
        # so that the creator of the model has flexibility about
        # how they want to save it but the 'server' can always
test/package/test_package_fx.py:
@@ -22,6 +22,7 @@ torch.fx.wrap("len")
# Do it twice to make sure it doesn't affect anything
torch.fx.wrap("len")

+
class TestPackageFX(PackageTestCase):
    """Tests for compatibility with FX."""

@@ -186,6 +187,5 @@ class TestPackageFX(PackageTestCase):
        self.assertEqual(loaded_traced(input), traced(input))


-
if __name__ == "__main__":
    run_tests()
test/package/test_save_load.py:
@@ -163,7 +163,9 @@ class TestSaveLoad(PackageTestCase):
        buffer = BytesIO()
        with PackageExporter(buffer) as exporter:
            exporter.intern("**")
-            exporter.save_pickle("container", "container.pkl", container, pickle_protocol=4)
+            exporter.save_pickle(
+                "container", "container.pkl", container, pickle_protocol=4
+            )

        buffer.seek(0)
        importer = PackageImporter(buffer)
test/profiler/test_memory_profiler.py:
@@ -103,7 +103,6 @@ class TestIdentifyGradients(TestCase):
        grad_tensor: torch.Tensor,
        parameter: Optional[torch.Tensor] = None,
    ) -> None:
-
        # This is not an exhaustive check, but for the purpose of unit testing
        # it is sufficient.
        def key_matches_tensor(key, tensor) -> bool:

@@ -219,7 +218,6 @@ class TestIdentifyGradients(TestCase):
        check(cold_start=False)

    def _test_extract_gradients_from_optimizer(self, set_to_none: bool) -> None:
-
        x = torch.ones((1,))
        w0 = torch.ones((1,), requires_grad=True)
        w1 = torch.ones((1,), requires_grad=True)

@@ -844,14 +842,19 @@ class TestMemoryProfilerE2E(TestCase):
            if key.storage.allocation_id == max(ids | {-1})
        }

-    def _run_and_check_parameters_and_gradients(self, inner_fn, model, grads_none: bool = False):
-
+    def _run_and_check_parameters_and_gradients(
+        self, inner_fn, model, grads_none: bool = False
+    ):
        with profile() as prof:
            inner_fn()

        memory_profile = prof._memory_profile()

-        def assert_category(t: torch.Tensor, category: _memory_profiler.Category, should_be_none: bool = False):
+        def assert_category(
+            t: torch.Tensor,
+            category: _memory_profiler.Category,
+            should_be_none: bool = False,
+        ):
            if should_be_none:
                assert t is None, "tensor should be None but is not."
                return

@@ -940,7 +943,9 @@ class TestMemoryProfilerE2E(TestCase):
        # If we profile the first step then gradients will not have been
        # created when we call `model.forward`, so if we don't call `.backward`
        # then gradients are never created.
-        self._run_and_check_parameters_and_gradients(inner_fn=fwd_only, model=model, grads_none=True)
+        self._run_and_check_parameters_and_gradients(
+            inner_fn=fwd_only, model=model, grads_none=True
+        )

        # On the first step we must rely on `AccumulateGrad`, since gradients
        # did not exist when `model.forward` was called.

@@ -1461,7 +1466,6 @@ class TestMemoryProfilerE2E(TestCase):
                return f"{size / 1024:3.1f} kB"
            return f"{size // 1024} kB"

-
        # We generate sequential IDs for Tensors; however platforms vary
        # slightly in the exact computation executed. If this results in
        # tensor creation the IDs will be shifted and the unit test will fail.

@@ -1477,7 +1481,6 @@ class TestMemoryProfilerE2E(TestCase):
            f"{action.name.lower():<25} {format_action(action, key, version):<25} "
            f"{id_for_testing(key):>3}(v{version}) {format_size(size):>15}"
            for _, action, (key, version), size in prof._memory_profile().timeline
-
            # We generally don't care about tiny allocations during memory
            # profiling and they add a lot of noise to the unit test.
            if size > 1024

@@ -1547,7 +1550,8 @@ class TestMemoryProfilerE2E(TestCase):
            destroy ??? 29(v1) 1024 kB
            destroy GRADIENT 16(v0) 128 kB
            destroy GRADIENT 17(v0) 2 kB
-            destroy GRADIENT 13(v0) 1024 kB""")
+            destroy GRADIENT 13(v0) 1024 kB""",
+        )

    def test_memory_timeline_no_id(self) -> None:
        # On CPU the default behavior is to simply forward to malloc. That

@@ -1594,7 +1598,9 @@ class TestMemoryProfilerE2E(TestCase):
            if not torch.cuda.is_available():
                expected = expected[2:]
            for event in expected:
-                self.assertTrue(event in actual, f"event: {event} was not found in actual.")
+                self.assertTrue(
+                    event in actual, f"event: {event} was not found in actual."
+                )
        else:
            self.assertEqual(
                actual,
test/profiler/test_profiler.py: file diff suppressed because it is too large.
test/profiler/test_profiler_tree.py:
@@ -12,7 +12,13 @@ import expecttest
import torch
from torch._C._profiler import _ExtraFields_PyCall, _ExtraFields_PyCCall
from torch.testing._internal.common_utils import (
-    TestCase, run_tests, IS_WINDOWS, TEST_WITH_CROSSREF, IS_ARM64, skipIfTorchDynamo)
+    IS_ARM64,
+    IS_WINDOWS,
+    run_tests,
+    skipIfTorchDynamo,
+    TEST_WITH_CROSSREF,
+    TestCase,
+)
from torch.utils._pytree import tree_map

# These functions can vary from based on platform and build (e.g. with CUDA)

@@ -28,7 +34,6 @@ PRUNE_FUNCTIONS = {
    "torch/profiler/profiler.py(...): _transit_action": KEEP_ELLIPSES,
    "<built-in method __exit__ of torch._C.DisableTorchFunctionSubclass object at 0xXXXXXXXXXXXX>": PRUNE_ALL,
    "cudaStreamIsCapturing": PRUNE_ALL,
-
    # These show up only on CUDA, prune them so the CUDA and CPU expected results can be the same
    "cudaGetDeviceCount": PRUNE_ALL,
    "cudaGetDeviceProperties_v2": PRUNE_ALL,

@@ -46,14 +51,12 @@ ALLOW_CUDA_FAILURE = (torch.version.hip is not None) or IS_WINDOWS


class TorchFunctionTensor(torch.Tensor):
-
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        return super().__torch_function__(func, types, args, kwargs)


class TorchDispatchTensor(torch.Tensor):
-
    @staticmethod
    def __new__(cls, elem):
        t = torch.Tensor._make_subclass(cls, elem, elem.requires_grad)

@@ -62,7 +65,6 @@ class TorchDispatchTensor(torch.Tensor):

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
-
        def unwrap(x):
            return x.elem if isinstance(x, TorchDispatchTensor) else x

@@ -76,7 +78,6 @@ class TorchDispatchTensor(torch.Tensor):


class ProfilerTree:
-
    @staticmethod
    def test(f):
        """Mark unit test that will be using ProfilerTree to test traces.

@@ -99,11 +100,11 @@ class ProfilerTree:
                return out
            finally:
                delattr(self, "tree_replicate")

        return begin_unit_test_marker

    @classmethod
    def format(cls, profiler, indent: int = 0):
-
        def flatten(nodes, depth=0, out=None):
            if out is None:
                out = []

@@ -140,10 +141,19 @@ class ProfilerTree:
        if flat_nodes and flat_nodes[-1][1] == "hipDeviceSynchronize":
            flat_nodes = flat_nodes[:-1]

-        min_depth = min([d + 1 for d, name in flat_nodes if "begin_unit_test_marker" in name] or [0])
+        min_depth = min(
+            [d + 1 for d, name in flat_nodes if "begin_unit_test_marker" in name] or [0]
+        )
        return textwrap.indent(
-            "\n".join([f"{' ' * (d - min_depth)}{name.rstrip()}" for d, name in flat_nodes if d >= min_depth]),
-            " " * indent)
+            "\n".join(
+                [
+                    f"{' ' * (d - min_depth)}{name.rstrip()}"
+                    for d, name in flat_nodes
+                    if d >= min_depth
+                ]
+            ),
+            " " * indent,
+        )

    @staticmethod
    def fmt_name(name: str) -> str:

@@ -172,18 +182,15 @@ class ProfilerTree:
            "void at::native::reduce_kernel",
            "void at::native::vectorized_elementwise_kernel",
            "void at::native::unrolled_elementwise_kernel",
-
            r"void [a-zA-Z0-9]+_kernel", # Nvidia kernels.
        ):
            name = re.sub(
                rf"{kernel_pattern}<.+>\(.+\)$",
                f"{kernel_pattern.replace('[a-zA-Z0-9]+', '...')}<...>(...)",
-                name)
+                name,
+            )

-        return re.sub(
-            "object at 0x[0-9a-fA-F]+>",
-            "object at 0xXXXXXXXXXXXX>",
-            name)
+        return re.sub("object at 0x[0-9a-fA-F]+>", "object at 0xXXXXXXXXXXXX>", name)

    @classmethod
    def validate_node(cls, node):

@@ -205,6 +212,7 @@ class ProfilerTree:
            caller_name = to_string(extra_fields.caller)
            assert parent_name == caller_name, f"{parent_name} vs. {caller_name}"

+
@unittest.skipIf(IS_ARM64, "Not working on ARM")
class TestProfilerTree(TestCase):
    def assertTreesMatch(self, actual: str, expected: str, allow_failure: bool = False):

@@ -228,7 +236,9 @@ class TestProfilerTree(TestCase):
        self.maxDiff = None

        replicate = getattr(self, "tree_replicate", None)
-        self.assertIsNotNone(replicate, "Please annotate test with `@ProfilerTree.test`")
+        self.assertIsNotNone(
+            replicate, "Please annotate test with `@ProfilerTree.test`"
+        )

        # The profiler should produce deterministic results and should return
        # to a clean state after each run. As a result, only the first

@@ -299,7 +309,7 @@ class TestProfilerTree(TestCase):
            autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
            torch::autograd::AccumulateGrad
            aten::detach
-            detach"""
+            detach""",
        )

    # TODO: Add logic for CUDA version of test

@@ -313,7 +323,9 @@ class TestProfilerTree(TestCase):

        # Check that we correctly handle the case when a user
        # annotation does not call `__exit__`.
-        _ = torch.autograd.profiler.record_function("Second Annotation").__enter__()
+        _ = torch.autograd.profiler.record_function(
+            "Second Annotation"
+        ).__enter__()

        y = x + 1
        with torch.autograd.profiler.record_function("Third Annotation"):

@@ -347,7 +359,7 @@ class TestProfilerTree(TestCase):
            torch::autograd::AccumulateGrad
            aten::new_empty_strided
            aten::empty_strided
-            aten::copy_"""
+            aten::copy_""",
        )

    # TODO: Add logic for CUDA version of test

@@ -421,10 +433,12 @@ class TestProfilerTree(TestCase):
            torch::autograd::AccumulateGrad
            aten::detach
            detach
-            [memory]"""
+            [memory]""",
        )

-    @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+    @unittest.skipIf(
+        TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+    )
    @ProfilerTree.test
    def test_profiler_experimental_tree_with_memory_and_stack(self):
        t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)

@@ -519,11 +533,13 @@ class TestProfilerTree(TestCase):
            [memory]
            torch/profiler/profiler.py(...): __exit__
            torch/profiler/profiler.py(...): stop
-            ..."""
+            ...""",
        )

    @skipIfTorchDynamo("too slow")
-    @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+    @unittest.skipIf(
+        TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+    )
    @ProfilerTree.test
    def test_profiler_experimental_tree_with_stack_and_modules(self):
        class MyModule(torch.nn.Module):

@@ -647,10 +663,12 @@ class TestProfilerTree(TestCase):
            aten::clamp_min
            torch/profiler/profiler.py(...): __exit__
            torch/profiler/profiler.py(...): stop
-            ..."""
+            ...""",
        )

-    @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+    @unittest.skipIf(
+        TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+    )
    @ProfilerTree.test
    def test_profiler_experimental_tree_with_stack_and_torch_function(self):
        x = TorchFunctionTensor(torch.ones((1,)))

@@ -686,10 +704,12 @@ class TestProfilerTree(TestCase):
            <built-in function isinstance>
            torch/profiler/profiler.py(...): __exit__
            torch/profiler/profiler.py(...): stop
-            ..."""
+            ...""",
        )

-    @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+    @unittest.skipIf(
+        TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+    )
    @ProfilerTree.test
    def test_profiler_experimental_tree_with_stack_and_torch_dispatch(self):
        x = TorchDispatchTensor(torch.ones((1,)))

@@ -717,7 +737,8 @@ class TestProfilerTree(TestCase):
            ...
            torch/profiler/profiler.py(...): __exit__
            torch/profiler/profiler.py(...): stop
-            ...""")
+            ...""",
+        )

    @unittest.skip("https://github.com/pytorch/pytorch/issues/83606")
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")

@@ -875,7 +896,9 @@ class TestProfilerTree(TestCase):
        )

    @unittest.skip("https://github.com/pytorch/pytorch/issues/83606")
-    @unittest.skipIf(TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite.")
+    @unittest.skipIf(
+        TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
+    )
    @unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
    @ProfilerTree.test
    def test_profiler_experimental_tree_cuda_detailed(self):

@@ -1071,5 +1094,5 @@ class TestProfilerTree(TestCase):
        )


-if __name__ == '__main__':
+if __name__ == "__main__":
    run_tests()