From 498a7808ffb9921634c5e0d3eb6191b45ba1bbd1 Mon Sep 17 00:00:00 2001 From: Tom Ritchford Date: Wed, 11 Dec 2024 14:00:52 +0000 Subject: [PATCH] Fix unused Python variables outside torch/ and test/ (#136359) Pull Request resolved: https://github.com/pytorch/pytorch/pull/136359 Approved by: https://github.com/albanD --- .github/scripts/filter_test_configs.py | 2 +- .github/scripts/runner_determinator.py | 2 +- .github/scripts/test_trymerge.py | 2 +- .github/scripts/trymerge.py | 3 +-- .github/workflows/_runner-determinator.yml | 2 +- benchmarks/distributed/rpc/rl/coordinator.py | 1 - benchmarks/dynamo/check_accuracy.py | 2 +- benchmarks/dynamo/check_graph_breaks.py | 2 +- benchmarks/dynamo/common.py | 13 +++++-------- benchmarks/dynamo/dist_util.py | 2 +- benchmarks/dynamo/runner.py | 2 +- benchmarks/dynamo/torchbench.py | 1 - benchmarks/fastrnns/bench.py | 2 +- benchmarks/fastrnns/factory.py | 4 ++-- benchmarks/fastrnns/test_bench.py | 2 +- .../framework_overhead_benchmark.py | 2 +- .../functional_autograd_benchmark.py | 4 ++-- benchmarks/fuser/run_benchmarks.py | 1 - benchmarks/gpt_fast/mixtral_moe_quantize.py | 1 - benchmarks/gpt_fast/quantize.py | 1 - benchmarks/instruction_counts/core/utils.py | 2 +- benchmarks/nested/nested_bmm_bench.py | 4 ++-- benchmarks/operator_benchmark/benchmark_core.py | 3 +-- benchmarks/operator_benchmark/pt/qrnn_test.py | 2 -- benchmarks/profiler_benchmark/profiler_bench.py | 2 +- benchmarks/serialization/simple_measurement.py | 4 ++-- benchmarks/sparse/dlmc/utils.py | 4 ---- benchmarks/sparse/triton_ops.py | 2 +- benchmarks/tensorexpr/elementwise.py | 1 - .../better_transformer_vs_mha_functional.py | 2 +- benchmarks/transformer/score_mod.py | 2 -- benchmarks/transformer/sdp.py | 2 +- functorch/dim/delayed_mul_tensor.py | 1 - functorch/dim/reference.py | 2 -- functorch/examples/dp_cifar10/cifar10_transforms.py | 4 ---- .../maml_omniglot/maml-omniglot-transforms.py | 2 -- tools/autograd/gen_inplace_or_view_type.py | 1 - tools/autograd/gen_python_functions.py | 2 -- tools/autograd/gen_variable_type.py | 4 ---- tools/jit/gen_unboxing.py | 2 +- tools/setup_helpers/cmake.py | 2 +- tools/stats/monitor.py | 6 +++--- .../heuristics/mentioned_in_pr.py | 2 +- tools/testing/test_selections.py | 2 +- torchgen/_autoheuristic/train_decision.py | 4 ---- torchgen/_autoheuristic/train_regression.py | 1 - torchgen/gen_aoti_c_shim.py | 3 --- torchgen/gen_functionalization_type.py | 2 +- torchgen/operator_versions/gen_mobile_upgraders.py | 4 ---- 49 files changed, 39 insertions(+), 86 deletions(-) diff --git a/.github/scripts/filter_test_configs.py b/.github/scripts/filter_test_configs.py index 12c577c7732b..476eeb3699a8 100755 --- a/.github/scripts/filter_test_configs.py +++ b/.github/scripts/filter_test_configs.py @@ -332,7 +332,7 @@ def process_jobs( # The job name from github is in the PLATFORM / JOB (CONFIG) format, so breaking # it into its two components first current_platform, _ = (n.strip() for n in job_name.split(JOB_NAME_SEP, 1) if n) - except ValueError as error: + except ValueError: warnings.warn(f"Invalid job name {job_name}, returning") return test_matrix diff --git a/.github/scripts/runner_determinator.py b/.github/scripts/runner_determinator.py index 4e208114b642..96ea30fd1f24 100644 --- a/.github/scripts/runner_determinator.py +++ b/.github/scripts/runner_determinator.py @@ -258,7 +258,7 @@ def load_yaml(yaml_text: str) -> Any: try: data = yaml.safe_load(yaml_text) return data - except yaml.YAMLError as exc: + except yaml.YAMLError: log.exception("Error 
loading YAML") raise diff --git a/.github/scripts/test_trymerge.py b/.github/scripts/test_trymerge.py index a89c1778132b..3bbf701cb5f5 100755 --- a/.github/scripts/test_trymerge.py +++ b/.github/scripts/test_trymerge.py @@ -898,7 +898,7 @@ class TestBypassFailures(TestCase): repo = DummyGitRepo() # Check that failure is classified as flaky but still raises exception with warnings.catch_warnings(record=True) as w, self.assertRaises(RuntimeError): - rule = find_matching_merge_rule(pr, repo) + find_matching_merge_rule(pr, repo) self.assertEqual(len(w), 1) self.assertIn( "1 checks failed but were likely due flakiness or broken trunk", diff --git a/.github/scripts/trymerge.py b/.github/scripts/trymerge.py index b73a6d080592..ca18ddcf4712 100755 --- a/.github/scripts/trymerge.py +++ b/.github/scripts/trymerge.py @@ -1747,7 +1747,7 @@ def get_classifications( try: print(f"From Dr.CI checkrun summary: {drci_summary}") drci_classifications = json.loads(str(drci_summary)) - except json.JSONDecodeError as error: + except json.JSONDecodeError: warn("Invalid Dr.CI checkrun summary") drci_classifications = {} @@ -1918,7 +1918,6 @@ def do_revert_prs( dry_run: bool = False, ) -> None: # Prepare and push revert commits - commit_shas: List[str] = [] for commit_sha, pr in shas_and_prs: revert_msg = f"\nReverted {pr.get_pr_url()} on behalf of {prefix_with_github_url(author_login)}" revert_msg += extra_msg diff --git a/.github/workflows/_runner-determinator.yml b/.github/workflows/_runner-determinator.yml index ca24c7c4a7c2..36f5a06da5d6 100644 --- a/.github/workflows/_runner-determinator.yml +++ b/.github/workflows/_runner-determinator.yml @@ -326,7 +326,7 @@ jobs: try: data = yaml.safe_load(yaml_text) return data - except yaml.YAMLError as exc: + except yaml.YAMLError: log.exception("Error loading YAML") raise diff --git a/benchmarks/distributed/rpc/rl/coordinator.py b/benchmarks/distributed/rpc/rl/coordinator.py index 8dff633d2e18..18c3abb86359 100644 --- a/benchmarks/distributed/rpc/rl/coordinator.py +++ b/benchmarks/distributed/rpc/rl/coordinator.py @@ -72,7 +72,6 @@ class CoordinatorBase: print(f"Episode {ep} - ", end="") n_steps = episode_steps - agent_start_time = time.time() futs = [] for ob_rref in self.ob_rrefs: diff --git a/benchmarks/dynamo/check_accuracy.py b/benchmarks/dynamo/check_accuracy.py index 54279471295a..359289219dcb 100644 --- a/benchmarks/dynamo/check_accuracy.py +++ b/benchmarks/dynamo/check_accuracy.py @@ -19,7 +19,7 @@ flaky_models = { def get_field(csv, model_name: str, field: str): try: return csv.loc[csv["name"] == model_name][field].item() - except Exception as e: + except Exception: return None diff --git a/benchmarks/dynamo/check_graph_breaks.py b/benchmarks/dynamo/check_graph_breaks.py index 6f379c8ac935..c5279bb6bcf3 100644 --- a/benchmarks/dynamo/check_graph_breaks.py +++ b/benchmarks/dynamo/check_graph_breaks.py @@ -9,7 +9,7 @@ import pandas as pd def get_field(csv, model_name: str, field: str): try: return csv.loc[csv["name"] == model_name][field].item() - except Exception as e: + except Exception: return None diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py index 00e0351a2331..3388c364937c 100644 --- a/benchmarks/dynamo/common.py +++ b/benchmarks/dynamo/common.py @@ -671,7 +671,7 @@ def print_summary_table(data, print_dataframe=False): col.ljust(width), f"gmean={gmean(cdata):.2f}x mean={cdata.mean():.3f}x", ) - except Exception as e: + except Exception: pass @@ -3018,7 +3018,7 @@ class BenchmarkRunner: ) ): is_same = False - except Exception as e: 
+ except Exception: # Sometimes torch.allclose may throw RuntimeError is_same = False @@ -3110,7 +3110,7 @@ class BenchmarkRunner: tol=tolerance, ): is_same = False - except Exception as e: + except Exception: # Sometimes torch.allclose may throw RuntimeError is_same = False @@ -3157,7 +3157,7 @@ class BenchmarkRunner: self.init_optimizer(name, current_device, model.parameters()) optimized_model_iter_fn = optimize_ctx(self.run_n_iterations) new_result = optimized_model_iter_fn(model, example_inputs) - except Exception as e: + except Exception: log.exception("") print( "TorchDynamo optimized model failed to run because of following error" @@ -3542,7 +3542,7 @@ class BenchmarkRunner: try: shutil.move("repro.py", f"{repro_dir}/{name}_repro.py") - except OSError as e: + except OSError: logging.error("Could not find repro script for model %s", name) else: logging.info( @@ -4369,9 +4369,6 @@ def run(runner, args, original_dir=None): # Set translation validation on by default on CI accuracy runs. torch.fx.experimental._config.translation_validation = True - ci = functools.partial( - CI, args.backend, training=args.training, dynamic=args.dynamic_shapes - ) if args.ddp: assert args.training, "DDP benchmark requires --training mode" torch._dynamo.config.optimize_ddp = args.optimize_ddp_mode diff --git a/benchmarks/dynamo/dist_util.py b/benchmarks/dynamo/dist_util.py index c1036bbb1e4e..2994c0681c77 100644 --- a/benchmarks/dynamo/dist_util.py +++ b/benchmarks/dynamo/dist_util.py @@ -90,7 +90,7 @@ def model_iter_fn(model, example_inputs, collect_outputs=False): def get_model(args): if args.torchbench_model: - old_cwd = setup_torchbench_cwd() + setup_torchbench_cwd() module = importlib.import_module( f"torchbenchmark.models.{args.torchbench_model}" ) diff --git a/benchmarks/dynamo/runner.py b/benchmarks/dynamo/runner.py index 2b13a4b5c530..3e1ab1a4b3b9 100755 --- a/benchmarks/dynamo/runner.py +++ b/benchmarks/dynamo/runner.py @@ -1451,7 +1451,7 @@ class DashboardUpdater: RegressionDetector(self.args).generate_comment() try: RegressionTracker(self.args).diff() - except Exception as e: + except Exception: logging.exception("") with open(f"{self.args.output_dir}/gh_regression.txt", "w") as gh_fh: gh_fh.write("") diff --git a/benchmarks/dynamo/torchbench.py b/benchmarks/dynamo/torchbench.py index 94b6ca9f57ad..c127b00bac73 100755 --- a/benchmarks/dynamo/torchbench.py +++ b/benchmarks/dynamo/torchbench.py @@ -236,7 +236,6 @@ class TorchBenchmarkRunner(BenchmarkRunner): ) is_training = self.args.training use_eval_mode = self.args.use_eval_mode - dynamic_shapes = self.args.dynamic_shapes candidates = [ f"torchbenchmark.models.{model_name}", f"torchbenchmark.canary_models.{model_name}", diff --git a/benchmarks/fastrnns/bench.py b/benchmarks/fastrnns/bench.py index fc18c89fa95d..23a3d81c909c 100644 --- a/benchmarks/fastrnns/bench.py +++ b/benchmarks/fastrnns/bench.py @@ -205,7 +205,7 @@ def bench(rnn_runners, group_name, print_json=False, sep=" ", **params): result_with_no_info = result._replace(info_fwd="None", info_bwd="None") print_stderr(pretty_print(result_with_no_info, sep=sep)) results[name] = result - except Exception as e: + except Exception: if not print_json: raise diff --git a/benchmarks/fastrnns/factory.py b/benchmarks/fastrnns/factory.py index dfc813e7c02c..32bb3eec504e 100644 --- a/benchmarks/fastrnns/factory.py +++ b/benchmarks/fastrnns/factory.py @@ -338,8 +338,8 @@ def layernorm_pytorch_lstm_creator(**kwargs): seq_len = len(input.unbind(0)) hy, cy = new_hidden for i in range(seq_len): - 
ln_i_output = ln_i(ln_input1) - ln_h_output = ln_h(ln_input1) + ln_i(ln_input1) + ln_h(ln_input1) cy = ln_c(cy) return out, (hy, cy) diff --git a/benchmarks/fastrnns/test_bench.py b/benchmarks/fastrnns/test_bench.py index fe5bd310d8cc..65074512cfb8 100644 --- a/benchmarks/fastrnns/test_bench.py +++ b/benchmarks/fastrnns/test_bench.py @@ -40,7 +40,7 @@ def cuda_sync(func, *args, **kwargs): class TestBenchNetwork: # See 'modeldef' fixture, which provides the things to benchmark def test_forward(self, modeldef, benchmark): - forward_output = benchmark(cuda_sync, modeldef.forward, *modeldef.inputs) + benchmark(cuda_sync, modeldef.forward, *modeldef.inputs) def test_backward(self, modeldef, benchmark): backward_input = modeldef.forward(*modeldef.inputs) diff --git a/benchmarks/framework_overhead_benchmark/framework_overhead_benchmark.py b/benchmarks/framework_overhead_benchmark/framework_overhead_benchmark.py index 4a3638e56288..7c82d7ab30f7 100644 --- a/benchmarks/framework_overhead_benchmark/framework_overhead_benchmark.py +++ b/benchmarks/framework_overhead_benchmark/framework_overhead_benchmark.py @@ -25,7 +25,7 @@ SUPPORTED_OPS = {"add_op"} def parse_op_args(op): - op_list = op.split(",") + op_list = op.split(",") # noqa: F841 def print_results(result): diff --git a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py b/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py index a78e4d6816e9..3eb2d1ff7b39 100644 --- a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py +++ b/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py @@ -190,9 +190,9 @@ def run_once(model: Callable, inp: InputsType, task: str, v: VType, **kwargs) -> func = get_task_func(task) if v is not None: - res = func(model, inp, v=v, strict=True) + func(model, inp, v=v, strict=True) else: - res = func(model, inp, strict=True) + func(model, inp, strict=True) def run_once_functorch( diff --git a/benchmarks/fuser/run_benchmarks.py b/benchmarks/fuser/run_benchmarks.py index 64b517e19611..481906bbc5f1 100644 --- a/benchmarks/fuser/run_benchmarks.py +++ b/benchmarks/fuser/run_benchmarks.py @@ -284,7 +284,6 @@ def run_benchmarks(operators, shapes): shapes = [globals()[k] for k in shapes.split(",")] print("fuser,device,operator,shape,time") - results = [] for shape, operator in itertools.product(shapes, operators): nargs = len(inspect.signature(operator).parameters) args = shape() diff --git a/benchmarks/gpt_fast/mixtral_moe_quantize.py b/benchmarks/gpt_fast/mixtral_moe_quantize.py index 232245156090..50ffd61bdb83 100644 --- a/benchmarks/gpt_fast/mixtral_moe_quantize.py +++ b/benchmarks/gpt_fast/mixtral_moe_quantize.py @@ -132,7 +132,6 @@ class WeightOnlyInt8Linear(torch.nn.Module): target_dtype=None, ) -> None: assert target_dtype is not None - factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.in_features = in_features self.out_features = out_features diff --git a/benchmarks/gpt_fast/quantize.py b/benchmarks/gpt_fast/quantize.py index 89f16ad931d4..524c7072b2a4 100644 --- a/benchmarks/gpt_fast/quantize.py +++ b/benchmarks/gpt_fast/quantize.py @@ -93,7 +93,6 @@ class WeightOnlyInt8Linear(torch.nn.Module): device=None, dtype=None, ) -> None: - factory_kwargs = {"device": device, "dtype": dtype} super().__init__() self.in_features = in_features self.out_features = out_features diff --git a/benchmarks/instruction_counts/core/utils.py b/benchmarks/instruction_counts/core/utils.py index dbb1cd655af5..d654f4c8a82e 100644 --- 
a/benchmarks/instruction_counts/core/utils.py +++ b/benchmarks/instruction_counts/core/utils.py @@ -74,7 +74,7 @@ def parse_stmts(stmts: str) -> Tuple[str, str]: assert len(lines) >= 3, f"Invalid string:\n{stmts}" column_header_pattern = r"^Python\s{35}\| C\+\+(\s*)$" - signature_pattern = r"^: f\((.*)\)( -> (.+))?\s*$" + signature_pattern = r"^: f\((.*)\)( -> (.+))?\s*$" # noqa: F841 separation_pattern = r"^[-]{40} | [-]{40}$" code_pattern = r"^(.{40}) \|($| (.*)$)" diff --git a/benchmarks/nested/nested_bmm_bench.py b/benchmarks/nested/nested_bmm_bench.py index 27083354ee64..5bd09d0a4ea3 100644 --- a/benchmarks/nested/nested_bmm_bench.py +++ b/benchmarks/nested/nested_bmm_bench.py @@ -6,14 +6,14 @@ import torch def bench(nt_a, nt_b, niter): # Warmup - nt_c = nt_a.bmm(nt_b) + nt_a.bmm(nt_b) torch.cuda.synchronize() start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) start_event.record() for iter in range(niter): - nt_c = nt_a.bmm(nt_b) + nt_a.bmm(nt_b) end_event.record() torch.cuda.synchronize() runtime = (start_event.elapsed_time(end_event)) / niter diff --git a/benchmarks/operator_benchmark/benchmark_core.py b/benchmarks/operator_benchmark/benchmark_core.py index bc340ae17f67..4c11c512051b 100644 --- a/benchmarks/operator_benchmark/benchmark_core.py +++ b/benchmarks/operator_benchmark/benchmark_core.py @@ -111,10 +111,9 @@ def _build_test( if tags is None: raise ValueError("Missing tags in configs") - input_config = str(test_attrs)[1:-1].replace("'", "") + op = bench_op() assert op is not None, "Can't create test" - tensor_error_info = None # op_name_function is a dictionary which has op_name and op_function. # an example of op_name_function is: # {'op_name' : 'abs', 'op_function' : torch.abs} diff --git a/benchmarks/operator_benchmark/pt/qrnn_test.py b/benchmarks/operator_benchmark/pt/qrnn_test.py index 5c0ef809acb7..2326e3dc3a8d 100644 --- a/benchmarks/operator_benchmark/pt/qrnn_test.py +++ b/benchmarks/operator_benchmark/pt/qrnn_test.py @@ -31,8 +31,6 @@ class LSTMBenchmark(op_bench.TorchBenchmarkBase): # The quantized.dynamic.LSTM has a bug. That's why we create a regular # LSTM, and quantize it later. See issue #31192. 
- scale = 1.0 / 256 - zero_point = 0 cell_nn = nn.LSTM( input_size=I, hidden_size=H, diff --git a/benchmarks/profiler_benchmark/profiler_bench.py b/benchmarks/profiler_benchmark/profiler_bench.py index ced82a501cba..ccb51033ec45 100644 --- a/benchmarks/profiler_benchmark/profiler_bench.py +++ b/benchmarks/profiler_benchmark/profiler_bench.py @@ -97,7 +97,7 @@ if __name__ == "__main__": with_stack=args.with_stack, use_kineto=args.use_kineto, use_cpu=not args.cuda_only, - ) as prof: + ): x = workload(input_x) return x diff --git a/benchmarks/serialization/simple_measurement.py b/benchmarks/serialization/simple_measurement.py index ee75acd5a857..53cd35cbf301 100644 --- a/benchmarks/serialization/simple_measurement.py +++ b/benchmarks/serialization/simple_measurement.py @@ -13,14 +13,14 @@ class Basic(Benchmark): torch.save(x, "big_tensor.zip", _use_new_zipfile_serialization=use_new) with Timer() as big2: - v = torch.load("big_tensor.zip") + torch.load("big_tensor.zip") x = [torch.ones(10, 10) for i in range(200)] with Timer() as small1: torch.save(x, "small_tensor.zip", _use_new_zipfile_serialization=use_new) with Timer() as small2: - v = torch.load("small_tensor.zip") + torch.load("small_tensor.zip") return { "Big Tensors Save": big1.ms_duration, diff --git a/benchmarks/sparse/dlmc/utils.py b/benchmarks/sparse/dlmc/utils.py index 196b74cd4c11..96380e7785ac 100644 --- a/benchmarks/sparse/dlmc/utils.py +++ b/benchmarks/sparse/dlmc/utils.py @@ -56,16 +56,12 @@ def load_sparse_matrix(path, device): def gen_vector(path, device): with open(path) as file: nrows, ncols, nnz = (int(el) for el in file.readline().split(", ")) - index_pointers = (int(el) for el in file.readline().split()) - indices = (int(el) for el in file.readline().split()) return torch.randn(nrows, dtype=torch.double, device=device) def gen_matrix(path, device): with open(path) as file: nrows, ncols, nnz = (int(el) for el in file.readline().split(", ")) - index_pointers = (int(el) for el in file.readline().split()) - indices = (int(el) for el in file.readline().split()) return torch.randn(nrows, ncols, dtype=torch.double, device=device) diff --git a/benchmarks/sparse/triton_ops.py b/benchmarks/sparse/triton_ops.py index 2493e1e0f740..6f5fc44e8ef4 100644 --- a/benchmarks/sparse/triton_ops.py +++ b/benchmarks/sparse/triton_ops.py @@ -374,7 +374,7 @@ if __name__ == "__main__": for r in range(args.repeat): try: time_ms, performance_tflops = test_func(x, y, **meta) - except triton.compiler.OutOfResources as msg: + except triton.compiler.OutOfResources: print( f"op={op}[{meta_str}]({bsr_size},{k}x{n}) dtype={args.dtype} {sparsity=}(nnz={x._nnz()})" f" blocksize={bm}x{bk} OutOfResources", diff --git a/benchmarks/tensorexpr/elementwise.py b/benchmarks/tensorexpr/elementwise.py index 5ea4d503e4fd..156becf135d7 100644 --- a/benchmarks/tensorexpr/elementwise.py +++ b/benchmarks/tensorexpr/elementwise.py @@ -208,7 +208,6 @@ class SimpleElementBench(benchmark.Benchmark): return "simple_element" def memory_workload(self): - input_count = len(self.inputs) if self.mode == "fwd": sol_count = 2 algorithmic_count = 2 diff --git a/benchmarks/transformer/better_transformer_vs_mha_functional.py b/benchmarks/transformer/better_transformer_vs_mha_functional.py index f7a80169521b..b5e0a6b75d31 100644 --- a/benchmarks/transformer/better_transformer_vs_mha_functional.py +++ b/benchmarks/transformer/better_transformer_vs_mha_functional.py @@ -136,7 +136,7 @@ def run( torch.testing.assert_close( y_native_mha_fast, y_native_mha_slow, atol=1e-3, rtol=1e-3 ) - 
except AssertionError as e: + except AssertionError: error_dict[entry_name] += 1 pprint(error_dict) diff --git a/benchmarks/transformer/score_mod.py b/benchmarks/transformer/score_mod.py index 7920a0e112fb..ed0bc13b842f 100644 --- a/benchmarks/transformer/score_mod.py +++ b/benchmarks/transformer/score_mod.py @@ -98,8 +98,6 @@ def generate_inputs( assert q_heads % kv_heads == 0 - num_h_groups = q_heads // kv_heads - make_q = partial( torch.rand, q_shape, device=device, dtype=dtype, requires_grad=requires_grad ) diff --git a/benchmarks/transformer/sdp.py b/benchmarks/transformer/sdp.py index ca15d1a95067..8b05c042ef8a 100644 --- a/benchmarks/transformer/sdp.py +++ b/benchmarks/transformer/sdp.py @@ -211,7 +211,7 @@ def run_single_experiment(config: ExperimentConfig) -> ExperimentResults: enable_flash=config.enable_flash, enable_mem_efficient=config.enable_mem_efficient, enable_cudnn=config.enable_cudnn, - ) as kernel_choice, torch.inference_mode() as inference_mode: + ): dropout_p = 0.0 mask = None diff --git a/functorch/dim/delayed_mul_tensor.py b/functorch/dim/delayed_mul_tensor.py index 3984a0638859..3c136cfe1247 100644 --- a/functorch/dim/delayed_mul_tensor.py +++ b/functorch/dim/delayed_mul_tensor.py @@ -62,7 +62,6 @@ class DelayedMulTensor(_Tensor): plhs, levelslhs = self._lhs._tensor, self._lhs._levels prhs, levelsrhs = self._rhs._tensor, self._rhs._levels - new_dims = tuple(d for d in self.dims if d not in dims) new_levels = [l for l in self._levels if l not in dims] fmt = "".join( [ diff --git a/functorch/dim/reference.py b/functorch/dim/reference.py index ed7275bf666a..6453a441b944 100644 --- a/functorch/dim/reference.py +++ b/functorch/dim/reference.py @@ -198,7 +198,6 @@ def __torch_function__(self, orig, cls, args, kwargs=empty_dict): if orig in pointwise: result_levels = llist() - arg_levels = llist() to_expand = [] for i, f in enumerate(flat_args): if isinstance(f, TensorLike): @@ -268,7 +267,6 @@ def positional(self, *dims): needs_view = True permute = list(range(len(levels))) - nflat = len(flat_dims) for i, d in enumerate(flat_dims): try: idx = levels.index(d) diff --git a/functorch/examples/dp_cifar10/cifar10_transforms.py b/functorch/examples/dp_cifar10/cifar10_transforms.py index ed1da15ee691..662472b55fcc 100644 --- a/functorch/examples/dp_cifar10/cifar10_transforms.py +++ b/functorch/examples/dp_cifar10/cifar10_transforms.py @@ -214,10 +214,6 @@ def main(): else: generator = None - augmentations = [ - transforms.RandomCrop(32, padding=4), - transforms.RandomHorizontalFlip(), - ] normalize = [ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), diff --git a/functorch/examples/maml_omniglot/maml-omniglot-transforms.py b/functorch/examples/maml_omniglot/maml-omniglot-transforms.py index be44863d36f4..971d940f4032 100755 --- a/functorch/examples/maml_omniglot/maml-omniglot-transforms.py +++ b/functorch/examples/maml_omniglot/maml-omniglot-transforms.py @@ -145,8 +145,6 @@ def loss_for_task(net, n_inner_iter, x_spt, y_spt, x_qry, y_qry): def train(db, net, device, meta_opt, epoch, log): - params = dict(net.named_parameters()) - buffers = dict(net.named_buffers()) n_train_iter = db.x_train.shape[0] // db.batchsz for batch_idx in range(n_train_iter): diff --git a/tools/autograd/gen_inplace_or_view_type.py b/tools/autograd/gen_inplace_or_view_type.py index afc932606a51..0e2927d30725 100644 --- a/tools/autograd/gen_inplace_or_view_type.py +++ b/tools/autograd/gen_inplace_or_view_type.py @@ -653,7 +653,6 @@ def 
gen_inplace_or_view_type( ) -> None: # NOTE: see Note [Sharded File] at the top of the VariableType.cpp # template regarding sharding of the generated files. - num_shards = 2 fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False) fm.write_sharded( diff --git a/tools/autograd/gen_python_functions.py b/tools/autograd/gen_python_functions.py index 0ff5e0259848..895a2e6c866c 100644 --- a/tools/autograd/gen_python_functions.py +++ b/tools/autograd/gen_python_functions.py @@ -1319,8 +1319,6 @@ def emit_single_dispatch( else: schema_comment = f"// aten::{f.func}" - deprecated = "[deprecated] " if ps.deprecated else "" - # dispatch lambda signature name = cpp.name(f.func) lambda_formals = ", ".join( diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py index c456b127168b..3874c9295218 100644 --- a/tools/autograd/gen_variable_type.py +++ b/tools/autograd/gen_variable_type.py @@ -46,7 +46,6 @@ from torchgen.api.types import ( BaseCppType, BaseCType, Binding, - DispatcherSignature, intArrayRefT, iTensorListRefT, ListCType, @@ -1535,9 +1534,6 @@ def emit_body( f: NativeFunction, input_base: str, unpacked_args: Sequence[str] ) -> str: """Dispatch call via function in a namespace or method on Tensor.""" - dispatcher_sig = DispatcherSignature.from_schema(f.func) - dispatcher_exprs = dispatcher_sig.exprs() - # code-generated autograd kernels plumb and recompute dispatch keys directly through the kernel for performance. # Ops also always have a function variant of the redispatch API. # See Note [Plumbing Keys Through The Dispatcher] for details. diff --git a/tools/jit/gen_unboxing.py b/tools/jit/gen_unboxing.py index df889e379cba..7539ec2e4b96 100644 --- a/tools/jit/gen_unboxing.py +++ b/tools/jit/gen_unboxing.py @@ -269,7 +269,7 @@ def main(args: list[str]) -> None: native_yaml_path = os.path.join(options.source_path, "native/native_functions.yaml") tags_yaml_path = os.path.join(options.source_path, "native/tags.yaml") parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path) - native_functions, backend_indices = ( + native_functions, _backend_indices = ( parsed_yaml.native_functions, parsed_yaml.backend_indices, ) diff --git a/tools/setup_helpers/cmake.py b/tools/setup_helpers/cmake.py index e417f6d56a0e..84e4dad32d31 100644 --- a/tools/setup_helpers/cmake.py +++ b/tools/setup_helpers/cmake.py @@ -95,7 +95,7 @@ class CMake: print(" ".join(command)) try: check_call(command, cwd=self.build_dir, env=env) - except (CalledProcessError, KeyboardInterrupt) as e: + except (CalledProcessError, KeyboardInterrupt): # This error indicates that there was a problem with cmake, the # Python backtrace adds no signal here so skip over it by catching # the error and exiting manually diff --git a/tools/stats/monitor.py b/tools/stats/monitor.py index b1dd2fe05ddc..7f434a5e67d3 100644 --- a/tools/stats/monitor.py +++ b/tools/stats/monitor.py @@ -235,12 +235,12 @@ class UsageLogger: if self._has_amdsmi: try: amdsmi.amdsmi_shut_down() - except amdsmi.AmdSmiException as e: + except amdsmi.AmdSmiException: pass if self._has_pynvml: try: pynvml.nvmlShutdown() - except pynvml.NVMLError as e: + except pynvml.NVMLError: pass def _get_per_process_gpu_info(self, handle: Any) -> list[dict[str, Any]]: @@ -306,7 +306,7 @@ class UsageLogger: if "pss" in memory_full_info: # only availiable in linux info["pss_memory"] = f"{memory_full_info.pss / (1024 * 1024):.2f}" - except psutil.AccessDenied as e: + except psutil.AccessDenied: # It's ok to skip this pass 
per_process_info.append(info) diff --git a/tools/testing/target_determination/heuristics/mentioned_in_pr.py b/tools/testing/target_determination/heuristics/mentioned_in_pr.py index 66da4e42d80f..38a2ae6d5474 100644 --- a/tools/testing/target_determination/heuristics/mentioned_in_pr.py +++ b/tools/testing/target_determination/heuristics/mentioned_in_pr.py @@ -50,7 +50,7 @@ class MentionedInPR(HeuristicInterface): ) + self._search_for_linked_issues(pr_body): try: linked_issue_bodies.append(get_issue_or_pr_body(int(issue))) - except Exception as e: + except Exception: pass mentioned = [] diff --git a/tools/testing/test_selections.py b/tools/testing/test_selections.py index d59b653655f5..162c26651072 100644 --- a/tools/testing/test_selections.py +++ b/tools/testing/test_selections.py @@ -42,7 +42,7 @@ if IS_ROCM and not IS_MEM_LEAK_CHECK: assert count > 0 # there must be at least 1 GPU # Limiting to 8 GPUs(PROCS) NUM_PROCS = min(count, 8) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: # The safe default for ROCm GHA runners is to run tests serially. NUM_PROCS = 1 diff --git a/torchgen/_autoheuristic/train_decision.py b/torchgen/_autoheuristic/train_decision.py index 784ffa4a3a50..9fd500474610 100644 --- a/torchgen/_autoheuristic/train_decision.py +++ b/torchgen/_autoheuristic/train_decision.py @@ -64,7 +64,6 @@ class AHTrainDecisionTree(AHTrain): Given a trained decision tree, and a dataframe containing the training data, returns a list of unsafe leaves. """ X = df[feature_columns] - y = df["winner"] leaf_ids = model.apply(X) unique_leaves = np.unique(leaf_ids) @@ -136,7 +135,6 @@ class AHTrainDecisionTree(AHTrain): best_model = None best_model_safe_proba = 0 best_model_num_correct = 0 - best_model_num_wrong = 0 best_model_unsafe_leaves = [] columns = ["set", "crit", "max_depth", "min_samples_leaf"] metrics_columns = [] @@ -223,7 +221,6 @@ class AHTrainDecisionTree(AHTrain): ) best_model = model best_model_num_correct = num_correct - best_model_num_wrong = num_wrong best_model_safe_proba = safe_proba best_model_unsafe_leaves = unsafe_leaves @@ -786,7 +783,6 @@ class DecisionEvaluator: def top_k_classes(self, model, probas, k, avail_choices): # Get classes and their corresponding probabilities classes = model.classes_ - class_proba_pairs = list(zip(classes, probas)) # Sort by probability (descending) and filter out zero probabilities sorted_classes = [ diff --git a/torchgen/_autoheuristic/train_regression.py b/torchgen/_autoheuristic/train_regression.py index 1fc487320425..2d5b012915e8 100644 --- a/torchgen/_autoheuristic/train_regression.py +++ b/torchgen/_autoheuristic/train_regression.py @@ -399,7 +399,6 @@ class AHTrainRegressionTree(AHTrain): def dt_to_python(node, depth): indent = " " * (depth + 1) - false_predicate = "" if tree_.feature[node] != -2: name = feature_name[node] threshold = tree_.threshold[node] diff --git a/torchgen/gen_aoti_c_shim.py b/torchgen/gen_aoti_c_shim.py index 24a3b0c91381..9846de77b430 100644 --- a/torchgen/gen_aoti_c_shim.py +++ b/torchgen/gen_aoti_c_shim.py @@ -488,9 +488,6 @@ extern "C" {{ """ else: - c_shim_include = ( - f"#include " - ) return f""" {warning} diff --git a/torchgen/gen_functionalization_type.py b/torchgen/gen_functionalization_type.py index afa4218002b5..4f9865d6d3eb 100644 --- a/torchgen/gen_functionalization_type.py +++ b/torchgen/gen_functionalization_type.py @@ -693,7 +693,7 @@ def emit_inplace_functionalization_body( ] ) else: - mutable_input_post_processing = "\n".join( + 
mutable_input_post_processing = "\n".join( # noqa: F841 [ f""" at::functionalization::impl::replace_({a.name}, tmp_output); diff --git a/torchgen/operator_versions/gen_mobile_upgraders.py b/torchgen/operator_versions/gen_mobile_upgraders.py index ea7a8f3effe2..845034cb7484 100644 --- a/torchgen/operator_versions/gen_mobile_upgraders.py +++ b/torchgen/operator_versions/gen_mobile_upgraders.py @@ -317,7 +317,6 @@ def get_upgrader_bytecode_function_to_index_map( def write_cpp(cpp_path: str, upgrader_dict: list[dict[str, Any]]) -> None: - body_parts = [] upgrader_bytecode_function_to_index_map = ( get_upgrader_bytecode_function_to_index_map(upgrader_dict) ) @@ -335,7 +334,6 @@ def write_cpp(cpp_path: str, upgrader_dict: list[dict[str, Any]]) -> None: operator_list_str = "" for table_name, contents in bytecode.items(): element = ByteCode[table_name] - body_string = "" if element is ByteCode.instructions: instruction_list_str = construct_instruction(contents) elif element is ByteCode.constants: @@ -364,10 +362,8 @@ def write_cpp(cpp_path: str, upgrader_dict: list[dict[str, Any]]) -> None: operator_version_map=version_map_src, upgrader_bytecode="".join(all_upgrader_src_string).lstrip("\n"), ) - body_parts.append(upgrader_file_content) print("writing file to : ", cpp_path + "/" + UPGRADER_MOBILE_FILE_NAME) with open(os.path.join(cpp_path, UPGRADER_MOBILE_FILE_NAME), "wb") as out_file: - final_output = "".join(body_parts) out_file.write(upgrader_file_content.encode("utf-8"))
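
The recurring pattern across these hunks is flake8/ruff rule F841 ("local
variable is assigned to but never used"): either the unused binding is dropped
entirely (`except ValueError as error:` becomes `except ValueError:`, and
`rule = find_matching_merge_rule(pr, repo)` becomes a bare call), or, where the
assignment is kept deliberately, it is annotated with `# noqa: F841` instead.
A minimal before/after sketch of the exception case, mirroring the trymerge.py
hunk above (the helper name `parse_drci_summary` is illustrative only, not
part of this patch):

    import json
    import warnings

    def parse_drci_summary(summary: str) -> dict:
        try:
            return json.loads(summary)
        # Before this patch: `except json.JSONDecodeError as error:` bound
        # `error` without ever reading it; dropping the `as` clause
        # silences F841 without changing behavior.
        except json.JSONDecodeError:
            warnings.warn("Invalid Dr.CI checkrun summary")
            return {}

Beyond lint hygiene, the `as e` form carries real semantics in Python 3: the
bound name is deleted when the except block exits, so an unused binding is
pure noise, while a binding that is actually read should stay.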