[BE]: Ruff - TRY401 - Avoid verbose exception logging (#125126)

Don't log the exception object explicitly with the logger; logging.exception() captures it anyway, and passing it again produces verbose, duplicated output.
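For illustration, a minimal standalone sketch (not from the PR) of why the argument is redundant: logging.exception() logs at ERROR level and appends the active exception's traceback, so the exception's type and message already appear in the output.

```python
import logging

logging.basicConfig()
log = logging.getLogger(__name__)

try:
    1 / 0
except ZeroDivisionError as e:
    # .exception() appends the full traceback, which already ends with
    # "ZeroDivisionError: division by zero" -- no need to pass e.
    log.exception("step failed")
    # What TRY401 flags: the repr of e would appear twice in the output.
    # log.exception("step failed: %s", e)
```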

Pull Request resolved: https://github.com/pytorch/pytorch/pull/125126
Approved by: https://github.com/ezyang
Aaron Gokaslan
2024-04-28 21:44:30 +00:00
committed by PyTorch MergeBot
parent 3e1fb96964
commit e3b9b71684
6 changed files with 11 additions and 10 deletions


@@ -2478,7 +2478,7 @@ class BenchmarkRunner:
if isinstance(e, torch.cuda.OutOfMemoryError)
else "eager_1st_run_fail"
)
-log.exception(e)
+log.exception("")
return record_status(accuracy_status, dynamo_start_stats=start_stats)
finally:
del model_copy
@@ -2499,7 +2499,7 @@ class BenchmarkRunner:
if isinstance(e, torch.cuda.OutOfMemoryError)
else "eager_2nd_run_fail"
)
-log.exception(e)
+log.exception("")
return record_status(accuracy_status, dynamo_start_stats=start_stats)
finally:
del model_copy
@@ -2551,7 +2551,7 @@ class BenchmarkRunner:
with maybe_enable_compiled_autograd(self.args.compiled_autograd):
new_result = optimized_model_iter_fn(model_copy, example_inputs)
except Exception as e:
-log.exception(e)
+log.exception("")
print(
"TorchDynamo optimized model failed to run because of following error"
)
@@ -2653,7 +2653,7 @@ class BenchmarkRunner:
optimized_model_iter_fn = optimize_ctx(self.run_n_iterations)
new_result = optimized_model_iter_fn(model, example_inputs)
except Exception as e:
-log.exception(e)
+log.exception("")
print(
"TorchDynamo optimized model failed to run because of following error"
)


@@ -1452,7 +1452,7 @@ class DashboardUpdater:
try:
RegressionTracker(self.args).diff()
except Exception as e:
-logging.exception(e)
+logging.exception("")
with open(f"{self.args.output_dir}/gh_regression.txt", "w") as gh_fh:
gh_fh.write("")


@@ -135,6 +135,7 @@ select = [
"TRY002", # ban vanilla raise (todo fix NOQAs)
"TRY200", # TODO: migrate from deprecated alias
"TRY302",
"TRY401", # verbose-log-message
"UP",
]
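With TRY401 selected, ruff check reports verbose-log-message wherever a handler interpolates the caught exception into a logging call whose appended traceback already carries it. A hypothetical before/after pair (read_config is made up for illustration):

```python
import logging

log = logging.getLogger(__name__)

def read_config(path: str) -> str:
    try:
        with open(path) as f:
            return f.read()
    except OSError:
        # Before (flagged by TRY401): except OSError as err: ...
        #     log.exception("could not read %s: %s", path, err)
        # After: let the appended traceback carry the exception details.
        log.exception("could not read %s", path)
        raise
```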


@@ -282,7 +282,7 @@ def helper_for_dump_minify(contents):
fd.write(contents)
except OSError as e:
-log.exception(e)
+log.exception("")
raise NotImplementedError("Could not write to {minified_repro_path}") from e


@@ -102,7 +102,7 @@ class TuningProcess:
try:
TuningProcess.workloop(request_queue, response_queue)
except Exception as ex:
log.exception("Exception in TuningProcess: %s", ex)
log.exception("Exception in TuningProcess")
@staticmethod
def workloop(request_queue: Queue[Any], response_queue: Queue[Any]) -> None:


@@ -149,8 +149,8 @@ def run_model(
_ = pred_control[0].sum().backward(retain_graph=True)
res = compare_gradients(model_base, model_control, precision)
logger.info("compare param grad. Numerical result : %s", res)
-except Exception as e:
-logger.exception("Exception %s when compare gradients", e)
+except Exception:
+logger.exception("Exception when comparing gradients")
traceback.print_exc()
if config.fx_passes_numeric_check["requires_optimizer"]:
@@ -172,7 +172,7 @@ def run_model(
)
except Exception as e:
logger.exception(
"Exception %s when optimizer is added to check parameter names", e
"Exception when optimizer is added to check parameter names"
)
traceback.print_exc()
else: