diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index 187c68f427b0..f5bb176d5075 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -1278,7 +1278,8 @@ class BenchmarkRunner:
             fp64_outputs = self.run_n_iterations(model_fp64, inputs_fp64)
         except Exception:
             log.warning(
-                f"fp64 golden ref were not generated for {name}. Setting accuracy check to cosine"
+                "fp64 golden ref were not generated for %s. Setting accuracy check to cosine",
+                name,
             )
             self.args.cosine = True
             fp64_outputs = None
diff --git a/tools/linter/adapters/update_s3.py b/tools/linter/adapters/update_s3.py
index 866bf6e4918c..426b330fe2fb 100644
--- a/tools/linter/adapters/update_s3.py
+++ b/tools/linter/adapters/update_s3.py
@@ -71,7 +71,10 @@ def main() -> None:
 
     # Upload the file
     logging.info(
-        f"Uploading file {args.file} to s3 bucket: {bucket}, object name: {object_name}"
+        "Uploading file %s to s3 bucket: %s, object name: %s",
+        args.file,
+        bucket,
+        object_name,
     )
     if not args.dry_run:
         s3_client = boto3.client("s3")
diff --git a/torch/_dynamo/backends/distributed.py b/torch/_dynamo/backends/distributed.py
index 2349dd5c919f..2b4708c87581 100644
--- a/torch/_dynamo/backends/distributed.py
+++ b/torch/_dynamo/backends/distributed.py
@@ -195,7 +195,8 @@ class DDPOptimizer:
         # stash buckets for testing/debugging purposes
         self.buckets = buckets
         log.info(
-            f"DDPOptimizer used bucket cap {self.bucket_bytes_cap} and produced the following buckets:"
+            "DDPOptimizer used bucket cap %s and produced the following buckets:",
+            self.bucket_bytes_cap,
         )
         pretty_print_buckets(buckets)
 
diff --git a/torch/_dynamo/backends/onnxrt.py b/torch/_dynamo/backends/onnxrt.py
index df0a0ef114d5..50b2bf43c635 100644
--- a/torch/_dynamo/backends/onnxrt.py
+++ b/torch/_dynamo/backends/onnxrt.py
@@ -87,7 +87,7 @@ def onnxrt(gm, example_inputs, *, filename=None, provider=None):
         for name, value in zip(input_names, args):
             if name not in active_inputs:
                 log.warning(
-                    f"input {name} skipped as not found in onnx inference session"
+                    "input %s skipped as not found in onnx inference session", name
                 )
                 continue
             dev = value.device
diff --git a/torch/_dynamo/backends/tvm.py b/torch/_dynamo/backends/tvm.py
index 61aa8e65ef8c..082d65dc270f 100644
--- a/torch/_dynamo/backends/tvm.py
+++ b/torch/_dynamo/backends/tvm.py
@@ -133,7 +133,8 @@ def tvm(gm, example_inputs, *, scheduler=None, trials=20000):
             inp_name = f"inp_{idx}"
             if inp_name not in active_inputs:
                 log.warning(
-                    f"input {inp_name} skipped as not found in tvm's runtime library"
+                    "input %s skipped as not found in tvm's runtime library",
+                    inp_name,
                 )
                 continue
             m.set_input(
diff --git a/torch/_dynamo/convert_frame.py b/torch/_dynamo/convert_frame.py
index 0272defc2adc..b73888bd0043 100644
--- a/torch/_dynamo/convert_frame.py
+++ b/torch/_dynamo/convert_frame.py
@@ -177,8 +177,11 @@ def has_tensor_in_frame(frame):
             return True
 
     log.debug(
-        f"skipping because no torch.* {frame.f_code.co_name} \
-            {frame.f_code.co_filename} {frame.f_code.co_firstlineno}"
+        "skipping because no torch.* %s \
+            %s %s",
+        frame.f_code.co_name,
+        frame.f_code.co_filename,
+        frame.f_code.co_firstlineno,
     )
     return False
 
@@ -364,8 +367,12 @@ def _compile(
             unimplemented("100+ RestartAnalysis() calls")
     except exc.SkipFrame as e:
         log.debug(
-            f"Skipping frame {e} {code.co_name} \
-            {code.co_filename} {code.co_firstlineno}"
+            "Skipping frame %s %s \
+            %s %s",
+            e,
+            code.co_name,
+            code.co_filename,
+            code.co_firstlineno,
         )
         if one_graph:
             log.debug("No graph captured with one_graph=True")
diff --git a/torch/_dynamo/debug_utils.py b/torch/_dynamo/debug_utils.py
index d14b45c1a03a..32c6501cf635 100644
--- a/torch/_dynamo/debug_utils.py
+++ b/torch/_dynamo/debug_utils.py
@@ -92,7 +92,8 @@ python_binary(
         cmd_split = BUCK_CMD_PREFIX + [self.cmd_line_path]
         if print_msg:
             log.warning(
-                f"Found an example that reproduces the error. Run this cmd to repro - {' '.join(cmd_split)}"
+                "Found an example that reproduces the error. Run this cmd to repro - %s",
+                " ".join(cmd_split),
             )
         return cmd_split
 
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index 348fc7855461..a94439455999 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -371,7 +371,9 @@ def break_graph_if_unsupported(*, push):
                 and graph_break_dup_warning_checker.add(frame_loc)
             ):
                 log.warning(
-                    f"Graph break: {excp} from user code at {user_stack_formatted}"
+                    "Graph break: %s from user code at %s",
+                    excp,
+                    user_stack_formatted,
                 )
 
             excp.remove_from_stats()
@@ -2027,7 +2029,12 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
             sub_locals, closure_cells = func.bind_args(parent, args, kwargs)
         except TypeError as e:
             log.warning(
-                f"{func.get_filename()} {func.get_function()} {args} {kwargs} {e}"
+                "%s %s %s %s %s",
+                func.get_filename(),
+                func.get_function(),
+                args,
+                kwargs,
+                e,
             )
             unimplemented("arg mismatch inlining")
 
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index 90c322e7179f..ee7910b04cc1 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -314,7 +314,7 @@ def write_record_to_file(filename, exec_record):
     try:
         if os.path.exists(filename):
             log.warning(
-                f"Unable to write execution record {filename}; file already exists."
+                "Unable to write execution record %s; file already exists.", filename
             )
         else:
             os.makedirs(os.path.dirname(filename), exist_ok=True)
@@ -966,7 +966,10 @@ def same(
             passes_test = res_error <= (multiplier * ref_error + tol / 10.0)
             if not passes_test:
                 log.error(
-                    f"RMSE (res-fp64): {res_error:.5f}, (ref-fp64): {ref_error:.5f} and shape={res.size()}"
+                    "RMSE (res-fp64): %.5f, (ref-fp64): %.5f and shape=%s",
+                    res_error,
+                    ref_error,
+                    res.size(),
                 )
                 # import pdb; pdb.set_trace()
             return passes_test
diff --git a/torch/_dynamo/variables/builtin.py b/torch/_dynamo/variables/builtin.py
index 5673bbc21c95..0d25086c299f 100644
--- a/torch/_dynamo/variables/builtin.py
+++ b/torch/_dynamo/variables/builtin.py
@@ -558,7 +558,9 @@ class BuiltinVariable(VariableTracker):
             except TypeError as exc:
                 if not has_constant_handler:
                     log.warning(
-                        f"incorrect arg count {handler} {exc} and no constant handler"
+                        "incorrect arg count %s %s and no constant handler",
+                        handler,
+                        exc,
                     )
                 handler = None
 
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 2c5521cbc5c1..df16c5fcc4f0 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -2253,7 +2253,9 @@ class ComputedBuffer(Buffer):
         except Exception:
             if config.debug:
                 log.warning(
-                    f"Did not simplify complex index:\n{dict(zip(index_vars, sizes))}\n{memory_addrs}"
+                    "Did not simplify complex index:\n%s\n%s",
+                    dict(zip(index_vars, sizes)),
+                    memory_addrs,
                 )
         order = list(range(len(sizes)))
         sizes = [sizes[i] for i in order]
diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py
index 27bbb51ff93e..e9282bc0b362 100644
--- a/torch/_subclasses/fake_tensor.py
+++ b/torch/_subclasses/fake_tensor.py
@@ -1088,7 +1088,7 @@ class FakeTensorMode(TorchDispatchMode):
 
         if log.getEffectiveLevel() <= logging.DEBUG:
             log.debug(
-                f"{' ' * RECURSION_COUNT}FakeTensorMode.__torch_dispatch__: {func}"
+                "%sFakeTensorMode.__torch_dispatch__: %s", " " * RECURSION_COUNT, func
             )
             incr = IncrementRecursionCount()
 
diff --git a/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py b/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py
index 2db7d0668800..73874ab47cd5 100644
--- a/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py
+++ b/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py
@@ -35,5 +35,5 @@ def _log_sparsified_level(model, data_sparsifier) -> None:
         mask = data_sparsifier.get_mask(name=valid_name)
         sparsity_level = 1.0 - mask.float().mean()
         logger.info(
-            f"Sparsity in layer {name} = {sparsity_level: .2%}"
+            "Sparsity in layer %s = % .2f%%", name, sparsity_level * 100
         )
diff --git a/torch/distributed/checkpoint/default_planner.py b/torch/distributed/checkpoint/default_planner.py
index 3874bb0942f8..53becb0bbd7c 100644
--- a/torch/distributed/checkpoint/default_planner.py
+++ b/torch/distributed/checkpoint/default_planner.py
@@ -421,7 +421,7 @@ def _validate_global_plan(
             for chunk1 in value.chunks[chunk_idx + 1 :]:
                 if _check_box_overlap(chunk0, chunk1):
                     logger.warning(
-                        f"key:{key} has overlapping chunks: {chunk0} {chunk1}"
+                        "key:%s has overlapping chunks: %s %s", key, chunk0, chunk1
                     )
                     all_good = False
 
diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py
index 52728a224791..2080624cf4e8 100644
--- a/torch/distributed/distributed_c10d.py
+++ b/torch/distributed/distributed_c10d.py
@@ -510,7 +510,7 @@ def _store_based_barrier(rank, store, timeout):
         )
 
     logger.info(
-        f"Rank {rank}: Completed store-based barrier for key:{store_key} with {world_size} nodes."
+        "Rank %s: Completed store-based barrier for key:%s with %s nodes.", rank, store_key, world_size
     )
 
 
diff --git a/torch/distributed/elastic/agent/server/api.py b/torch/distributed/elastic/agent/server/api.py
index 9128ff6ec150..cf58ca9fd8b5 100644
--- a/torch/distributed/elastic/agent/server/api.py
+++ b/torch/distributed/elastic/agent/server/api.py
@@ -852,7 +852,7 @@ class SimpleElasticAgent(ElasticAgent):
         role = spec.role
 
         log.info(
-            f"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}"
+            "[%s] starting workers for entrypoint: %s", role, spec.get_entrypoint_name()
         )
 
         self._initialize_workers(self._worker_group)
@@ -925,7 +925,7 @@ class SimpleElasticAgent(ElasticAgent):
                 barrier_timeout=self._exit_barrier_timeout,
             )
             log.info(
-                f"Done waiting for other agents. Elapsed: {time.time() - start} seconds"
+                "Done waiting for other agents. Elapsed: %s seconds", time.time() - start
             )
         except SignalException as e:
             log.warning("Got termination signal: %s", e.sigval)
diff --git a/torch/distributed/elastic/multiprocessing/api.py b/torch/distributed/elastic/multiprocessing/api.py
index 233cb2296cac..60c7f70dc99d 100644
--- a/torch/distributed/elastic/multiprocessing/api.py
+++ b/torch/distributed/elastic/multiprocessing/api.py
@@ -536,7 +536,8 @@ class MultiprocessContext(PContext):
         for proc in self._pc.processes:
             if proc.is_alive():
                 log.warning(
-                    f"Unable to shutdown process {proc.pid} via {death_sig}, forcefully exiting via {_get_kill_signal()}"
+                    "Unable to shutdown process %s via %s, forcefully exiting via %s",
+                    proc.pid, death_sig, _get_kill_signal()
                 )
                 try:
                     os.kill(proc.pid, _get_kill_signal())
@@ -696,7 +697,7 @@ class SubprocessContext(PContext):
         for handler in self.subprocess_handlers.values():
             if handler.proc.poll() is None:
                 log.warning(
-                    f"Sending process {handler.proc.pid} closing signal {death_sig.name}"
+                    "Sending process %s closing signal %s", handler.proc.pid, death_sig.name
                 )
                 handler.close(death_sig=death_sig)
         end = time.monotonic() + timeout
@@ -713,7 +714,8 @@ class SubprocessContext(PContext):
         for handler in self.subprocess_handlers.values():
             if handler.proc.poll() is None:
                 log.warning(
-                    f"Unable to shutdown process {handler.proc.pid} via {death_sig}, forcefully exiting via {_get_kill_signal()}"
+                    "Unable to shutdown process %s via %s, forcefully exiting via %s",
+                    handler.proc.pid, death_sig, _get_kill_signal()
                 )
                 handler.close(death_sig=_get_kill_signal())
                 handler.proc.wait()
diff --git a/torch/distributed/elastic/multiprocessing/errors/__init__.py b/torch/distributed/elastic/multiprocessing/errors/__init__.py
index 92d1368c9cd3..874b141a2e76 100644
--- a/torch/distributed/elastic/multiprocessing/errors/__init__.py
+++ b/torch/distributed/elastic/multiprocessing/errors/__init__.py
@@ -112,7 +112,7 @@ class ProcessFailure:
             with open(self.error_file, "r") as fp:
                 self.error_file_data = json.load(fp)
                 log.debug(
-                    f"User process failed with error data: {json.dumps(self.error_file_data, indent=2)}"
+                    "User process failed with error data: %s", json.dumps(self.error_file_data, indent=2)
                 )
                 self.message, self.timestamp = self._get_error_data(
                     self.error_file_data
diff --git a/torch/distributed/elastic/multiprocessing/errors/error_handler.py b/torch/distributed/elastic/multiprocessing/errors/error_handler.py
index 03b289a828f9..b950e8682ffd 100644
--- a/torch/distributed/elastic/multiprocessing/errors/error_handler.py
+++ b/torch/distributed/elastic/multiprocessing/errors/error_handler.py
@@ -135,7 +135,7 @@ class ErrorHandler:
                 log.info("dumped error file to parent's %s", my_error_file)
             else:
                 log.error(
-                    f"no error file defined for parent, to copy child error file ({rootcause_error_file})"
+                    "no error file defined for parent, to copy child error file (%s)", rootcause_error_file
                 )
 
     def _rm(self, my_error_file):
diff --git a/torch/distributed/elastic/rendezvous/etcd_rendezvous.py b/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
index 9e998f8ba148..9af7ffee3a2e 100644
--- a/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
+++ b/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
@@ -298,7 +298,7 @@ class EtcdRendezvous:
 
             except RendezvousClosedError:
                 log.info(
-                    f"Rendezvous for run_id={self._run_id} was observed to be closed"
+                    "Rendezvous for run_id=%s was observed to be closed", self._run_id
                 )
                 raise
 
diff --git a/torch/distributed/elastic/rendezvous/etcd_server.py b/torch/distributed/elastic/rendezvous/etcd_server.py
index f294def4f8ff..0ecc4a889c14 100644
--- a/torch/distributed/elastic/rendezvous/etcd_server.py
+++ b/torch/distributed/elastic/rendezvous/etcd_server.py
@@ -186,7 +186,7 @@ class EtcdServer:
                 curr_retries += 1
                 stop_etcd(self._etcd_proc)
                 log.warning(
-                    f"Failed to start etcd server, got error: {str(e)}, retrying"
+                    "Failed to start etcd server, got error: %s, retrying", str(e)
                 )
                 if curr_retries >= num_retries:
                     shutil.rmtree(self._base_data_dir, ignore_errors=True)
diff --git a/torch/distributed/elastic/timer/api.py b/torch/distributed/elastic/timer/api.py
index 42e6c4d65050..78e6dc7da514 100644
--- a/torch/distributed/elastic/timer/api.py
+++ b/torch/distributed/elastic/timer/api.py
@@ -200,7 +200,7 @@ class TimerServer(abc.ABC):
                     reaped_worker_ids.add(worker_id)
                 else:
                     log.error(
-                        f"Error reaping worker=[{worker_id}]. Will retry on next watchdog."
+                        "Error reaping worker=[%s]. Will retry on next watchdog.", worker_id
                     )
 
         self.clear_timers(reaped_worker_ids)
diff --git a/torch/distributed/elastic/utils/distributed.py b/torch/distributed/elastic/utils/distributed.py
index 500c121cf411..a3170bc633d5 100644
--- a/torch/distributed/elastic/utils/distributed.py
+++ b/torch/distributed/elastic/utils/distributed.py
@@ -77,7 +77,7 @@ def create_c10d_store(
             if str(e) == _ADDRESS_IN_USE:  # this will only happen on the server
                 if attempt < retries:
                     log.warning(
-                        f"port: {port} already in use, attempt: [{attempt}/{retries}]"
+                        "port: %s already in use, attempt: [%s/%s]", port, attempt, retries
                     )
                     attempt += 1
                 else:
diff --git a/torch/distributed/rpc/api.py b/torch/distributed/rpc/api.py
index 616ec20b8f25..011b77351637 100644
--- a/torch/distributed/rpc/api.py
+++ b/torch/distributed/rpc/api.py
@@ -283,7 +283,7 @@ def _barrier(worker_names):
         _all_gather(None, set(worker_names))
     except RuntimeError as ex:
         logger.error(
-            f"Failed to complete barrier, got error {ex}"
+            "Failed to complete barrier, got error %s", ex
         )
 
 
@@ -300,7 +300,7 @@ def _wait_all_workers(timeout=DEFAULT_SHUTDOWN_TIMEOUT):
         _all_gather(None, timeout=timeout)
     except RuntimeError as ex:
         logger.error(
-            f"Failed to respond to 'Shutdown Proceed' in time, got error {ex}"
+            "Failed to respond to 'Shutdown Proceed' in time, got error %s", ex
         )
         raise ex
 
diff --git a/torch/testing/_internal/common_distributed.py b/torch/testing/_internal/common_distributed.py
index 61103a186ef9..9d0c9f337ee0 100644
--- a/torch/testing/_internal/common_distributed.py
+++ b/torch/testing/_internal/common_distributed.py
@@ -607,7 +607,7 @@ class MultiProcessTestCase(TestCase):
 
             if parent_pipe.closed:
                 logger.info(
-                    f"Pipe closed for process {rank}, stopping event listener thread"
+                    "Pipe closed for process %s, stopping event listener thread", rank
                 )
                 return
 
@@ -657,7 +657,7 @@ class MultiProcessTestCase(TestCase):
             getattr(self, test_name)()
         except unittest.SkipTest as se:
             logger.info(
-                f"Process {self.rank} skipping test {test_name} for following reason: {str(se)}"
+                "Process %s skipping test %s for following reason: %s", self.rank, test_name, str(se)
             )
             sys.exit(TEST_SKIPS["generic"].exit_code)
         except Exception as e:
@@ -687,7 +687,7 @@ class MultiProcessTestCase(TestCase):
                 pipes.append((i, pipe))
             except ConnectionError as e:
                 logger.error(
-                    f"Encountered error while trying to get traceback for process {i}: {e}"
+                    "Encountered error while trying to get traceback for process %s: %s", i, e
                )
 
        # Wait for results.
@@ -697,21 +697,21 @@ class MultiProcessTestCase(TestCase):
                 if pipe.poll(5):
                     if pipe.closed:
                         logger.info(
-                            f"Pipe closed for process {rank}, cannot retrieve traceback"
+                            "Pipe closed for process %s, cannot retrieve traceback", rank
                         )
                         continue
 
                     traceback = pipe.recv()
                     logger.error(
-                        f"Process {rank} timed out with traceback: \n\n{traceback}"
+                        "Process %s timed out with traceback: \n\n%s", rank, traceback
                     )
                 else:
                     logger.error(
-                        f"Could not retrieve traceback for timed out process: {rank}"
+                        "Could not retrieve traceback for timed out process: %s", rank
                     )
             except ConnectionError as e:
                 logger.error(
-                    f"Encountered error while trying to get traceback for process {rank}: {e}"
+                    "Encountered error while trying to get traceback for process %s: %s", rank, e
                 )
 
     def _join_processes(self, fn) -> None:
@@ -826,7 +826,7 @@ class MultiProcessTestCase(TestCase):
                 # is some follow-up needed. Instead just "pass" the test
                 # with an appropriate message.
                 logger.info(
-                    f"Skipping {self.id()} on sandcastle for the following reason: {skip.message}"
+                    "Skipping %s on sandcastle for the following reason: %s", self.id(), skip.message
                 )
                 return
             else:
@@ -1086,7 +1086,7 @@ class MultiThreadedTestCase(TestCase):
             exc = exc_info[1]
             if isinstance(exc, unittest.SkipTest):
                 logger.info(
-                    f"Thread {rank} skipping test {fn} for following reason: {str(exc)}"
+                    "Thread %s skipping test %s for following reason: %s", rank, fn, str(exc)
                 )
                 if skip_code < 0:
                     skip_code = TEST_SKIPS["generic"].exit_code
@@ -1099,7 +1099,7 @@ class MultiThreadedTestCase(TestCase):
             elif isinstance(exc, Exception):
                 msg = "".join(traceback.format_exception(*exc_info))
                 logger.error(
-                    f"Caught exception: \n{msg} exiting thread {rank}"
+                    "Caught exception: \n%s exiting thread %s", msg, rank
                 )
                 error_msg += (
                     "Thread {} exited with exception:\n{}\n".format(rank, msg)
@@ -1118,7 +1118,7 @@ class MultiThreadedTestCase(TestCase):
             if IS_SANDCASTLE:
                 # "pass" the test with an appropriate message.
                 logger.info(
-                    f"Skipping {fn} on sandcastle for the following reason: {skip.message}"
+                    "Skipping %s on sandcastle for the following reason: %s", fn, skip.message
                 )
                 return
             else: