Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Convert logging f-strings to use % format, part five (#98765)
This does some annoying but simple cases by hand.

Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/98765
Approved by: https://github.com/wanchaol
Commit: b8b840be3d
Parent: 5a7aad9681
Committed by: PyTorch MergeBot
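
Every hunk below applies the same mechanical conversion: pass the format string and its arguments to the logging call separately instead of interpolating them with an f-string. The %-style arguments are only formatted if a handler actually emits the record, and the constant template makes it easier for log aggregation to group identical messages. A minimal sketch of the before/after shape (the logger name and value here are illustrative, not taken from the diff):

    import logging

    log = logging.getLogger(__name__)
    mode = "inductor"  # example value

    # Before: the f-string is built eagerly, even when WARNING is disabled.
    log.warning(f"Backend {mode} failed in warmup()")

    # After: interpolation is deferred until the record is actually handled.
    log.warning("Backend %s failed in warmup()", mode)
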
@@ -1393,7 +1393,7 @@ class BenchmarkRunner:
             percentage = psutil.Process(os.getpid()).memory_percent()
             peak_mem = percentage * total / 10**9
         except Exception:
-            log.exception(f"Backend {mode} failed in warmup()")
+            log.exception("Backend %s failed in warmup()", mode)
             return sys.exit(-1)
         dynamo_stats = get_dynamo_stats()
         dynamo_stats.subtract(start_stats)

@@ -98,7 +98,6 @@ coverage_ignore_functions = [
     "cudart",
     "is_bf16_supported",
     # torch.cuda._sanitizer
-    "format_log_message",
     "zip_arguments",
     "zip_by_key",
     # torch.distributed.autograd

@@ -61,7 +61,7 @@ def main():
         utilization, mm_conv_utilization = compute_utilization(filenames, total_length)
         print(f"{modelname}, {utilization}, {mm_conv_utilization}")
     except BaseException:
-        logging.exception(f"{filename}, ERROR")
+        logging.exception("%s, ERROR", filename)
         print(f"{filename}, ERROR")

 if __name__ == "__main__":

@@ -7,7 +7,6 @@ import platform
 import stat
 import subprocess
 import sys
-import textwrap
 import urllib.error
 import urllib.request
 from pathlib import Path

@@ -77,16 +76,17 @@ def check(binary_path: Path, reference_hash: str) -> bool:
         return True

     logging.warning(
-        textwrap.dedent(
-            f"""\
-            Found binary hash does not match reference!
-
-            Found hash: {existing_binary_hash}
-            Reference hash: {reference_hash}
-
-            Deleting {binary_path} just to be safe.
-            """
-        )
+        """\
+        Found binary hash does not match reference!
+
+        Found hash: %s
+        Reference hash: %s
+
+        Deleting %s just to be safe.
+        """,
+        existing_binary_hash,
+        reference_hash,
+        binary_path,
     )
     if DRY_RUN:
         logging.critical(

@@ -784,11 +784,11 @@ class InstructionTranslatorBase(Checkpointable[InstructionTranslatorGraphState])
         if package is not None:
             if spec is not None and package != spec.parent:
                 log.warning(
-                    "__package__ != __spec__.parent "
-                    f"({package!r} != {spec.parent!r})",
-                    ImportWarning,
+                    "__package__ != __spec__.parent (%r != %r)",
+                    package,
+                    spec.parent,
                     stacklevel=3,
-                )  # type: ignore[call-arg]
+                )
             return package
         elif spec is not None:
             return spec.parent

@@ -321,7 +321,7 @@ def write_record_to_file(filename, exec_record):
         with open(filename, "wb") as f:
             exec_record.dump(f)
     except Exception:
-        log.error(f"Unable to write execution record {filename}", exc_info=1)
+        log.error("Unable to write execution record %s", filename, exc_info=1)


 def count_calls(g: fx.Graph):

@@ -491,7 +491,7 @@ class _NnapiSerializer:
             raise Exception("Flexible size is not supported for this operand.")
         if s < 0:
             # runtime flex
-            LOG.warning(f"Operand {oper} has runtime flex shape")
+            LOG.warning("Operand %s has runtime flex shape", oper)
         return op_id, oper

     def get_tensor_operand_or_constant(self, jitval, dim_order=DimOrder.PRESUMED_CONTIGUOUS):

@@ -332,10 +332,10 @@ or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64
         if find_je:
             logger.info("Use JeMalloc memory allocator")
             return
-        logger.warning(f"""Neither TCMalloc nor JeMalloc is found in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib
-or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or
-{expanduser("~")}/.local/lib/ so the LD_PRELOAD environment variable will not be set.
-This may drop the performance""")
+        logger.warning("""Neither TCMalloc nor JeMalloc is found in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib
+or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or
+%s/.local/lib/ so the LD_PRELOAD environment variable will not be set.
+This may drop the performance""", expanduser("~"))

     def log_env_var(self, env_var_name=""):
         if env_var_name in os.environ:

@@ -142,10 +142,6 @@ class CUDASanitizerErrors(Exception):
         return f"detected {len(self.errors)} errors"


-def format_log_message(message: str) -> str:
-    return " ".join(line.strip() for line in message.strip().splitlines())
-
-
 @dataclass
 class TensorInfo:
     r"""Stores information about a single tensor and recent accesses to it.

@@ -169,27 +165,21 @@ class _TensorsAccessed:
     def ensure_tensor_exists(self, data_ptr: DataPtr) -> None:
         if data_ptr not in self.accesses:
             logger.info(
-                format_log_message(
-                    f"""
-                    Found tensor with pointer: {data_ptr}, but no matching tensor
-                    allocation in the trace. Backfilling the trace now.
-                    Perhaps the sanitizer was enabled after some torch operations?
-                    """
-                )
+                "Found tensor with pointer: %s, but no matching tensor "
+                "allocation in the trace. Backfilling the trace now. "
+                "Perhaps the sanitizer was enabled after some torch operations?",
+                data_ptr
             )
             self.create_tensor(data_ptr, None)

     def ensure_tensor_does_not_exist(self, data_ptr: DataPtr) -> None:
         if data_ptr in self.accesses:
             logger.info(
-                format_log_message(
-                    f"""
-                    Found duplicate tensor allocation in the trace for tensor with
-                    pointer: {data_ptr}. Assuming the trace for tensor deallocation
-                    wasn't caught and backfilling it now.
-                    Perhaps the sanitizer was enabled after some torch operations?
-                    """
-                )
+                "Found duplicate tensor allocation in the trace for tensor with "
+                "pointer: %s. Assuming the trace for tensor deallocation "
+                "wasn't caught and backfilling it now. "
+                "Perhaps the sanitizer was enabled after some torch operations?",
+                data_ptr
             )
             self.delete_tensor(data_ptr)

@@ -233,53 +223,41 @@ class StreamSynchronizations:
     def _ensure_stream_exists(self, stream: StreamId) -> None:
         if stream not in self.current_sync_states:
             logger.info(
-                format_log_message(
-                    f"""
-                    Found Stream with id: {stream}, but no matching stream
-                    creation in the trace. Backfilling the trace now.
-                    Perhaps the sanitizer was enabled after some torch operations?
-                    """
-                )
+                "Found Stream with id: %s, but no matching stream "
+                "creation in the trace. Backfilling the trace now. "
+                "Perhaps the sanitizer was enabled after some torch operations?",
+                stream
             )
             self.create_stream(stream)

     def _ensure_event_exists(self, event: EventId) -> None:
         if event not in self.recorded_sync_states:
             logger.info(
-                format_log_message(
-                    f"""
-                    Found Event with id: {event}, but no matching event
-                    creation in the trace. Backfilling the trace now.
-                    Perhaps the sanitizer was enabled after some torch operations?
-                    """
-                )
+                "Found Event with id: %s, but no matching event "
+                "creation in the trace. Backfilling the trace now. "
+                "Perhaps the sanitizer was enabled after some torch operations?",
+                event
            )
             self.create_event(event)

     def _ensure_event_does_not_exist(self, event: EventId) -> None:
         if event in self.recorded_sync_states:
             logger.info(
-                format_log_message(
-                    f"""
-                    Found duplicate event creation in the trace for event with
-                    id: {event}. Assuming the trace for event deletion wasn't caught
-                    and backfilling it now.
-                    Perhaps the sanitizer was enabled after some torch operations?
-                    """
-                )
+                "Found duplicate event creation in the trace for event with "
+                "id: %s. Assuming the trace for event deletion wasn't caught "
+                "and backfilling it now. "
+                "Perhaps the sanitizer was enabled after some torch operations?",
+                event
             )
             self.delete_event(event)

     def create_stream(self, stream: StreamId) -> None:
         if stream in self.current_sync_states:
             logger.info(
-                format_log_message(
-                    f"""
-                    Found duplicate Stream creation in the trace for Stream with
-                    id: {stream}. PyTorch Streams are only created once, so this
-                    trace entry is ignored.
-                    """
-                )
+                "Found duplicate Stream creation in the trace for Stream with "
+                "id: %s. PyTorch Streams are only created once, so this "
+                "trace entry is ignored.",
+                stream
             )
         else:
             self.host_sync_state[stream] = 0

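The sanitizer hunks also delete the format_log_message helper: rather than collapsing an indented triple-quoted f-string at runtime, the message is now written as adjacent string literals with %s placeholders, which Python concatenates at compile time and logging formats lazily. A rough sketch of the two spellings (the pointer value is made up):

    import logging

    logger = logging.getLogger(__name__)
    data_ptr = 0x7F00DEAD  # example value

    # Old shape: build the message eagerly, then strip indentation at runtime.
    def format_log_message(message: str) -> str:
        return " ".join(line.strip() for line in message.strip().splitlines())

    logger.info(format_log_message(f"""
        Found tensor with pointer: {data_ptr}, but no matching tensor
        allocation in the trace. Backfilling the trace now.
        """))

    # New shape: adjacent literals merge into one template; %s is interpolated
    # only if INFO is enabled for this logger.
    logger.info(
        "Found tensor with pointer: %s, but no matching tensor "
        "allocation in the trace. Backfilling the trace now.",
        data_ptr,
    )
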
@@ -415,7 +415,7 @@ class ElasticAgent(abc.ABC):
         if group_result.is_failed():
             # workers failed
             failure = group_result.failures[0]
-            log.exception(f"worker 0 failed with exit code : {failure.exit_code}")
+            log.exception("worker 0 failed with exit code : %s", failure.exit_code)
         else:
             return group_result.return_values[0]  # return rank 0's results

@@ -949,5 +949,6 @@ class SimpleElasticAgent(ElasticAgent):
             raise
         except Exception:
             log.exception(
-                f"Error waiting on exit barrier. Elapsed: {time.time() - start} seconds"
+                "Error waiting on exit barrier. Elapsed: %s seconds",
+                time.time() - start
             )

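Worth noting: only the string interpolation is deferred by this style. An argument expression such as `time.time() - start` above is still evaluated when the logging call runs, regardless of the logger level. A small sketch of the point (the surrounding try block is invented for illustration):

    import logging
    import time

    log = logging.getLogger(__name__)
    start = time.time()

    try:
        raise TimeoutError("stand-in for the real barrier wait")
    except Exception:
        # The subtraction happens now; only the "%s" substitution waits until
        # a handler formats the record. log.exception also attaches the traceback.
        log.exception("Error waiting on exit barrier. Elapsed: %s seconds", time.time() - start)
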
@@ -491,9 +491,12 @@ class MultiprocessContext(PContext):
             error_filepath = self.error_files[failed_local_rank]

             log.error(
-                f"failed (exitcode: {failed_proc.exitcode})"
-                f" local_rank: {failed_local_rank} (pid: {e.pid})"
-                f" of fn: {fn_name} (start_method: {self.start_method})",
+                "failed (exitcode: %s)"
+                " local_rank: %s (pid: %s)"
+                " of fn: %s (start_method: %s)",
+                failed_proc.exitcode,
+                failed_local_rank, e.pid,
+                fn_name, self.start_method,
                 exc_info=True,
             )

@@ -118,7 +118,7 @@ class ProcessFailure:
                     self.error_file_data
                 )
             except Exception:
-                log.exception(f"Failed to parse reply file: {self.error_file}")
+                log.exception("Failed to parse reply file: %s", self.error_file)
                 raise
         else:
             self._set_no_reply_file()

@@ -351,9 +351,10 @@ def record(
     else:
         log.info(
             (
-                f"local_rank {rank} FAILED with no error file."
-                f" Decorate your entrypoint fn with @record for traceback info."
-                f" See: https://pytorch.org/docs/stable/elastic/errors.html"
+                "local_rank %s FAILED with no error file."
+                " Decorate your entrypoint fn with @record for traceback info."
+                " See: https://pytorch.org/docs/stable/elastic/errors.html",
+                rank
             )
         )
     raise

@@ -132,8 +132,9 @@ class TailLog:
                 f.result()
             except Exception as e:
                 log.error(
-                    f"error in log tailor for {self._name}{local_rank}."
-                    f" {e.__class__.__qualname__}: {e}",
+                    "error in log tailor for %s%s. %s: %s",
+                    self._name, local_rank,
+                    e.__class__.__qualname__, e,
                 )

         if self._threadpool:

@@ -329,5 +329,5 @@ class FileTimerServer:
             log.info("Process with pid=%s does not exist. Skipping", worker_pid)
             return True
         except Exception as e:
-            log.error(f"Error terminating pid={worker_pid}", exc_info=e)
+            log.error("Error terminating pid=%s", worker_pid, exc_info=e)
             return False

@@ -121,5 +121,5 @@ class LocalTimerServer(TimerServer):
             log.info("Process with pid=%s does not exist. Skipping", worker_id)
             return True
         except Exception as e:
-            log.error(f"Error terminating pid={worker_id}", exc_info=e)
+            log.error("Error terminating pid=%s", worker_id, exc_info=e)
             return False

@@ -148,10 +148,11 @@ if is_available():
         # Ignore type error because mypy doesn't handle dynamically generated type objects (#4865)
         if backend != BackendType.TENSORPIPE:  # type: ignore[attr-defined]
             logger.warning(
-                f"RPC was initialized with no explicit backend but with options "  # type: ignore[attr-defined]
-                f"corresponding to {backend}, hence that backend will be used "
-                f"instead of the default {BackendType.TENSORPIPE}. To silence this "
-                f"warning pass `backend={backend}` explicitly."
+                "RPC was initialized with no explicit backend but with options "  # type: ignore[attr-defined]
+                "corresponding to %(backend)s, hence that backend will be used "
+                "instead of the default BackendType.TENSORPIPE. To silence this "
+                "warning pass `backend=%(backend)s` explicitly.",
+                {'backend': backend}
             )

         if backend is None:

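The RPC hunk uses the mapping form of %-style formatting so the same argument can appear twice in the template while being passed once: when the single lazy argument is a dict, logging uses it to fill %(name)s placeholders. A short illustrative sketch (the backend value is made up):

    import logging

    logger = logging.getLogger(__name__)
    backend = "TENSORPIPE"  # example value

    logger.warning(
        "RPC was initialized with options corresponding to %(backend)s; "
        "pass `backend=%(backend)s` explicitly to silence this warning.",
        {"backend": backend},
    )
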
@@ -207,7 +207,7 @@ class CapabilityBasedPartitioner:

         logger.debug("Partitions proposed:")
         for id, partition in partitions_by_id.items():
-            logger.debug(f"partition #{id}", [node.name for node in partition.nodes])
+            logger.debug("partition #%s: %s", id, [node.name for node in partition.nodes])

         return list(partitions_by_id.values())

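The partitioner hunk is also a small behavior fix: the old call passed the node-name list as a lazy argument while the template had no placeholder for it, so logging reported a formatting error instead of printing the list. A quick sketch of the difference (names invented):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger(__name__)
    node_names = ["add", "relu"]  # example values

    # Old shape: extra argument with no placeholder -> "--- Logging error ---"
    # ("not all arguments converted during string formatting"); the names are lost.
    logger.debug("partition #1", node_names)

    # New shape: both values land in the message and formatting stays lazy.
    logger.debug("partition #%s: %s", 1, node_names)
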
@@ -280,7 +280,7 @@ class PassManager:
             modified = modified or res.modified

             if isinstance(module, GraphModule):
-                logger.debug(f"Graph after pass '{fn_name}':", module.graph)
+                logger.debug("Graph after pass '%s': %s", fn_name, module.graph)
                 module.recompile()

             # Check graph invariants

@@ -450,7 +450,7 @@ class _MinimizerBase:
         report.append(f"Sequential traverse iteration {self.iteration}.")
         report.append(f"Visit node: {node.name}")

-        _LOGGER.info(f"Visit node: {node.name}")
+        _LOGGER.info("Visit node: %s", node.name)
         cur_nodes: NodeSet = {node}

         if node in self.fusions:

@@ -63,7 +63,7 @@ def _calc___package__(globals):
     if package is not None:
         if spec is not None and package != spec.parent:
             _warnings.warn(  # noqa: G010
-                "__package__ != __spec__.parent " f"({package!r} != {spec.parent!r})",
+                f"__package__ != __spec__.parent ({package!r} != {spec.parent!r})",  # noqa: G004
                 ImportWarning,
                 stacklevel=3,
             )

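This hunk is the opposite case: `_warnings.warn` is not a logger call, so there is no lazy formatting to gain; the two fragments are merged into a single f-string and the flake8-logging-format rule G004 (f-string in a logging call) is silenced with a noqa, since the linter apparently treats the `.warn` call like a logger method (the existing `# noqa: G010` is there for the same reason). A minimal illustration with invented values:

    import warnings

    package, parent = "pkg.sub", "pkg.other"  # example values

    # warnings.warn always receives a fully built string, so an f-string is fine;
    # the noqa only tells flake8-logging-format not to flag it.
    warnings.warn(
        f"__package__ != __spec__.parent ({package!r} != {parent!r})",  # noqa: G004
        ImportWarning,
        stacklevel=3,
    )
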
@@ -244,8 +244,9 @@ class Trainer:
         if trainer_has_less_inputs:
             input_batches = batches[: len(batches) // 2]
             gLogger.info(
-                f"""Trainer reduced input patches from {len(batches)}
-                to {len(input_batches)} to simulate uneven inputs."""
+                "Trainer reduced input patches from %s "
+                "to %s to simulate uneven inputs.",
+                len(batches), len(input_batches)
             )
         else:
             input_batches = batches