[easy] Add option for profiling backend in inference benchmark (#116187)
Some misc fixes; also adds an option to tag each result-table row with an experiment name.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/116187
Approved by: https://github.com/albanD
ghstack dependencies: #115286
committed by: PyTorch MergeBot
parent: 31f21e033e
commit: 75a4b10d56
process_metrics.py

```diff
@@ -13,6 +13,7 @@ import pandas as pd
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Parse output files")
     parser.add_argument("--csv", type=str, help="Path to csv file")
+    parser.add_argument("--name", type=str, help="Name of experiment")
     args = parser.parse_args()

     input_csv = "./results/" + args.csv
```
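The new `--name` flag is threaded through from the runner script (see the shell diff below). A direct invocation mirroring that call, with `baseline` as a hypothetical experiment name:

```
python process_metrics.py --csv output.csv --name baseline
```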
```diff
@@ -37,13 +38,13 @@ if __name__ == "__main__":
         if write_header:
             f.write(f"## Batch Size {batch_size} Compile {compile}\n\n")
             f.write(
-                "| Warmup_latency (s) | Average_latency (s) | Throughput (samples/sec) | GPU Utilization (%) |\n"
+                "| Experiment | Warmup_latency (s) | Average_latency (s) | Throughput (samples/sec) | GPU Utilization (%) |\n"
             )
             f.write(
-                "| ------------------ | ------------------- | ------------------------ | ------------------- |\n"
+                "| ---------- | ------------------ | ------------------- | ------------------------ | ------------------- |\n"
             )

-        line = "|"
+        line = f"| {args.name} |"
         for metric in metrics:
             line += f" {means[metric]:.3f} +/- {stds[metric]:.3f} |"
         f.write(line + "\n")
```
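This hunk references `metrics`, `means`, and `stds`, which are computed in an unchanged part of the file that the diff does not show. A minimal sketch of what that computation could look like, assuming the benchmark CSV has one column per reported metric (the real column names and aggregation may differ):

```python
import pandas as pd

# Assumed reconstruction of the unchanged part of process_metrics.py.
df = pd.read_csv(input_csv)  # input_csv = "./results/" + args.csv
metrics = list(df.columns)   # one column per reported metric
means = {m: df[m].mean() for m in metrics}
stds = {m: df[m].std() for m in metrics}
```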
benchmark runner script (shell; the filename is not preserved in this view)

```diff
@@ -1,5 +1,11 @@
 #!/bin/bash

+if [ $# -ne 1 ]; then
+    echo "Usage: $0 {experiment_name}"
+    exit 1
+fi
+
+experiment_name="$1"
 benchmark_script="server.py"
 checkpoint_file="resnet18-f37072fd.pth"
 downloaded_checkpoint=false
```
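The runner now requires exactly one positional argument and exits with the usage message otherwise. A typical invocation (the `runner.sh` filename and the experiment name here are illustrative, since the script's name is not shown in this view):

```
bash runner.sh resnet18_baseline
```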
```diff
@@ -30,7 +36,7 @@ for batch_size in "${batch_size_values[@]}"; do
                 python -W ignore "$benchmark_script" --batch_size "$batch_size" --output_file "$output_file" --no-compile
             fi
         done
-        python process_metrics.py --csv "$output_file"
+        python process_metrics.py --csv "$output_file" --name "$experiment_name"
         rm "./results/$output_file"
     done
 done
```
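With `--name`, each `process_metrics.py` run tags its row, so results from different runner invocations can be told apart in a shared table. Given the `f.write` calls above, the emitted markdown has this shape (the experiment name and the `<mean> +/- <std>` entries are placeholders, not real measurements):

```
## Batch Size {batch_size} Compile {compile}

| Experiment | Warmup_latency (s) | Average_latency (s) | Throughput (samples/sec) | GPU Utilization (%) |
| ---------- | ------------------ | ------------------- | ------------------------ | ------------------- |
| baseline | <mean> +/- <std> | <mean> +/- <std> | <mean> +/- <std> | <mean> +/- <std> |
```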
server.py

```diff
@@ -210,6 +210,9 @@ if __name__ == "__main__":
         "--compile", default=True, action=argparse.BooleanOptionalAction
     )
     parser.add_argument("--output_file", type=str, default="output.csv")
+    parser.add_argument(
+        "--profile", default=False, action=argparse.BooleanOptionalAction
+    )
     args = parser.parse_args()

     downloaded_checkpoint = False
```
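As with `--compile`, `argparse.BooleanOptionalAction` (Python 3.9+) generates a paired negative flag, so both `--profile` and `--no-profile` parse. A self-contained demonstration of this flag style (not `server.py` itself):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--profile", default=False, action=argparse.BooleanOptionalAction
)

print(parser.parse_args(["--profile"]).profile)     # True
print(parser.parse_args(["--no-profile"]).profile)  # False
print(parser.parse_args([]).profile)                # False (the default)
```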
```diff
@@ -247,7 +250,16 @@ if __name__ == "__main__":
     )

     frontend.start()
-    asyncio.run(backend.run())
+
+    if args.profile:
+
+        def trace_handler(prof):
+            prof.export_chrome_trace("trace.json")
+
+        with torch.profiler.profile(on_trace_ready=trace_handler) as prof:
+            asyncio.run(backend.run())
+    else:
+        asyncio.run(backend.run())

     frontend.join()

```
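Since no `schedule` is passed, `on_trace_ready` fires once when the `with` block exits, writing the whole run to `trace.json`, which can be opened in `chrome://tracing` or Perfetto. For comparison, a scheduled variant (not part of this PR) that also records CUDA activity and cycles through wait/warmup/active windows, useful when the full run is too long to trace end to end; `run_one_batch` is a hypothetical stand-in for one unit of benchmark work:

```python
import torch

# Hypothetical scheduled variant; the PR above traces the entire run instead.
def trace_handler(prof):
    # step_num identifies which active window produced this trace
    prof.export_chrome_trace(f"trace_step{prof.step_num}.json")

with torch.profiler.profile(
    activities=[
        torch.profiler.ProfilerActivity.CPU,
        torch.profiler.ProfilerActivity.CUDA,
    ],
    schedule=torch.profiler.schedule(wait=1, warmup=1, active=3),
    on_trace_ready=trace_handler,
) as prof:
    for _ in range(8):
        run_one_batch()  # stand-in for one unit of benchmark work
        prof.step()      # advance the profiler schedule
```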