Add binary to benchmark model load speed (#74700)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/74700

Test Plan:
Imported from OSS

Some results from running this benchmark on a quantized CPU xirp14b model on a Pixel 5:

```
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "46749"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19261"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19235"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19396"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19486"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19562"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19566"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19559"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19632"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19938"}
```
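Note that the first load is markedly slower than the rest (roughly 2.4x here), presumably because it includes one-time initialization costs and cold caches; subsequent loads settle around 19.5 ms. The same pattern appears in the Vulkan runs below.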

Some results from running this benchmark on the Vulkan xirp20a model on a Pixel 5, after pre-loading the Context (i.e., initializing the Vulkan backend's global Context ahead of the timed loads, so that one-time GPU setup is not attributed to the first model load):

```
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "38664"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19921"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20316"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20255"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20219"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20329"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20463"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "21072"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20668"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20889"}
```

Without pre-loading the Context:

```
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "70850"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19867"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20211"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20039"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20082"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20268"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20363"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "21103"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20511"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20528"}

```
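For reference, the runs above use 10 iterations, matching the binary's default; an invocation looks like the following (the model path is a placeholder, and the flags are taken from the binary's usage message):

```
./load_benchmark_torch --model=<model_file> --iter=10
```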

Reviewed By: mrshenli

Differential Revision: D35124881

Pulled By: SS-JIA

fbshipit-source-id: 0f093e4aa45d69c538a4fe2003e0d5617d72b97a
(cherry picked from commit 96f991420ad720300aea51cc0a1a6c0f79d2820b)

binaries/CMakeLists.txt:

```diff
@@ -4,6 +4,7 @@ if(INTERN_BUILD_MOBILE)
   caffe2_binary_target("speed_benchmark.cc")
 else()
   caffe2_binary_target("speed_benchmark_torch.cc")
+  caffe2_binary_target("load_benchmark_torch.cc")
   if(NOT BUILD_LITE_INTERPRETER)
     caffe2_binary_target("compare_models_torch.cc")
   endif()
```
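Note that the new target is registered outside the `NOT BUILD_LITE_INTERPRETER` guard, so load_benchmark_torch builds for both lite-interpreter and full-JIT configurations; the source below selects the matching loader at compile time.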

binaries/load_benchmark_torch.cc (new file):

```cpp
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string>
#include <vector>
#include <ATen/ATen.h>
#include "caffe2/core/timer.h"
#include "caffe2/utils/string_utils.h"
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/serialization/import.h>
#include <torch/script.h>
#include <c10/mobile/CPUCachingAllocator.h>
#include <chrono>
using namespace std::chrono;

C10_DEFINE_string(model, "", "The given torch script model to benchmark.");
C10_DEFINE_int(iter, 10, "The number of iterations to run.");
C10_DEFINE_bool(
    report_pep,
    true,
    "Whether to print performance stats for AI-PEP.");

int main(int argc, char** argv) {
  c10::SetUsageMessage(
      "Run model load time benchmark for pytorch model.\n"
      "Example usage:\n"
      "./load_benchmark_torch"
      " --model=<model_file>"
      " --iter=20");
  if (!c10::ParseCommandLineFlags(&argc, &argv)) {
    std::cerr << "Failed to parse command line flags!" << std::endl;
    return 1;
  }

  std::cout << "Starting benchmark." << std::endl;
  CAFFE_ENFORCE(
      FLAGS_iter >= 0,
      "Number of main runs should be non-negative, provided ",
      FLAGS_iter,
      ".");

  // Wall-clock timer covering the entire benchmark loop.
  caffe2::Timer timer;
  std::vector<long> times;

  for (int i = 0; i < FLAGS_iter; ++i) {
    // Time each model load individually.
    auto start = high_resolution_clock::now();
#if BUILD_LITE_INTERPRETER
    // Lite-interpreter builds load the model through the mobile importer.
    auto module = torch::jit::_load_for_mobile(FLAGS_model);
#else
    // Full-JIT builds load the model through the TorchScript importer.
    auto module = torch::jit::load(FLAGS_model);
#endif
    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(stop - start);
    times.push_back(duration.count());
  }

  const double micros = static_cast<double>(timer.MicroSeconds());
  if (FLAGS_report_pep) {
    // Emit one PyTorchObserver JSON line per iteration for AI-PEP to parse.
    for (auto t : times) {
      std::cout << R"(PyTorchObserver {"type": "NET", "unit": "us", )"
                << R"("metric": "latency", "value": ")"
                << t << R"("})" << std::endl;
    }
  }

  const double iters = static_cast<double>(FLAGS_iter);
  std::cout << "Main run finished. Microseconds per iter: "
            << micros / iters
            << ". Iters per second: " << 1000.0 * 1000 * iters / micros
            << std::endl;

  return 0;
}
```
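A note on the timing: each iteration's latency is measured with std::chrono directly around the load call and reported via the PyTorchObserver lines, while the final summary divides the total elapsed time of the caffe2::Timer (constructed before the loop) by the iteration count, so the two can differ slightly by per-iteration bookkeeping overhead.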