mirror of
https://github.com/pytorch/pytorch.git
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/74700

Test Plan: Imported from OSS

Some results from running this benchmark for a quantized CPU xirp14b model on a Pixel 5:

```
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "46749"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19261"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19235"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19396"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19486"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19562"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19566"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19559"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19632"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19938"}
```

Some results from running this benchmark for the Vulkan xirp20a model on a Pixel 5, after pre-loading the Context:

```
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "38664"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19921"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20316"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20255"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20219"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20329"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20463"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "21072"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20668"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20889"}
```

Without pre-loading the Context:

```
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "70850"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "19867"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20211"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20039"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20082"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20268"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20363"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "21103"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20511"}
PyTorchObserver {"type": "NET", "unit": "us", "metric": "latency", "value": "20528"}
```

Reviewed By: mrshenli

Differential Revision: D35124881

Pulled By: SS-JIA

fbshipit-source-id: 0f093e4aa45d69c538a4fe2003e0d5617d72b97a
(cherry picked from commit 96f991420ad720300aea51cc0a1a6c0f79d2820b)
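For reference, the observer lines above are the benchmark's own `--report_pep` output. Following the usage message defined in `main()` in the source below, a typical invocation (flag values illustrative) would be:

```
./load_benchmark_torch --model=<model_file> --iter=10 --report_pep=true
```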
94 lines
2.7 KiB
C++
/**
 * Copyright (c) 2016-present, Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream> // for std::cout / std::cerr used below
#include <string>
#include <vector>

#include <ATen/ATen.h>
#include "caffe2/core/timer.h"
#include "caffe2/utils/string_utils.h"
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/serialization/import.h>
#include <torch/script.h>

#include <c10/mobile/CPUCachingAllocator.h>

#include <chrono>
using namespace std::chrono;

C10_DEFINE_string(model, "", "The given torch script model to benchmark.");
C10_DEFINE_int(iter, 10, "The number of iterations to run.");
C10_DEFINE_bool(
    report_pep,
    true,
    "Whether to print performance stats for AI-PEP.");

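// When --report_pep is true, each measured load time is emitted as a
// PyTorchObserver JSON line ("metric": "latency", "unit": "us"), matching
// the results shown in the commit message above.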
int main(int argc, char** argv) {
  c10::SetUsageMessage(
    "Run model load time benchmark for pytorch model.\n"
    "Example usage:\n"
    "./load_benchmark_torch"
    " --model=<model_file>"
    " --iter=20");
  if (!c10::ParseCommandLineFlags(&argc, &argv)) {
    std::cerr << "Failed to parse command line flags!" << std::endl;
    return 1;
  }

  std::cout << "Starting benchmark." << std::endl;
  CAFFE_ENFORCE(
      FLAGS_iter >= 0,
      "Number of main runs should be non-negative, provided ",
      FLAGS_iter,
      ".");

  caffe2::Timer timer;
  std::vector<long> times;

  for (int i = 0; i < FLAGS_iter; ++i) {
    auto start = high_resolution_clock::now();

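    // Lite-interpreter builds load the model through the mobile importer;
    // full-JIT builds use the standard TorchScript loader.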
#if BUILD_LITE_INTERPRETER
    auto module = torch::jit::_load_for_mobile(FLAGS_model);
#else
    auto module = torch::jit::load(FLAGS_model);
#endif

    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(stop - start);
    times.push_back(duration.count());
  }

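  // Note: `timer` has been running since its construction before the loop,
  // so `micros` below is the total wall-clock time across all iterations,
  // not a per-iteration value.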
  const double micros = static_cast<double>(timer.MicroSeconds());
  if (FLAGS_report_pep) {
    for (auto t : times) {
      std::cout << R"(PyTorchObserver {"type": "NET", "unit": "us", )"
                << R"("metric": "latency", "value": ")"
                << t << R"("})" << std::endl;
    }
  }

  const double iters = static_cast<double>(FLAGS_iter);
  std::cout << "Main run finished. Microseconds per iter: "
            << micros / iters
            << ". Iters per second: " << 1000.0 * 1000 * iters / micros
            << std::endl;

  return 0;
}