Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/61997

Profiling model loading latency on AI Bench (Android Galaxy S8 US) showed that a significant amount of time was spent reading data through FileAdapter, which internally delegates to IStreamAdapter. IStreamAdapter uses `std::istream` under the hood, which is relatively inefficient. This change reduces the model loading time from [~293ms](https://www.internalfb.com/intern/aibench/details/600870874797229) to [~254ms](https://www.internalfb.com/intern/aibench/details/163731416457694), a reduction of ~13%.

ghstack-source-id: 134634610

Test Plan: See the AI Bench links above.

Reviewed By: raziel

Differential Revision: D29812191

fbshipit-source-id: 57810fdc1ac515305f5504f88ac5e9e4319e9d28
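The corresponding definitions live in file_adapter.cc, which is not shown on this page. As a rough illustration of why a direct FILE*-based path is cheaper than going through `std::istream`, a minimal sketch of a positioned read might look like the following; the helper name `ReadAt` and its error handling are assumptions for illustration, not the actual implementation:

#include <cstdint>
#include <cstdio>
#include <stdexcept>

// Hypothetical helper illustrating the direct-FILE read path: seek to the
// requested offset, then read up to `n` bytes. Unlike std::istream::read,
// each call avoids sentry construction and stream-state bookkeeping.
// (A production version would use a 64-bit-safe seek instead of fseek.)
inline size_t ReadAt(FILE* fp, uint64_t pos, void* buf, size_t n) {
  if (std::fseek(fp, static_cast<long>(pos), SEEK_SET) != 0) {
    throw std::runtime_error("fseek failed");
  }
  return std::fread(buf, /*size=*/1, /*count=*/n, fp);
}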
37 lines | 866 B | C++
#pragma once

#include <cstdio>
#include <fstream>
#include <memory>
#include <string>

#include <c10/macros/Macros.h>

#include "caffe2/serialize/istream_adapter.h"
#include "caffe2/serialize/read_adapter_interface.h"

namespace caffe2 {
namespace serialize {

class TORCH_API FileAdapter final : public ReadAdapterInterface {
 public:
  C10_DISABLE_COPY_AND_ASSIGN(FileAdapter);
  explicit FileAdapter(const std::string& file_name);
  size_t size() const override;
  size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
      const override;
  ~FileAdapter() override;

 private:
  // An RAII wrapper for a FILE pointer. Closes the file on destruction.
  struct RAIIFile {
    FILE* fp_;
    explicit RAIIFile(const std::string& file_name);
    ~RAIIFile();
  };

  RAIIFile file_;
  // The size of the opened file in bytes.
  uint64_t size_;
};

} // namespace serialize
} // namespace caffe2
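For reference, a hedged usage sketch of the interface declared above; the file name, byte count, and `what` label here are illustrative only:

#include <vector>

#include "caffe2/serialize/file_adapter.h"

// Illustrative use of FileAdapter: open a serialized model file and read
// its first 8 bytes. The `what` argument labels the read in error messages.
int main() {
  caffe2::serialize::FileAdapter adapter("model.ptl"); // hypothetical path
  std::vector<char> buf(8);
  size_t n = adapter.read(/*pos=*/0, buf.data(), buf.size(), "file header");
  return n == buf.size() ? 0 : 1;
}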