Reland "Make debug_pkl smaller by only emitting unique traces." (#73368)
Summary:

## Original commit message:

Pull Request resolved: https://github.com/pytorch/pytorch/pull/73368

The debug_pkl file inside PyTorch's .pt file consists of a list of SourceRanges. Each SourceRange points to a Source, which holds a stack trace, a filename, and start/end offsets; these are emitted into debug_pkl as strings. Since many SourceRanges share the same Source, the trace strings can be deduplicated. The newer format saves the set of unique traces in a tuple, and each SourceRange then stores the offset of its trace within that tuple (i.e. manually applied dictionary compression). This reduces the file size.

On loading, if we copied each trace into a Source as a string, runtime memory would still blow up. To mitigate this, we use SourceView directly instead of Source: it takes a reference to the string owned by the Deserializer and exposes it as a string_view. This is safe because the Deserializer is held by the Unpickler via shared_ptr, and the Unpickler is in turn held via shared_ptr by another Source object. That Source object stays alive for the duration of model construction.

Test Plan:

## Original Test plan

unit test

Took the original file (312271638_930.predictor.disagg.local), loaded it with `torch.jit.load`, and saved it again with `torch.jit.save`. Unzipped both archives and compared their contents:
```
[qihan@devvm5585.vll0 ~]$ du archive -h
4.0K    archive/xl_model_weights
3.7M    archive/extra
8.0K    archive/code/__torch__/caffe2/torch/fb/model_transform/splitting
8.0K    archive/code/__torch__/caffe2/torch/fb/model_transform
8.0K    archive/code/__torch__/caffe2/torch/fb
8.0K    archive/code/__torch__/caffe2/torch
8.0K    archive/code/__torch__/caffe2
20M     archive/code/__torch__/torch/fx/graph_module
20M     archive/code/__torch__/torch/fx
8.0K    archive/code/__torch__/torch/classes
20M     archive/code/__torch__/torch
20M     archive/code/__torch__
20M     archive/code
2.7M    archive/constants
35M     archive
[qihan@devvm5585.vll0 ~]$ du resaved -h
4.0K    resaved/extra
8.0K    resaved/code/__torch__/caffe2/torch/fb/model_transform/splitting
8.0K    resaved/code/__torch__/caffe2/torch/fb/model_transform
8.0K    resaved/code/__torch__/caffe2/torch/fb
8.0K    resaved/code/__torch__/caffe2/torch
8.0K    resaved/code/__torch__/caffe2
1.3M    resaved/code/__torch__/torch/fx/graph_module
1.3M    resaved/code/__torch__/torch/fx
8.0K    resaved/code/__torch__/torch/classes
1.4M    resaved/code/__torch__/torch
1.4M    resaved/code/__torch__
1.4M    resaved/code
2.7M    resaved/constants
13M     resaved
[qihan@devvm5585.vll0 ~]$
```

## Additional test:

`buck test mode/dev-tsan //caffe2/benchmarks/static_runtime:static_runtime_cpptest -- --exact 'caffe2/benchmarks/static_runtime:static_runtime_cpptest - StaticRuntime.to'` passes.

test jest.fbios.startup_cold_start.local.simulator f333356873

Differential Revision: D35196883

Pull Request resolved: https://github.com/pytorch/pytorch/pull/74869
Approved by: https://github.com/gmagogsfm
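As an illustration of the dictionary-compression idea described above, here is a minimal Python sketch of deduplicating shared trace strings. The names (`dedupe_traces`, `unique_traces`, `compact_ranges`) are hypothetical and do not mirror the actual serializer code, which lives in the C++ pickler; this only shows the scheme of emitting each unique trace once and storing per-range offsets.

```python
# Hypothetical sketch, not the real debug_pkl writer.
# Each "source range" carries a (trace, start, end) triple; many ranges share
# the same trace string, so we emit each unique trace once and keep offsets.

def dedupe_traces(source_ranges):
    """source_ranges: list of (trace_str, start, end) tuples."""
    unique_traces = []   # unique trace strings, emitted once as a tuple
    trace_to_index = {}  # trace string -> position in unique_traces
    compact_ranges = []  # (trace_index, start, end) per source range

    for trace, start, end in source_ranges:
        if trace not in trace_to_index:
            trace_to_index[trace] = len(unique_traces)
            unique_traces.append(trace)
        compact_ranges.append((trace_to_index[trace], start, end))

    # The new format stores the unique traces plus small per-range offsets,
    # instead of repeating the full trace string for every range.
    return tuple(unique_traces), compact_ranges


ranges = [
    ("a.py:3\n  foo()", 0, 4),
    ("a.py:3\n  foo()", 5, 9),
    ("b.py:7\n  bar()", 0, 2),
]
traces, compact = dedupe_traces(ranges)
assert len(traces) == 2 and compact[1][0] == 0
```

Each unique trace is written once and every SourceRange carries only an integer offset, which is what shrinks debug_pkl; the SourceView change in the diff below then lets the loader expose those strings as string_views into the deserializer-owned buffer instead of copying them.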
@@ -187,39 +187,39 @@ struct TORCH_API SharedParserData {
 #undef ADD_CASE
   }
 
   // find the longest match of str.substring(pos) against a token, return true
   // if successful filling in kind, start,and len
   bool match(
-      c10::string_view str,
-      size_t pos,
+      StringCordView::Iterator pos,
       bool continuation, // are we inside a scope where newlines don't count
                          // (e.g. inside parens)
       bool whitespace_token, // should we treat whitespace as a token
       int* kind,
-      size_t* start,
-      size_t* len) {
+      StringCordView::Iterator* start,
+      StringCordView::Iterator* end) {
     *start = pos;
     // skip whitespace
-    while (pos < str.size() && isblank(str[pos]))
-      pos++;
+    while (pos.has_next() && isblank(*pos)) {
+      ++pos;
+    }
 
     // special handling
-    if (pos < str.size()) {
-      if (str[pos] == '#' && !isTypeComment(str, pos)) {
+    if (pos.has_next()) {
+      if (*pos == '#' && !isTypeComment(pos)) {
         // skip comments
-        while (pos < str.size() && str[pos] != '\n')
-          pos++;
+        while (pos.has_next() && *pos != '\n')
+          ++pos;
         // tail call, handle whitespace and more comments
-        return match(
-            str, pos, continuation, whitespace_token, kind, start, len);
+        return match(pos, continuation, whitespace_token, kind, start, end);
       }
-      if (str[pos] == '\\' && pos + 1 < str.size() && str[pos + 1] == '\n' &&
-          !whitespace_token) {
-        return match(str, pos + 2, continuation, false, kind, start, len);
+      if (*pos == '\\') {
+        auto newiter = pos;
+        ++newiter;
+        if (newiter.has_next() && *newiter == '\n' && !whitespace_token) {
+          ++newiter;
+          return match(newiter, continuation, false, kind, start, end);
+        }
       }
-      if (str[pos] == '\n') {
-        return match(
-            str, pos + 1, continuation, !continuation, kind, start, len);
+      if (*pos == '\n') {
+        return match(++pos, continuation, !continuation, kind, start, end);
       }
     }
     // we handle white space before EOF because in the case we have something
@@ -228,26 +228,31 @@ struct TORCH_API SharedParserData {
     // else:
     // pass
     if (whitespace_token) {
-      *kind = pos == str.size() ? TK_WHITESPACE_EOF : TK_WHITESPACE;
-      *len = pos - *start;
+      *kind = !pos.has_next() ? TK_WHITESPACE_EOF : TK_WHITESPACE;
+      *end = pos;
       return true;
     }
-    if (pos == str.size()) {
+    if (!pos.has_next()) {
       *kind = TK_EOF;
       *start = pos;
-      *len = 0;
+      *end = *start;
       return true;
     }
     // invariant: the next token is not whitespace or newline
     *start = pos;
     // check for a valid number
-    if (isNumber(str, pos, len)) {
+    size_t len;
+    if (isNumber(pos.rest_line(), 0, &len)) {
+      *end = *start;
+      *end += len;
       *kind = TK_NUMBER;
       return true;
     }
     // check for string
-    if (isString(str, pos, len)) {
+    if (isString(pos.rest_line(), 0, &len)) {
       *kind = TK_STRINGLITERAL;
+      *end = *start;
+      *end += len;
       return true;
     }
 
@@ -257,11 +262,14 @@ struct TORCH_API SharedParserData {
     bool matched = false;
     bool ident = true;
     TokenTrie* cur = head.get();
-    for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr); i++) {
-      ident = ident && validIdent(i, str[pos + i]);
+    // for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr);
+    // i++)
+    for (size_t i = 0; pos.has_next() && (ident || cur != nullptr);
+         ++pos, ++i) {
+      ident = ident && validIdent(i, *pos);
       if (ident) {
         matched = true;
-        *len = i + 1;
+        *end = pos.next_iter();
         *kind = TK_IDENT;
       }
       // check for token second, so that e.g. 'max' matches the token TK_MAX
@@ -270,14 +278,14 @@ struct TORCH_API SharedParserData {
       if (cur) {
         const auto begin_it = cur->child_chars.begin();
         const auto end_it = cur->child_chars.end();
-        const auto ch_it = std::find(begin_it, end_it, str[pos + i]);
+        const auto ch_it = std::find(begin_it, end_it, *pos);
 
         cur = (ch_it == end_it) ? nullptr
                                 : cur->child_tries[ch_it - begin_it].get();
 
         if (cur && cur->kind != 0) {
           matched = true;
-          *len = i + 1;
+          *end = pos.next_iter();
           *kind = cur->kind;
         }
       }
@@ -368,8 +376,19 @@ struct TORCH_API SharedParserData {
   bool isblank(int n) {
     return isspace(n) && n != '\n';
   }
 
+  bool isTypeComment(StringCordView::Iterator str_iter) {
+    c10::string_view rest_line = str_iter.rest_line();
+    const std::string type_string = "# type:";
+    if (rest_line.size() < type_string.length()) {
+      return false;
+    }
+    auto match_string = rest_line.substr(0, type_string.size());
+    return match_string == type_string;
+  }
+
   // Make an exception ignoring comments for type annotation comments
-  bool isTypeComment(c10::string_view str, size_t pos) {
+  bool isTypeComment(StringCordView str, size_t pos) {
     const std::string type_string = "# type:";
     if (str.size() < pos + type_string.length()) {
       return false;
@@ -388,7 +407,7 @@ struct Token {
   SourceRange range;
   Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {}
   std::string text() {
-    return range.text();
+    return std::string(range.token_text());
   }
   std::string kindString() const {
     return kindToString(kind);
@@ -396,7 +415,7 @@ struct Token {
 };
 
 struct Lexer {
-  explicit Lexer(std::shared_ptr<SourceView> source)
+  explicit Lexer(std::shared_ptr<Source> source)
       : source(std::move(source)),
         pos(0),
         nesting(0),
@@ -514,30 +533,37 @@ struct Lexer {
   Token lexRaw(bool whitespace_token = false) {
     // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     int kind;
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    size_t start;
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    size_t length;
     AT_ASSERT(source);
+    if (current == nullptr) {
+      AT_ASSERT(pos == 0);
+      current = std::make_unique<StringCordView::Iterator>(
+          source->text_str().begin());
+    }
+
+    StringCordView::Iterator start_iter = *current;
+    StringCordView::Iterator end_iter = *current;
     if (!shared.match(
-            source->text(),
-            pos,
+            *current,
             nesting > 0,
             whitespace_token,
             &kind,
-            &start,
-            &length)) {
+            &start_iter,
+            &end_iter)) {
       expected(
           "a valid token",
           Token(
-              (source->text())[start], SourceRange(source, start, start + 1)));
+              **current,
+              SourceRange(source, start_iter, start_iter.pos() + 1)));
     }
-    auto t = Token(kind, SourceRange(source, start, start + length));
-    pos = start + length;
+
+    auto t = Token(kind, SourceRange(source, start_iter, end_iter.pos()));
+    pos = end_iter.pos();
+    *current = end_iter;
     return t;
   }
 
-  std::shared_ptr<SourceView> source;
+  std::shared_ptr<Source> source;
+  std::unique_ptr<StringCordView::Iterator> current;
   size_t pos;
   size_t nesting; // depth of ( [ { nesting...
   std::vector<int> indent_stack; // stack of indentation level of blocks