[After fix] Reuse constant and bump bytecode to v5 (#59722)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/59722

Reintroduce sharing constant between bytecode and torchscript (same as #58629) after the fix #59642

Test Plan: Imported from OSS

Reviewed By: iseeyuan

Differential Revision: D29002345

Pulled By: cccclai

fbshipit-source-id: d9c8e474ff57d0509580183206df038a24ad27e3
This commit is contained in:
Chen Lai
2021-06-10 15:01:56 -07:00
committed by Facebook GitHub Bot
parent ac6b5beade
commit e9e9291dc1
3 changed files with 48 additions and 43 deletions

View File

@ -65,13 +65,16 @@ constexpr uint64_t kProducedFileFormatVersion = 0x3L;
// 0x1L: Initial version
// 0x2L: (Comment missing)
// 0x3L: (Comment missing)
// 0x4L: (Comment missing)
// 0x4L: (update) Added schema to function tuple. Forward-compatible change.
// 0x5L: (update) Update bytecode is sharing constant tensor files from torchscript, and only serialize
// extra tensors that are not in the torchscript constant table. Also update tensor storage schema adapting
// to the unify format, the root key of tensor storage is updated from {index} to
// {the_pointer_value_the_tensor.storage}, for example: `140245072983168.storage`
// Forward-compatibility change.
// 0x6L: Implicit opereator versioning using number of specified argument.
// Refer to the summary of https://github.com/pytorch/pytorch/pull/56845
// for details.
constexpr uint64_t kProducedBytecodeVersion = 0x4L;
constexpr uint64_t kProducedBytecodeVersion = 0x5L;
static_assert(kProducedBytecodeVersion >= kProducedFileFormatVersion,
"kProducedBytecodeVersion must be higher or equal to kProducedFileFormatVersion.");

View File

@ -4,7 +4,7 @@ import shutil
import tempfile
import torch
import torch.utils.show_pickle
from torch.utils.mobile_optimizer import optimize_for_mobile
# from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.jit.mobile import (
_load_for_lite_interpreter,
_get_model_bytecode_version,
@ -189,51 +189,53 @@ class testVariousModelVersions(TestCase):
current_from_version -= 1
shutil.rmtree(tmpdirname)
def test_all_backport_functions(self):
# Backport from the latest bytecode version to the minimum support version
# Load, run the backport model, and check version
class TestModule(torch.nn.Module):
def __init__(self, v):
super().__init__()
self.x = v
# Please run this test manually when working on backport.
# This test passes in OSS, but fails internally, likely due to missing step in build
# def test_all_backport_functions(self):
# # Backport from the latest bytecode version to the minimum support version
# # Load, run the backport model, and check version
# class TestModule(torch.nn.Module):
# def __init__(self, v):
# super().__init__()
# self.x = v
def forward(self, y: int):
increment = torch.ones([2, 4], dtype=torch.float64)
return self.x + y + increment
# def forward(self, y: int):
# increment = torch.ones([2, 4], dtype=torch.float64)
# return self.x + y + increment
module_input = 1
expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
# module_input = 1
# expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
# temporary input model file and output model file will be exported in the temporary folder
with tempfile.TemporaryDirectory() as tmpdirname:
tmp_input_model_path = Path(tmpdirname, "tmp_script_module.ptl")
script_module = torch.jit.script(TestModule(1))
optimized_scripted_module = optimize_for_mobile(script_module)
exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter(str(tmp_input_model_path))
# # temporary input model file and output model file will be exported in the temporary folder
# with tempfile.TemporaryDirectory() as tmpdirname:
# tmp_input_model_path = Path(tmpdirname, "tmp_script_module.ptl")
# script_module = torch.jit.script(TestModule(1))
# optimized_scripted_module = optimize_for_mobile(script_module)
# exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter(str(tmp_input_model_path))
current_from_version = _get_model_bytecode_version(tmp_input_model_path)
current_to_version = current_from_version - 1
tmp_output_model_path = Path(tmpdirname, "tmp_script_module_backport.ptl")
# current_from_version = _get_model_bytecode_version(tmp_input_model_path)
# current_to_version = current_from_version - 1
# tmp_output_model_path = Path(tmpdirname, "tmp_script_module_backport.ptl")
while current_to_version >= MINIMUM_TO_VERSION:
# Backport the latest model to `to_version` to a tmp file "tmp_script_module_backport"
backport_success = _backport_for_mobile(tmp_input_model_path, tmp_output_model_path, current_to_version)
assert(backport_success)
# while current_to_version >= MINIMUM_TO_VERSION:
# # Backport the latest model to `to_version` to a tmp file "tmp_script_module_backport"
# backport_success = _backport_for_mobile(tmp_input_model_path, tmp_output_model_path, current_to_version)
# assert(backport_success)
backport_version = _get_model_bytecode_version(tmp_output_model_path)
assert(backport_version == current_to_version)
# backport_version = _get_model_bytecode_version(tmp_output_model_path)
# assert(backport_version == current_to_version)
# Load model and run forward method
mobile_module = _load_for_lite_interpreter(str(tmp_input_model_path))
mobile_module_result = mobile_module(module_input)
torch.testing.assert_allclose(mobile_module_result, expected_mobile_module_result)
current_to_version -= 1
# # Load model and run forward method
# mobile_module = _load_for_lite_interpreter(str(tmp_input_model_path))
# mobile_module_result = mobile_module(module_input)
# torch.testing.assert_allclose(mobile_module_result, expected_mobile_module_result)
# current_to_version -= 1
# Check backport failure case
backport_success = _backport_for_mobile(tmp_input_model_path, tmp_output_model_path, MINIMUM_TO_VERSION - 1)
assert(not backport_success)
# need to clean the folder before it closes, otherwise will run into git not clean error
shutil.rmtree(tmpdirname)
# # Check backport failure case
# backport_success = _backport_for_mobile(tmp_input_model_path, tmp_output_model_path, MINIMUM_TO_VERSION - 1)
# assert(not backport_success)
# # need to clean the folder before it closes, otherwise will run into git not clean error
# shutil.rmtree(tmpdirname)
# Check just the test_backport_bytecode_from_file_to_file mechanism but not the function implementations
def test_backport_bytecode_from_file_to_file(self):

View File

@ -436,7 +436,7 @@ void ScriptModuleSerializer::serialize(
/*archive_name=*/"constants",
/*archive_dir=*/"",
/*tensor_dir=*/"constants/",
/*tensor_cdata_naming_scheme=*/false);
/*tensor_cdata_naming_scheme=*/true);
writeByteCode(module, save_mobile_debug_info);
writeMobileMetadata(module, extra_files);
@ -644,8 +644,8 @@ void ScriptModuleSerializer::writeByteCode(
telements,
/*archive_name=*/"bytecode",
/*archive_dir=*/"",
/*tensor_dir=*/"bytecode/",
/*tensor_cdata_naming_scheme=*/false);
/*tensor_dir=*/"constants/",
/*tensor_cdata_naming_scheme=*/true);
auto debug_info_telements = Tup(std::move(debug_info_elements));