Remove internal logic to handle bytecode version 3 (#57775)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/57775

The minimum supported bytecode version is updated from 3 to 4. We no longer support version 3 bytecode models.

Why?
* There is hacky code in operator loading that behaves differently for a single operator based on the global bytecode version 3. Operator-related metadata should instead be passed explicitly (for example, as in #56845). To unblock that future work, we remove the hack first; a sketch of the metadata-driven alternative follows this list.
* The bytecode version was bumped from 3 to 4 more than half a year ago, and all production models have since been regenerated at version 4, so it is not practical to keep maintaining version 3. The risk of deprecating version 3 is low.
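To illustrate the direction, here is a minimal, self-contained sketch (not PyTorch code; the names Arg and backfillDefaults are hypothetical): instead of branching on the global bytecode version, the runtime can use per-operator metadata such as num_specified_args to backfill schema defaults for trailing arguments that an older model did not serialize.

// Hedged sketch of the metadata-driven approach that replaces the
// per-version hack. Compile with C++17.
#include <cassert>
#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct Arg {
  std::string name;
  std::optional<int> default_value; // ints only, to keep the sketch small
};

using Stack = std::vector<int>;

// Wrap `fn` so that defaults for arguments [num_specified, schema.size())
// are pushed before the call -- no bytecode-version check needed.
std::function<void(Stack&)> backfillDefaults(
    std::function<void(Stack&)> fn,
    const std::vector<Arg>& schema,
    std::optional<int> num_specified) {
  if (!num_specified || *num_specified >= (int)schema.size()) {
    return fn; // the model already provides every argument
  }
  return [=](Stack& stack) {
    for (size_t i = *num_specified; i < schema.size(); ++i) {
      assert(schema[i].default_value.has_value());
      stack.push_back(*schema[i].default_value);
    }
    fn(stack);
  };
}

int main() {
  // e.g. a conv-like op that grew a trailing allow_tf32-style arg defaulting to 1
  std::vector<Arg> schema = {{"input", std::nullopt}, {"allow_tf32", 1}};
  auto op = [](Stack& s) { std::cout << "called with " << s.size() << " args\n"; };
  Stack stack = {42};                     // an old model serialized only 1 arg
  backfillDefaults(op, schema, 1)(stack); // prints: called with 2 args
}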

Test Plan: Imported from OSS

Reviewed By: raziel

Differential Revision: D28270791

Pulled By: cccclai

fbshipit-source-id: 70b1bd6352fdaae5f8d2173b81578d77018c8e44
(cherry picked from commit 3e930fa381cd01f3705116795c6426df992372fc)
Author: Martin Yuan
Date: 2022-04-06 18:39:37 -07:00
Committed by: PyTorch MergeBot
Parent: 7e9bb1c273
Commit: 00c1e01ad0
11 changed files with 51 additions and 89 deletions


@@ -1188,7 +1188,6 @@ TEST(RunTimeTest, ParseOperator) {
       function.get());
   parseOperators(
       std::move(*c10::ivalue::Tuple::create(operators)).elements(),
-      model_version,
       1,
       function.get());
   const size_t rsize = 5;
@@ -1571,7 +1570,6 @@ TEST(RunTimeTest, RuntimeCall) {
       foo.get());
   parseOperators(
       std::move(*c10::ivalue::Tuple::create(operatorsFoo)).elements(),
-      model_version,
       1,
       foo.get());
   parseConstants(
@@ -1588,7 +1586,6 @@ TEST(RunTimeTest, RuntimeCall) {
       call.get());
   parseOperators(
       std::move(*c10::ivalue::Tuple::create(operatorsCall)).elements(),
-      model_version,
       1,
       call.get());
   parseConstants(
@@ -2092,10 +2089,7 @@ TEST(LiteInterpreterUpgraderTest, Upgrader) {
   if (byteCodeFunctionWithOperator.function.get_code().operators_.empty()) {
     for (const auto& op : byteCodeFunctionWithOperator.operators) {
       byteCodeFunctionWithOperator.function.append_operator(
-          op.name,
-          op.overload_name,
-          op.num_specified_args,
-          caffe2::serialize::kMaxSupportedFileFormatVersion);
+          op.name, op.overload_name, op.num_specified_args);
     }
   }
   upgrader_functions.push_back(byteCodeFunctionWithOperator.function);


@@ -119,8 +119,7 @@ const std::vector<ByteCodeFunctionWithOperator>& getUpgraderBytecodeList() {
       upgrader_function.function.append_operator(
           op.name,
           op.overload_name,
-          op.num_specified_args,
-          caffe2::serialize::kMaxSupportedFileFormatVersion);
+          op.num_specified_args);
     }
   }
   return upgrader_function_list;


@@ -2,6 +2,6 @@ MOBILE_UPGRADERS_HEADER_DESCRIPTION = """/**
  * @generated
  * This is an auto-generated file. Please do not modify it by hand.
  * To re-generate, please run:
- * cd ~/pytorch && python torch/csrc/jit/mobile/upgrader_mobile.cpp
+ * cd ~/pytorch && python tools/codegen/operator_versions/gen_mobile_upgraders.py
  */
 """


@@ -227,7 +227,6 @@ std::unique_ptr<mobile::Function> FlatbufferLoader::parseFunction(
   }
   std::unordered_set<std::string> unsupported_op_names;
-  const int64_t model_version = 0x6L;
   for (const auto* op : *method->operators()) {
     c10::optional<int> num_args = c10::nullopt;
     if (op->num_args_serialized() > -1) {
@@ -235,7 +234,7 @@ std::unique_ptr<mobile::Function> FlatbufferLoader::parseFunction(
     }
     auto op_found = function->append_operator(
-        op->name()->str(), op->overload_name()->str(), num_args, model_version);
+        op->name()->str(), op->overload_name()->str(), num_args);
     if (!op_found) {
       unsupported_op_names.emplace(


@@ -46,14 +46,12 @@ void Function::append_instruction(OpCode op, int X, int N) {
 bool Function::append_operator(
     const std::string& name,
     const std::string& overload_name,
-    const c10::optional<int>& num_specified_args,
-    int64_t model_version) { /* TODO: T90339189 deprecate all v3 when v3 models
-                                are removed */
+    const c10::optional<int>& num_specified_args) {
   // Keep the original opname in code_
   code_.op_names_.emplace_back(name, overload_name);
   const auto& opname = code_.op_names_.back();
   code_.operator_input_sizes_.emplace_back(num_specified_args.value_or(-1));
-  auto func = makeOperatorFunction(opname, num_specified_args, model_version);
+  auto func = makeOperatorFunction(opname, num_specified_args);
   if (!func.has_value()) {
     return false;
   }
@@ -134,8 +132,7 @@ const std::vector<int64_t>& Function::getExceptionDebugHandles() const {
 c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
     c10::OperatorName opname,
-    c10::optional<int> num_specified_args,
-    int64_t model_version) {
+    c10::optional<int> num_specified_args) {
   std::function<void(Stack&)> fn;
   const auto full_name = c10::toString(opname);
   const std::vector<c10::Argument>* pArgs = nullptr;
@@ -165,18 +162,6 @@ c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
   if (!promoted_op) {
     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(pArgs);
     const auto& args = *pArgs;
-    if (model_version == 0x3LL && opname.name == "aten::_convolution" &&
-        opname.overload_name.empty()) {
-      // Since byte-code versions 0x4L, convolution has an additional
-      // default-value argument (allow_tf32=True, see
-      // https://github.com/pytorch/pytorch/pull/40737). This wrapper handles
-      // backward compatibility with models of byte-code version <= 0x3L, where
-      // this bool argument does not yet exist.
-      fn = [fn](Stack& stack) {
-        stack.push_back(true);
-        fn(stack);
-      };
-    } else {
     // num_specified_args >= 0 indicates number of arguments are available
     // from model. We can use it to handle backward compatibility.
     if (num_specified_args &&
@@ -198,8 +183,7 @@ c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
             out_args.size(),
             ", which is more then the number of specified arguments: ",
             num_specified_args.value());
-        for (size_t i = start_index; i < (args.size() - out_args.size());
-             ++i) {
+        for (size_t i = start_index; i < (args.size() - out_args.size()); ++i) {
           TORCH_CHECK(
               args[i].default_value().has_value(),
               "Error happened at preparing for default values for the argument. The ",
@@ -215,7 +199,6 @@ c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
       };
     }
   }
-  }
   return fn;
 }
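The removed branch used a self-wrapping pattern worth noting: it assigned to fn a lambda that captures the previous fn by value, so the old callable is preserved inside the new wrapper. A minimal standalone illustration (simplified stand-in types, not the PyTorch definitions):

#include <functional>
#include <iostream>
#include <vector>

using Stack = std::vector<int>;

int main() {
  std::function<void(Stack&)> fn = [](Stack& s) {
    std::cout << "op sees " << s.size() << " args\n";
  };
  // The lambda captures the current fn by value *before* fn is rebound,
  // so the wrapper can push the missing default and then delegate --
  // the same shape as the removed v3 aten::_convolution workaround.
  fn = [fn](Stack& stack) {
    stack.push_back(1); // backfill allow_tf32 = true
    fn(stack);
  };
  Stack stack = {7};
  fn(stack); // prints: op sees 2 args
}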


@@ -37,9 +37,7 @@ class TORCH_API Function : public torch::jit::Function {
   bool append_operator(
       const std::string& name,
       const std::string& overload_name,
-      const c10::optional<int>& num_specified_args,
-      int64_t model_version); /* TODO: T90339189 deprecate all v3 when v3 models
-                                 are removed */
+      const c10::optional<int>& num_specified_args);
   void append_constant(const c10::IValue& constant);
   void append_type(const c10::TypePtr& type);
   void append_function(mobile::Function& func);
@@ -73,8 +71,7 @@ class TORCH_API Function : public torch::jit::Function {
 c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
     c10::OperatorName opname,
-    c10::optional<int> num_specified_args,
-    int64_t model_version);
+    c10::optional<int> num_specified_args);
 } // namespace mobile
 } // namespace jit


@@ -311,8 +311,9 @@ void BytecodeDeserializer::parseMethods(
   TORCH_CHECK(vals.size() > 0, "Bytecode has no elements. ");
   // Initialized with the version number when kProducedBytecodeVersion was
   // introduced. The old models (some of them already in production) without
-  // version number don't have to be re-generated.
-  int64_t model_version = 0x3L;
+  // version number are seen as version 3 (deprecated).
+  constexpr uint64_t default_version = 0x3L;
+  uint64_t model_version = default_version;
   size_t method_i_start = 0;
   if (vals[0].isInt()) {
     model_version = vals[0].toInt();
@@ -383,11 +384,7 @@ void BytecodeDeserializer::parseMethods(
     }
     init_upgrader(function.get());
     // 1. First pass all operators from models
-    parseOperators(
-        std::move(ops_list),
-        model_version,
-        module_load_options_,
-        function.get());
+    parseOperators(std::move(ops_list), module_load_options_, function.get());
     // 2. Decides if upgrader is needed
     bool use_upgrader =
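With per-operator version handling gone, the consequence of this change is that a v3 model should fail fast at load time rather than being silently patched. A hedged sketch of that kind of range check (the constant names and bounds here are illustrative assumptions; the real supported-version constants live in caffe2/serialize, not in this snippet):

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>

// Illustrative bounds only -- not the upstream constants.
constexpr uint64_t kMinVersion = 0x4L; // v3 dropped by this change
constexpr uint64_t kMaxVersion = 0x6L;

void checkBytecodeVersion(uint64_t model_version) {
  if (model_version < kMinVersion || model_version > kMaxVersion) {
    throw std::runtime_error(
        "Unsupported bytecode version: " + std::to_string(model_version));
  }
}

int main() {
  checkBytecodeVersion(0x4L); // ok
  try {
    checkBytecodeVersion(0x3L); // a v3 model now fails loudly at load time
  } catch (const std::runtime_error& e) {
    std::cout << e.what() << "\n";
  }
}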


@@ -22,8 +22,7 @@ std::string operator_str(
  */
 std::unordered_set<std::string> load_and_find_unsupported_operator_names(
     c10::ivalue::TupleElements&& ops_list,
-    mobile::Function* function,
-    int64_t model_version) {
+    mobile::Function* function) {
   std::unordered_set<std::string> unsupported_op_names;
   // ops_list is the list of operator names that were read in from
   // bytecode.plk for the method that is currently being processed.
@@ -41,8 +40,7 @@ std::unordered_set<std::string> load_and_find_unsupported_operator_names(
     auto op_found = function->append_operator(
         op_item[0].toString()->string(),
         op_item[1].toString()->string(),
-        num_args,
-        model_version);
+        num_args);
     if (!op_found) {
       unsupported_op_names.emplace(operator_str(
           op_item[0].toString()->string(), op_item[1].toString()->string()));
@@ -66,12 +64,10 @@ void print_unsupported_ops_and_throw(
 void parseOperators(
     c10::ivalue::TupleElements&& ops_list,
-    const int64_t& model_version,
     const uint64_t& module_load_options,
     mobile::Function* function) {
   std::unordered_set<std::string> unsupported_op_names =
-      load_and_find_unsupported_operator_names(
-          std::move(ops_list), function, model_version);
+      load_and_find_unsupported_operator_names(std::move(ops_list), function);
   if ((module_load_options & MobileModuleLoadOptions::OPERATOR_CHECK) &&
       !unsupported_op_names.empty()) {
     print_unsupported_ops_and_throw(unsupported_op_names);


@@ -16,7 +16,6 @@ namespace mobile {
 TORCH_API void parseOperators(
     c10::ivalue::TupleElements&& ops_list,
-    const int64_t& model_version,
     const uint64_t& module_load_options,
     mobile::Function* function);
 } // namespace mobile


@@ -533,8 +533,7 @@ const std::vector<ByteCodeFunctionWithOperator>& getUpgraderBytecodeList() {
       upgrader_function.function.append_operator(
           op.name,
           op.overload_name,
-          op.num_specified_args,
-          caffe2::serialize::kMaxSupportedFileFormatVersion);
+          op.num_specified_args);
     }
   }
   return upgrader_function_list;


@@ -173,8 +173,7 @@ mobile::Code compileGraphToMobileCode(
   }
   mobile_code.operator_input_sizes_.emplace_back(num_args.value_or(-1));
   mobile_code.op_names_.emplace_back(opname);
-  auto func = mobile::makeOperatorFunction(
-      opname, num_args, compilation_options.model_version);
+  auto func = mobile::makeOperatorFunction(opname, num_args);
   TORCH_INTERNAL_ASSERT(
       func.has_value(),
       "Operator with name: ",