Remove Caffe2/onnx (#127991)

Remove Caffe2/onnx since it is not used. Other tiny fixes are also applied.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/127991
Approved by: https://github.com/ezyang
This commit is contained in:
cyy
2024-06-05 15:10:12 +00:00
committed by PyTorch MergeBot
parent d48c25c7d1
commit df75a9dc80
10 changed files with 0 additions and 288 deletions

View File

@@ -1,23 +0,0 @@
""" build mode definitions for caffe2/caffe2 """
load("@fbcode//:BUILD_MODE.bzl", get_parent_modes = "all_modes_keep_gpu_sections_all_modes_use_lld")
load("@fbcode_macros//build_defs:create_build_mode.bzl", "extend_build_mode")
def update_mode_struct(name, mode_struct):
    """Return mode_struct, with C++ modules disabled for the "dev" mode.

    All other modes are passed through unchanged.
    """
    if name != "dev":
        return mode_struct

    return extend_build_mode(
        mode_struct,
        # TODO(ipbrady): Modules introduce floating point inaccuracies (T43879333)
        cxx_modules = False,
    )
# Apply the dev-mode tweak to every mode inherited from the parent BUILD_MODE.
_modes = {
mode_name: update_mode_struct(mode_name, mode_struct)
for mode_name, mode_struct in get_parent_modes().items()
}
def get_modes():
""" Return the build modes for this file (parent modes, with cxx_modules disabled in "dev"). """
return _modes

View File

@@ -1930,7 +1930,6 @@ if(BUILD_PYTHON)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
# Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80947 in EmbeddingBag.cpp
set_source_files_properties(../aten/src/ATen/native/EmbeddingBag.cpp PROPERTIES COMPILE_FLAGS -Wno-attributes)
# Same GCC attribute-warning suppression for the Caffe2 NMS operator source.
set_source_files_properties(${TORCH_SRC_DIR}/../caffe2/operators/box_with_nms_limit_op.cc PROPERTIES COMPILE_FLAGS -Wno-attributes)
endif()
# Accumulator for generated build files (populated below).
set(build_files)
@@ -1950,10 +1949,4 @@ if(BUILD_PYTHON)
# Pick up static python files. This also matches the generated Caffe2 *.py
# files under ${CMAKE_BINARY_DIR}/caffe2, so the previously duplicated
# "Caffe2 proto files" install(DIRECTORY .../caffe2 ...) rule is dropped —
# it installed the exact same directory with the exact same pattern twice.
install(DIRECTORY ${CMAKE_BINARY_DIR}/caffe2 DESTINATION ${PYTHON_LIB_REL_PATH}
        FILES_MATCHING PATTERN "*.py")
# Caffe proto files
install(DIRECTORY ${CMAKE_BINARY_DIR}/caffe DESTINATION ${PYTHON_LIB_REL_PATH}
        FILES_MATCHING PATTERN "*.py")
endif()

View File

@@ -1,5 +0,0 @@
import warnings

from torch.onnx import _CAFFE2_ATEN_FALLBACK

# Caffe2 support was removed from PyTorch; warn anyone importing this module
# unless the legacy ATen-fallback flag is still enabled.
if not _CAFFE2_ATEN_FALLBACK:
    warnings.warn("Caffe2 support is no longer present in PyTorch.")

View File

@@ -1,5 +0,0 @@
# ---[ Extra onnx files.
# NOTE(review): file(GLOB) will not pick up newly added .cc files until the
# next re-configure; an explicit source list (or CONFIGURE_DEPENDS on
# CMake >= 3.12) would be preferable.
file(GLOB ONNX_SRCS *.cc)
# ---[ Send the lists to the parent scope.
set(ONNX_SRCS ${ONNX_SRCS} PARENT_SCOPE)

View File

@@ -1,7 +0,0 @@
namespace ONNX_NAMESPACE {
// Supported opset version range for the custom "ai.onnx.pytorch" domain.
const int AI_ONNX_PYTORCH_DOMAIN_MIN_OPSET = 1;
const int AI_ONNX_PYTORCH_DOMAIN_MAX_OPSET = 1;
// Domain string under which the PyTorch-mirrored Caffe2 op schemas register.
constexpr const char* AI_ONNX_PYTORCH_DOMAIN = "ai.onnx.pytorch";
} // namespace ONNX_NAMESPACE

View File

@@ -1,168 +0,0 @@
// Copyright (c) Facebook Inc. and Microsoft Corporation.
// Licensed under the MIT license.
#include "./schema.h"
// ONNX op schemas for the custom "ai.onnx.pytorch" domain; each entry mirrors
// a Caffe2 operator (ONNX_PYTORCH_OPERATOR_SET_SCHEMA is defined in schema.h).
namespace ONNX_NAMESPACE {
// NOTE(review): the output below is constrained to "T2" (integral), unlike
// the float ("T1") outputs of the other SparseLengths* schemas — confirm
// this asymmetry is intended.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables,bugprone-branch-clone)
ONNX_PYTORCH_OPERATOR_SET_SCHEMA(
SparseLengthsSumFused8BitRowwise,
1,
OpSchema()
.SetDoc("Mirror Caffe2 SparseLengthsSumFused8BitRowwise operator")
.Input(0, "DATA", "data tensor", "T1")
.Input(1, "INDICES", "indices tensor", "T2")
.Input(2, "LENGTHS", "lengths tensor", "T2")
.Output(0, "output", "Output tensor", "T2")
.TypeConstraint(
"T1",
{"tensor(uint8)"},
"Constrain input data to uint8 tensors.")
.TypeConstraint(
"T2",
{"tensor(int8)",
"tensor(int16)",
"tensor(int32)",
"tensor(int64)",
"tensor(uint8)",
"tensor(uint16)",
"tensor(uint32)",
"tensor(uint64)"},
"Constrain index and length to integral tensors."));
// Float variant: DATA and the output share float constraint "T1".
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables,bugprone-branch-clone)
ONNX_PYTORCH_OPERATOR_SET_SCHEMA(
SparseLengthsSum,
1,
OpSchema()
.SetDoc("Mirror Caffe2 SparseLengthsSum operator")
.Input(0, "DATA", "data tensor", "T1")
.Input(1, "INDICES", "indices tensor", "T2")
.Input(2, "LENGTHS", "lengths tensor", "T2")
.Output(0, "output", "Output tensor", "T1")
.TypeConstraint(
"T1",
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to float tensors.")
.TypeConstraint(
"T2",
{"tensor(int8)",
"tensor(int16)",
"tensor(int32)",
"tensor(int64)",
"tensor(uint8)",
"tensor(uint16)",
"tensor(uint32)",
"tensor(uint64)"},
"Constrain index and length to integral tensors."));
// Same as SparseLengthsSum plus a WEIGHTS input under the float constraint.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables,bugprone-branch-clone)
ONNX_PYTORCH_OPERATOR_SET_SCHEMA(
SparseLengthsWeightedSum,
1,
OpSchema()
.SetDoc("Mirror Caffe2 SparseLengthsWeightedSum operator")
.Input(0, "DATA", "data tensor", "T1")
.Input(1, "WEIGHTS", "data tensor", "T1")
.Input(2, "INDICES", "indices tensor", "T2")
.Input(3, "LENGTHS", "lengths tensor", "T2")
.Output(0, "output", "Output tensor", "T1")
.TypeConstraint(
"T1",
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to float tensors.")
.TypeConstraint(
"T2",
{"tensor(int8)",
"tensor(int16)",
"tensor(int32)",
"tensor(int64)",
"tensor(uint8)",
"tensor(uint16)",
"tensor(uint32)",
"tensor(uint64)"},
"Constrain index and length to integral tensors."));
// Float DATA indexed by integral INDICES; no LENGTHS input.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables,bugprone-branch-clone)
ONNX_PYTORCH_OPERATOR_SET_SCHEMA(
BatchGather,
1,
OpSchema()
.SetDoc("Mirror Caffe2 BatchGather operator")
.Input(0, "DATA", "data tensor", "T1")
.Input(1, "INDICES", "indices tensor", "T2")
.Output(0, "output", "Output tensor", "T1")
.TypeConstraint(
"T1",
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to float tensors.")
.TypeConstraint(
"T2",
{"tensor(int8)",
"tensor(int16)",
"tensor(int32)",
"tensor(int64)",
"tensor(uint8)",
"tensor(uint16)",
"tensor(uint32)",
"tensor(uint64)"},
"Constrain index and length to integral tensors."));
// Two float inputs X, Y and one float output Z under a single constraint "T".
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables,bugprone-branch-clone)
ONNX_PYTORCH_OPERATOR_SET_SCHEMA(
DotProduct,
1,
OpSchema()
.SetDoc("Mirror Caffe2 DotProduct operator")
.Input(0, "X", "Input 1 tensor", "T")
.Input(1, "Y", "Input 2 tensor", "T")
.Output(0, "Z", "Output tensor", "T")
.TypeConstraint(
"T",
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to float tensors."));
// Input X, weight W and bias B share one float constraint with output Z.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables,bugprone-branch-clone)
ONNX_PYTORCH_OPERATOR_SET_SCHEMA(
FCTransposed,
1,
OpSchema()
.SetDoc("Mirror Caffe2 FCTransposed operator")
.Input(0, "X", "Input tensor", "T")
.Input(1, "W", "Weight tensor", "T")
.Input(2, "B", "Bias tensor", "T")
.Output(0, "Z", "Output tensor", "T")
.TypeConstraint(
"T",
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to float tensors."));
// Batched matrix product; expected shapes are given in the Input/Output docs.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables,bugprone-branch-clone)
ONNX_PYTORCH_OPERATOR_SET_SCHEMA(
BatchMatMul,
1,
OpSchema()
.SetDoc("Mirror Caffe2 BatchMatMul operator")
.Input(0, "X", "tensor of shape (dim0, dim1 ... M, K)", "T")
.Input(1, "Y", "tensor of shape (dim0, dim2 ... K, N)", "T")
.Output(0, "Z", "tensor of shape (dim0, dim1 ... M, N)", "T")
.TypeConstraint(
"T",
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to float tensors."));
// Single float input X to single float output Y.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables,bugprone-branch-clone)
ONNX_PYTORCH_OPERATOR_SET_SCHEMA(
ExpandDims,
1,
OpSchema()
.SetDoc("Mirror Caffe2 ExpandDims operator")
.Input(0, "X", "Input tensor", "T")
.Output(0, "Y", "Output tensor", "T")
.TypeConstraint(
"T",
{"tensor(float16)", "tensor(float)", "tensor(double)"},
"Constrain input and output types to float tensors."));
} // namespace ONNX_NAMESPACE

View File

@@ -1,46 +0,0 @@
#pragma once
#include "onnx/defs/schema.h"
namespace ONNX_NAMESPACE {
// Forward declarations of the schema classes generated by
// ONNX_PYTORCH_OPERATOR_SET_SCHEMA in schema.cc ("ai.onnx.pytorch" domain).
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch,
1,
SparseLengthsSumFused8BitRowwise);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, SparseLengthsSum);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, SparseLengthsWeightedSum);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, BatchGather);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, DotProduct);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, FCTransposed);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, BatchMatMul);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, ExpandDims);
// Iterate over schema from ai.onnx.pytorch domain opset 1
class OpSet_PyTorch_ver1 {
public:
// Invokes fn once per schema in this opset, in declaration order above.
static void ForEachSchema(std::function<void(OpSchema&&)> fn) {
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, SparseLengthsSumFused8BitRowwise)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, SparseLengthsSum)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, SparseLengthsWeightedSum)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, BatchGather)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, DotProduct)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, FCTransposed)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, BatchMatMul)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, ExpandDims)>());
}
};
// Registers every opset-1 schema above with the global ONNX schema registry.
inline void RegisterPyTorchOperatorSetSchema() {
RegisterOpSetSchema<OpSet_PyTorch_ver1>();
}
} // namespace ONNX_NAMESPACE

View File

@@ -1,17 +0,0 @@
#include "./schema.h"
#include "./operator_sets.h"
namespace {
using namespace ONNX_NAMESPACE;
// Static-initialization hook: constructing the single global instance below
// registers the "ai.onnx.pytorch" domain and all of its op schemas.
class PyTorchSchemasRegisterer {
public:
PyTorchSchemasRegisterer() {
// Declare the domain's supported opset range before adding its schemas.
OpSchemaRegistry::DomainToVersionRange::Instance().AddDomainToVersion(
AI_ONNX_PYTORCH_DOMAIN,
AI_ONNX_PYTORCH_DOMAIN_MIN_OPSET,
AI_ONNX_PYTORCH_DOMAIN_MAX_OPSET);
RegisterPyTorchOperatorSetSchema();
}
};
// Runs once when this translation unit is loaded.
static PyTorchSchemasRegisterer registerer{};
} // namespace

View File

@@ -1,8 +0,0 @@
#pragma once
#include "./constants.h"
#include "onnx/defs/schema.h"
// Convenience wrapper around ONNX_OPERATOR_SET_SCHEMA_EX that registers an op
// schema in the custom "ai.onnx.pytorch" domain (domain string comes from
// ./constants.h; the `false` argument is forwarded verbatim to the EX macro).
#define ONNX_PYTORCH_OPERATOR_SET_SCHEMA(name, ver, impl) \
ONNX_OPERATOR_SET_SCHEMA_EX( \
name, PyTorch, AI_ONNX_PYTORCH_DOMAIN, ver, false, impl)

View File

@@ -1282,8 +1282,6 @@ if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND NOT INTERN_DISABLE_ONNX)
add_definitions(-DONNX_ML=1)
endif()
# NOTE(review): add_definitions is directory-scoped and leaks to every target
# below; target_compile_definitions on the consuming targets is preferable.
add_definitions(-DONNXIFI_ENABLE_EXT=1)
# Add op schemas in "ai.onnx.pytorch" domain
add_subdirectory("${CMAKE_CURRENT_LIST_DIR}/../caffe2/onnx/torch_ops")
if(NOT USE_SYSTEM_ONNX)
# Vendored ONNX; EXCLUDE_FROM_ALL keeps its targets out of the default build.
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/onnx EXCLUDE_FROM_ALL)
if(NOT MSVC)