Remove all remaining usages of BUILD_NAMEDTENSOR (#31116)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/31116

Changelist:
- remove BUILD_NAMEDTENSOR macro
- remove torch._C._BUILD_NAMEDTENSOR
- remove all python behavior that relies on torch._C._BUILD_NAMEDTENSOR

Future:
- In the next diff, I will remove all usages of
ATen/core/EnableNamedTensor.h since that header doesn't do anything
anymore
- After that, we'll be done with the BUILD_NAMEDTENSOR removal.

Test Plan: - run CI

Differential Revision: D18934951

Pulled By: zou3519

fbshipit-source-id: 0a0df0f1f0470d0a01c495579333a2835aac9f5d
This commit is contained in:
Richard Zou
2019-12-12 09:50:59 -08:00
committed by Facebook Github Bot
parent c0bcfd0445
commit 9047d4df45
12 changed files with 7 additions and 54 deletions

View File

@@ -122,7 +122,6 @@ option(BUILD_PYTHON "Build Python binaries" ON)
option(BUILD_CAFFE2_OPS "Build Caffe2 operators" ON)
option(BUILD_SHARED_LIBS "Build libcaffe2.so" ON)
option(BUILD_CAFFE2_MOBILE "Build libcaffe2 for mobile (deprecating)" ON)
option(BUILD_NAMEDTENSOR "Experimental: compile with namedtensor support" OFF)
option(USE_STATIC_DISPATCH "Use static dispatch for ATen operators" OFF)
cmake_dependent_option(
CAFFE2_LINK_LOCAL_PROTOBUF "If set, build protobuf inside libcaffe2.so." ON
@@ -394,10 +393,6 @@ if(USE_FBGEMM)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_FBGEMM")
endif()
if(BUILD_NAMEDTENSOR)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBUILD_NAMEDTENSOR")
endif()
if(USE_QNNPACK)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_QNNPACK")
endif()

View File

@@ -1,11 +1,2 @@
#pragma once
#include <c10/macros/Macros.h>
// We are working on removing the BUILD_NAMEDTENSOR flag from the codebase.
//
// PyTorch's codegen also uses a similar flag. You can find it in
// - aten/src/ATen/env.py
#ifndef BUILD_NAMEDTENSOR
#define BUILD_NAMEDTENSOR
#endif

View File

@@ -1,12 +0,0 @@
import os
# This file copied from tools/setup_helpers/env.py
# PLEASE DO NOT ADD ANYTHING TO THIS FILE, the BUILD_NAMEDTENSOR flag is temporary.
def check_env_flag(name, default=''):
return os.getenv(name, default).upper() in ['ON', '1', 'YES', 'TRUE', 'Y']
def check_negative_env_flag(name, default=''):
return os.getenv(name, default).upper() in ['OFF', '0', 'NO', 'FALSE', 'N']
BUILD_NAMEDTENSOR = True

View File

@@ -303,8 +303,7 @@ matter, please write in at https://github.com/pytorch/pytorch/issues/14234
supports_named_tensor: True
```
Experimental: this option is ignored unless compiling with BUILD_NAMEDTENSOR=1.
By default, (`supports_named_tensor: True`) ATen code generation will generate a check
By default, (`supports_named_tensor: False`) ATen code generation will generate a check
that all tensor inputs to the function are unnamed. This is used to incrementally
implement named tensors; if a function supports named tensors, then it'll have
`supports_named_tensor: True`; otherwise, passing it a named tensor will error out.

View File

@@ -80,6 +80,5 @@ static_assert(
{"USE_NVTX", "${CAFFE2_USE_NVTX}"}, \
{"USE_TRT", "${CAFFE2_USE_TRT}"}, \
{"DISABLE_NUMA", "${CAFFE2_DISABLE_NUMA}"}, \
{"BUILD_NAMEDTENSOR", "${BUILD_NAMEDTENSOR}"}, \
{"USE_STATIC_DISPATCH", "${USE_STATIC_DISPATCH}"}, \
}

View File

@@ -129,7 +129,6 @@ function (caffe2_print_configuration_summary)
message(STATUS " USE_MPI : ${USE_MPI}")
message(STATUS " USE_GLOO : ${USE_GLOO}")
endif()
message(STATUS " BUILD_NAMEDTENSOR : ${BUILD_NAMEDTENSOR}")
if(NOT "${SELECTED_OP_LIST}" STREQUAL "")
message(STATUS " SELECTED_OP_LIST : ${SELECTED_OP_LIST}")
endif()

View File

@@ -14,9 +14,6 @@ import io
import sys
import warnings
skipIfNamedTensorDisabled = \
unittest.skipIf(not torch._C._BUILD_NAMEDTENSOR,
'PyTorch not compiled with namedtensor support')
def pass_name_to_python_arg_parser(name):
x = torch.empty(2, names=(name,))
@@ -1948,11 +1945,6 @@ class TestNamedTensor(TestCase):
res = torch.isinf(a)
self.assertEqual(res.names, ['N', 'C'])
# Disable all tests if named tensor is not available.
for attr in dir(TestNamedTensor):
if attr.startswith('test_'):
new_test = skipIfNamedTensorDisabled(getattr(TestNamedTensor, attr))
setattr(TestNamedTensor, attr, new_test)
if __name__ == '__main__':
run_tests()

View File

@@ -67,7 +67,8 @@ blacklist = [
'triplet_margin_loss',
# Somehow, these are defined in both _C and in functional. Ick!
'broadcast_tensors',
'align_tensors', # BUILD_NAMEDTENSOR only
# type hints for named tensors are broken: https://github.com/pytorch/pytorch/issues/27846
'align_tensors',
'meshgrid',
'cartesian_prod',
'norm',

View File

@@ -1,4 +1,3 @@
import torch
from torch._six import PY2
from collections import OrderedDict
@@ -9,14 +8,8 @@ subject to change or deletion.
"""
def assert_namedtensor_build(api_name):
if not torch._C._BUILD_NAMEDTENSOR:
raise RuntimeError('NYI: {} is experimental and a part '
'of our named tensors project.'.format(api_name))
def check_serializing_named_tensor(tensor):
if torch._C._BUILD_NAMEDTENSOR and tensor.has_names():
if tensor.has_names():
raise RuntimeError(
"NYI: Named tensors don't support serialization. Please drop "
"names via `tensor = tensor.rename(None)` before serialization.")
@@ -136,8 +129,6 @@ def update_names(tensor, names, rename_map, inplace):
Finally, tensor.rename has an in-place version called tensor.rename_.
"""
assert_namedtensor_build(namer_api_name(inplace))
has_names = len(names) > 0
has_rename_pairs = bool(rename_map)
if has_names and has_rename_pairs:

View File

@@ -195,7 +195,7 @@ def _tensor_str(self, indent):
if self.numel() == 0:
return '[]'
if torch._C._BUILD_NAMEDTENSOR and self.has_names():
if self.has_names():
# There are two main codepaths (possibly more) that tensor printing goes through:
# - tensor data can fit comfortably on screen
# - tensor data needs to be summarized
@@ -321,7 +321,7 @@ def _str(self):
elif self.requires_grad:
suffixes.append('requires_grad=True')
if torch._C._BUILD_NAMEDTENSOR and self.has_names():
if self.has_names():
suffixes.append('names={}'.format(self.names))
return _add_suffixes(prefix + tensor_str, suffixes, indent, force_newline=self.is_sparse)

View File

@@ -766,8 +766,6 @@ PyObject* initModule() {
ASSERT_TRUE(set_module_attr("_GLIBCXX_USE_CXX11_ABI", Py_False));
#endif
ASSERT_TRUE(set_module_attr("_BUILD_NAMEDTENSOR", Py_True));
auto defaultGenerator = at::detail::getDefaultCPUGenerator();
THPDefaultCPUGenerator = (THPGenerator*)THPGenerator_initDefaultGenerator(defaultGenerator);
// This reference is meant to be given away, so no need to incref here.

View File

@@ -6,7 +6,7 @@ from itertools import product
from ._overrides import torch_function_dispatch
__all__ = [
'align_tensors', # BUILD_NAMEDTENSOR only
'align_tensors',
'broadcast_tensors',
'cartesian_prod',
'cdist',