mirror of https://github.com/pytorch/pytorch.git
synced 2025-10-21 05:34:18 +08:00
This reverts commit bfebf254dd92f3ed35154597166e7e71fb04f31b.

Differential Revision: [D39104562](https://our.internmc.facebook.com/intern/diff/D39104562)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/84207
Approved by: https://github.com/robieta
commit 44a975335e
parent 60f47cb002
committed by PyTorch MergeBot
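
For orientation: the hunks below back out the SymInt-based element count. Before this revert, `TensorBase` exposed both `numel()` (a concrete `int64_t`) and `sym_numel()` (a `c10::SymInt` that can stay symbolic under tracing); afterwards only the concrete path remains and `TensorImpl::numel_` goes back to a plain `int64_t`. A minimal sketch of the pre-revert relationship, using only the accessors visible in the hunks (illustrative only, not part of this commit):

```cpp
// Illustrative sketch; assumes the pre-revert numel()/sym_numel() API shown below.
#include <ATen/ATen.h>
#include <c10/core/SymInt.h>

void report_element_count(const at::Tensor& t) {
  const int64_t n = t.numel();           // always a concrete element count
  const c10::SymInt sn = t.sym_numel();  // may carry a symbolic value under tracing
  if (!sn.is_symbolic()) {
    // For ordinary tensors the symbolic count wraps the same concrete value.
    TORCH_INTERNAL_ASSERT(sn.as_int_unchecked() == n);
  }
}
```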
.github/ci_commit_pins/xla.txt (vendored, 2 lines changed)
@@ -1 +1 @@
-95eedc33fb48c2ba72f5efa45daa4941cb069864
+40f6818fc653b2312134e8a0f60dbc7a21380588
@@ -234,11 +234,6 @@ int64_t NestedTensorImpl::numel_custom() const {
   return static_cast<int64_t>(num_elements);
 }
 
-c10::SymInt NestedTensorImpl::sym_numel_custom() const {
-  return NestedTensorImpl::numel_custom();
-}
-
 bool NestedTensorImpl::is_contiguous_custom(MemoryFormat) const {
   return nested_tensor_impl_is_contiguous(this);
 }
@@ -99,7 +99,6 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
   // TODO: numel_custom and is_contiguous_custom can be profitably overridden
   // with real implementations
   int64_t numel_custom() const override;
-  c10::SymInt sym_numel_custom() const override;
   bool is_contiguous_custom(MemoryFormat) const override;
   int64_t size_custom(int64_t d) const override {
     return this->size(d);
@@ -298,10 +298,6 @@ class TORCH_API TensorBase {
     return impl_->numel();
   }
 
-  c10::SymInt sym_numel() const {
-    return impl_->sym_numel();
-  }
-
   // Length of one array element in bytes. This is the traditional
   // Numpy naming.
   size_t itemsize() const {
@@ -209,20 +209,16 @@ void TensorImpl::HandleResize() {
   // If needed, we will free the data. the next mutable_data() call
   // will create the data storage.
   bool reset_tensor = false;
 
-  TORCH_CHECK(!numel_.is_symbolic(), "CAFFE2 doesn't support SymInts");
-  int concrete_numel = numel_.as_int_unchecked();
   if (reserved_) {
     // If tensor is reserved then don't claim its memeory unless nbytes()
     // is smaller than new size
-    reset_tensor = storage_.nbytes() <
-        (storage_offset_ + concrete_numel) * data_type_.itemsize();
+    reset_tensor =
+        storage_.nbytes() < (storage_offset_ + numel_) * data_type_.itemsize();
   } else {
     reset_tensor = storage_.nbytes() <
-            (storage_offset_ + concrete_numel) * data_type_.itemsize() ||
+            (storage_offset_ + numel_) * data_type_.itemsize() ||
         !FLAGS_caffe2_keep_on_shrink ||
-        storage_.nbytes() -
-            (storage_offset_ + concrete_numel) * data_type_.itemsize() >
+        storage_.nbytes() - (storage_offset_ + numel_) * data_type_.itemsize() >
            static_cast<size_t>(FLAGS_caffe2_max_keep_on_shrink_memory);
   }
 
@@ -423,13 +419,6 @@ c10::SymIntArrayRef TensorImpl::sym_sizes_custom() const {
   return sym_sizes_default();
 }
 
-c10::SymInt TensorImpl::sym_numel_custom() const {
-  if (C10_UNLIKELY(is_python_dispatch())) {
-    return load_pyobj_interpreter()->sym_numel(this);
-  }
-  return sym_numel_default();
-}
-
 c10::SymIntArrayRef TensorImpl::sym_strides_custom() const {
   if (C10_UNLIKELY(is_python_dispatch())) {
     return load_pyobj_interpreter()->sym_strides(this);
@@ -716,7 +705,7 @@ void TensorImpl::Extend(int64_t num, float growthPct) {
           sizes_and_strides_.size_at_unchecked(0).as_int_unchecked() *
           (1 + growthPct / 100))));
   auto oldData = std::move(storage_.data_ptr());
-  auto oldSize = numel_.as_int_unchecked();
+  auto oldSize = numel_;
   Resize(newCapacity);
   auto* newData = raw_mutable_data(data_type_);
   if (data_type_.copy()) {
@@ -752,7 +741,7 @@ void TensorImpl::ReserveSpace(int64_t outer_dim) {
       "Right now ReserveSpace is only supported for contiguous Tensor.");
   TORCH_CHECK(
       !has_symbolic_sizes_strides_,
-      "ReserveSpace() called on tensor with symbolic shape");
+      "ReserveSpace() called on tensor with symbolic shape")
 
   TORCH_CHECK(storage_.unique(), "Can't call ReserveSpace on shared storage.");
   // TODO: eliminate newCapacity.
@@ -784,7 +773,7 @@ void TensorImpl::Reshape(const std::vector<int64_t>& dims) {
      "Right now Reshape is only supported for contiguous Tensor.");
   TORCH_CHECK(
       !has_symbolic_sizes_strides_,
-      "Reshape() called on tensor with symbolic shape");
+      "Reshape() called on tensor with symbolic shape")
 
   int64_t new_size = 1;
   for (auto d : dims) {
@@ -792,7 +781,7 @@ void TensorImpl::Reshape(const std::vector<int64_t>& dims) {
     new_size *= d;
   }
   TORCH_CHECK(
-      new_size == numel_.as_int_unchecked(),
+      new_size == numel_,
       "New size and old size are not equal. You cannot use Reshape, "
       "but should use Resize."
       // TODO(jiayq): remove the following warning after pending diffs
@@ -854,11 +843,8 @@ void TensorImpl::ShareExternalPointer(
       data_type != ScalarType::Undefined,
       "To share with a raw external pointer you need to pass in an "
       "initialized data_type(TypeMeta).");
-  TORCH_CHECK(
-      !has_symbolic_sizes_strides_,
-      "ReserveSpace() called on tensor with symbolic shape");
   if (!size_bytes) {
-    size_bytes = numel_.as_int_unchecked() * data_type.itemsize();
+    size_bytes = numel_ * data_type.itemsize();
   }
   if (storage_.unique()) {
     storage_.UniqueStorageShareExternalPointer(std::move(data_ptr), size_bytes);

@@ -564,21 +564,6 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
 
   virtual c10::SymIntArrayRef sym_sizes_custom() const;
 
-  c10::SymInt sym_numel() const {
-    if (C10_UNLIKELY(
-            sizes_strides_policy_ >=
-            static_cast<uint8_t>(SizesStridesPolicy::CustomSizes))) {
-      return sym_numel_custom();
-    }
-    return sym_numel_default();
-  }
-
-  inline c10::SymInt sym_numel_default() const {
-    return numel_;
-  }
-
-  virtual c10::SymInt sym_numel_custom() const;
-
   /**
    * Return a reference to the strides of this tensor. This reference remains
    * valid as long as the tensor is live and not restrided.
@@ -778,9 +763,9 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
 
   inline int64_t numel_default() const {
 #ifdef DEBUG
-    TORCH_INTERNAL_ASSERT(compute_numel() == numel_.as_int_unchecked());
+    TORCH_INTERNAL_ASSERT(compute_numel() == numel_);
 #endif
-    return numel_.as_int_unchecked();
+    return numel_;
   }
 
  public:
@@ -1959,10 +1944,6 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
    * and a new storage will be created.
    */
   inline void* raw_mutable_data(const caffe2::TypeMeta meta) {
-    auto concrete_numel = numel_.expect_int();
-#ifdef DEBUG
-    TORCH_INTERNAL_ASSERT(compute_numel() == concrete_numel);
-#endif
     // For 0-size tensors it's fine to return any pointer (including nullptr)
     if (data_type_ == meta && storage_initialized()) {
       return static_cast<void*>(
@@ -1977,9 +1958,9 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
       // We can reuse the existing buffer if the current data does not have
       // a special destructor and the new data doesn't have a special
       // constructor.
-      if (concrete_numel == 0 ||
+      if (numel_ == 0 ||
           (meta.placementNew() == nullptr && !had_special_dtor &&
-           (storage_.nbytes() >= (concrete_numel * data_type_.itemsize())))) {
+           (storage_.nbytes() >= (numel_ * data_type_.itemsize())))) {
         TORCH_INTERNAL_ASSERT(
             storage_offset_ == 0); // because we just reallocated
         return storage_.data();
@@ -1996,18 +1977,18 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
         // For types that need placement new, we will call it, as well as
         // making sure that when the data is freed, it calls the right
         // destruction procedure.
+        auto size = numel_;
         auto dtor = data_type_.placementDelete();
-        auto data_ptr =
-            allocator->allocate(concrete_numel * data_type_.itemsize());
+        auto data_ptr = allocator->allocate(numel_ * data_type_.itemsize());
         storage_.set_data_ptr_noswap(PlacementDeleteContext::makeDataPtr(
-            std::move(data_ptr), dtor, concrete_numel, storage_.device()));
-        data_type_.placementNew()(storage_.data(), concrete_numel);
+            std::move(data_ptr), dtor, size, storage_.device()));
+        data_type_.placementNew()(storage_.data(), numel_);
       } else {
         // For fundamental type, new and delete is easier.
         storage_.set_data_ptr_noswap(
-            allocator->allocate(concrete_numel * data_type_.itemsize()));
+            allocator->allocate(numel_ * data_type_.itemsize()));
       }
-    storage_.set_nbytes(concrete_numel * data_type_.itemsize());
+    storage_.set_nbytes(numel_ * data_type_.itemsize());
     TORCH_INTERNAL_ASSERT(
         storage_offset_ == 0); // because we just reallocated
     device_opt_ = storage_.device();
@@ -2082,7 +2063,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
         "empty_tensor_restride() called on tensor with symbolic shape")
 #ifdef DEBUG
     TORCH_INTERNAL_ASSERT(
-        compute_numel() == numel_.as_int_unchecked(),
+        compute_numel() == numel_,
         "If you are seeing this error, that means empty_tensor_restride was "
         "called before setting correct numel");
 #endif
@@ -2506,7 +2487,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
   // time, we will immediately set sizes to {0} and reset numel to 0.
   // (Can't do that in the default initializers, because there's no way to
   // spell "allocate a one-element array" for strides_).
-  SymInt numel_ = c10::SymInt(1);
+  int64_t numel_ = 1;
 
   // INVARIANT: When storage is non-null, this type meta must
   // agree with the type meta in storage
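
The Caffe2-era paths above (`HandleResize`, `Reshape`, `raw_mutable_data`) do byte-count arithmetic on the element count, so while `numel_` was a `c10::SymInt` they first had to assert it was concrete and unwrap it; with `numel_` back to `int64_t` they use it directly. A condensed sketch of the unwrapping pattern this revert removes, assuming only the `SymInt` accessors visible in the hunks (illustrative, not part of this commit):

```cpp
// Illustrative sketch of the removed pattern (not part of this commit).
#include <c10/core/SymInt.h>
#include <c10/util/Exception.h>

size_t nbytes_for(const c10::SymInt& numel, size_t itemsize) {
  // The Caffe2 allocation paths cannot handle symbolic sizes, so the SymInt
  // was checked and converted to a concrete int before any arithmetic.
  TORCH_CHECK(!numel.is_symbolic(), "CAFFE2 doesn't support SymInts");
  const int64_t concrete_numel = numel.as_int_unchecked();
  return static_cast<size_t>(concrete_numel) * itemsize;
}
```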
@@ -93,12 +93,6 @@ static c10::Layout noop_layout_fn(const PyInterpreter*, const TensorImpl*) {
       "attempted to call `layout` on Tensor with nontrivial PyObject after corresponding interpreter died");
 }
 
-static c10::SymInt noop_sym_numel_fn(const PyInterpreter*, const TensorImpl*) {
-  TORCH_INTERNAL_ASSERT(
-      0,
-      "attempted to call `sym_numel` on Tensor with nontrivial PyObject after corresponding interpreter died");
-}
-
 static c10::SymIntArrayRef noop_sym_strides_fn(
     const PyInterpreter*,
     const TensorImpl*) {
@@ -119,7 +113,6 @@ void PyInterpreter::disarm() noexcept {
   sizes_fn_ = &noop_sizes_fn;
   sym_sizes_fn_ = &noop_sym_sizes_fn;
   layout_fn_ = &noop_layout_fn;
-  sym_numel_fn_ = &noop_sym_numel_fn;
   trace_gpu_functions.disarm();
   sym_strides_fn_ = &noop_sym_strides_fn;
 }

@@ -176,7 +176,6 @@ struct C10_API PyInterpreter {
   using sym_sizes_sig =
       c10::SymIntArrayRef(const PyInterpreter*, const TensorImpl*);
   using layout_sig = c10::Layout(const PyInterpreter*, const TensorImpl*);
-  using sym_numel_sig = c10::SymInt(const PyInterpreter*, const TensorImpl*);
   using sym_strides_sig =
       c10::SymIntArrayRef(const PyInterpreter*, const TensorImpl*);
 
@@ -192,7 +191,6 @@ struct C10_API PyInterpreter {
       sizes_sig* sizes,
       sym_sizes_sig* sym_sizes,
       layout_sig* layout,
-      sym_numel_sig* sym_numel,
       sym_strides_sig* sym_strides,
       GPUTraceFunctionWrapper trace_gpu_functions)
       : name_fn_(name_fn),
@@ -206,7 +204,6 @@ struct C10_API PyInterpreter {
         sizes_fn_(sizes),
         sym_sizes_fn_(sym_sizes),
         layout_fn_(layout),
-        sym_numel_fn_(sym_numel),
         trace_gpu_functions(trace_gpu_functions),
         sym_strides_fn_(sym_strides) {}
 
@@ -221,7 +218,6 @@ struct C10_API PyInterpreter {
   sizes_sig* sizes_fn_;
   sym_sizes_sig* sym_sizes_fn_;
   layout_sig* layout_fn_;
-  sym_numel_sig* sym_numel_fn_;
   GPUTraceFunctionWrapper trace_gpu_functions;
   sym_strides_sig* sym_strides_fn_;
 
@@ -286,11 +282,6 @@ struct C10_API PyInterpreter {
     return (*layout_fn_)(this, self);
   }
 
-  __ubsan_ignore_function__ c10::SymInt sym_numel(
-      const TensorImpl* self) const {
-    return (*sym_numel_fn_)(this, self);
-  }
-
   __ubsan_ignore_function__ void trace_gpu_event_creation(
       uintptr_t event) const {
     return (*trace_gpu_functions.event_creation_fn_)(this, event);

@@ -439,10 +439,6 @@ class TORCH_API Tensor final {
     return impl_->sym_sizes();
   }
 
-  inline c10::SymInt sym_numel() const {
-    return impl_->sym_numel();
-  }
-
   inline c10::SymIntArrayRef sym_strides() const {
     return impl_->sym_strides();
   }

@@ -270,6 +270,7 @@ ALLOW_LIST = [
     ("aten::vsplit.array", datetime.date(2022, 9, 1)),
     ("aten::vsplit.int", datetime.date(2022, 9, 1)),
     ("c10d::allreduce_", datetime.date(2022, 10, 1)),
+    ("aten::sym_numel", datetime.date(2022, 10, 1)),
 ]
 
 ALLOW_LIST_COMPILED = [

@@ -879,7 +879,7 @@ symbolic_tensor_failures = {
     xfail('addmm', 'decomposed'), # aten.mm.default - couldn't find symbolic meta function/decomposition
     xfail('addmv', ''), # aten.addmv.default - couldn't find symbolic meta function/decomposition
     xfail('addr', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
-    xfail('all', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
+    xfail('all', ''), # Unexpected type <class 'torch.SymbolicIntNode'> when computing elementwise type promotion!
     xfail('aminmax', ''), # aten.aminmax.default - couldn't find symbolic meta function/decomposition
     xfail('argmax', ''), # aten.argmax.default - couldn't find symbolic meta function/decomposition
     xfail('argmin', ''), # aten.argmin.default - couldn't find symbolic meta function/decomposition
@@ -913,6 +913,7 @@ symbolic_tensor_failures = {
     xfail('cumulative_trapezoid', ''), # aten.slice.Tensor - couldn't find symbolic meta function/decomposition
     xfail('deg2rad', ''), # aten.deg2rad.default - couldn't find symbolic meta function/decomposition
     xfail('diag_embed', ''), # aten.diag_embed.default - couldn't find symbolic meta function/decomposition
+    xfail('diagflat', ''), # RuntimeError: Tensors of type TensorImpl do not have numel
     xfail('diagonal', ''), # aten.diagonal.default - couldn't find symbolic meta function/decomposition
     xfail('diagonal_scatter', ''), # aten.diagonal_scatter.default - couldn't find symbolic meta function/decomposition
     xfail('diff', ''), # aten.empty_like.default - couldn't find symbolic meta function/decomposition
@@ -956,12 +957,14 @@ symbolic_tensor_failures = {
     xfail('histogram', ''), # Could not run 'aten::histogram.bin_ct' with arguments from the 'Meta' backend. This c...
     xfail('histogramdd', ''), # aten._histogramdd_bin_edges.default - couldn't find symbolic meta function/decomposition
     xfail('hsplit', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
+    xfail('hstack', ''), # Tensors of type TensorImpl do not have numel
     xfail('i0', ''), # aten.i0.default - couldn't find symbolic meta function/decomposition
     xfail('index_add', ''), # Float
     xfail('index_copy', ''), # Expected a long tensor for index, but got Float
     xfail('index_fill', ''), # aten.index_fill.int_Scalar - couldn't find symbolic meta function/decomposition
     xfail('index_put', ''), # aten.index_put.default - couldn't find symbolic meta function/decomposition
     xfail('index_reduce', ''), # Float
+    xfail('index_select', ''), # Tensors of type TensorImpl do not have numel
     xfail('inner', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
     xfail('int', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
     xfail('isclose', ''), # The underlying op of 'aten.stride' has no overload name '_schema'
@@ -993,6 +996,8 @@ symbolic_tensor_failures = {
     xfail('linalg.matrix_rank', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
     xfail('linalg.matrix_rank', 'hermitian'), # aten.size.default - couldn't find symbolic meta function/decomposition
     xfail('linalg.multi_dot', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
+    xfail('norm', 'fro'), # TensorImpl do not have numel
+    xfail('norm', 'inf'), # TensorImpl do not have numel
     xfail('linalg.norm', ''), # TensorImpl do not have numel
     xfail('linalg.norm', 'subgradients_at_zero'), # TensorImpl do not have numel
     xfail('linalg.pinv', ''), # aten.linalg_pinv.atol_rtol_tensor - couldn't find symbolic meta function/decomposition
@@ -1014,6 +1019,7 @@ symbolic_tensor_failures = {
     xfail('logaddexp', ''), # aten.logaddexp.default - couldn't find symbolic meta function/decomposition
     xfail('logcumsumexp', ''), # aten.logcumsumexp.default - couldn't find symbolic meta function/decomposition
     xfail('logdet', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
+    xfail('logsumexp', ''), # Tensors of type TensorImpl do not have numel
     xfail('long', ''), # aten._to_copy.default - couldn't find symbolic meta function/decomposition
     xfail('lu', ''), # aten.linalg_lu_factor_ex.default - couldn't find symbolic meta function/decomposition
     xfail('lu_solve', ''), # aten.linalg_lu_solve.default - couldn't find symbolic meta function/decomposition
@@ -1024,7 +1030,7 @@ symbolic_tensor_failures = {
     xfail('matmul', ''), # aten.new_empty.default - couldn't find symbolic meta function/decomposition
     xfail('matrix_exp', ''), # aten.linalg_matrix_exp.default - couldn't find symbolic meta function/decomposition
     xfail('max', 'reduction_with_dim'), # aten.max.dim - couldn't find symbolic meta function/decomposition
-    xfail('mean', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
+    xfail('mean', ''), # Unexpected type <class 'torch.SymbolicIntNode'> when computing elementwise type promotion!
     xfail('median', ''), # Could not run 'aten::median' with arguments from the 'Meta' backend. This could be becau...
     xfail('meshgrid', 'list_of_tensors'), # Tensors of type TensorImpl do not have numel
     xfail('meshgrid', 'variadic_tensors'), # Tensors of type TensorImpl do not have numel
@@ -1036,7 +1042,7 @@ symbolic_tensor_failures = {
     xfail('nanmean', ''), # The underlying op of 'aten.stride' has no overload name '_schema'
     xfail('nanquantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend.
     xfail('narrow', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
-    xfail('native_layer_norm', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promot...
+    xfail('native_layer_norm', ''), # Unexpected type <class 'torch.SymbolicIntNode'> when computing elementwise type promot...
     xfail('nn.functional.adaptive_avg_pool1d', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
     xfail('nn.functional.adaptive_avg_pool2d', ''), # argument 'size' must be tuple of ints, but found element o...
     xfail('nn.functional.adaptive_avg_pool3d', ''), # aten._adaptive_avg_pool3d.default - couldn't find symbolic meta func...
@@ -1078,9 +1084,9 @@ symbolic_tensor_failures = {
     xfail('nn.functional.interpolate', 'linear'), # aten.upsample_linear1d.vec - couldn't find symbolic meta function/dec...
     xfail('nn.functional.interpolate', 'nearest'), # aten.upsample_nearest1d.vec - couldn't find symbolic meta function/d...
     xfail('nn.functional.interpolate', 'trilinear'), # aten.upsample_trilinear3d.vec - couldn't find symbolic meta functi...
-    xfail('nn.functional.kl_div', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type pro...
+    xfail('nn.functional.kl_div', ''), # Unexpected type <class 'torch.SymbolicIntNode'> when computing elementwise type pro...
     xfail('nn.functional.l1_loss', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
-    xfail('nn.functional.layer_norm', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type...
+    xfail('nn.functional.layer_norm', ''), # Unexpected type <class 'torch.SymbolicIntNode'> when computing elementwise type...
     xfail('nn.functional.linear', ''), # aten.mv.default - couldn't find symbolic meta function/decomposition
     xfail('nn.functional.local_response_norm', ''), # Tensors of type TensorImpl do not have numel
     xfail('nn.functional.margin_ranking_loss', ''), # The underlying op of 'aten.stride' has no overload name '_schema'
@@ -1098,10 +1104,12 @@ symbolic_tensor_failures = {
     xfail('nn.functional.pad', 'constant'), # aten.fill.Scalar - couldn't find symbolic meta function/decomposition
     xfail('nn.functional.pad', 'reflect'), # aten.reflection_pad1d.default - couldn't find symbolic meta function/decompo...
     xfail('nn.functional.pad', 'replicate'), # aten.replication_pad1d.default - couldn't find symbolic meta function/deco...
+    xfail('nn.functional.pairwise_distance', ''), # TensorImpl does not have numel
     xfail('nn.functional.pdist', ''), # Could not run 'aten::_pdist_forward' with arguments from the 'Meta' backend...
     xfail('nn.functional.pixel_shuffle', ''), # aten.pixel_shuffle.default - couldn't find symbolic meta function/decompos...
     xfail('nn.functional.pixel_unshuffle', ''), # aten.pixel_unshuffle.default - couldn't find symbolic meta function/deco...
     xfail('nn.functional.poisson_nll_loss', ''), # The underlying op of 'aten.stride' has no overload name '_schema'
+    xfail('nn.functional.prelu', ''), # Tensors of type TensorImpl do not have numel
     xfail('nn.functional.rrelu', ''), # aten.empty_like.default - couldn't find symbolic meta function/decomposition
     xfail('nn.functional.smooth_l1_loss', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
     xfail('nn.functional.soft_margin_loss', ''), # aten.soft_margin_loss.default - couldn't find symbolic meta function/de...
@@ -1131,6 +1139,7 @@ symbolic_tensor_failures = {
     xfail('rand_like', ''), # aten.randn_like.default - couldn't find symbolic meta function/decomposition
     xfail('randint_like', ''), # aten.randint_like.default - couldn't find symbolic meta function/decomposition
     xfail('randn_like', ''), # aten.randn_like.default - couldn't find symbolic meta function/decomposition
+    xfail('ravel', ''), # RuntimeError: Tensors of type TensorImpl do not have numel
     xfail('renorm', ''), # aten.renorm.default - couldn't find symbolic meta function/decomposition
     xfail('repeat', ''), # aten.repeat.default - couldn't find symbolic meta function/decomposition
     xfail('reshape_as', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
@@ -1182,9 +1191,10 @@ symbolic_tensor_failures = {
     xfail('special.scaled_modified_bessel_k1', ''), # aten.special_scaled_modified_bessel_k1.default - couldn't find symbo...
     xfail('special.spherical_bessel_j0', ''), # aten.special_spherical_bessel_j0.default - couldn't find symbolic meta fun...
     xfail('special.xlog1py', ''), # aten.special_xlog1py.default - couldn't find symbolic meta function/decomposition
-    xfail('split', ''), # 'torch._C.SymIntNode' and 'int'
+    xfail('split', ''), # 'torch._C.SymbolicIntNode' and 'int'
     xfail('split', 'list_args'), # aten.size.default - couldn't find symbolic meta function/decomposition
     xfail('split_with_sizes', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
+    xfail('stack', ''), # RuntimeError: Tensors of type TensorImpl do not have numel
     xfail('std', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
     xfail('std_mean', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
     xfail('stft', ''), # argument 'size' must be tuple of ints, but found element of type torch._C.SymIntNode at...
@@ -1203,9 +1213,10 @@ symbolic_tensor_failures = {
     xfail('tril', ''), # aten.tril.default - couldn't find symbolic meta function/decomposition
     xfail('triu', ''), # aten.triu.default - couldn't find symbolic meta function/decomposition
     xfail('unfold', ''), # aten.unfold.default - couldn't find symbolic meta function/decomposition
-    xfail('var_mean', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
-    xfail('var', ''), # Unexpected type <class 'torch.SymIntNode'> when computing elementwise type promotion!
+    xfail('var_mean', ''), # Unexpected type <class 'torch.SymbolicIntNode'> when computing elementwise type promotion!
+    xfail('var', ''), # Unexpected type <class 'torch.SymbolicIntNode'> when computing elementwise type promotion!
     xfail('vdot', ''), # aten.vdot.default - couldn't find symbolic meta function/decomposition
+    xfail('view', ''), # RuntimeError: Tensors of type TensorImpl do not have numel
     xfail('view_as_complex', ''), # aten.view_as_complex.default - couldn't find symbolic meta function/decomposition
     xfail('view_as', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
     xfail('vsplit', ''), # aten.size.default - couldn't find symbolic meta function/decomposition

@@ -240,12 +240,7 @@ static PyObject * THPVariable_numel(PyObject* self, PyObject* args)
   if (jit::tracer::isTracing()) {
     return wrap(jit::tracer::getNumelOf(self_));
   } else {
-    auto si = self_.sym_numel();
-    if (si.is_symbolic()) {
-      return py::cast(si.toSymIntNodeImpl()).release().ptr();
-    } else {
-      return THPUtils_packInt64(si.as_int_unchecked());
-    }
+    return THPUtils_packInt64(self_.numel());
   }
   END_HANDLE_TH_ERRORS
 }

@@ -266,9 +266,6 @@ c10::Layout concrete_layout_fn(
 c10::SymIntArrayRef concrete_sym_strides_fn(
     const c10::impl::PyInterpreter*,
     const c10::TensorImpl* self);
-c10::SymInt concrete_sym_numel_fn(
-    const c10::impl::PyInterpreter*,
-    const c10::TensorImpl* self);
 template <const char*, typename... Ts>
 void concrete_trace_cuda(const c10::impl::PyInterpreter*, Ts...);
 static constexpr char trace_cuda_event_creation_fn_name[] =
@@ -301,7 +298,6 @@ class PyInterpreterHolder {
           &concrete_sizes_fn,
           &concrete_sym_sizes_fn,
           &concrete_layout_fn,
-          &concrete_sym_numel_fn,
           &concrete_sym_strides_fn,
           c10::impl::GPUTraceFunctionWrapper(
               &concrete_trace_cuda<trace_cuda_event_creation_fn_name>,
@@ -2501,33 +2497,6 @@ c10::Layout concrete_layout_fn(
   return toLayout(out.ptr());
 }
 
-c10::SymInt concrete_sym_numel_fn(
-    const c10::impl::PyInterpreter*,
-    const c10::TensorImpl* self) {
-  pybind11::gil_scoped_acquire gil;
-  at::impl::MaybeSetTLSOnEntryGuard guard;
-  auto out = torchDispatchFromTensorImpl(
-      self,
-      "sym_numel",
-      py::module::import("torch")
-          .attr("ops")
-          .attr("aten")
-          .attr("sym_numel")
-          .attr("default")
-          .ptr(),
-      "torch.ops.aten");
-
-  if (out == Py_None) {
-    TORCH_CHECK(
-        !self->has_symbolic_sizes_strides(),
-        "Cannot call numel on a tensor with symbolic shapes/strides");
-    return self->sym_numel_default();
-  }
-  return torch::is_symint_node(out)
-      ? out.cast<c10::SymIntNodeImpl*>()->toSymInt()
-      : c10::SymInt{py::cast<int64_t>(out)};
-}
-
 template <const char* func_name, typename... Ts>
 void concrete_trace_cuda(const c10::impl::PyInterpreter*, Ts... args) {
   pybind11::gil_scoped_acquire gil;

@@ -68,11 +68,6 @@ void sym_size_int(Stack& stack) {
   push(stack, t.sym_sizes()[dim]);
 }
 
-void sym_numel(Stack& stack) {
-  auto t = std::move(pop(stack)).toTensor();
-  push(stack, t.sym_numel());
-}
-
 void sym_stride(Stack& stack) {
   auto t = std::move(pop(stack)).toTensor();
   pack(stack, t.sym_strides().vec());

@@ -23,8 +23,6 @@ void sym_size(Stack& stack);
 
 void sym_size_int(Stack& stack);
 
-void sym_numel(Stack& stack);
-
 void sym_stride(Stack& stack);
 
 void device(Stack& stack);

@@ -427,10 +427,6 @@ static const std::vector<OperatorGeneratorArgs> opGenArgs{
           push(stack, arg.strides());
         },
         aliasAnalysisFromSchema()),
-    OperatorGeneratorArgs(
-        TORCH_SELECTIVE_SCHEMA("aten::sym_numel(Tensor self) -> SymInt"),
-        sym_numel,
-        aliasAnalysisFromSchema()),
     OperatorGeneratorArgs(
         TORCH_SELECTIVE_SCHEMA("aten::sym_stride(Tensor self) -> SymInt[]"),
         sym_stride,

@@ -67,9 +67,10 @@ def create_contiguous(shape):
         strides.append(dim * strides[-1])
     return list(reversed(strides))
 
+
 def is_symbolic_op(func):
     return func in [aten.sym_size.default, aten.dim.default,
-                    aten.is_contiguous.default, aten.sym_stride.default, aten.sym_numel.default
+                    aten.is_contiguous.default, aten.sym_stride.default
                     ]
 
 def handle_symbolic_op(func, args, kwargs):
@@ -80,11 +81,6 @@ def handle_symbolic_op(func, args, kwargs):
         return None
     if func == torch.ops.aten.dim.default:
         return len(args[0].shape)
-    if func == torch.ops.aten.sym_numel.default:
-        res = 1
-        for s in args[0].shape:
-            res = res * s
-        return res
     # TODO: hack, need to make is_contiguous calls symbolic (probably through computing on symbolic strides)
     if func == torch.ops.aten.is_contiguous.default:
         return True