Revert "Reland PySymInt (#79617)"
This reverts commit 8ef6356f267c75276ea23b51163274cd5fffc0ce. Reverted https://github.com/pytorch/pytorch/pull/79617 on behalf of https://github.com/zengk95 because it is breaking periodic jobs (and maybe pull) on trunk.
@@ -95,43 +95,6 @@ static PyObject * THPVariable_apply_(PyObject* self, PyObject* arg)
  END_HANDLE_TH_ERRORS
}

// TODO: FIXME This should be super temporary until we fix the XLA issue.
static PyObject * THPVariable_sym_size(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
  static PythonArgParser parser({
    "sym_size(int64_t dim)",
    "sym_size()",
    "sym_size(Dimname dim)",
  });
  auto& self_ = THPVariable_Unpack(self);
  ParsedArgs<3> parsed_args;
  auto r = parser.parse(self, args, kwargs, parsed_args);

  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }
  if (r.idx == 0) {
    if (jit::tracer::isTracing()) {
      // will error out if a tensor has symints
      return wrap(jit::tracer::getSizeOf(self_, r.toInt64(0)));
    } else {
      return torch::toPyObject(self_.sym_size(r.toInt64(0)));
    }
  } else if (r.idx == 1) {
    return THPSize_NewFromSymSizes(self_);
  }
  else if (r.idx == 2) {
    if (jit::tracer::isTracing()) {
      TORCH_INTERNAL_ASSERT(false, "NYI: Named tensors w/ JIT");
    }
    return wrap(self_.size(r.dimname(0)));
  }
  Py_RETURN_NONE;
  END_HANDLE_TH_ERRORS
}


static PyObject * THPVariable_size(PyObject* self, PyObject* args, PyObject* kwargs)
{
  HANDLE_TH_ERRORS
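For context, the function removed in the hunk above backs a Tensor.sym_size method on the Python side: it mirrors Tensor.size but goes through sym_size() in C++ so the result can be a SymInt when shapes are symbolic. Below is a minimal usage sketch of the first two parser overloads listed above ("sym_size(int64_t dim)" and "sym_size()") on an ordinary eager tensor, where the symbolic path just hands back concrete sizes; it assumes a build in which the binding is still registered, which is exactly what this revert undoes.

import torch

t = torch.randn(2, 3)

# "sym_size(int64_t dim)" overload: one dimension at a time. On a plain
# eager tensor this returns the same concrete length as size(dim).
print(t.size(0))      # 2
print(t.sym_size(0))  # 2 as well

# "sym_size()" overload: the whole shape, built via THPSize_NewFromSymSizes
# instead of THPSize_New, but still a torch.Size for concrete shapes.
print(t.size())       # torch.Size([2, 3])
print(t.sym_size())   # torch.Size([2, 3])

The third overload, "sym_size(Dimname dim)", simply falls back to size-by-name in the C++ above.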
@@ -147,19 +110,17 @@ static PyObject * THPVariable_size(PyObject* self, PyObject* args, PyObject* kwargs)
  if(r.has_torch_function()){
    return handle_torch_function(r, self, args, kwargs, THPVariableClass, "torch.Tensor");
  }

  if (r.idx == 0) {
    if (jit::tracer::isTracing()) {
      // will error out if a tensor has symints
      return wrap(jit::tracer::getSizeOf(self_, r.toInt64(0)));
    } else {
      return wrap(self_.size(r.toInt64(0)));
      //return torch::toPyObject(self_.sym_size(r.toInt64(0)));
    }
  } else if (r.idx == 1) {
    // we can't do the normal wrapping here because IntArrayRef maps to both
    // torch.Size and tuple in python.
    return THPSize_New(self_);
    //return THPSize_NewFromSymSizes(self_);
  }
  else if (r.idx == 2) {
    if (jit::tracer::isTracing()) {
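Both size and sym_size begin with the same r.has_torch_function() check, which is what lets Tensor subclasses intercept these calls before the C++ fast path runs. A small illustrative sketch of that dispatch from Python; the LoggingTensor subclass is hypothetical and not part of this diff.

import torch

class LoggingTensor(torch.Tensor):
    # Every overridable call on this subclass, including Tensor.size, is routed
    # through __torch_function__ because the C++ bindings above check
    # r.has_torch_function() before doing any real work.
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        print("intercepted:", getattr(func, "__name__", func))
        return super().__torch_function__(func, types, args, kwargs)

x = torch.randn(2, 3).as_subclass(LoggingTensor)
print(x.size(0))  # prints "intercepted: size" from the hook, then 2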
@@ -1322,7 +1283,6 @@ PyMethodDef variable_methods[] = {
  {"set_", castPyCFunctionWithKeywords(THPVariable_set_), METH_VARARGS | METH_KEYWORDS, NULL},
  {"short", castPyCFunctionWithKeywords(THPVariable_short), METH_VARARGS | METH_KEYWORDS, NULL},
  {"size", castPyCFunctionWithKeywords(THPVariable_size), METH_VARARGS | METH_KEYWORDS, NULL},
  {"sym_size", castPyCFunctionWithKeywords(THPVariable_sym_size), METH_VARARGS | METH_KEYWORDS, NULL},
  {"_storage", THPVariable_storage, METH_NOARGS, NULL},
  {"storage_offset", THPVariable_storage_offset, METH_NOARGS, NULL},
  {"stride", castPyCFunctionWithKeywords(THPVariable_stride), METH_VARARGS | METH_KEYWORDS, NULL},
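The last hunk drops the "sym_size" entry from the variable_methods[] table, and that table is what exposes these C functions as methods on torch.Tensor. A quick, hypothetical probe (not part of the diff) to check whether a given build still registers the binding:

import torch

# True only on builds whose variable_methods[] table still contains the
# "sym_size" entry removed by this revert.
print(hasattr(torch.Tensor, "sym_size"))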