Work around buggy use_const_ref_for_mutable_tensors (#145530)
See https://github.com/pytorch/pytorch/issues/145522 for context. This doesn't fix the problem with use_const_ref_for_mutable_tensors and the boxed wrapper; instead, it gets all of our out kernels off this flag so that the mutable matching pattern works correctly. I also add a check in torchgen to prevent people from making this mistake in the future.
Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/145530
Approved by: https://github.com/albanD, https://github.com/bdhirsh
committed by PyTorch MergeBot
parent 9d6927715f
commit bc62930765
@@ -626,6 +626,11 @@ class NativeFunction:
         )
         assert isinstance(use_const_ref_for_mutable_tensors, bool)
 
+        if use_const_ref_for_mutable_tensors:
+            assert (
+                not func.arguments.out
+            ), "see https://github.com/pytorch/pytorch/issues/145522"
+
         variants_s = e.pop("variants", "function")
         assert isinstance(variants_s, str)
         variants: set[Variant] = set()
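For illustration, here is a minimal sketch of the rule the new assertion enforces. It does not use torchgen's real data model: FakeArguments, FakeFunctionSchema, and the operator name are hypothetical stand-ins; only the assertion mirrors the check added in this commit.

```python
# Minimal sketch, assuming a simplified stand-in for torchgen's data model.
from dataclasses import dataclass, field


@dataclass
class FakeArguments:
    # Hypothetical stand-in for a schema's out= tensor arguments.
    out: list[str] = field(default_factory=list)


@dataclass
class FakeFunctionSchema:
    name: str
    arguments: FakeArguments


def check_use_const_ref_flag(
    func: FakeFunctionSchema, use_const_ref_for_mutable_tensors: bool
) -> None:
    assert isinstance(use_const_ref_for_mutable_tensors, bool)
    if use_const_ref_for_mutable_tensors:
        # Out kernels may not opt into const refs for their mutable tensors.
        assert (
            not func.arguments.out
        ), "see https://github.com/pytorch/pytorch/issues/145522"


# An out variant combined with the flag is rejected:
bad = FakeFunctionSchema("my_op.out", FakeArguments(out=["out"]))
try:
    check_use_const_ref_flag(bad, use_const_ref_for_mutable_tensors=True)
except AssertionError as err:
    print("rejected:", err)
```

In the actual codegen the check runs while NativeFunction is parsed from native_functions.yaml, so an offending entry fails during code generation rather than surfacing later through the boxed wrapper.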