Fix self assignment (#165816)

This PR removes redundant self-assignments of the form `var = var`.
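Such assignments are no-ops (or, in the chained form `x = x = expr`, carry a redundant target) and can be flagged mechanically. Below is a minimal sketch, not part of this PR, that uses Python's `ast` module to locate them; the helper name `find_self_assignments` is made up for illustration.

```python
import ast

def find_self_assignments(source: str) -> list[int]:
    """Line numbers of no-op assignments like `x = x` or `x = x = expr`."""
    hits = []
    for node in ast.walk(ast.parse(source)):
        if not isinstance(node, ast.Assign):
            continue
        names = [t.id for t in node.targets if isinstance(t, ast.Name)]
        # `x = x`: the right-hand side is one of the assignment targets.
        if isinstance(node.value, ast.Name) and node.value.id in names:
            hits.append(node.lineno)
        # `x = x = expr`: the same name appears twice among the targets.
        elif len(names) != len(set(names)):
            hits.append(node.lineno)
    return sorted(set(hits))

print(find_self_assignments("a = a\nb = b = 1\nc = 2\n"))  # -> [1, 2]
```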

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165816
Approved by: https://github.com/jansel
Author:    Yuanyuan Chen
Date:      2025-10-18 18:51:49 +00:00
Committer: PyTorch MergeBot
Parent:    032bed95cd
Commit:    1f43d17ce6

8 changed files with 5 additions and 20 deletions


```diff
@@ -293,7 +293,7 @@ def vmap_impl(func, in_dims, out_dims, randomness, chunk_size, *args, **kwargs):
 def get_chunk_sizes(total_elems, chunk_size):
-    n_chunks = n_chunks = total_elems // chunk_size
+    n_chunks = total_elems // chunk_size
     chunk_sizes = [chunk_size] * n_chunks
     # remainder chunk
     remainder = total_elems % chunk_size
```
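For context: `n_chunks = n_chunks = total_elems // chunk_size` is a chained assignment. Python evaluates the right-hand side once and binds it to each target left to right, so the duplicate target is a no-op and dropping it cannot change behavior:

```python
total_elems, chunk_size = 10, 3
n_chunks = n_chunks = total_elems // chunk_size  # RHS evaluated once, bound twice
assert n_chunks == 3  # identical to `n_chunks = total_elems // chunk_size`
```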


```diff
@@ -108,14 +108,10 @@ def efficient_conv_bn_eval_decomposed(
     else:
         bias_on_the_fly = torch.zeros_like(bn_running_var)
-    if bn_weight is not None:
-        bn_weight = bn_weight
-    else:
+    if bn_weight is None:
         bn_weight = torch.ones_like(bn_running_var)
-    if bn_bias is not None:
-        bn_bias = bn_bias
-    else:
+    if bn_bias is None:
         bn_bias = torch.zeros_like(bn_running_var)
     # shape of [C_out, 1, 1, 1] in Conv2d
```
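Here the `is not None` branch only re-bound the argument to itself, so the two branches collapse into a single `is None` guard that fills in the default. A minimal sketch of the equivalence, with a hypothetical `fill_default` helper:

```python
import torch

def fill_default(bn_weight, bn_running_var):
    # Before: `if bn_weight is not None: bn_weight = bn_weight` (a no-op)
    # followed by `else: bn_weight = torch.ones_like(...)`.
    # After: one guard that only fires when a default is actually needed.
    if bn_weight is None:
        bn_weight = torch.ones_like(bn_running_var)
    return bn_weight

rv = torch.ones(4)
assert torch.equal(fill_default(None, rv), torch.ones(4))             # default filled
assert torch.equal(fill_default(torch.zeros(4), rv), torch.zeros(4))  # passthrough
```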


```diff
@@ -477,7 +477,6 @@ def extract_normalized_read_writes(
     (norm_pw_vars, norm_red_vars), ranges = index_vars_no_squeeze(
         pw_splits, red_splits, prefix="n"
     )
-    node = node
     for n in list(node.get_nodes()):
         if not isinstance(n, torch._inductor.scheduler.SchedulerNode):
```


```diff
@@ -760,7 +760,6 @@ def get_fused_kernel_name(
         ]
     else:
         raise NotImplementedError
-    sources = sources
     return "_".join(["fused"] + sources)
```


```diff
@@ -408,8 +408,6 @@ def set_default_dtype(fp_dtype="numpy", int_dtype="numpy"):
     if int_dtype in ["numpy", "pytorch"]:
         int_dtype = torch.int64
-    else:
-        int_dtype = int_dtype
     new_defaults = _dtypes_impl.DefaultDTypes(
         float_dtype=float_dtype, complex_dtype=complex_dtype, int_dtype=int_dtype
```


```diff
@@ -447,9 +447,7 @@ def _prim_elementwise_meta(
     # (but getting it wrong will cause too many casts to be inserted in traces!)
     if device is not None:
         assert dtype is not None
-    if type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT:
-        dtype = dtype
-    elif type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL:
+    if type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL:
         dtype = torch.bool
     elif type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.INT_TO_FLOAT:
         if utils.is_integer_dtype(dtype) or utils.is_boolean_dtype(dtype):
@@ -457,8 +455,6 @@ def _prim_elementwise_meta(
     elif type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT:
         if utils.is_complex_dtype(dtype):
             dtype = utils.corresponding_real_dtype(dtype)
-    else:
-        dtype = dtype
     assert shape is not None
     return torch.empty_permuted(shape, l2p_perm, device=device, dtype=dtype)  # type: ignore[return-value]
```
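In this if/elif chain the `DEFAULT` branch did nothing but re-bind `dtype` to itself, so the chain can start at `ALWAYS_BOOL` and let the `DEFAULT` case fall through. A toy illustration, with a hypothetical `Kind` enum standing in for `ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND`:

```python
from enum import Enum, auto

class Kind(Enum):  # hypothetical stand-in for ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND
    DEFAULT = auto()
    ALWAYS_BOOL = auto()

def promote(kind: Kind, dtype: type) -> type:
    # The old leading branch `if kind is Kind.DEFAULT: dtype = dtype` was a
    # no-op; falling through for DEFAULT leaves dtype unchanged either way.
    if kind is Kind.ALWAYS_BOOL:
        dtype = bool
    return dtype

assert promote(Kind.DEFAULT, int) is int
assert promote(Kind.ALWAYS_BOOL, int) is bool
```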


```diff
@@ -103,9 +103,6 @@ def _reparametrize_module(
     strict: bool = False,
     stack_weights: bool = False,
 ):
-    parameters_and_buffers = parameters_and_buffers
-    stack_weights = stack_weights
     if tie_weights:
         untied_parameters_and_buffers = _untie_named_tensors_map(
             module, parameters_and_buffers
```


```diff
@@ -85,7 +85,7 @@ class RRefAPITest:
         ):
             rref_local_value(rref)
-        ret = ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,))
+        ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,))
         self.assertEqual(ret, torch.add(torch.ones(2, 2), 1))
     @dist_init
```