Enable move warnings for torch targets (#149923)
This PR enables more move warnings for torch targets and fixes some code.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/149923
Approved by: https://github.com/malfet
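For context on the flags enabled below, here is a minimal sketch of the pattern that GCC's -Wredundant-move and Clang's -Wmove group diagnose; make_greeting is an illustrative function, not code from this PR:

#include <string>
#include <utility>

// Built with e.g. -Wextra -Wredundant-move (GCC) or -Wmove (Clang), the
// explicit std::move below is reported: the local already binds as an
// rvalue in the return statement, so the cast buys nothing and only
// blocks copy elision (NRVO).
std::string make_greeting() {
  std::string s = "hello";
  return std::move(s); // warning: redundant move in return statement
}

Older compilers, on the other hand, still need the explicit move in some return-with-conversion cases, which is what the new C10_RETURN_MOVE_IF_OLD_COMPILER macro introduced below distinguishes.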
@@ -1084,7 +1084,6 @@ if(NOT MSVC)
   if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13)
     append_cxx_flag_if_supported("-Wno-dangling-reference" CMAKE_CXX_FLAGS)
     append_cxx_flag_if_supported("-Wno-error=dangling-reference" CMAKE_CXX_FLAGS)
-    append_cxx_flag_if_supported("-Wno-error=redundant-move" CMAKE_CXX_FLAGS)
   endif()
 else()
   # Define export functions for AOTI.
@@ -508,4 +508,14 @@ __host__ __device__
 #endif
 
+// This macro is used to find older C++ compilers
+// that don't support move optimization for return values.
+
+#if (defined(__GNUC__) && __GNUC__ < 13) || \
+    (defined(__clang_major__) && __clang_major__ < 13)
+#define C10_RETURN_MOVE_IF_OLD_COMPILER 1
+#else
+#define C10_RETURN_MOVE_IF_OLD_COMPILER 0
+#endif
+
 
 #endif // C10_MACROS_MACROS_H_
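A minimal usage sketch of the new macro, assuming a translation unit with c10/macros/Macros.h on its include path; Base, Derived, and make_node are illustrative names rather than code from this PR. It mirrors the return-site pattern applied in the hunks that follow, where the returned local (e.g. a py::list) must convert to the declared return type (py::object):

#include <c10/macros/Macros.h>

#include <memory>

struct Base {
  virtual ~Base() = default;
};
struct Derived : Base {};

// The local is a std::unique_ptr<Derived> while the function returns a
// std::unique_ptr<Base>, so a conversion is involved. Compilers the macro
// classifies as "old" keep the explicit std::move; newer ones take the
// plain return, which they already treat as an rvalue, keeping
// -Wredundant-move / -Wmove quiet.
std::unique_ptr<Base> make_node() {
  auto node = std::make_unique<Derived>();
#if C10_RETURN_MOVE_IF_OLD_COMPILER
  return std::move(node);
#else
  return node;
#endif
}

Keeping both branches in the source lets the same code build cleanly across the supported compiler range instead of silencing the diagnostic globally; this pairs with dropping the -Wno-error=redundant-move suppression above and adding -Wredundant-move (GCC) and -Wmove (Clang) to torch_compile_options below.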
@@ -393,10 +393,10 @@ function(torch_compile_options libname)
     list(APPEND private_compile_options -Wunused-function)
     list(APPEND private_compile_options -Wunused-variable)
     if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-      list(APPEND private_compile_options -Wunused-but-set-variable)
+      list(APPEND private_compile_options -Wunused-but-set-variable -Wredundant-move)
     endif()
     if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
-      list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi)
+      list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi -Wmove)
     else()
       list(APPEND private_compile_options
         # Considered to be flaky. See the discussion at
@@ -641,7 +641,11 @@ py::object toPyObject(IValue ivalue) {
     for (const auto i : c10::irange(list.size())) {
       t[i] = toPyObject(IValue{list.get(i)});
     }
+#if C10_RETURN_MOVE_IF_OLD_COMPILER
     return std::move(t);
+#else
+    return t;
+#endif
   } else if (ivalue.isTuple()) {
     auto tuple = std::move(ivalue).toTuple();
     const auto& elements = tuple->elements();
@@ -676,7 +680,11 @@ py::object toPyObject(IValue ivalue) {
         .attr("_create_named_tuple")(
             t, unqualName, fieldNames, py::make_tuple(defaults));
     } else {
+#if C10_RETURN_MOVE_IF_OLD_COMPILER
       return std::move(t);
+#else
+      return t;
+#endif
     }
   } else if (ivalue.isDevice()) {
     return py::cast(std::move(ivalue).toDevice());
@@ -689,7 +697,11 @@ py::object toPyObject(IValue ivalue) {
       py_dict[toPyObject(IValue{pair.key()})] =
           toPyObject(IValue{pair.value()});
     }
+#if C10_RETURN_MOVE_IF_OLD_COMPILER
     return std::move(py_dict);
+#else
+    return py_dict;
+#endif
   } else if (ivalue.isRRef()) {
 #ifdef USE_RPC
     auto RRefPtr =
@@ -117,7 +117,11 @@ py::object cast_sequence(std::vector<py::object> objs) {
   for (const auto i : c10::irange(num_objs)) {
     sequence[i] = std::move(objs[i]);
   }
+#if C10_RETURN_MOVE_IF_OLD_COMPILER
   return std::move(sequence);
+#else
+  return sequence;
+#endif
 }
 
 py::object cast_dict(std::vector<py::object> objs) {
@@ -127,7 +131,11 @@ py::object cast_dict(std::vector<py::object> objs) {
     py::tuple obj = py::reinterpret_borrow<py::tuple>(objs[i]);
     sequence[obj[0]] = obj[1];
   }
+#if C10_RETURN_MOVE_IF_OLD_COMPILER
   return std::move(sequence);
+#else
+  return sequence;
+#endif
 }
 
 py::object unflatten_rec(
@@ -483,7 +483,11 @@ ExprHandle TensorExprKernel::getVarForShape(const c10::ShapeSymbol& ss) {
   if (it == shapeSymbolToVar_.end()) {
     VarHandle var("ss" + std::to_string(-value), kLong);
     shapeSymbolToVar_.emplace(value, var);
+#if C10_RETURN_MOVE_IF_OLD_COMPILER
     return std::move(var);
+#else
+    return var;
+#endif
   }
   return it->second;
 }
@@ -1020,7 +1024,11 @@ ExprHandle TensorExprKernel::getStrideArg(
       kLong);
   strideArgToVar_[std::pair<size_t, size_t>(
       tensor_input_index, stride_index)] = var;
+#if C10_RETURN_MOVE_IF_OLD_COMPILER
   return std::move(var);
+#else
+  return var;
+#endif
   }
   return it->second;
 }