Fix compilation warning and spurious print (#87297)
Fixes a compilation warning, makes that warning an error, and removes a spurious print.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/87297
Approved by: https://github.com/malfet
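The warning in question is the compiler's -Wrange-loop-construct diagnostic, which flags range-based for loops whose loop variable needlessly copies each element; the CMake hunk below promotes it to an error, and the C++ hunks switch the affected loops to bind by (const) reference. A minimal standalone sketch of the pattern, assuming a GCC or Clang toolchain that supports the flag (the file and function names are illustrative, not from the PyTorch tree):

// demo.cpp -- hypothetical example; compile with e.g.
//   g++ -std=c++17 -Werror=range-loop-construct -c demo.cpp
#include <string>
#include <vector>

int count_nonempty(const std::vector<std::string>& names) {
  int n = 0;
  // Rejected once the flag is an error: the loop variable copies each element.
  //   for (const std::string name : names) { ... }
  // The fix applied throughout this commit: bind by const reference instead.
  for (const std::string& name : names) {
    if (!name.empty()) {
      ++n;
    }
  }
  return n;
}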
@@ -826,6 +826,7 @@ if(NOT MSVC)
   append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+  append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-missing-field-initializers" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-type-limits" CMAKE_CXX_FLAGS)
   append_cxx_flag_if_supported("-Wno-array-bounds" CMAKE_CXX_FLAGS)
@@ -133,7 +133,7 @@ struct CaptureList {
       auto tensors = val.toTensorList();
       sizes_.push_back(tensors.size());

-      for (const at::Tensor tensor : tensors) {
+      for (const auto& tensor : tensors) {
        captureTensor(tensor, is_output);
      }
    } else {
@@ -326,7 +326,7 @@ struct DifferentiableGraphBackward : public autograd::Node {
  void addOutputForIValue(const IValue& value) {
    if (value.isTensorList()) {
      input_tensor_lists_.insert({index_, value.toTensorList().size()});
-      for (const at::Tensor tensor : value.toTensorList()) {
+      for (const at::Tensor& tensor : value.toTensorList()) {
        addOutputForTensor(tensor);
        index_++;
      }
@@ -357,7 +357,7 @@ struct DifferentiableGraphBackward : public autograd::Node {
    if (v.isTensorList()) {
      auto tensors = v.toTensorList();
      input_instructions_.pushTensorList(tensors.size());
-      for (const at::Tensor tensor : tensors) {
+      for (const at::Tensor& tensor : tensors) {
        addInputVariable(tensor);
      }
    } else if (v.isTensor()) {
@@ -129,7 +129,7 @@ void restoreAccurateTypeTags(const IValue& root, const TypePtr& type_tag) {
        auto elem_type = w.type->containedType(0);
        auto lst = w.value.toList();
        lst.unsafeSetElementType(elem_type);
-        for (const IValue item : lst) {
+        for (const IValue& item : lst) {
          Work elem = {elem_type, item};
          to_process.emplace_back(std::move(elem));
        }
@@ -71,7 +71,7 @@ std::list<std::pair<at::RecordFunctionHandle, int>> flattenOpIdList(
  std::list<std::pair<at::RecordFunctionHandle, int>> input_op_id_list;
  auto state_ptr = NVTXThreadLocalState::getTLS();
  TORCH_INTERNAL_ASSERT(state_ptr, "Expected profiler state set");
-  for (const c10::IValue input : list) {
+  for (const c10::IValue& input : list) {
    if (input.isTensor()) {
      const at::Tensor& tensor = input.toTensor();
      auto producer_op_pair = state_ptr->getOpIdFromInput(tensor);
@@ -198,7 +198,7 @@ std::vector<std::vector<int64_t>> flattenList(
    c10::List<c10::IValue> list,
    std::string fn_name) {
  std::vector<std::vector<int64_t>> tensor_dims;
-  for (const c10::IValue input : list) {
+  for (const c10::IValue& input : list) {
    if (input.isTensor()) {
      const at::Tensor& tensor = input.toTensor();
      if (tensor.defined()) {
@@ -231,5 +231,4 @@ def main() -> None:


 if __name__ == '__main__':
-    print("Generating Python interface file 'datapipe.pyi'...")
     main()