mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
[BE] Use data() method when possible as it's safer and more readable (#92755)
Apply clang-tidy readability-container-data-pointer fixits. This essentially uses the data() method when possible instead of the less readable `&vec[0]` to get the address of the underlying backing implementation. Not only is this more readable, it is safer as it allows you to retrieve the pointer even when the std::vector or std::string is empty without throwing an index error. Pull Request resolved: https://github.com/pytorch/pytorch/pull/92755 Approved by: https://github.com/ezyang
This commit is contained in:
committed by
PyTorch MergeBot
parent
b847ac227f
commit
582485bf0f
@@ -253,7 +253,7 @@ at::Tensor& embedding_bag_nbit_impl(
   } else {
     shape_arr[0] = output_size;
    shape_arr[1] = D;
-    shape = c10::IntArrayRef(&shape_arr[0], 2);
+    shape = c10::IntArrayRef(shape_arr.data(), 2);
   }
   at::native::resize_(output, shape, c10::nullopt);
 }
@@ -423,7 +423,7 @@ at::Tensor& embedding_bag_byte_impl(
   } else {
     shape_arr[0] = output_size;
     shape_arr[1] = D;
-    shape = c10::IntArrayRef(&shape_arr[0], 2);
+    shape = c10::IntArrayRef(shape_arr.data(), 2);
   }
   at::native::resize_(output, shape, c10::nullopt);
 }
@@ -47,7 +47,7 @@ size_t ReplaceAll(std::string& s, c10::string_view from, c10::string_view to) {
   if (from.size() >= to.size()) {
     // If the replacement string is not larger than the original, we
     // can do the replacement in-place without allocating new storage.
-    char* s_data = &s[0];
+    char* s_data = s.data();

    while ((cur_pos = s.find(from.data(), last_pos, from.size())) !=
           std::string::npos) {
@@ -117,8 +117,8 @@ extern "C"
       "_get_module_info_from_flatbuffer", [](std::string flatbuffer_content) {
         py::gil_scoped_acquire acquire;
         py::dict result;
-        mobile::ModuleInfo minfo =
-            torch::jit::get_module_info_from_flatbuffer(&flatbuffer_content[0]);
+        mobile::ModuleInfo minfo = torch::jit::get_module_info_from_flatbuffer(
+            flatbuffer_content.data());
         result["bytecode_version"] = minfo.bytecode_version;
         result["operator_version"] = minfo.operator_version;
         result["function_names"] = minfo.function_names;
@@ -1005,7 +1005,7 @@ std::string Unpickler::readBytes(size_t length) {
     // If the string is smallish, do a full buffer read,
     // and read out of that buffer.
     data.resize(length);
-    readSlowWithBuffer(&data[0], length);
+    readSlowWithBuffer(data.data(), length);
   } else {
     // Otherwise, for larger strings, read what we can from
     // the buffer, and then read directly to the destination.
@@ -434,9 +434,9 @@ ArgValue TensorExprKernel::toArg(const torch::jit::Value* v) const {
   }
   if (vec.size() == 0) {
     return BufList(); // Return arbitrarily typed vector
-  } else if (c10::get_if<BufHandle>(&vec[0])) {
+  } else if (c10::get_if<BufHandle>(vec.data())) {
     return convertVecArgValue<BufHandle>(vec);
-  } else if (c10::get_if<int64_t>(&vec[0])) {
+  } else if (c10::get_if<int64_t>(vec.data())) {
     return convertVecArgValue<int64_t>(vec);
   }
   throw unsupported_dtype();
Reference in New Issue
Block a user