[xplat] add static_cast where missing (#76756)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/76756

Add `static_cast` to implicit int -> float conversions.

Test Plan: CI

Reviewed By: yfeldblum

Differential Revision: D35857046

fbshipit-source-id: 0560125fed19e74eff85e22cfab971893515f4dc
(cherry picked from commit 7cd5b2347d0e95938c73e39b20e59e647c74de69)
This commit is contained in:
Richard Howell
2022-05-03 15:53:20 -07:00
committed by PyTorch MergeBot
parent 20e4d6c4dc
commit 3a2fc312be
3 changed files with 9 additions and 9 deletions

View File

@@ -770,8 +770,8 @@ inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
c10::optional<ExpandingArray<2>> output_size_ = output_size;
if (output_size_ == c10::nullopt) {
TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt);
output_size_ = {(int64_t)(input.size(-2) * (*output_ratio.value())[0]),
(int64_t)(input.size(-1) * (*output_ratio.value())[1])};
output_size_ = {(int64_t)(static_cast<double>(input.size(-2)) * (*output_ratio.value())[0]),
(int64_t)(static_cast<double>(input.size(-1)) * (*output_ratio.value())[1])};
}
Tensor _random_samples_ = _random_samples;
@@ -849,9 +849,9 @@ inline std::tuple<Tensor, Tensor> fractional_max_pool3d_with_indices(
c10::optional<ExpandingArray<3>> output_size_ = output_size;
if (output_size_ == c10::nullopt) {
TORCH_INTERNAL_ASSERT(output_ratio != c10::nullopt);
output_size_ = {(int64_t)(input.size(-3) * (*output_ratio.value())[0]),
(int64_t)(input.size(-2) * (*output_ratio.value())[1]),
(int64_t)(input.size(-1) * (*output_ratio.value())[2])};
output_size_ = {(int64_t)(static_cast<double>(input.size(-3)) * (*output_ratio.value())[0]),
(int64_t)(static_cast<double>(input.size(-2)) * (*output_ratio.value())[1]),
(int64_t)(static_cast<double>(input.size(-1)) * (*output_ratio.value())[2])};
}
Tensor _random_samples_ = _random_samples;

View File

@@ -64,7 +64,7 @@ inline std::vector<int64_t> _interp_output_size(
std::vector<int64_t> ret;
for (const auto i : c10::irange(dim)) {
ret.emplace_back(static_cast<int64_t>(floor(input.size(i + 2) * scale_factors[i])));
ret.emplace_back(static_cast<int64_t>(floor(static_cast<double>(input.size(i + 2)) * scale_factors[i])));
}
return ret;
}

View File

@@ -122,15 +122,15 @@ struct TORCH_API LegacyEvent {
double cpuElapsedUs(const LegacyEvent& e) const {
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers)
return (e.cpu_ns_ - cpu_ns_)/(1000.0);
return static_cast<double>(e.cpu_ns_ - cpu_ns_)/(1000.0);
}
void setCpuUs(int64_t cpu_us) {
cpu_ns_ = cpu_us * 1000.0;
cpu_ns_ = static_cast<double>(cpu_us) * 1000.0;
}
double cpuUs() const {
return cpu_ns_ / (1000.0);
return static_cast<double>(cpu_ns_) / (1000.0);
}
double cudaElapsedUs(const LegacyEvent& e) const;