Compare commits

...

2 Commits

Author  SHA1        Message                              Date
cyy     50211f1c49  Remove unneeded pointer copy         2025-10-20 02:52:42 +00:00
cyy     0d003be802  Avoid const_cast and other cleanup   2025-10-20 02:52:42 +00:00
2 changed files with 14 additions and 32 deletions

File 1 of 2

@@ -77,9 +77,7 @@ static void reduced_float_copy_kernel(TensorIteratorBase &iter, bool requires_neg
   int64_t grain_size = at::internal::GRAIN_SIZE;
-  auto loop = [strides_in, requires_neg](char** base, const int64_t* strides, int64_t size0, int64_t size1) {
-    std::array<char*, 2> data;
-    std::copy_n(base, 2, data.data());
+  auto loop = [strides_in, requires_neg](char** data, const int64_t* strides, int64_t size0, int64_t size1) {
     const int64_t *outer_strides = &strides[2];
     for ([[maybe_unused]] const auto it : c10::irange(size1)) {
@@ -146,9 +144,7 @@ static void reduced_float_copy_kernel(TensorIteratorBase &iter, bool requires_neg
   int64_t grain_size = at::internal::GRAIN_SIZE;
-  auto loop = [strides_in, requires_neg](char** base, const int64_t* strides, int64_t size0, int64_t size1) {
-    std::array<char*, 2> data;
-    std::copy_n(base, 2, data.data());
+  auto loop = [strides_in, requires_neg](char** data, const int64_t* strides, int64_t size0, int64_t size1) {
     const int64_t *outer_strides = &strides[2];
     for ([[maybe_unused]] const auto it : c10::irange(size1)) {
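
Both hunks above are the same change at two call sites: the `loop` lambda used to copy its two base pointers into a local `std::array` before touching them, and now takes the pointer array directly. A minimal standalone sketch of the before/after pattern (`loop_before`, `loop_after`, and the stub buffers are illustrative, not from the patch):

#include <algorithm>
#include <array>
#include <cstdint>

int main() {
  // Before: the lambda copies the two base pointers into a local array
  // before the body ever touches them.
  auto loop_before = [](char** base, const int64_t*, int64_t, int64_t) {
    std::array<char*, 2> data;
    std::copy_n(base, 2, data.data());  // redundant pointer copy
    // ... body indexes data[0] / data[1]
  };

  // After: the body indexes the incoming pointer array directly; the copy
  // (and the extra name) disappear with no change in behavior.
  auto loop_after = [](char** data, const int64_t*, int64_t, int64_t) {
    // ... body indexes data[0] / data[1]
  };

  char buf0[8] = {};
  char buf1[8] = {};
  char* bases[2] = {buf0, buf1};
  loop_before(bases, nullptr, 0, 0);
  loop_after(bases, nullptr, 0, 0);
}

The copy bought nothing: the lambda already receives the pointers as `char**`, and the body only ever indexes the first two entries.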

File 2 of 2

@@ -493,40 +493,33 @@ void cpu_hflip_vec(at::TensorIterator& iter) {
       for ([[maybe_unused]] const auto j : c10::irange(size1)) {
         // vectorized loop with negative stride for output
-        char** C10_RESTRICT data_ = data_arr.data();
         int64_t n = size0;
-        char* C10_RESTRICT data[ntensors];
-        for (const auto arg : c10::irange(ntensors)) {
-          data[arg] = data_[arg];
-        }
         int64_t i = 0;
-        // data[0] unaligned pre-pass
+        // data_arr[0] unaligned pre-pass
         int64_t offset = (j * n + (n - i - Vec::size())) % 32;
         offset = (offset >= n) ? n : offset;
         for (; i < offset; i++) {
-          scalar_t* out_ptr = (scalar_t*)(data[0] - i * stride);
-          *out_ptr = c10::load((scalar_t *)(data[1] + i * stride));
+          scalar_t* out_ptr = (scalar_t*)(data_arr[0] - i * stride);
+          *out_ptr = c10::load((scalar_t *)(data_arr[1] + i * stride));
         }
         // Empirically found that it is faster to process 3 data items together vs 2 or 4
         for (; i <= n - 3 * Vec::size(); i += 3 * Vec::size()) {
-          auto out1 = Vec::loadu(data[1] + i * stride);
-          auto out2 = Vec::loadu(data[1] + (i + Vec::size()) * stride);
-          auto out3 = Vec::loadu(data[1] + (i + 2 * Vec::size()) * stride);
+          auto out1 = Vec::loadu(data_arr[1] + i * stride);
+          auto out2 = Vec::loadu(data_arr[1] + (i + Vec::size()) * stride);
+          auto out3 = Vec::loadu(data_arr[1] + (i + 2 * Vec::size()) * stride);
           // flip the vector: 1234 -> 4321
           out1 = flip(out1);
           out2 = flip(out2);
           out3 = flip(out3);
-          out1.store(data[0] - (i + Vec::size() - 1) * stride);
-          out2.store(data[0] - (i + 2 * Vec::size() - 1) * stride);
-          out3.store(data[0] - (i + 3 * Vec::size() - 1) * stride);
+          out1.store(data_arr[0] - (i + Vec::size() - 1) * stride);
+          out2.store(data_arr[0] - (i + 2 * Vec::size() - 1) * stride);
+          out3.store(data_arr[0] - (i + 3 * Vec::size() - 1) * stride);
         }
         if (i < n) {
           for (; i < n; i++) {
-            scalar_t* out_ptr = (scalar_t*)(data[0] - i * stride);
-            *out_ptr = c10::load((scalar_t *)(data[1] + i * stride));
+            scalar_t* out_ptr = (scalar_t*)(data_arr[0] - i * stride);
+            *out_ptr = c10::load((scalar_t *)(data_arr[1] + i * stride));
           }
         }
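
The hunk above deletes the `char* C10_RESTRICT data[ntensors]` scratch copy (and the `data_` alias feeding it) and indexes `data_arr` directly; the flip addressing itself is untouched. Here is a minimal stand-in sketch of that addressing, assuming a plain block width `W` in place of `Vec::size()` and contiguous `T` elements instead of the kernel's strided `char*` arithmetic (`hflip_blocks` is illustrative, not from the patch):

#include <algorithm>
#include <cstddef>

// dst points at the LAST output element and is written backwards, like
// data_arr[0] in the kernel; src is read forwards, like data_arr[1].
template <typename T, int W = 8>
void hflip_blocks(const T* src, T* dst, std::ptrdiff_t n) {
  std::ptrdiff_t i = 0;
  // 3x-unrolled block loop, mirroring the kernel's empirical finding that
  // three vectors per iteration beat two or four.
  for (; i + 3 * W <= n; i += 3 * W) {
    for (int k = 0; k < 3; k++) {
      T block[W];
      // flip the block: 1234 -> 4321
      std::reverse_copy(src + i + k * W, src + i + (k + 1) * W, block);
      // mirrored store: element src[i] always lands at dst[-i]
      std::copy(block, block + W, dst - (i + (k + 1) * W - 1));
    }
  }
  // scalar tail (the kernel additionally runs an unaligned pre-pass)
  for (; i < n; i++) {
    dst[-i] = src[i];
  }
}

Calling `hflip_blocks(src, out + n - 1, n)` writes `out` as the reverse of `src`: every store walks backwards from the last element, which is exactly why the kernel stores each flipped vector at `data_arr[0] - (i + Vec::size() - 1) * stride`.
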
@@ -560,15 +553,8 @@ void cpu_vflip_memcpy(at::TensorIterator& iter) {
       const int64_t stride = strides[0];
       for ([[maybe_unused]] const auto j : c10::irange(size1)) {
-        char** C10_RESTRICT data_ = data_arr.data();
         int64_t n = size0;
-        char* C10_RESTRICT data[ntensors];
-        for (const auto arg : c10::irange(ntensors)) {
-          data[arg] = data_[arg];
-        }
-        memcpy(data[0], data[1], n * stride);
+        memcpy(data_arr[0], data_arr[1], n * stride);
         // advance:
         for (const auto arg : c10::irange(data_arr.size())) {
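
The same cleanup applies in `cpu_vflip_memcpy`: the row copy now reads both pointers straight out of `data_arr`. Each of the `size1` rows is one contiguous block of `size0` elements, so the inner step is a single `memcpy` followed by the `// advance:` pointer update. A minimal sketch under that reading, with hypothetical `out_outer`/`in_outer` standing in for the outer strides the iterator supplies (one of them is typically negative for a flip):

#include <cstddef>
#include <cstdint>
#include <cstring>

// One memcpy per contiguous row; the outer strides walk the output and
// input rows, which is what makes this a vertical flip rather than a copy.
inline void vflip_rows(char* out, const char* in,
                       int64_t size0, int64_t size1, int64_t stride,
                       int64_t out_outer, int64_t in_outer) {
  for (int64_t j = 0; j < size1; j++) {
    std::memcpy(out, in, static_cast<std::size_t>(size0 * stride));  // whole row
    out += out_outer;  // advance: analogous to the "// advance:" loop above
    in  += in_outer;
  }
}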