/*******************************************************************************
* Copyright 2018-2025 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#include "oneapi/dnnl/dnnl.h"

#include "utils/parallel.hpp"

#include "dnnl_common.hpp"
#include "dnnl_memory.hpp"

#include "shuffle/shuffle.hpp"
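
// benchdnn shuffle driver. An illustrative invocation (the exact option list
// is described in the shuffle driver documentation) might look like:
//     ./benchdnn --shuffle --dir=FWD_D --dt=f32 --tag=abx --axis=1 --group=4 2x68x56x56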
namespace shuffle {
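
// Fill the source (or diff_dst) tensor with deterministic values that survive
// conversion to the problem data type, so the library and reference paths see
// identical inputs.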
int fill_src(const prb_t *prb, dnn_mem_t &mem_dt, dnn_mem_t &mem_fp) {
    const auto nelems = mem_fp.nelems();
    if (nelems == 0) return OK;

    // Refer to modes documentation for filling principles.
    if (has_bench_mode_bit(mode_bit_t::bitwise)) {
        return fill_random_real(mem_dt, mem_fp, nullptr);
    }
    if (has_bench_mode_bit(mode_bit_t::perf)) {
        return fill_random_real(
                mem_dt, mem_fp, nullptr, get_perf_fill_cfg(mem_dt.dt()));
    }

    auto get_range = [](const dnnl_data_type_t dt) {
        if (dt == dnnl_s8 || dt == dnnl_u8)
            return 256;
        else if (dt == dnnl_bf16 || dt == dnnl_f16)
            return 128;
        return 1024;
    };

    const int range = get_range(prb->dt);
    const int f_min = prb->dt == dnnl_u8 ? 0 : -range / 2;
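
    // Deterministic filling: `gen = (97 * i + 101) % range` walks over
    // [0, range). Values for bf16/f16 are scaled down into roughly
    // [-0.5, 0.5) to limit rounding error in the low-precision types, and
    // every value is rounded to the nearest representable one.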
    benchdnn_parallel_nd(nelems, [&](int64_t i) {
        const float gen = ((97 * i) + 101) % range;
        const float value = (prb->dt == dnnl_bf16 || prb->dt == dnnl_f16)
                ? (f_min + gen) / range
                : (f_min + gen) * (1.0f + 4.0f / range);
        mem_fp.set_f32_elem(i, round_to_nearest_representable(prb->dt, value));
    });

    SAFE(mem_dt.reorder(mem_fp), WARN);

    return OK;
}
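
// Create the shuffle primitive descriptor for the requested direction:
// forward (training or inference) or backward. When `force_f32_dt` is set,
// memory descriptors are created with f32 instead of the problem data type.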
dnnl_status_t init_pd(init_pd_args_t<prb_t> &init_pd_args) {
    const prb_t *prb = init_pd_args.prb;
    res_t *res = init_pd_args.res;
    bool force_f32_dt = init_pd_args.force_f32_dt;

    auto dnnl_attr = make_benchdnn_dnnl_wrapper(
            create_dnnl_attr(prb->attr, attr_args_t()));

    if (prb->dir & FLAG_FWD) {
        auto src_d = dnn_mem_t::init_md(prb->ndims, prb->dims.data(),
                force_f32_dt ? dnnl_f32 : prb->dt, prb->tag);
        auto dst_d = dnn_mem_t::init_md(prb->ndims, prb->dims.data(),
                force_f32_dt ? dnnl_f32 : prb->dt, tag::any);

        auto prop_kind = prb->dir & FLAG_INF ? dnnl_forward_inference
                                             : dnnl_forward_training;
        TIME_C_PD(DNN_SAFE_STATUS(dnnl_shuffle_forward_primitive_desc_create(
                &init_pd_args.pd, init_pd_args.engine, prop_kind,
                init_pd_args.src_md ? init_pd_args.src_md : src_d, dst_d,
                prb->axis, prb->group, dnnl_attr)));
    } else {
        auto diff_src_d = dnn_mem_t::init_md(prb->ndims, prb->dims.data(),
                force_f32_dt ? dnnl_f32 : prb->dt, tag::any);
        auto diff_dst_d = dnn_mem_t::init_md(prb->ndims, prb->dims.data(),
                force_f32_dt ? dnnl_f32 : prb->dt, tag::any);

        TIME_C_PD(DNN_SAFE_STATUS(dnnl_shuffle_backward_primitive_desc_create(
                &init_pd_args.pd, init_pd_args.engine, diff_src_d, diff_dst_d,
                prb->axis, prb->group, init_pd_args.hint, dnnl_attr)));
    }
    return dnnl_success;
}
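
// Skip problems the library does not implement: unsupported data types and
// any sum/binary/prelu post-ops requested through attributes.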
void skip_unimplemented_prb(const prb_t *prb, res_t *res) {
    skip_unimplemented_data_type({prb->dt}, prb->dir, res);
    skip_unimplemented_sum_po(prb->attr, res, dnnl_shuffle, prb->dt);
    skip_unimplemented_binary_po(prb->attr, res);
    skip_unimplemented_prelu_po(prb->attr, res, dnnl_shuffle);
}

void skip_invalid_prb(const prb_t *prb, res_t *res) {}

void setup_cmp(compare::compare_t &cmp, const prb_t *prb, data_kind_t kind,
        const args_t &ref_args) {}
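
// Execution arguments the driver sets up for each direction.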
std::vector<int> supported_exec_args(dir_t dir) {
    static const std::vector<int> exec_fwd_args = {
            DNNL_ARG_SRC,
            DNNL_ARG_DST,
    };
    static const std::vector<int> exec_bwd_args = {
            DNNL_ARG_DIFF_SRC,
            DNNL_ARG_DIFF_DST,
    };
    return (dir & FLAG_FWD) ? exec_fwd_args : exec_bwd_args;
}
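
// For every library memory object, create a plain f32 counterpart on the CPU
// engine and fill both with identical data for the reference computation.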
int init_ref_memory_args(dnn_mem_map_t &ref_mem_map, dnn_mem_map_t &mem_map,
        dnnl_primitive_t prim, const prb_t *prb, res_t *res,
        dnnl_primitive_t prim_ref) {
    if (has_bench_mode_modifier(mode_modifier_t::no_ref_memory)) return OK;

    const auto &ref_engine = get_cpu_engine();

    for (auto &entry : mem_map) {
        const int exec_arg = entry.first;
        // The function targets regular exec_args that are positive. Negative
        // args are used by the bitwise mode and are broken in the `default`
        // branch because `&` always returns `true` for them.
        if (exec_arg <= 0) continue;

        auto &mem = entry.second; // `mem` is modified by filler (reorder).

        // Scratchpad memory relates to a primitive. If the reference needs
        // it, use the switch below to define a memory descriptor for it.
        if (exec_arg != DNNL_ARG_SCRATCHPAD) {
            ref_mem_map.emplace(exec_arg,
                    dnn_mem_t(mem.md_, dnnl_f32, tag::abx, ref_engine,
                            /* prefill = */ false));
        }
        auto &ref_mem = ref_mem_map[exec_arg];

        switch (exec_arg) {
            case DNNL_ARG_SRC: SAFE(fill_src(prb, mem, ref_mem), WARN); break;
            case DNNL_ARG_DIFF_DST:
                SAFE(fill_src(prb, mem, ref_mem), WARN);
                break;
            default: break;
        }
        // Don't keep reference memory if it is not used further.
        if (!has_bench_mode_bit(mode_bit_t::corr)) ref_mem_map.clear();
    }

    return OK;
}
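
// Tensors compared by the correctness check: the destination on the forward
// path and the diff source on the backward path.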
std::vector<data_kind_t> get_kinds_to_check(const prb_t *prb) {
    std::vector<data_kind_t> check_kinds;
    if (prb->dir & FLAG_FWD) {
        check_kinds = {DST};
    } else if (prb->dir & FLAG_BWD) {
        check_kinds = {SRC};
    } else {
        assert(!"unexpected!");
        SAFE_V(FAIL);
    }
    assert(!check_kinds.empty());
    return check_kinds;
}
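
// Driver entry points: `createit` builds the primitive, `checkit` runs memory
// size and cache checks, and `doit` executes the primitive and validates it.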
int createit(std::vector<benchdnn_dnnl_wrapper_t<dnnl_primitive_t>> &v_prim,
        const prb_t *prb, res_t *res) {
    v_prim.resize(1);
    SAFE(init_prim(prb->ctx_init, v_prim[0], init_pd, prb, res), WARN);
    return OK;
}

int checkit(std::vector<benchdnn_dnnl_wrapper_t<dnnl_primitive_t>> &v_prim,
        const prb_t *prb, res_t *res) {
    if (has_bench_mode_bit(mode_bit_t::exec)) {
        SAFE(check_total_size(res), WARN);
    }
    if (has_bench_mode_bit(mode_bit_t::corr)) {
        SAFE(check_caches(v_prim[0], prb, res), WARN);
    }
    return OK;
}
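
// Execute the primitive once, then run the requested correctness, bitwise,
// and performance validation on the result.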
int doit(const std::vector<benchdnn_dnnl_wrapper_t<dnnl_primitive_t>> &v_prim,
        const prb_t *prb, res_t *res) {
    set_zmalloc_max_expected_size(res->mem_size_args.zmalloc_expected_size);

    const auto &prim = v_prim[0];

    dnn_mem_map_t mem_map, ref_mem_map;
    init_memory_args<prb_t>(mem_map, prb, prim, supported_exec_args(prb->dir));
    TIME_FILL(SAFE(
            init_ref_memory_args(ref_mem_map, mem_map, prim, prb, res), WARN));

    args_t args(mem_map), ref_args(ref_mem_map);

    SAFE(execute_and_wait(prim, args, res), WARN);

    check_correctness(prb, get_kinds_to_check(prb), args, ref_args, setup_cmp,
            res, prb->dir);
    SAFE(check_bitwise(prim, get_kinds_to_check(prb), args, prb->attr,
                 prb->inplace, res),
            WARN);

    return measure_perf(prb->ctx_exe, res, prim, args);
}

} // namespace shuffle