mirror of https://github.com/uxlfoundation/oneDNN.git (synced 2025-10-20 10:03:50 +08:00)
doc: fixed brief description duplication on example pages
committed by Vadim Pirogov
parent 1ab74cfe80
commit 9acb506a9e
@@ -183,7 +183,7 @@ SHORT_NAMES = NO
 # description.)
 # The default value is: NO.
 
-JAVADOC_AUTOBRIEF = YES
+JAVADOC_AUTOBRIEF = NO
 
 # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
 # line (until the first dot) of a Qt-style comment as the brief description. If
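For context on the Doxyfile change above: with `JAVADOC_AUTOBRIEF = YES`, Doxygen promotes the first line (up to the first dot) of every Javadoc-style `///` block to a brief description on its own. Example headers that additionally pasted the brief in by hand, via `@copybrief` or a separate `*_short` page, therefore showed the same sentence twice. A minimal sketch of the failure mode, using a hypothetical page id `sketch_example_cpp` (not from the commit):

/// @example sketch_example.cpp
/// @copybrief sketch_example_cpp
/// > Annotated version: @ref sketch_example_cpp

/// @page sketch_example_cpp Sketch example
/// First sentence of the page. With autobrief enabled, this line is already
/// the page's brief, and the @copybrief above repeats it on the example page.

Turning autobrief off and deleting the explicit `@copybrief` lines, as the hunks below do, leaves each example page with a single brief.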
@@ -15,12 +15,8 @@
 *******************************************************************************/
 
 /// @example bnorm_u8_via_binary_postops.cpp
-/// @copybrief bnorm_u8_via_binary_postops_cpp
 /// > Annotated version: @ref bnorm_u8_via_binary_postops_cpp
 ///
-/// @page bnorm_u8_via_binary_postops_cpp_short
-/// Bnorm u8 via binary postops example.
-///
 /// @page bnorm_u8_via_binary_postops_cpp Bnorm u8 by binary post-ops example
 /// The example implements the Batch normalization u8 via the following
 /// operations: binary_sub(src, mean), binary_div(tmp_dst, variance),
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2016-2022 Intel Corporation
+* Copyright 2016-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 *******************************************************************************/
 
 /// @example cnn_inference_f32.c
-/// @copybrief cnn_inference_f32_c
 /// > Annotated version: @ref cnn_inference_f32_c
+
 /// @page cnn_inference_f32_c CNN f32 inference example
 /// This C API example demonstrates how to build an AlexNet neural
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example cnn_inference_f32.cpp
-/// @copybrief cnn_inference_f32_cpp
 /// > Annotated version: @ref cnn_inference_f32_cpp
 
 /// @page cnn_inference_f32_cpp CNN f32 inference example
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example cnn_inference_int8.cpp
-/// @copybrief cnn_inference_int8_cpp
 /// > Annotated version: @ref cnn_inference_int8_cpp
 
 /// @page cnn_inference_int8_cpp CNN int8 inference example
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example cnn_training_bf16.cpp
-/// @copybrief cnn_training_bf16_cpp
 /// > Annotated version: @ref cnn_training_bf16_cpp
 ///
 /// @page cnn_training_bf16_cpp CNN bf16 training example
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example cnn_training_f32.cpp
-/// @copybrief cnn_training_f32_cpp
 /// > Annotated version: @ref cnn_training_f32_cpp
 ///
 /// @page cnn_training_f32_cpp CNN f32 training example
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2016-2022 Intel Corporation
+* Copyright 2016-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 *******************************************************************************/
 
 /// @example cpu_cnn_training_f32.c
-/// @copybrief cpu_cnn_training_f32_c
 /// > Annotated version: @ref cpu_cnn_training_f32_c
+
 /// @page cpu_cnn_training_f32_c CNN f32 training example
 /// This C API example demonstrates how to build an AlexNet model training.
@@ -16,13 +16,12 @@
 
 /// @example cpu_matmul_coo.cpp
 /// > Annotated version: @ref cpu_matmul_coo_cpp
 ///
+/// @page cpu_matmul_coo_cpp MatMul Primitive with Sparse Memory in COO Format
 /// This C++ API example demonstrates how to create and execute a
 /// [MatMul](@ref dev_guide_matmul) primitive that uses a source tensor
 /// encoded with the COO sparse encoding.
 ///
-/// @page cpu_matmul_coo_cpp MatMul Primitive Example
-///
 /// @include cpu_matmul_coo.cpp
 
 #include <algorithm>
@@ -16,13 +16,12 @@
 
 /// @example cpu_matmul_csr.cpp
 /// > Annotated version: @ref cpu_matmul_csr_cpp
 ///
+/// @page cpu_matmul_csr_cpp MatMul Primitive with Sparse Memory in CSR Format
 /// This C++ API example demonstrates how to create and execute a
 /// [MatMul](@ref dev_guide_matmul) primitive that uses a source tensor
 /// encoded with the CSR sparse encoding.
 ///
-/// @page cpu_matmul_csr_cpp MatMul Primitive Example
-///
 /// @include cpu_matmul_csr.cpp
 
 #include <algorithm>
@@ -16,13 +16,12 @@
 
 /// @example cpu_matmul_weights_compression.cpp
 /// > Annotated version: @ref cpu_matmul_weights_compression_cpp
 ///
+/// @page cpu_matmul_weights_compression_cpp MatMul Primitive Example
 /// This C++ API example demonstrates how to create and execute a
 /// [MatMul](@ref dev_guide_matmul) primitive that uses a weights tensor
 /// encoded with the packed sparse encoding.
 ///
-/// @page cpu_matmul_weights_compression_cpp MatMul Primitive Example
-///
 /// @include cpu_matmul_weights_compression.cpp
 
 #include <algorithm>
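A note on the three matmul hunks above: each file declared the same Doxygen page twice, once for the description and once right before the `@include`. Doxygen merges documentation blocks that name the same `@page` id into one page, so the second declaration fed the same page's heading material in again, which appears to be the duplication this commit removes. A minimal sketch with hypothetical names (not from the commit):

/// @page sketch_matmul_cpp MatMul Sketch
/// Brief sentence describing the sketch example.

/// @page sketch_matmul_cpp MatMul Sketch
/// @include sketch_matmul.cpp
// Both blocks name the same page id, so Doxygen appends the second block to
// the first and the heading material shows up twice on the rendered page.

The fix keeps one `@page` declaration per example and hangs both the description and the `@include` off that single block.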
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2018-2022 Intel Corporation
+* Copyright 2018-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example cpu_rnn_inference_f32.cpp
-/// @copybrief cpu_rnn_inference_f32_cpp
 /// > Annotated version: @ref cpu_rnn_inference_f32_cpp
 
 /// @page cpu_rnn_inference_f32_cpp RNN f32 inference example
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2018-2022 Intel Corporation
+* Copyright 2018-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example cpu_rnn_inference_int8.cpp
-/// @copybrief cpu_rnn_inference_int8_cpp
 /// > Annotated version: @ref cpu_rnn_inference_int8_cpp
 
 /// @page cpu_rnn_inference_int8_cpp RNN int8 inference example
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2019-2022 Intel Corporation
+* Copyright 2019-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 *******************************************************************************/
 
 /// @example cross_engine_reorder.c
-/// @copybrief cross_engine_reorder_c
 /// > Annotated version: @ref cross_engine_reorder_c
+
 /// @page cross_engine_reorder_c Reorder between CPU and GPU engines
 /// This C API example demonstrates programming flow when reordering memory
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2019-2022 Intel Corporation
+* Copyright 2019-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example cross_engine_reorder.cpp
-/// @copybrief cross_engine_reorder_cpp
 /// > Annotated version: @ref cross_engine_reorder_cpp
 
 /// @page cross_engine_reorder_cpp Reorder between CPU and GPU engines
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example getting_started.cpp
-/// @copybrief getting_started_cpp
 /// > Annotated version: @ref getting_started_cpp
 
 #include <cmath>
@@ -29,12 +28,8 @@
 #include "example_utils.hpp"
 
 using namespace dnnl;
+// [Prologue]
 
 /// @page getting_started_cpp oneDNN API Basic Workflow Tutorial
 ///
-/// This C++ API example demonstrates the basics of the oneDNN programming model.
-///
-/// > Example code: @ref getting_started.cpp
-///
 /// This C++ API example demonstrates the basics of the oneDNN programming model:
@@ -64,8 +59,6 @@ using namespace dnnl;
 /// example_utils.hpp, which contains some debugging facilities like returning
 /// a string representation for common oneDNN C types.
 
-// [Prologue]
-
 /// @page getting_started_cpp
 /// @section getting_started_cpp_tutorial getting_started_tutorial() function
 ///
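The `// [Prologue]` move in the two getting_started.cpp hunks above relies on Doxygen's snippet markers: `@snippet file id` copies exactly the lines between the two `// [id]` comment markers in `file` into the page, so where a marker sits determines what the rendered tutorial shows. A minimal sketch of the mechanism, with hypothetical names (not from the commit):

// sketch_example.cpp
// [Prologue]
#include <iostream>   // everything between the two markers lands on the page
// [Prologue]

/// @page sketch_page_cpp Sketch tutorial
/// @snippet sketch_example.cpp Prologue

In getting_started.cpp the opening marker sits further up the file, outside these hunks; moving the closing marker above the `@page` comment block likely keeps that doc comment, and the brief it carries, out of the quoted Prologue snippet.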
@@ -14,11 +14,10 @@
 * limitations under the License.
 *******************************************************************************/
 
-/// @example gpu_opencl_interop.cpp
-/// @copybrief gpu_opencl_interop_cpp
+/// @example gpu_opencl_interop.cpp
 /// > Annotated version: @ref gpu_opencl_interop_cpp
 
-/// @page gpu_opencl_interop_cpp Getting started on GPU with OpenCL extensions API
+/// @page gpu_opencl_interop_cpp Getting started on GPU with OpenCL extensions API
 /// This C++ API example demonstrates programming for Intel(R) Processor
 /// Graphics with OpenCL* extensions API in oneDNN.
 ///
@@ -48,7 +47,7 @@
 /// All C++ API types and functions reside in the `dnnl` namespace.
 /// For simplicity of the example we import this namespace.
 /// @page gpu_opencl_interop_cpp
-/// @snippet gpu_opencl_interop.cpp Prologue
+/// @snippet gpu_opencl_interop.cpp Prologue
 // [Prologue]
 #include <iostream>
 #include <numeric>
@@ -134,7 +133,7 @@ void gpu_opencl_interop_tutorial() {
 dnnl::stream strm(eng);
 // [Initialize stream]
 
-/// @subsection gpu_opencl_interop_cpp_sub2 Wrapping data into oneDNN memory object
+/// @subsection gpu_opencl_interop_cpp_sub2 Wrapping data into oneDNN memory object
 ///
 /// Next, we create a memory object. We need to specify dimensions of our
 /// memory by passing a memory::dims object. Then we create a memory
@@ -154,7 +153,7 @@
 memory mem(mem_d, eng);
 // [memory alloc]
 
-/// @subsection gpu_opencl_interop_cpp_sub3 Initialize the data by executing a custom OpenCL kernel
+/// @subsection gpu_opencl_interop_cpp_sub3 Initialize the data by executing a custom OpenCL kernel
 /// We are going to create an OpenCL kernel that will initialize our data.
 /// It requires writing a bit of C code to create an OpenCL program from a
 /// string literal source. The kernel initializes the data by the
@@ -258,7 +257,7 @@ int main(int argc, char **argv) {
 {engine::kind::gpu}, gpu_opencl_interop_tutorial);
 }
 
-/// @page gpu_opencl_interop_cpp Getting started on GPU with OpenCL extensions API
+/// @page gpu_opencl_interop_cpp Getting started on GPU with OpenCL extensions API
 ///
 /// <b></b>
 ///
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2023-2024 Intel Corporation
+* Copyright 2023-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example cpu_getting_started.cpp
-/// @copybrief graph_cpu_getting_started_cpp
 /// > Annotated version: @ref graph_cpu_getting_started_cpp
 
 /// @page graph_cpu_getting_started_cpp Getting started on CPU with Graph API
@@ -45,6 +44,7 @@
 ///
 /// @page graph_cpu_getting_started_cpp
 /// @snippet cpu_getting_started.cpp Headers and namespace
+
 //[Headers and namespace]
 #include <iostream>
 #include <memory>
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2023-2024 Intel Corporation
+* Copyright 2023-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example cpu_inference_int8.cpp
-/// @copybrief graph_cpu_inference_int8_cpp
 /// Annotated version: @ref graph_cpu_inference_int8_cpp
 
 /// @page graph_cpu_inference_int8_cpp Convolution int8 inference example with Graph API
@@ -38,6 +37,7 @@
 ///
 /// @page graph_cpu_inference_int8_cpp
 /// @snippet cpu_inference_int8.cpp Headers and namespace
+
 //[Headers and namespace]
 #include <iostream>
 #include <memory>
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2024 Intel Corporation
+* Copyright 2024-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example cpu_single_op_partition.cpp
-/// @copybrief graph_cpu_single_op_partition_cpp
 /// > Annotated version: @ref graph_cpu_single_op_partition_cpp
 
 /// @page graph_cpu_single_op_partition_cpp Single op partition on CPU
@@ -44,6 +43,7 @@
 ///
 /// @page graph_cpu_single_op_partition_cpp
 /// @snippet cpu_single_op_partition.cpp Headers and namespace
+
 //[Headers and namespace]
 #include <iostream>
 #include <memory>
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example gpu_opencl_getting_started.cpp
-/// @copybrief graph_gpu_opencl_getting_started_cpp
 /// > Annotated version: @ref graph_gpu_opencl_getting_started_cpp
 
 /// @page graph_gpu_opencl_getting_started_cpp Getting started with OpenCL extensions and Graph API
@@ -46,6 +45,7 @@
 /// in namespace `dnnl::graph`.
 /// @page graph_gpu_opencl_getting_started_cpp
 /// @snippet gpu_opencl_getting_started.cpp Headers and namespace
+
 //[Headers and namespace]
 #include "oneapi/dnnl/dnnl_graph.hpp"
 #include "oneapi/dnnl/dnnl_ocl.hpp"
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2023-2024 Intel Corporation
+* Copyright 2023-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example sycl_getting_started.cpp
-/// @copybrief graph_sycl_getting_started_cpp
 /// > Annotated version: @ref graph_sycl_getting_started_cpp
 
 /// @page graph_sycl_getting_started_cpp Getting started with SYCL extensions API and Graph API
@@ -47,6 +46,7 @@
 /// in namespace `dnnl::graph`.
 /// @page graph_sycl_getting_started_cpp
 /// @snippet sycl_getting_started.cpp Headers and namespace
+
 //[Headers and namespace]
 #include "oneapi/dnnl/dnnl_graph.hpp"
 #include "oneapi/dnnl/dnnl_graph_sycl.hpp"
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2024 Intel Corporation
+* Copyright 2024-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example sycl_single_op_partition.cpp
-/// @copybrief graph_sycl_single_op_partition_cpp
 /// > Annotated version: @ref graph_sycl_single_op_partition_cpp
 
 /// @page graph_sycl_single_op_partition_cpp Single op partition on GPU
@@ -44,6 +43,7 @@
 ///
 /// @page graph_sycl_single_op_partition_cpp
 /// @snippet sycl_single_op_partition.cpp Headers and namespace
+
 //[Headers and namespace]
 #include "oneapi/dnnl/dnnl_graph.hpp"
 #include "oneapi/dnnl/dnnl_graph_sycl.hpp"
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example matmul_perf.cpp
-/// @copybrief matmul_perf_cpp
 /// > Annotated version: @ref matmul_perf_cpp
 
 /// @page matmul_perf_cpp Matrix Multiplication Performance Example
@@ -1,5 +1,5 @@
 /*******************************************************************************
-* Copyright 2019-2024 Intel Corporation
+* Copyright 2019-2025 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
|
||||
*******************************************************************************/
|
||||
|
||||
/// @example memory_format_propagation.cpp
|
||||
/// @copybrief memory_format_propagation_cpp
|
||||
/// > Annotated version: @ref memory_format_propagation_cpp
|
||||
|
||||
#include <iostream>
|
||||
|
@@ -15,7 +15,6 @@
 *******************************************************************************/
 
 /// @example performance_profiling.cpp
-/// @copybrief performance_profiling_cpp
 /// > Annotated version: @ref performance_profiling_cpp
 
 /// @page performance_profiling_cpp Performance Profiling Example
@@ -86,7 +85,6 @@
 ///
 /// The following descriptions of each implementation will reference each other,
 /// and are meant to be read in order.
-///
 
 #include <iostream>
 #include <stdexcept>
@@ -15,9 +15,8 @@
 *******************************************************************************/
 
 /// @example rnn_training_f32.cpp
-/// @copybrief rnn_training_f32_cpp
 /// > Annotated version: @ref rnn_training_f32_cpp
 ///
 /// @page rnn_training_f32_cpp RNN f32 training example
 /// This C++ API example demonstrates how to build GNMT model training.
 ///
@@ -16,7 +16,7 @@
 
 /// @example sycl_interop_buffer.cpp
 /// > Annotated version: @ref sycl_interop_buffer_cpp
-///
+
 /// @page sycl_interop_buffer_cpp Getting started on both CPU and GPU with SYCL extensions API
 /// Full example text: @ref sycl_interop_buffer.cpp
 ///
@@ -15,7 +15,8 @@
 *******************************************************************************/
 
 /// @example sycl_interop_usm.cpp
+///
 /// > Annotated version: @ref sycl_interop_usm_cpp
 
 /// @page sycl_interop_usm_cpp SYCL USM example
 ///
 /// This C++ API example demonstrates programming for Intel(R) Processor