Skip to content

Commit 1835a43

Browse files
committed
examples: fixed broken links to format tags
1 parent 50b46cc commit 1835a43

File tree

1 file changed

+17
-15
lines changed

1 file changed

+17
-15
lines changed

examples/cnn_inference_f32.cpp

+17-15
Original file line numberDiff line numberDiff line change
@@ -91,9 +91,9 @@ void simple_net(engine::kind engine_kind, int times = 100) {
9191
std::vector<float> conv1_bias(product(conv1_bias_tz));
9292
//[Allocate buffers]
9393

94-
/// Create memory that describes data layout in the buffers. This example uses
95-
/// tag::nchw (batch-channels-height-width) for input data and tag::oihw
96-
/// for weights.
94+
/// Create memory that describes data layout in the buffers. This example
95+
/// uses dnnl::memory::format_tag::nchw (batch-channels-height-width)
96+
/// for input data and dnnl::memory::format_tag::oihw for weights.
9797
/// @snippet cnn_inference_f32.cpp Create user memory
9898
//[Create user memory]
9999
auto user_src_memory = memory({{conv1_src_tz}, dt::f32, tag::nchw}, eng);
@@ -106,12 +106,13 @@ void simple_net(engine::kind engine_kind, int times = 100) {
106106
write_to_dnnl_memory(conv1_bias.data(), conv1_user_bias_memory);
107107
//[Create user memory]
108108

109-
/// Create memory descriptors with layout tag::any. The `any` format enables
110-
/// the convolution primitive to choose the data format that will result in
111-
/// best performance based on its input parameters (convolution kernel
112-
/// sizes, strides, padding, and so on). If the resulting format is different
113-
/// from `nchw`, the user data must be transformed to the format required for
114-
/// the convolution (as explained below).
109+
/// Create memory descriptors with layout dnnl::memory::format_tag::any.
110+
/// The `any` format enables the convolution primitive to choose the data
111+
/// format that will result in best performance based on its input
112+
/// parameters (convolution kernel sizes, strides, padding, and so on).
113+
/// If the resulting format is different from `nchw`, the user data must be
114+
/// transformed to the format required for the convolution (as explained
115+
/// below).
115116
/// @snippet cnn_inference_f32.cpp Create convolution memory descriptors
116117
//[Create convolution memory descriptors]
117118
auto conv1_src_md = memory::desc({conv1_src_tz}, dt::f32, tag::any);
@@ -136,9 +137,9 @@ void simple_net(engine::kind engine_kind, int times = 100) {
136137
conv1_strides, conv1_padding, conv1_padding);
137138
//[Create convolution primitive descriptor]
138139

139-
/// Check whether data and weights formats required by convolution is different
140-
/// from the user format. In case it is different change the layout using
141-
/// reorder primitive.
140+
/// Check whether the data and weights formats required by convolution are
141+
/// different from the user format. If they differ, change the
142+
/// layout using the reorder primitive.
142143
/// @snippet cnn_inference_f32.cpp Reorder data and weights
143144
//[Reorder data and weights]
144145
auto conv1_src_memory = user_src_memory;
@@ -180,7 +181,8 @@ void simple_net(engine::kind engine_kind, int times = 100) {
180181
/// Create the ReLU primitive. For better performance, keep the input data
181182
/// format for ReLU (as well as for other operation primitives until another
182183
/// convolution or inner product is encountered) the same as the one chosen
183-
/// for convolution. Also note that ReLU is done in-place by using conv1 memory.
184+
/// for convolution. Also note that ReLU is done in-place by using conv1
185+
/// memory.
184186
/// @snippet cnn_inference_f32.cpp Create relu primitive
185187
//[Create relu primitive]
186188
auto relu1_prim_desc
@@ -227,8 +229,8 @@ void simple_net(engine::kind engine_kind, int times = 100) {
227229
auto pool1_dst_md = memory::desc({pool1_dst_tz}, dt::f32, tag::any);
228230

229231
/// For training execution, pooling requires a private workspace memory
230-
/// to perform the backward pass. However, pooling should not use 'workspace'
231-
/// for inference, because this is detrimental to performance.
232+
/// to perform the backward pass. However, pooling should not use
233+
/// 'workspace' for inference, because this is detrimental to performance.
232234
/// @snippet cnn_inference_f32.cpp Create pooling primitive
233235
///
234236
/// The example continues to create more layers according

0 commit comments

Comments
 (0)