forked from uxlfoundation/oneDNN
-
Notifications
You must be signed in to change notification settings - Fork 45
/
Copy pathdnnl.hpp
11571 lines (10546 loc) · 513 KB
/
dnnl.hpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*******************************************************************************
* Copyright 2016-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/// @file
/// C++ API
#ifndef ONEAPI_DNNL_DNNL_HPP
#define ONEAPI_DNNL_DNNL_HPP
#include "oneapi/dnnl/dnnl_config.h"
/// @cond DO_NOT_DOCUMENT_THIS
#include <algorithm>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <string>
#include <vector>
#include <unordered_map>
#include "oneapi/dnnl/dnnl.h"
/// @endcond
// __cpp_exceptions is referred from
// https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_exceptions.html
// gcc < 5 does not define __cpp_exceptions but __EXCEPTIONS,
// Microsoft C++ Compiler does not provide an option to disable exceptions
#ifndef DNNL_ENABLE_EXCEPTIONS
#if defined(__cpp_exceptions) || defined(__EXCEPTIONS) \
        || (defined(_MSC_VER) && !defined(__clang__))
#define DNNL_ENABLE_EXCEPTIONS 1
#else
#define DNNL_ENABLE_EXCEPTIONS 0
#endif
#endif

// Compiler-specific hard stop used when exceptions are disabled:
// __builtin_trap() on GCC/Clang, __debugbreak() on ICC/MSVC. Any other
// compiler is rejected at preprocessing time.
#if defined(__GNUC__) || defined(__clang__)
#define DNNL_TRAP() __builtin_trap()
#elif defined(__INTEL_COMPILER) || defined(_MSC_VER)
#define DNNL_TRAP() __debugbreak()
#else
#error "unknown compiler"
#endif

// Error reporting: throw dnnl::error when exceptions are available;
// otherwise print the message to stderr and trap, since the failure cannot
// be propagated to the caller.
#if DNNL_ENABLE_EXCEPTIONS
#define DNNL_THROW_ERROR(status, msg) throw error(status, msg)
#else
#include <cstdio>
#define DNNL_THROW_ERROR(status, msg) \
    do { \
        fputs(msg, stderr); \
        DNNL_TRAP(); \
    } while (0)
#endif
/// @addtogroup dnnl_api oneDNN API
/// @{
/// oneDNN namespace
namespace dnnl {
/// @addtogroup dnnl_api_utils Utilities
/// Utility types and definitions.
/// @{
/// oneDNN exception class.
///
/// Captures the status code returned by a failed C API function together
/// with a pointer to the (static) error message supplied at the call site.
struct error : public std::exception {
    dnnl_status_t status; ///< Status code reported by the C API.
    const char *message; ///< Human-readable description of the failure.

    /// Creates an exception object.
    ///
    /// @param status The error status returned by a C API function.
    /// @param message The error message.
    error(dnnl_status_t status, const char *message)
        : status(status), message(message) {}

    /// Returns the explanatory string.
    const char *what() const noexcept override { return message; }

    /// Convenience wrapper for C API calls: does nothing on success and
    /// reports the failure otherwise (throws dnnl::error, or prints and
    /// traps when exceptions are disabled).
    ///
    /// @param status The error status returned by a C API function.
    /// @param message The error message.
    static void wrap_c_api(dnnl_status_t status, const char *message) {
        if (status == dnnl_success) return;
        DNNL_THROW_ERROR(status, message);
    }
};
/// @cond DO_NOT_DOCUMENT_THIS
/// Checks that the size of a container falls into the allowed range and
/// reports #dnnl_invalid_arguments with @p error_message otherwise.
///
/// @tparam T Container type; must provide a size() member function.
/// @param v Container to validate.
/// @param error_message Message reported on validation failure.
/// @param min_size Minimal allowed size; defaults to 1.
/// @param max_size Maximal allowed size; a negative value (the default)
///     disables the upper-bound check.
template <typename T>
void validate_container_size(const T &v, const char *error_message,
        int min_size = 1, int max_size = -1) {
    // static_cast instead of a C-style cast. The containers validated here
    // are small (dims, post-op lists), so narrowing to int is acceptable.
    const int size = static_cast<int>(v.size());
    if (size < min_size || (max_size >= 0 && size > max_size))
        DNNL_THROW_ERROR(dnnl_invalid_arguments, error_message);
}
/// @endcond
/// A class that provides the destructor for a oneDNN C API handle.
///
/// The primary template is intentionally empty; each wrapped C API handle
/// type gets an explicit specialization below that defines a static
/// `destructor` function releasing the handle.
template <typename T>
struct handle_traits {};
/// oneDNN C API handle wrapper class.
///
/// Serves as the common base for primitive (dnnl::primitive), engine
/// (dnnl::engine), stream (dnnl::stream), and other wrapper classes.
/// Objects of this class are cheap to pass by value.
///
/// A handle is either owning (follows `std::shared_ptr` semantics) or
/// weak (follows `std::weak_ptr` semantics).
///
/// @note
///     Internally the C API handle is always kept in a `std::shared_ptr`;
///     weak mode simply installs a no-op deleter.
///
template <typename T, typename traits = handle_traits<T>>
struct handle {
private:
    // No-op deleter installed for weak handles.
    static dnnl_status_t dummy_destructor(T) { return dnnl_success; }
    std::shared_ptr<typename std::remove_pointer<T>::type> data_ {0};

protected:
    bool operator==(const T other) const { return other == data_.get(); }
    bool operator!=(const T other) const { return !(*this == other); }

public:
    /// Constructs an empty handle object.
    ///
    /// @warning
    ///     An uninitialized object is equivalent to a null pointer:
    ///     calling its methods, or passing it to other library functions,
    ///     causes an exception to be thrown.
    handle() = default;
    /// Copy constructor.
    handle(const handle<T, traits> &) = default;
    /// Assignment operator.
    handle<T, traits> &operator=(const handle<T, traits> &) = default;
    /// Move constructor.
    handle(handle<T, traits> &&) = default;
    /// Move assignment operator.
    handle<T, traits> &operator=(handle<T, traits> &&) = default;

    /// Wraps an existing C API handle.
    ///
    /// @param t The C API handle to wrap.
    /// @param weak A flag specifying whether to construct a weak wrapper;
    ///     defaults to @c false.
    explicit handle(T t, bool weak = false) { reset(t, weak); }

    /// Replaces the wrapped C API handle with a new one.
    ///
    /// @param t The new value of the C API handle.
    /// @param weak A flag specifying whether the wrapper should be weak;
    ///     defaults to @c false.
    void reset(T t, bool weak = false) {
        data_.reset(t, weak ? &dummy_destructor : traits::destructor);
    }

    /// Returns the underlying C API handle.
    ///
    /// @param allow_empty A flag signifying whether the method is allowed
    ///     to return an empty (null) object without throwing an exception.
    /// @returns The underlying C API handle.
    T get(bool allow_empty = false) const {
        T result = data_.get();
        if (result == nullptr && !allow_empty)
            DNNL_THROW_ERROR(
                    dnnl_invalid_arguments, "object is not initialized");
        return result;
    }

    /// Converts a handle to the underlying C API handle type. Does not
    /// throw and returns `nullptr` if the object is empty.
    ///
    /// @returns The underlying C API handle.
    explicit operator T() const { return get(true); }

    /// Checks whether the object is not empty.
    ///
    /// @returns Whether the object is not empty.
    explicit operator bool() const { return get(true) != nullptr; }

    /// Equality operator.
    ///
    /// @param other Another handle wrapper.
    /// @returns @c true if this and the other wrapper manage the same
    ///     underlying C API handle, @c false otherwise. Two empty handle
    ///     objects compare equal.
    bool operator==(const handle &other) const {
        return data_.get() == other.data_.get();
    }

    /// Inequality operator.
    ///
    /// @param other Another handle wrapper.
    /// @returns @c true if this and the other wrapper manage different
    ///     underlying C API handles, @c false otherwise. Two empty handle
    ///     objects compare equal.
    bool operator!=(const handle &other) const { return !(*this == other); }
};
/// @cond DO_NOT_DOCUMENT_THIS
// Destructor bindings: each specialization maps a C API handle type to its
// dnnl_*_destroy() function, which dnnl::handle::reset() installs as the
// shared_ptr deleter for owning (non-weak) handles.
template <>
struct handle_traits<dnnl_memory_t> {
    static dnnl_status_t destructor(dnnl_memory_t p) {
        return dnnl_memory_destroy(p);
    }
};

template <>
struct handle_traits<dnnl_primitive_desc_t> {
    static dnnl_status_t destructor(dnnl_primitive_desc_t p) {
        return dnnl_primitive_desc_destroy(p);
    }
};

template <>
struct handle_traits<dnnl_primitive_t> {
    static dnnl_status_t destructor(dnnl_primitive_t p) {
        return dnnl_primitive_destroy(p);
    }
};

template <>
struct handle_traits<dnnl_primitive_desc_iterator_t> {
    static dnnl_status_t destructor(dnnl_primitive_desc_iterator_t p) {
        return dnnl_primitive_desc_iterator_destroy(p);
    }
};
/// @endcond
/// @} dnnl_api_utils
struct stream;
struct memory;
struct primitive_desc;
/// @addtogroup dnnl_api_primitives Primitives
/// Compute primitives
/// @sa @ref dev_guide_basic_concepts
/// @{
/// @addtogroup dnnl_api_primitives_common Common
/// Common operations to create, destroy and inspect primitives
/// @{
/// Base class for all computational primitives.
///
/// A primitive wraps a #dnnl_primitive_t C API handle (via dnnl::handle),
/// so objects of this class can be passed by value.
struct primitive : public handle<dnnl_primitive_t> {
    /// Kinds of primitives supported by the library.
    enum class kind {
        /// Undefined primitive
        undef = dnnl_undefined_primitive,
        /// A reorder primitive.
        reorder = dnnl_reorder,
        /// A shuffle primitive.
        shuffle = dnnl_shuffle,
        /// A (out-of-place) tensor concatenation primitive.
        concat = dnnl_concat,
        /// A summation primitive.
        sum = dnnl_sum,
        /// A convolution primitive.
        convolution = dnnl_convolution,
        /// A deconvolution primitive.
        deconvolution = dnnl_deconvolution,
        /// An element-wise primitive.
        eltwise = dnnl_eltwise,
        /// A softmax primitive.
        softmax = dnnl_softmax,
        /// A pooling primitive.
        pooling = dnnl_pooling,
        /// An LRN primitive.
        lrn = dnnl_lrn,
        /// A batch normalization primitive.
        batch_normalization = dnnl_batch_normalization,
        /// A layer normalization primitive.
        layer_normalization = dnnl_layer_normalization,
        /// An inner product primitive.
        inner_product = dnnl_inner_product,
        /// An RNN primitive.
        rnn = dnnl_rnn,
        /// A binary primitive.
        binary = dnnl_binary,
        /// A logsoftmax primitive.
        logsoftmax = dnnl_logsoftmax,
        /// A matmul (matrix multiplication) primitive.
        matmul = dnnl_matmul,
        /// A resampling primitive.
        resampling = dnnl_resampling,
        /// A pooling version 2 primitive.
        pooling_v2 = dnnl_pooling_v2,
        /// A reduction primitive.
        reduction = dnnl_reduction,
        /// A PReLU primitive.
        prelu = dnnl_prelu,
    };

    // Inherit the wrapping constructors from dnnl::handle.
    using handle::handle;

    /// Default constructor. Constructs an empty object.
    primitive() = default;

    /// Constructs a primitive from a C API primitive descriptor.
    ///
    /// @param c_pd C API primitive descriptor.
    primitive(const_dnnl_primitive_desc_t c_pd);

    /// Constructs a primitive from a primitive descriptor.
    ///
    /// @param pd Primitive descriptor.
    primitive(const primitive_desc &pd);

    /// Returns the C API primitive descriptor of the underlying C API
    /// primitive.
    ///
    /// @returns The underlying C API primitive descriptor.
    inline const_dnnl_primitive_desc_t get_primitive_desc() const;

    /// Returns the kind of the primitive.
    ///
    /// @returns The primitive kind.
    inline kind get_kind() const;

    /// Executes computations specified by the primitive in a specified
    /// stream.
    ///
    /// Arguments are passed via an arguments map containing <index,
    /// memory object> pairs. The index must be one of the `DNNL_ARG_*`
    /// values such as `DNNL_ARG_SRC`, and the memory must have a memory
    /// descriptor matching the one returned by
    /// primitive_desc::query_md(#query::exec_arg_md, index) unless using
    /// dynamic shapes (see #DNNL_RUNTIME_DIM_VAL).
    ///
    /// @param astream Stream object. The stream must belong to the same
    ///     engine as the primitive.
    /// @param args Arguments map.
    void execute(const stream &astream,
            const std::unordered_map<int, memory> &args) const;
};
/// Converts primitive kind enum value from C++ API to C API type.
///
/// The C++ enumerators are defined directly in terms of the C ones, so a
/// plain static_cast is sufficient.
///
/// @param akind C++ API primitive kind enum value.
/// @returns Corresponding C API primitive kind enum value.
inline dnnl_primitive_kind_t convert_to_c(primitive::kind akind) {
    return static_cast<dnnl_primitive_kind_t>(akind);
}
// Out-of-line definition. Queries the C API for the primitive descriptor
// behind this primitive; error::wrap_c_api reports failure (throws or
// traps), so on normal return `pd` has been filled in by the library.
const_dnnl_primitive_desc_t primitive::get_primitive_desc() const {
    const_dnnl_primitive_desc_t pd;
    error::wrap_c_api(dnnl_primitive_get_primitive_desc(get(), &pd),
            "could not get a primitive descriptor from a primitive");
    return pd;
}
// Out-of-line definition. Fetches the primitive kind by querying the
// primitive descriptor through the C API and converts it back to the C++
// enum.
dnnl::primitive::kind primitive::get_kind() const {
    const_dnnl_primitive_desc_t pd = get_primitive_desc();
    // TODO (Roma): the code below is only needed because get_primitive_desc
    // returns a C type.
    // Named c_kind (not `kind`) to avoid shadowing the nested enum type
    // primitive::kind; static_cast replaces the former C-style cast.
    dnnl_primitive_kind_t c_kind;
    error::wrap_c_api(
            dnnl_primitive_desc_query(pd, dnnl_query_primitive_kind, 0,
                    static_cast<void *>(&c_kind)),
            "could not get a primitive kind from a primitive descriptor");
    return static_cast<dnnl::primitive::kind>(c_kind);
}
/// @} dnnl_api_primitives_common
/// @addtogroup dnnl_api_attributes
///
/// A container for parameters that extend primitives behavior.
///
/// Attributes can also contain Post-ops, which are computations executed
/// after the primitive.
///
/// @sa @ref dev_guide_attributes
/// @sa @ref dev_guide_attributes_post_ops
///
/// @{
/// Floating-point math mode.
///
/// Controls which implicit down-conversions the library is allowed to use
/// for f32 computations.
enum class fpmath_mode {
    /// Default behavior, no downconversions allowed
    strict = dnnl_fpmath_mode_strict,
    /// Implicit f32->bf16 conversions allowed
    bf16 = dnnl_fpmath_mode_bf16,
    /// Implicit f32->f16 conversions allowed
    f16 = dnnl_fpmath_mode_f16,
    /// Implicit f32->f16 or f32->bf16 conversions allowed
    any = dnnl_fpmath_mode_any
};

/// Converts an fpmath mode enum value from C++ API to C API type.
///
/// @param mode C++ API fpmath mode enum value.
/// @returns Corresponding C API fpmath mode enum value.
inline dnnl_fpmath_mode_t convert_to_c(fpmath_mode mode) {
    return static_cast<dnnl_fpmath_mode_t>(mode);
}
/// Scratchpad mode.
///
/// Determines who owns the temporary memory (scratchpad) a primitive may
/// need during execution: the library or the user.
enum class scratchpad_mode {
    /// The library manages the scratchpad allocation according to the policy
    /// specified by the `DNNL_ENABLE_CONCURRENT_EXEC`
    /// [build option](@ref dev_guide_build_options) (default).
    ///
    /// When `DNNL_ENABLE_CONCURRENT_EXEC=OFF` (default), the library
    /// scratchpad is common to all primitives to reduce the memory footprint.
    /// This configuration comes with limited thread-safety properties, namely
    /// primitives can be created and executed in parallel but cannot migrate
    /// between threads (in other words, each primitive should be executed in
    /// the same thread it was created in).
    ///
    /// When `DNNL_ENABLE_CONCURRENT_EXEC=ON`, the library scratchpad is
    /// private to each primitive. The memory footprint is larger than when
    /// using `DNNL_ENABLE_CONCURRENT_EXEC=OFF` but different primitives can be
    /// created and run concurrently (the same primitive cannot be run
    /// concurrently from two different threads though).
    library = dnnl_scratchpad_mode_library,
    /// The user manages the scratchpad allocation by querying and providing
    /// the scratchpad memory to primitives. This mode is thread-safe as long
    /// as the scratchpad buffers are not used concurrently by two primitive
    /// executions.
    user = dnnl_scratchpad_mode_user,
};

/// Converts a scratchpad mode enum value from C++ API to C API type.
///
/// @param mode C++ API scratchpad mode enum value.
/// @returns Corresponding C API scratchpad mode enum value.
inline dnnl_scratchpad_mode_t convert_to_c(scratchpad_mode mode) {
    return static_cast<dnnl_scratchpad_mode_t>(mode);
}
/// Propagation kind.
///
/// Selects the direction of the computation (forward vs. backward) and, for
/// forward, whether extra outputs needed for training are produced.
enum class prop_kind {
    /// Undefined propagation kind.
    undef = dnnl_prop_kind_undef,
    /// Forward data propagation (training mode). In this mode, primitives
    /// perform computations necessary for subsequent backward propagation.
    forward_training = dnnl_forward_training,
    /// Forward data propagation (inference mode). In this mode, primitives
    /// perform only computations that are necessary for inference and omit
    /// computations that are necessary only for backward propagation.
    forward_inference = dnnl_forward_inference,
    /// Forward data propagation,
    /// alias for #dnnl::prop_kind::forward_inference.
    forward_scoring = dnnl_forward_scoring,
    /// Forward data propagation,
    /// alias for #dnnl::prop_kind::forward_training.
    forward = dnnl_forward,
    /// Backward propagation (with respect to all parameters).
    backward = dnnl_backward,
    /// Backward data propagation.
    backward_data = dnnl_backward_data,
    /// Backward weights propagation.
    backward_weights = dnnl_backward_weights,
    /// Backward bias propagation.
    backward_bias = dnnl_backward_bias
};

/// Converts propagation kind enum value from C++ API to C API type.
///
/// @param akind C++ API propagation kind enum value.
/// @returns Corresponding C API propagation kind enum value.
inline dnnl_prop_kind_t convert_to_c(prop_kind akind) {
    return static_cast<dnnl_prop_kind_t>(akind);
}
/// Kinds of algorithms.
enum class algorithm {
    /// Undefined algorithm
    undef = dnnl_alg_kind_undef,
    /// Convolution algorithm that is chosen to be either direct or Winograd
    /// automatically
    convolution_auto = dnnl_convolution_auto,
    /// Direct convolution
    convolution_direct = dnnl_convolution_direct,
    /// Winograd convolution
    convolution_winograd = dnnl_convolution_winograd,
    /// Direct deconvolution
    deconvolution_direct = dnnl_deconvolution_direct,
    /// Winograd deconvolution
    deconvolution_winograd = dnnl_deconvolution_winograd,
    /// Elementwise: rectified linear unit (ReLU)
    eltwise_relu = dnnl_eltwise_relu,
    /// Elementwise: hyperbolic tangent non-linearity (tanh)
    eltwise_tanh = dnnl_eltwise_tanh,
    /// Elementwise: exponential linear unit (ELU)
    eltwise_elu = dnnl_eltwise_elu,
    /// Elementwise: square
    eltwise_square = dnnl_eltwise_square,
    /// Elementwise: abs
    eltwise_abs = dnnl_eltwise_abs,
    /// Elementwise: square root
    eltwise_sqrt = dnnl_eltwise_sqrt,
    /// Elementwise: swish (\f$x \cdot sigmoid(a \cdot x)\f$)
    eltwise_swish = dnnl_eltwise_swish,
    /// Elementwise: linear
    eltwise_linear = dnnl_eltwise_linear,
    /// Elementwise: bounded_relu
    eltwise_bounded_relu = dnnl_eltwise_bounded_relu,
    /// Elementwise: soft_relu
    eltwise_soft_relu = dnnl_eltwise_soft_relu,
    /// Elementwise: logsigmoid
    eltwise_logsigmoid = dnnl_eltwise_logsigmoid,
    /// Elementwise: mish
    eltwise_mish = dnnl_eltwise_mish,
    /// Elementwise: logistic
    eltwise_logistic = dnnl_eltwise_logistic,
    /// Elementwise: exponent
    eltwise_exp = dnnl_eltwise_exp,
    /// Elementwise: gelu
    /// alias for #dnnl::algorithm::eltwise_gelu_tanh
    eltwise_gelu = dnnl_eltwise_gelu,
    /// Elementwise: tanh-based gelu
    eltwise_gelu_tanh = dnnl_eltwise_gelu_tanh,
    /// Elementwise: erf-based gelu
    eltwise_gelu_erf = dnnl_eltwise_gelu_erf,
    /// Elementwise: natural logarithm
    eltwise_log = dnnl_eltwise_log,
    /// Elementwise: clip
    eltwise_clip = dnnl_eltwise_clip,
    /// Eltwise: clip version 2
    eltwise_clip_v2 = dnnl_eltwise_clip_v2,
    /// Elementwise: pow
    eltwise_pow = dnnl_eltwise_pow,
    /// Elementwise: round
    eltwise_round = dnnl_eltwise_round,
    /// Elementwise: hardswish
    eltwise_hardswish = dnnl_eltwise_hardswish,
    /// Elementwise: hsigmoid
    eltwise_hsigmoid = dnnl_eltwise_hsigmoid,
    /// Elementwise: round_half_to_even
    eltwise_round_half_to_even = dnnl_eltwise_round_half_to_even,
    /// Elementwise: round_half_away_from_zero
    eltwise_round_half_away_from_zero = dnnl_eltwise_round_half_away_from_zero,
    /// Elementwise: rectified linear unit (ReLU) (dst for backward)
    eltwise_relu_use_dst_for_bwd = dnnl_eltwise_relu_use_dst_for_bwd,
    /// Elementwise: hyperbolic tangent non-linearity (tanh) (dst for backward)
    eltwise_tanh_use_dst_for_bwd = dnnl_eltwise_tanh_use_dst_for_bwd,
    /// Elementwise: exponential linear unit (ELU) (dst for backward)
    eltwise_elu_use_dst_for_bwd = dnnl_eltwise_elu_use_dst_for_bwd,
    /// Elementwise: square root (dst for backward)
    eltwise_sqrt_use_dst_for_bwd = dnnl_eltwise_sqrt_use_dst_for_bwd,
    /// Elementwise: logistic (dst for backward)
    eltwise_logistic_use_dst_for_bwd = dnnl_eltwise_logistic_use_dst_for_bwd,
    /// Elementwise: exponent (dst for backward)
    eltwise_exp_use_dst_for_bwd = dnnl_eltwise_exp_use_dst_for_bwd,
    /// Elementwise: clip version 2 (dst for backward)
    eltwise_clip_v2_use_dst_for_bwd = dnnl_eltwise_clip_v2_use_dst_for_bwd,
    /// Local response normalization (LRN) across multiple channels
    lrn_across_channels = dnnl_lrn_across_channels,
    /// LRN within a single channel
    lrn_within_channel = dnnl_lrn_within_channel,
    /// Max pooling
    pooling_max = dnnl_pooling_max,
    /// Average pooling exclude padding,
    /// alias for #dnnl::algorithm::pooling_avg_exclude_padding
    pooling_avg = dnnl_pooling_avg,
    /// Average pooling include padding
    pooling_avg_include_padding = dnnl_pooling_avg_include_padding,
    /// Average pooling exclude padding
    pooling_avg_exclude_padding = dnnl_pooling_avg_exclude_padding,
    /// RNN cell
    vanilla_rnn = dnnl_vanilla_rnn,
    /// LSTM cell
    vanilla_lstm = dnnl_vanilla_lstm,
    /// GRU cell
    vanilla_gru = dnnl_vanilla_gru,
    /// GRU cell with linear before reset. Differs from the vanilla GRU
    /// in how the new memory gate is calculated:
    /// \f$c_t = tanh(W_c*x_t + b_{c_x} + r_t*(U_c*h_{t-1}+b_{c_h})) \f$
    /// LRB GRU expects 4 bias tensors on input:
    /// \f$[b_{u}, b_{r}, b_{c_x}, b_{c_h}]\f$
    lbr_gru = dnnl_lbr_gru,
    /// Binary add
    binary_add = dnnl_binary_add,
    /// Binary mul
    binary_mul = dnnl_binary_mul,
    /// Binary max
    binary_max = dnnl_binary_max,
    /// Binary min
    binary_min = dnnl_binary_min,
    /// Binary div
    binary_div = dnnl_binary_div,
    /// Binary sub
    binary_sub = dnnl_binary_sub,
    /// Binary greater than or equal
    binary_ge = dnnl_binary_ge,
    /// Binary greater than
    binary_gt = dnnl_binary_gt,
    /// Binary less than or equal
    binary_le = dnnl_binary_le,
    /// Binary less than
    binary_lt = dnnl_binary_lt,
    /// Binary equal
    binary_eq = dnnl_binary_eq,
    /// Binary not equal
    binary_ne = dnnl_binary_ne,
    /// Nearest Neighbor resampling method
    resampling_nearest = dnnl_resampling_nearest,
    /// Linear (Bilinear, Trilinear) resampling method
    resampling_linear = dnnl_resampling_linear,
    /// Reduction using max operation
    reduction_max = dnnl_reduction_max,
    /// Reduction using min operation
    reduction_min = dnnl_reduction_min,
    /// Reduction using sum operation
    reduction_sum = dnnl_reduction_sum,
    /// Reduction using mul operation
    reduction_mul = dnnl_reduction_mul,
    /// Reduction using mean operation
    reduction_mean = dnnl_reduction_mean,
    /// Reduction using norm_lp_max operation
    reduction_norm_lp_max = dnnl_reduction_norm_lp_max,
    /// Reduction using norm_lp_sum operation
    reduction_norm_lp_sum = dnnl_reduction_norm_lp_sum,
    /// Reduction using norm_lp_power_p_max operation
    reduction_norm_lp_power_p_max = dnnl_reduction_norm_lp_power_p_max,
    /// Reduction using norm_lp_power_p_sum operation
    reduction_norm_lp_power_p_sum = dnnl_reduction_norm_lp_power_p_sum,
    /// Depthwise scale/shift (fork-specific extension; not documented in
    /// upstream oneDNN — NOTE(review): confirm intended semantics).
    depthwise_scale_shift = dnnl_depthwise_scale_shift,
    /// Depthwise PReLU (fork-specific extension; not documented in
    /// upstream oneDNN — NOTE(review): confirm intended semantics).
    depthwise_prelu = dnnl_depthwise_prelu,
    /// Quantize-then-dequantize (fork-specific extension; not documented in
    /// upstream oneDNN — NOTE(review): confirm intended semantics).
    quantization_quantize_dequantize = dnnl_quantization_quantize_dequantize,
    /// Quantize only (fork-specific extension; not documented in upstream
    /// oneDNN — NOTE(review): confirm intended semantics).
    quantization_quantize = dnnl_quantization_quantize,
};

/// Converts algorithm kind enum value from C++ API to C API type.
/// @param aalgorithm C++ API algorithm kind enum value.
/// @returns Corresponding C API algorithm kind enum value.
inline dnnl_alg_kind_t convert_to_c(algorithm aalgorithm) {
    return static_cast<dnnl_alg_kind_t>(aalgorithm);
}
/// @} dnnl_api_attributes
/// @addtogroup dnnl_api_primitives_common
/// @{
/// Flags for normalization primitives.
///
/// Values can be combined; bitwise operators for this enum are provided by
/// DNNL_DEFINE_BITMASK_OPS further below in this header.
enum class normalization_flags : unsigned {
    /// Use no normalization flags. If specified, the library computes mean and
    /// variance on forward propagation for training and inference, outputs them
    /// on forward propagation for training, and computes the respective
    /// derivatives on backward propagation.
    none = dnnl_normalization_flags_none,
    /// Use global statistics. If specified, the library uses mean and
    /// variance provided by the user as an input on forward propagation and
    /// does not compute their derivatives on backward propagation. Otherwise,
    /// the library computes mean and variance on forward propagation for
    /// training and inference, outputs them on forward propagation for
    /// training, and computes the respective derivatives on backward
    /// propagation.
    use_global_stats = dnnl_use_global_stats,
    /// Use scale and shift parameters. If specified, the user is expected to
    /// pass scale and shift as inputs on forward propagation. On backward
    /// propagation of type #dnnl::prop_kind::backward, the library computes
    /// their derivatives. If not specified, the scale and shift parameters
    /// are not used by the library in any way.
    use_scale_shift = dnnl_use_scaleshift,
    /// Fuse normalization with ReLU. On training, normalization will require
    /// the workspace to implement backward propagation. On inference, the
    /// workspace is not required and behavior is the same as when normalization
    /// is fused with ReLU using the post-ops API.
    fuse_norm_relu = dnnl_fuse_norm_relu,
    /// Use scale parameter. If specified, the user is expected to pass scale as
    /// input on forward propagation. On backward propagation of type
    /// #dnnl::prop_kind::backward, the library computes its derivative.
    use_scale = dnnl_use_scale,
    /// Use shift parameter. If specified, the user is expected to pass shift as
    /// input on forward propagation. On backward propagation of type
    /// #dnnl::prop_kind::backward, the library computes its derivative.
    use_shift = dnnl_use_shift,
};

/// Converts normalization flags enum value from C++ API to C API type.
/// @param flags C++ API normalization flags enum value.
/// @returns Corresponding C API normalization flags enum value.
inline dnnl_normalization_flags_t convert_to_c(normalization_flags flags) {
    return static_cast<dnnl_normalization_flags_t>(flags);
}
/// @} dnnl_api_primitives_common
/// @addtogroup dnnl_api_rnn
/// @{
/// RNN cell flags.
///
/// Currently only a single (undefined) value exists; the enum is declared
/// as a bitmask (see DNNL_DEFINE_BITMASK_OPS below) for future extension.
enum class rnn_flags : unsigned {
    /// Undefined RNN flags
    undef = dnnl_rnn_flags_undef
};

/// Converts RNN cell flags enum value from C++ API to C API type.
/// @param flags C++ API RNN cell flags enum value.
/// @returns Corresponding C API RNN cell flags enum value.
inline dnnl_rnn_flags_t convert_to_c(rnn_flags flags) {
    return static_cast<dnnl_rnn_flags_t>(flags);
}
// Defines the full set of bitwise operators (|, &, ^, |=, &=, ^=, ~) for a
// scoped enum used as a bitmask, implemented by casting through the enum's
// underlying `unsigned` representation. (No comments inside the macro body:
// line splicing happens before comment removal, so a // comment on a
// continuation line would swallow the rest of the macro.)
#define DNNL_DEFINE_BITMASK_OPS(enum_name) \
    inline enum_name operator|(enum_name lhs, enum_name rhs) { \
        return static_cast<enum_name>( \
                static_cast<unsigned>(lhs) | static_cast<unsigned>(rhs)); \
    } \
\
    inline enum_name operator&(enum_name lhs, enum_name rhs) { \
        return static_cast<enum_name>( \
                static_cast<unsigned>(lhs) & static_cast<unsigned>(rhs)); \
    } \
\
    inline enum_name operator^(enum_name lhs, enum_name rhs) { \
        return static_cast<enum_name>( \
                static_cast<unsigned>(lhs) ^ static_cast<unsigned>(rhs)); \
    } \
\
    inline enum_name &operator|=(enum_name &lhs, enum_name rhs) { \
        lhs = static_cast<enum_name>( \
                static_cast<unsigned>(lhs) | static_cast<unsigned>(rhs)); \
        return lhs; \
    } \
\
    inline enum_name &operator&=(enum_name &lhs, enum_name rhs) { \
        lhs = static_cast<enum_name>( \
                static_cast<unsigned>(lhs) & static_cast<unsigned>(rhs)); \
        return lhs; \
    } \
\
    inline enum_name &operator^=(enum_name &lhs, enum_name rhs) { \
        lhs = static_cast<enum_name>( \
                static_cast<unsigned>(lhs) ^ static_cast<unsigned>(rhs)); \
        return lhs; \
    } \
\
    inline enum_name operator~(enum_name rhs) { \
        return static_cast<enum_name>(~static_cast<unsigned>(rhs)); \
    }

// Enable bitmask semantics for the flag enums declared above.
DNNL_DEFINE_BITMASK_OPS(normalization_flags)
DNNL_DEFINE_BITMASK_OPS(rnn_flags)
/// A direction of RNN primitive execution.
enum class rnn_direction {
    /// Unidirectional execution of RNN primitive from left to right.
    unidirectional_left2right = dnnl_unidirectional_left2right,
    /// Unidirectional execution of RNN primitive from right to left.
    unidirectional_right2left = dnnl_unidirectional_right2left,
    /// Bidirectional execution of RNN primitive with concatenation of the
    /// results.
    bidirectional_concat = dnnl_bidirectional_concat,
    /// Bidirectional execution of RNN primitive with summation of the
    /// results.
    bidirectional_sum = dnnl_bidirectional_sum,
    /// Alias for #dnnl::rnn_direction::unidirectional_left2right
    unidirectional = dnnl_unidirectional,
};

/// Converts RNN direction enum value from C++ API to C API type.
/// @param dir C++ API RNN direction enum value.
/// @returns Corresponding C API RNN direction enum value.
inline dnnl_rnn_direction_t convert_to_c(rnn_direction dir) {
    return static_cast<dnnl_rnn_direction_t>(dir);
}
/// @} dnnl_api_rnn
/// @addtogroup dnnl_api_primitives_common
/// @{
/// Primitive descriptor query specification.
///
/// In general, queries are not used with the C++ API because most queries are
/// implemented as class members.
///
/// See @ref dnnl_query_t for more information.
enum class query {
    /// no query
    undef = dnnl_query_undef,
    /// execution engine
    engine = dnnl_query_engine,
    /// primitive kind
    primitive_kind = dnnl_query_primitive_kind,
    /// number of inputs expected
    num_of_inputs_s32 = dnnl_query_num_of_inputs_s32,
    /// number of outputs expected
    num_of_outputs_s32 = dnnl_query_num_of_outputs_s32,
    /// runtime estimation (seconds), unimplemented
    time_estimate_f64 = dnnl_query_time_estimate_f64,
    /// memory required for scratchpad (bytes)
    ///
    /// @sa @ref dev_guide_attributes_scratchpad
    memory_consumption_s64 = dnnl_query_memory_consumption_s64,
    /// scratchpad engine
    ///
    /// engine to be used for creating scratchpad memory
    scratchpad_engine = dnnl_query_scratchpad_engine,
    /// reorder source engine
    reorder_src_engine = dnnl_query_reorder_src_engine,
    /// reorder destination engine
    reorder_dst_engine = dnnl_query_reorder_dst_engine,
    /// implementation name
    impl_info_str = dnnl_query_impl_info_str,
    /// propagation kind
    prop_kind = dnnl_query_prop_kind,
    /// operation descriptor
    op_d = dnnl_query_op_d,
    /// convolution descriptor
    convolution_d = dnnl_query_convolution_d,
    /// deconvolution descriptor
    deconvolution_d = dnnl_query_deconvolution_d,
    /// shuffle descriptor
    shuffle_d = dnnl_query_shuffle_d,
    /// eltwise descriptor
    eltwise_d = dnnl_query_eltwise_d,
    /// softmax descriptor
    softmax_d = dnnl_query_softmax_d,
    /// pooling descriptor
    pooling_d = dnnl_query_pooling_d,
    /// LRN descriptor
    lrn_d = dnnl_query_lrn_d,
    /// batch normalization descriptor
    batch_normalization_d = dnnl_query_batch_normalization_d,
    /// layer normalization descriptor
    layer_normalization_d = dnnl_query_layer_normalization_d,
    /// inner product descriptor
    inner_product_d = dnnl_query_inner_product_d,
    /// rnn descriptor
    rnn_d = dnnl_query_rnn_d,
    /// binary descriptor
    binary_d = dnnl_query_binary_d,
    /// logsoftmax descriptor
    logsoftmax_d = dnnl_query_logsoftmax_d,
    /// matmul descriptor
    matmul_d = dnnl_query_matmul_d,
    /// resampling descriptor
    resampling_d = dnnl_query_resampling_d,
    /// reduction descriptor
    reduction_d = dnnl_query_reduction_d,
    /// source memory desc
    src_md = dnnl_query_src_md,
    /// source gradient (diff) memory desc
    diff_src_md = dnnl_query_diff_src_md,
    /// weights memory desc
    weights_md = dnnl_query_weights_md,
    /// weights gradient (diff) memory desc
    diff_weights_md = dnnl_query_diff_weights_md,
    /// destination memory desc
    dst_md = dnnl_query_dst_md,
    /// destination gradient (diff) memory desc
    diff_dst_md = dnnl_query_diff_dst_md,
    /// workspace memory desc
    workspace_md = dnnl_query_workspace_md,
    /// scratchpad memory desc
    scratchpad_md = dnnl_query_scratchpad_md,
    /// memory desc of an execute argument
    exec_arg_md = dnnl_query_exec_arg_md,
};
/// Converts a query enum value from the C++ API to the C API type.
/// @param aquery C++ API query enum value.
/// @returns The corresponding C API query enum value.
inline dnnl_query_t convert_to_c(query aquery) {
    const auto c_query = static_cast<dnnl_query_t>(aquery);
    return c_query;
}
/// @} dnnl_api_primitives_common
/// @} dnnl_api_primitives
/// @addtogroup dnnl_api_engine Engine
///
/// An abstraction of a computational device: a CPU, a specific GPU
/// card in the system, etc. Most primitives are created to execute
/// computations on one specific engine. The only exceptions are reorder
/// primitives that transfer data between two different engines.
///
/// @sa @ref dev_guide_basic_concepts
///
/// @{
/// @cond DO_NOT_DOCUMENT_THIS
// Teaches the generic handle<> wrapper how to release a dnnl_engine_t:
// the handle's destructor calls this to invoke the C API destroy function.
template <>
struct handle_traits<dnnl_engine_t> {
    static dnnl_status_t destructor(dnnl_engine_t p) {
        return dnnl_engine_destroy(p);
    }
};
/// @endcond
/// An execution engine.
struct engine : public handle<dnnl_engine_t> {
friend struct primitive;
friend struct reorder;
    /// Kinds of engines.
    ///
    /// Values mirror the C API #dnnl_engine_kind_t constants.
    enum class kind {
        /// An unspecified engine
        any = dnnl_any_engine,
        /// CPU engine
        cpu = dnnl_cpu,
        /// GPU engine
        gpu = dnnl_gpu,
    };
    /// Inherits the constructors of the underlying handle<dnnl_engine_t>.
    using handle::handle;
    /// Constructs an empty engine. An empty engine cannot be used in any
    /// operations.
    engine() = default;
/// Returns the number of engines of a certain kind.
///
/// @param akind The kind of engines to count.
/// @returns The number of engines of the specified kind.
static size_t get_count(kind akind) {
return dnnl_engine_get_count(convert_to_c(akind));
}
/// Constructs an engine.
///
/// @param akind The kind of engine to construct.
/// @param index The index of the engine. Must be less than the value
/// returned by #get_count() for this particular kind of engine.
engine(kind akind, size_t index) {
dnnl_engine_t engine;
error::wrap_c_api(
dnnl_engine_create(&engine, convert_to_c(akind), index),
"could not create an engine");
reset(engine);
}
/// Constructs an engine based on a primitive from the primitive
/// descriptor @p pd by querying its engine.
///
/// @param pd The primitive descriptor to query.
engine(const handle<dnnl_primitive_desc_t> &pd) {
dnnl_engine_t c_engine;
error::wrap_c_api(
dnnl_primitive_desc_query(pd.get(),
dnnl::convert_to_c(dnnl::query::engine), 0, &c_engine),
"could not get an engine from a primitive_desc");
reset(c_engine, true);
}
/// Returns the kind of the engine.
/// @returns The kind of the engine.
kind get_kind() const {
dnnl_engine_kind_t kind;
error::wrap_c_api(dnnl_engine_get_kind(get(), &kind),
"could not get kind of an engine");
return static_cast<engine::kind>(kind);
}
    /// Returns the engine of a primitive descriptor.
    ///
    /// @param pd The primitive descriptor to query.
    /// @returns A weak handle to the engine that the primitive descriptor was
    ///     created with.
    template <typename primitive_desc>
    static engine query(const primitive_desc &pd) {
        // Delegates to the private query(pd, what) overload declared below.
        return query(pd, dnnl::query::engine);
    }
private: