
Commit 1898952

benchdnn: reorder: update skip conditions and add messages

1 parent f16dd91 commit 1898952

File tree

1 file changed: +144 -80 lines changed
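
Note: the recurring pattern in this change is to split each aggregated skip check (e.g. `if (!dt_ok || !attr_ok || !rt_ok || !masks_ok)`) into per-condition early returns, each printing a `[SKIP]` or `[INVALID]` message with the source location before setting the skip state. A minimal, self-contained sketch of the before/after shape; the names (`skip_check_before`, `skip_check_after`, the `*_ok` flags, `res_t`) are hypothetical stand-ins for benchdnn's real types and conditions:

// Self-contained sketch of the refactoring pattern applied by this commit.
#include <cstdio>

struct res_t {
    bool skipped = false;
};

// Before: one aggregated check. The case is skipped, but the log cannot
// say which of the four conditions failed.
void skip_check_before(bool dt_ok, bool attr_ok, bool rt_ok, bool masks_ok,
        res_t *res) {
    if (!dt_ok || !attr_ok || !rt_ok || !masks_ok) {
        res->skipped = true;
        return;
    }
}

// After: one early return per condition, each with its own message carrying
// the source location, mirroring the BENCHDNN_PRINT calls in this commit.
void skip_check_after(bool dt_ok, bool attr_ok, bool rt_ok, bool masks_ok,
        res_t *res) {
    if (!dt_ok) {
        printf("[SKIP][%s:%d]: unsupported dst data type.\n", __FILE__,
                __LINE__);
        res->skipped = true;
        return;
    }
    if (!attr_ok) {
        printf("[SKIP][%s:%d]: unsupported attributes.\n", __FILE__, __LINE__);
        res->skipped = true;
        return;
    }
    if (!rt_ok) {
        printf("[SKIP][%s:%d]: runtime dims not supported.\n", __FILE__,
                __LINE__);
        res->skipped = true;
        return;
    }
    if (!masks_ok) {
        printf("[SKIP][%s:%d]: mask mismatch.\n", __FILE__, __LINE__);
        res->skipped = true;
        return;
    }
}

int main() {
    res_t res;
    skip_check_after(true, false, true, true, &res); // prints the attr message
    return res.skipped ? 0 : 1;
}

The first failing condition returns immediately, so each skipped case reports exactly one cause.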

tests/benchdnn/reorder/reorder.cpp
@@ -208,114 +208,164 @@ void skip_unimplemented_prb(const prb_t *prb, res_t *res) {
     skip_unimplemented_sum_po(prb->attr, res, dnnl_reorder, sdt);
     skip_unimplemented_prelu_po(prb->attr, res, dnnl_reorder);
 
-    bool scales_ok = true;
+    if (is_cpu()) {
+        bool scales_ok = true;
 #if !defined(DNNL_X64) || DNNL_X64 == 0
-    {
-        // reference reorder supports only a subset of scale policies
-        const std::vector<policy_t> supported_policy = {policy_t::COMMON,
-                policy_t::PER_DIM_0, policy_t::PER_DIM_1, policy_t::PER_DIM_01};
-
-        for (auto arg : {DNNL_ARG_SRC, DNNL_ARG_DST}) {
-            scales_ok = std::any_of(supported_policy.cbegin(),
-                    supported_policy.cend(), [&](const policy_t policy) {
-                        return prb->attr.scales.get(arg).policy == policy;
-                    });
-        }
-    }
-#endif
-    if (!scales_ok) {
-        res->state = SKIPPED;
-        res->reason = skip_reason::case_not_supported;
-        return;
-    }
-
-    if (prb->is_reorder_with_compensation(FLAG_ANY)) {
-        // Compensation is supported for s8 dst data type.
-        const bool dt_ok = ddt == dnnl_s8;
-        // Compensation can be paired with dst scale only.
-        const bool attr_ok
-                = prb->attr.zero_points.is_def() && prb->attr.post_ops.is_def();
-        // Compensation does not support runtime dims.
-        const bool rt_ok = prb->runtime_dim_mask == 0;
-
-        // Compensation and scales mask should coincide
-        const auto comp_mask = prb->get_compensation_mask(FLAG_ANY);
-        bool masks_ok = true;
-        for (auto arg : {DNNL_ARG_SRC, DNNL_ARG_DST}) {
-            const auto &e = prb->attr.scales.get(arg);
-            if (!e.is_def()) {
-                int e_mask = attr_t::get_default_mask(e.policy);
-                masks_ok = masks_ok && e_mask == comp_mask;
+        {
+            // reference reorder supports only a subset of scale policies
+            const std::vector<policy_t> supported_policy
+                    = {policy_t::COMMON, policy_t::PER_DIM_0,
+                            policy_t::PER_DIM_1, policy_t::PER_DIM_01};
+
+            for (auto arg : {DNNL_ARG_SRC, DNNL_ARG_DST}) {
+                scales_ok = std::any_of(supported_policy.cbegin(),
+                        supported_policy.cend(), [&](const policy_t policy) {
+                            return prb->attr.scales.get(arg).policy == policy;
+                        });
             }
         }
-
-        if (!dt_ok || !attr_ok || !rt_ok || !masks_ok) {
+#endif
+        if (!scales_ok) {
+            BENCHDNN_PRINT(2,
+                    "[SKIP][%s:%d]: Generic CPU doesn't support the specified "
+                    "scale mask.\n",
+                    __FILE__, __LINE__);
             res->state = SKIPPED;
             res->reason = skip_reason::case_not_supported;
             return;
         }
 
+        if (prb->is_reorder_with_compensation(FLAG_ANY)) {
+            const bool dt_ok = ddt == dnnl_s8;
+            if (!dt_ok) {
+                BENCHDNN_PRINT(2,
+                        "[SKIP][%s:%d]: Compensation is supported only for s8 "
+                        "dst data type.\n",
+                        __FILE__, __LINE__);
+                res->state = SKIPPED;
+                res->reason = skip_reason::case_not_supported;
+                return;
+            }
+
+            const bool attr_ok = prb->attr.zero_points.is_def()
+                    && prb->attr.post_ops.is_def();
+            if (!attr_ok) {
+                BENCHDNN_PRINT(2,
+                        "[SKIP][%s:%d]: Compensation is supported with scale "
+                        "attribute only.\n",
+                        __FILE__, __LINE__);
+                res->state = SKIPPED;
+                res->reason = skip_reason::case_not_supported;
+                return;
+            }
+
+            const bool rt_ok = prb->runtime_dim_mask == 0;
+            if (!rt_ok) {
+                BENCHDNN_PRINT(2,
+                        "[SKIP][%s:%d]: Compensation is not supported for "
+                        "runtime dimensions.\n",
+                        __FILE__, __LINE__);
+                res->state = SKIPPED;
+                res->reason = skip_reason::case_not_supported;
+                return;
+            }
+
+            // Compensation and scales masks should coincide.
+            const auto comp_mask = prb->get_compensation_mask(FLAG_ANY);
+            bool masks_ok = true;
+            for (auto arg : {DNNL_ARG_SRC, DNNL_ARG_DST}) {
+                const auto &e = prb->attr.scales.get(arg);
+                if (!e.is_def()) {
+                    int e_mask = attr_t::get_default_mask(e.policy);
+                    masks_ok = masks_ok && e_mask == comp_mask;
+                }
+            }
+            if (!masks_ok) {
+                BENCHDNN_PRINT(2,
+                        "[SKIP][%s:%d]: Compensation mask doesn't coincide "
+                        "with scaling mask.\n",
+                        __FILE__, __LINE__);
+                res->state = SKIPPED;
+                res->reason = skip_reason::case_not_supported;
+                return;
+            }
+
 #if !defined(DNNL_X64) || DNNL_X64 == 0
-        // Simple reorder doesn't provide decent coverage for compensated cases.
-        // Shut them down unconditionally by default.
-        res->state = SKIPPED;
-        res->reason = skip_reason::case_not_supported;
-        return;
+            // Simple reorder doesn't provide decent coverage for compensated
+            // cases. Shut them down unconditionally by default.
+            BENCHDNN_PRINT(2,
+                    "[SKIP][%s:%d]: Generic CPU doesn't support compensation "
+                    "cases uniformly.\n",
+                    __FILE__, __LINE__);
+            res->state = SKIPPED;
+            res->reason = skip_reason::case_not_supported;
+            return;
 #endif
-    }
-
-    // Destination scale is not supported for runtime dimensions since the
-    // implementation logic inverts dst scales and requires scratchpad for
-    // `mask > 0` cases which is impossible to estimate with rt dims.
-    const auto &dst_scales = prb->attr.scales.get(DNNL_ARG_DST);
-    if (!dst_scales.is_def() && attr_t::get_default_mask(dst_scales.policy) > 0
-            && prb->runtime_dim_mask != 0) {
-        res->state = SKIPPED;
-        res->reason = skip_reason::case_not_supported;
-        return;
-    }
+        }
 
-    // Compensation is supported through jit reorder only, but jit reorder
-    // doesn't support different masks for source and destination scales.
-    const auto &src_scales = prb->attr.scales.get(DNNL_ARG_SRC);
-    if (!src_scales.is_def() && !dst_scales.is_def()) {
-        if (attr_t::get_default_mask(src_scales.policy)
-                != attr_t::get_default_mask(dst_scales.policy)
-                && prb->is_reorder_with_compensation(FLAG_ANY)) {
+        const auto &dst_scales = prb->attr.scales.get(DNNL_ARG_DST);
+        if (!dst_scales.is_def()
+                && attr_t::get_default_mask(dst_scales.policy) > 0
+                && prb->runtime_dim_mask != 0) {
+            // Destination scale is not supported for runtime dimensions since
+            // the implementation logic inverts dst scales and requires
+            // scratchpad for `mask > 0` cases, which is impossible to estimate
+            // with runtime dims.
+            BENCHDNN_PRINT(2,
+                    "[SKIP][%s:%d]: Destination scale is not supported for "
+                    "runtime dimensions.\n",
+                    __FILE__, __LINE__);
             res->state = SKIPPED;
             res->reason = skip_reason::case_not_supported;
             return;
         }
-    }
 
-    if (is_cpu()) {
-        // Int4 reorder support is limited on CPU.
+        const auto &src_scales = prb->attr.scales.get(DNNL_ARG_SRC);
+        if (!src_scales.is_def() && !dst_scales.is_def()) {
+            // Compensation is supported through jit reorder only, but jit
+            // reorder doesn't support different masks for source and
+            // destination scales.
+            if (attr_t::get_default_mask(src_scales.policy)
+                    != attr_t::get_default_mask(dst_scales.policy)
+                    && prb->is_reorder_with_compensation(FLAG_ANY)) {
+                BENCHDNN_PRINT(2,
+                        "[SKIP][%s:%d]: Compensation cases where both scales "
+                        "are specified but with different masks aren't "
+                        "supported.\n",
+                        __FILE__, __LINE__);
+                res->state = SKIPPED;
+                res->reason = skip_reason::case_not_supported;
+                return;
+            }
+        }
+
         if (sdt == dnnl_s4 || ddt == dnnl_s4 || sdt == dnnl_u4
                 || ddt == dnnl_u4) {
+            BENCHDNN_PRINT(2, "[SKIP][%s:%d]: Int4 support is limited.\n",
+                    __FILE__, __LINE__);
             res->state = SKIPPED;
             res->reason = skip_reason::case_not_supported;
             return;
         }
 
-        // CPU reorder doesn't support (xf8,xf16)<-->s32 combinations.
         const bool s32_src_ok = IMPLICATION(sdt == dnnl_s32,
                 ddt != dnnl_f8_e5m2 && ddt != dnnl_f8_e4m3 && ddt != dnnl_bf16
                         && ddt != dnnl_f16);
         const bool s32_dst_ok = IMPLICATION(ddt == dnnl_s32,
                 sdt != dnnl_f8_e5m2 && sdt != dnnl_f8_e4m3 && sdt != dnnl_bf16
                         && sdt != dnnl_f16);
         if (!s32_src_ok || !s32_dst_ok) {
+            BENCHDNN_PRINT(2,
+                    "[SKIP][%s:%d]: Mixed (xf8,xf16)<-->s32 support is "
+                    "limited.\n",
+                    __FILE__, __LINE__);
             res->state = SKIPPED;
             res->reason = skip_reason::case_not_supported;
             return;
         }
 
-        // CPU f16 reorders only support f16<->f32 combinations
         const bool f16_src_ok = IMPLICATION(
                 sdt == dnnl_f16, ddt == dnnl_f16 || ddt == dnnl_f32);
         const bool f16_dst_ok = IMPLICATION(
                 ddt == dnnl_f16, sdt == dnnl_f16 || sdt == dnnl_f32);
         if (!f16_src_ok || !f16_dst_ok) {
+            BENCHDNN_PRINT(2, "[SKIP][%s:%d]: f16 support is limited.\n",
+                    __FILE__, __LINE__);
             res->state = SKIPPED;
             res->reason = skip_reason::case_not_supported;
             return;
@@ -329,27 +379,32 @@ void skip_unimplemented_prb(const prb_t *prb, res_t *res) {
                 = IMPLICATION(sdt == dnnl_f8_e5m2 || sdt == dnnl_f8_e4m3,
                         ddt == dnnl_f16 || ddt == dnnl_f32);
         if (!xf8_src_ok || !xf8_dst_ok) {
+            BENCHDNN_PRINT(2, "[SKIP][%s:%d]: f8 support is limited.\n",
+                    __FILE__, __LINE__);
             res->state = SKIPPED;
             res->reason = skip_reason::case_not_supported;
             return;
         }
     }
 
     if (is_gpu()) {
-        // GPU does not support run-time dims.
-        // Reorders w/ compensation are not supported by design: zp_comp is done
-        // in kernels directly, but s8s8 instructions are available in HW.
-        if (prb->runtime_dim_mask != 0
-                || prb->is_reorder_with_compensation(FLAG_ANY)) {
+        if (prb->runtime_dim_mask != 0) {
+            BENCHDNN_PRINT(2,
+                    "[SKIP][%s:%d]: GPU doesn't support runtime dimensions.\n",
+                    __FILE__, __LINE__);
             res->state = SKIPPED;
             res->reason = skip_reason::case_not_supported;
             return;
         }
 
-        // GPU doesn't support f8_e5m2/f8_e4m3.
-        const bool is_xf8 = prb->sdt == dnnl_f8_e5m2 || prb->sdt == dnnl_f8_e4m3
-                || prb->ddt == dnnl_f8_e5m2 || prb->ddt == dnnl_f8_e4m3;
-        if (is_xf8) {
+        if (prb->is_reorder_with_compensation(FLAG_ANY)) {
+            // Reorders w/ compensation are not supported by design: zp_comp is
+            // done in kernels directly, but s8s8 instructions are available in
+            // HW.
+            BENCHDNN_PRINT(2,
+                    "[SKIP][%s:%d]: GPU doesn't support cases with "
+                    "compensation.\n",
+                    __FILE__, __LINE__);
             res->state = SKIPPED;
             res->reason = skip_reason::case_not_supported;
             return;
@@ -358,31 +413,40 @@ void skip_unimplemented_prb(const prb_t *prb, res_t *res) {
 }
 
 void skip_invalid_prb(const prb_t *prb, res_t *res) {
-    // No sense in cross engine reorders when one of devices is switched off.
 #if DNNL_CPU_RUNTIME == DNNL_RUNTIME_NONE \
         || DNNL_GPU_RUNTIME == DNNL_RUNTIME_NONE
     auto cross_engine = prb->cross_engine;
     if (cross_engine == CPU2GPU || cross_engine == GPU2CPU) {
+        BENCHDNN_PRINT(2,
+                "[INVALID][%s:%d]: Cross-engine case isn't supported when just "
+                "one runtime is enabled.\n",
+                __FILE__, __LINE__);
         res->state = SKIPPED;
         res->reason = skip_reason::invalid_case;
         return;
     }
 #endif
 
-    // Zero-points can't be used with sum post-op.
     if (!prb->attr.zero_points.is_def(DNNL_ARG_DST)
             && prb->attr.post_ops.find(attr_t::post_ops_t::kind_t::SUM) != -1) {
+        BENCHDNN_PRINT(2,
+                "[INVALID][%s:%d]: Zero-points can't be used with sum "
+                "post-op.\n",
+                __FILE__, __LINE__);
         res->state = SKIPPED;
        res->reason = skip_reason::invalid_case;
         return;
     }
 
-    // only integral data types can have zero points
     const bool is_src_zp_ok = is_integral_dt(prb->sdt)
             || prb->attr.zero_points.is_def(DNNL_ARG_SRC);
     const bool is_dst_zp_ok = is_integral_dt(prb->ddt)
             || prb->attr.zero_points.is_def(DNNL_ARG_DST);
     if (!(is_src_zp_ok && is_dst_zp_ok)) {
+        BENCHDNN_PRINT(2,
+                "[INVALID][%s:%d]: Non-integral data types don't support "
+                "zero-points.\n",
+                __FILE__, __LINE__);
         res->state = SKIPPED;
         res->reason = skip_reason::invalid_case;
         return;
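
Note: all the messages above are printed at level 2, so they surface only when benchdnn's verbosity is raised to at least 2; the exact flag spelling aside, the gating is on a global verbose level. A simplified sketch, not the exact oneDNN definition, of how a verbosity-gated macro like `BENCHDNN_PRINT` behaves (`PRINT_V` is a hypothetical stand-in):

#include <cstdio>

// Global verbosity knob; benchdnn sets its equivalent from the command line.
static int verbose = 0;

// Hypothetical stand-in for BENCHDNN_PRINT: emit the message only when the
// requested level is at or below the current verbosity, then flush so the
// message is visible even if the process aborts later.
#define PRINT_V(level, ...) \
    do { \
        if (verbose >= (level)) { \
            printf(__VA_ARGS__); \
            fflush(stdout); \
        } \
    } while (0)

int main() {
    // Silent: verbose is 0, below the requested level 2.
    PRINT_V(2, "[SKIP][%s:%d]: Int4 support is limited.\n", __FILE__, __LINE__);
    verbose = 2;
    // Printed: verbose now satisfies the level-2 threshold.
    PRINT_V(2, "[SKIP][%s:%d]: Int4 support is limited.\n", __FILE__, __LINE__);
    return 0;
}

Level 2 keeps these diagnostics out of default runs while making skipped-case triage a one-flag change.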

0 commit comments
