
Commit 7c71080

src: cpu: aarch64: Fix upstream CI failing test for matmul and conv
1 parent 7e450f8 commit 7c71080

File tree: 2 files changed, +14 −9 lines

src/cpu/aarch64/acl_gemm_convolution.cpp  (+9 −6)
@@ -70,23 +70,26 @@ status_t acl_gemm_convolution_fwd_t<src_t, wei_t, dst_t, bia_t>::pd_t::init(
     CHECK(acl_convolution_utils::acl_init_conf(
             acp_, src_md_, weights_md_, dst_md_, bias_md_, *desc(), *attr()));
 
+    Op conv;
+
+    auto scratchpad = scratchpad_registry().registrar();
+    auto status = init_scratchpad(conv, scratchpad, gemm_conv_keys, engine,
+            post_ops, attr_.post_ops_, acp_.act_info, acp_.use_dst_acc_for_sum,
+            dst_md_, bias_md_, acp_.is_quantized);
+    if (status != status::success) return status::unimplemented;
+
     // Validate convolution manually to check for return status
     ACL_CHECK_VALID(Op::validate(&acp_.src_tensor_info, &acp_.wei_tensor_info,
             acp_.with_bias ? &acp_.bia_tensor_info : nullptr,
             &acp_.dst_tensor_info, acp_.padstride_info, acp_.weights_info,
             acp_.dilation_info, acp_.act_info, acp_.fast_math));
 
-    Op conv;
     conv.configure(&acp_.src_tensor_info, &acp_.wei_tensor_info,
             acp_.with_bias ? &acp_.bia_tensor_info : nullptr,
             &acp_.dst_tensor_info, acp_.padstride_info, acp_.weights_info,
             acp_.dilation_info, acp_.act_info, acp_.fast_math);
 
-    auto scratchpad = scratchpad_registry().registrar();
-    const auto mem_req = conv.workspace();
-    return init_scratchpad(conv, scratchpad, gemm_conv_keys, engine, post_ops,
-            attr_.post_ops_, acp_.act_info, acp_.use_dst_acc_for_sum, dst_md_,
-            bias_md_, acp_.is_quantized);
+    return status::success;
 }
 
 template <data_type_t src_t, data_type_t wei_t, data_type_t dst_t,
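
The hunk above moves the scratchpad setup ahead of the ACL validate/configure calls and converts its failure into a status::unimplemented return, so unsupported configurations are rejected before the operator is configured. A minimal, self-contained sketch of that fail-fast dispatch pattern is below; the types and the init_scratchpad stand-in are hypothetical and not the oneDNN API.

// Sketch only: fail fast with "unimplemented" before configuring the
// heavyweight operator, mirroring the reordering in the diff above.
#include <cstdio>

enum class status { success, unimplemented };

struct op {
    void configure() { std::printf("configured\n"); }
};

// Hypothetical stand-in for init_scratchpad(): may reject the configuration.
status init_scratchpad(op &, bool supported) {
    return supported ? status::success : status::unimplemented;
}

status init(bool supported) {
    op conv;
    // Reserve scratchpad first; if that fails, report unimplemented so the
    // dispatcher can fall back to another implementation without ever
    // configuring this one.
    if (init_scratchpad(conv, supported) != status::success)
        return status::unimplemented;

    // Only configure the operator once the scratchpad request succeeded.
    conv.configure();
    return status::success;
}

int main() {
    std::printf("supported: %d\n", static_cast<int>(init(true)));
    std::printf("unsupported: %d\n", static_cast<int>(init(false)));
    return 0;
}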

src/cpu/aarch64/matmul/acl_lowp_matmul_sq.cpp  (+5 −3)
@@ -75,8 +75,10 @@ status_t acl_lowp_matmul_sq_t::pd_t::init(engine_t *engine) {
                             && wei_d.data_type() == s8
                             && src_d.data_type() == s8
                     ? dst_d.data_type() == s8
-                    : dst_d.data_type() == u8
-                            && utils::one_of(bia_d.data_type(), f32, undef),
+                    : dst_d.data_type() == u8,
+            VERBOSE_UNSUPPORTED_DT_CFG);
+
+    VDISPATCH_MATMUL(utils::one_of(bia_d.data_type(), f32, undef),
             VERBOSE_UNSUPPORTED_DT_CFG);
 
     VDISPATCH_MATMUL(src_d.matches_tag(format_tag::ab)
@@ -223,7 +225,7 @@ status_t acl_lowp_matmul_sq_t::execute(const exec_ctx_t &ctx) const {
     acl_obj.wei_tensor.info()->set_quantization_info(
             arm_compute::QuantizationInfo(*wei_scale, -wei_zero_point, true));
 
-    // for efficiency reasons, oneDNN saves the inverse of the destination
+    // for efficiency reasons, OneDNN saves the inverse of the destination
    acl_obj.dst_tensor.info()->set_quantization_info(
             arm_compute::QuantizationInfo(
                     1.0 / (*dst_scale), dst_zero_point, true));
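
In the first hunk, the bias data-type check is hoisted out of the combined condition into its own VDISPATCH_MATMUL. Because && binds tighter than ?:, the old expression applied the bias check only on the else branch of the ternary (the u8-destination case); the new standalone check applies it regardless of which branch is taken. A minimal illustration of the precedence involved (generic booleans, not the oneDNN condition itself):

// `cond ? a : b && c` parses as `cond ? a : (b && c)`, so `c` is ignored
// whenever `cond` is true. Splitting `c` into its own check, as the diff
// does with a second VDISPATCH_MATMUL, enforces it in both cases.
#include <cstdio>

int main() {
    const bool cond = true, a = true, b = true, c = false;

    bool combined = cond ? a : b && c; // c never evaluated here: result is a
    bool split = (cond ? a : b) && c;  // c is always required: result is false

    std::printf("combined=%d split=%d\n", combined, split); // combined=1 split=0
    return 0;
}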
