
Commit 89155b5

xe: jit: address clang-tidy complaints
1 parent 55825ba · commit 89155b5

21 files changed (+54 -46 lines)

src/gpu/intel/jit/conv/pipeline.cpp (+3 -3)

```diff
@@ -622,9 +622,9 @@ struct compute_params_t {
         : slm_bufs(slm_bufs)
         , gmem_bufs(gmem_bufs)
         , slm_buf_size(slm_buf_size)
-        , prefetch_bufs(prefetch_bufs) {
-        use_slm = (slm_buf_size > 0);
-        use_prefetch = (prefetch_bufs > 0);
+        , prefetch_bufs(prefetch_bufs)
+        , use_slm(slm_buf_size > 0)
+        , use_prefetch(prefetch_bufs > 0) {
         gpu_assert(!use_slm || !use_prefetch)
                 << "Can't have both SLM buffering and prefetch enabled.";
         if (use_slm) {
```
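
This is the pattern that recurs through most of the commit (see also plan.cpp, tiler.cpp, send_plan.cpp, and bench.hpp below): assignments in a constructor body become member-initializer-list entries, which is what clang-tidy's cppcoreguidelines-prefer-member-initializer check suggests (the check name appears verbatim in the ir.cpp NOLINT further down). A minimal, self-contained sketch of the before/after; `params_t` and its members are hypothetical stand-ins, not code from this repo:

```cpp
#include <cassert>

struct params_t {
    int slm_buf_size;
    bool use_slm;

    // After the fix: use_slm is initialized in the member-initializer
    // list rather than assigned in the body, so it is never left in an
    // indeterminate state between construction steps.
    explicit params_t(int slm_buf_size)
        : slm_buf_size(slm_buf_size), use_slm(slm_buf_size > 0) {}
};

int main() {
    params_t p(64);
    assert(p.use_slm);
    return 0;
}
```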

src/gpu/intel/jit/conv/plan.cpp (+10 -11)

```diff
@@ -1263,17 +1263,16 @@ struct fma_layout_hint_t {
 };
 
 struct fma_context_t {
-    fma_context_t(const conv_config_t &cfg) {
-        hw = cfg.hw();
-        simd = cfg.simd();
-        vec_size = cfg.vec_size();
-        fma = cfg.fma_kind();
-        a_type = type_t(cfg.prb().a_data_type);
-        b_type = type_t(cfg.prb().b_data_type);
-        acc_type = get_accumulation_type(cfg, a_type, b_type);
-        is_src1_broadcast = !cfg.prb().is_dw;
-        ab_swap_transpose_ = cfg.prb().ab_swap_transpose;
-    }
+    fma_context_t(const conv_config_t &cfg)
+        : hw(cfg.hw())
+        , simd(cfg.simd())
+        , vec_size(cfg.vec_size())
+        , fma(cfg.fma_kind())
+        , a_type(cfg.prb().a_data_type)
+        , b_type(cfg.prb().b_data_type)
+        , acc_type(get_accumulation_type(cfg, a_type, b_type))
+        , is_src1_broadcast(!cfg.prb().is_dw)
+        , ab_swap_transpose_(cfg.prb().ab_swap_transpose) {}
 
     fma_layout_hint_t &layout_hint(abc_kind_t abc) {
         return (abc == abc_kind_t::a) ? a_layout_hint : b_layout_hint;
```

src/gpu/intel/jit/conv/tiler.cpp (+2 -2)

```diff
@@ -597,7 +597,8 @@ class conv_blocking_checker_t : public blocking_checker_t {
 
 private:
     struct context_t {
-        context_t(const blocking_t &blk, const conv_config_t &cfg) : blk(blk) {
+        context_t(const blocking_t &blk, const conv_config_t &cfg)
+            : blk(blk), dpas_2x_depth(get_dpas_2x_depth(blk, cfg)) {
             auto &prb = cfg.prb();
             auto gemm_iter = to_gemm(blk.iter(), prb);
             auto gemm_loop = to_gemm(blk.loop(), prb);
@@ -611,7 +612,6 @@ class conv_blocking_checker_t : public blocking_checker_t {
             m_tg = gemm_tg.get(pvars::m, 1);
             n_tg = gemm_tg.get(pvars::n, 1);
             k_tg = gemm_tg.get(pvars::k, 1);
-            dpas_2x_depth = get_dpas_2x_depth(blk, cfg);
         }
 
         bool get_dpas_2x_depth(
```

src/gpu/intel/jit/ir/core.hpp (+1 -0)

```diff
@@ -869,6 +869,7 @@ class ir_mutator_t {
     template <typename T>
     std::vector<T> mutate(const std::vector<T> &v) {
         std::vector<T> new_v;
+        new_v.reserve(v.size());
         for (auto &e : v)
             new_v.push_back(mutate(e));
         return new_v;
```
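
Several files in this commit (core.hpp, epilogue.cpp, hoist.cpp, search.cpp) gain a `reserve()` call before a `push_back` loop, which is the fix suggested by what is likely clang-tidy's performance-inefficient-vector-operation check: the destination size is known up front, so one allocation replaces repeated geometric regrowth. A self-contained sketch; `doubled()` is a hypothetical helper:

```cpp
#include <vector>

std::vector<int> doubled(const std::vector<int> &v) {
    std::vector<int> out;
    out.reserve(v.size()); // one allocation instead of repeated regrowth
    for (int e : v)
        out.push_back(e * 2);
    return out;
}

int main() {
    auto r = doubled({1, 2, 3});
    return r[2] == 6 ? 0 : 1;
}
```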

src/gpu/intel/jit/ir/epilogue.cpp (+1 -0)

```diff
@@ -1030,6 +1030,7 @@ class epilogue_builder_t {
 
     // Create sub-tensors for post-ops.
     std::vector<post_op_tensor_t> sub_po_tensors;
+    sub_po_tensors.reserve(post_op_tensors_.size());
     for (auto &t : post_op_tensors_)
         sub_po_tensors.push_back(t.create_sub_tensor(tile));
 
```

src/gpu/intel/jit/ir/ir.cpp (+1 -1)

```diff
@@ -387,7 +387,7 @@ class alloc_injector_t : public ir_mutator_t {
         buf_total_refs_ = buf_cur_refs_;
         for (auto &kv : buf_cur_refs_)
             kv.second = 0;
-        in_ctor_ = false;
+        in_ctor_ = false; // NOLINT(cppcoreguidelines-prefer-member-initializer)
     }
 
 #define HANDLE_IR_OBJECT(type) \
```
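
Here the diagnostic is suppressed rather than applied, presumably because `in_ctor_` must stay true while the constructor's reference-counting pass runs and may only flip to false at the end of the body; moving it into the initializer list would change behavior. A sketch of the suppression idiom with a hypothetical `injector_t`:

```cpp
struct injector_t {
    bool in_ctor_ = true;

    injector_t() {
        // ... work that relies on in_ctor_ being true ...
        // The assignment must remain at the end of the body, so the
        // check is silenced for this one line instead of "fixed":
        in_ctor_ = false; // NOLINT(cppcoreguidelines-prefer-member-initializer)
    }
};

int main() {
    injector_t inj;
    return inj.in_ctor_ ? 1 : 0;
}
```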

src/gpu/intel/jit/ir/message_patterns.hpp (+1 -1)

```diff
@@ -500,7 +500,7 @@ struct uniform_send_idiom_t final {
                 ret.begin(), ret.end(), [&](const hint_t &a, const hint_t &b) {
                     return a.size() > b.size();
                 });
-        if (ret.size() && filtered_ret.size()
+        if (!ret.empty() && !filtered_ret.empty()
                 && ret[0].size() > filtered_ret[0].size())
            gpu_warning() << "Optimal send hint disabled: " << ret[0];
 
```
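
Using `size()` in a boolean context is replaced with `!empty()`, the fix readability-container-size-empty suggests: it states intent directly and `empty()` is guaranteed O(1) for every standard container. A minimal sketch:

```cpp
#include <iostream>
#include <vector>

int main() {
    std::vector<int> ret = {3, 1, 2};
    // Before: if (ret.size()) ...  After: the emptiness test is explicit.
    if (!ret.empty()) std::cout << ret[0] << '\n';
    return 0;
}
```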

src/gpu/intel/jit/ir/send_plan.cpp (+6 -4)

```diff
@@ -348,9 +348,11 @@ class tdim_info_t {
     tdim_info_t() = default;
     tdim_info_t(
             int tidx, const tdim_t &tdim, const view_t &view, int64_t block = 1)
-        : tidx_(tidx), block_(block), dim_(&tdim) {
-        base_mod_ = to_base(tdim, view.vvars());
-        size_ = view.tlayout().dim(tidx);
+        : tidx_(tidx)
+        , size_(view.tlayout().dim(tidx))
+        , base_mod_(to_base(tdim, view.vvars()))
+        , block_(block)
+        , dim_(&tdim) {
         for (dim_idx_t i = 0; i < tdim.nvargs(); i++) {
             vidxs_[i] = tdim.vidx(i);
             vstrides_[i] = tdim.vstride(i);
@@ -1810,10 +1812,10 @@ class view_iterator_t {
 public:
     view_iterator_t(const view_info_t &info)
         : info_(info)
+        , inner_elems_(1)
         , block_off_(nblocks())
         , block_dims_(nblocks())
         , off_(info.vlayout().ndims()) {
-        inner_elems_ = 1;
        for (int i = 0; i < info_.inner_idx(); i++) {
            inner_elems_ *= (int)blocks()[i].block;
        }
```
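
Note that the first hunk writes the initializer list in a specific order (`size_` and `base_mod_` ahead of `block_` and `dim_`), presumably matching declaration order: members are initialized in the order they are declared, not the order the list is written, and a mismatched list draws a reorder warning. A sketch of the hazard this ordering avoids, with hypothetical names:

```cpp
#include <cassert>

struct view_iter_t {
    int inner_elems_; // declared first, so initialized first
    int block_off_;   // initialized second, may safely read inner_elems_

    explicit view_iter_t(int n)
        : inner_elems_(n), block_off_(inner_elems_ * 2) {}
};

int main() {
    view_iter_t v(4);
    assert(v.block_off_ == 8);
    return 0;
}
```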

src/gpu/intel/jit/pass/hoist.cpp (+2 -1)

```diff
@@ -83,6 +83,7 @@ class hoist_exprs_mutator_t : public ir_mutator_t {
         if (!obj.func.is<send_t>()) return ir_mutator_t::_mutate(obj);
 
         std::vector<expr_t> new_args;
+        new_args.reserve(obj.args.size());
         for (auto &e : obj.args) {
             new_args.push_back(hoist_expr(e));
         }
@@ -379,7 +380,7 @@ class hoist_send_masks_mutator_t : public ir_mutator_t {
         return loop_deps_.count(v) != 0;
     }
 
-    bool can_hoist(const expr_t &expr) {
+    bool can_hoist(const expr_t &expr) const {
         return expr.type().size() <= max_hoist_size_ - current_hoist_size_;
     }
 
```
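
The second hunk marks a method `const`, likely readability-make-member-function-const: the function only reads members, so declaring it `const` lets it be called through const references and makes the compiler enforce that it stays read-only. A sketch with a stripped-down, hypothetical `hoist_mutator_t`:

```cpp
#include <cstdint>

class hoist_mutator_t {
public:
    // Reads members without mutating anything, so it is const.
    bool can_hoist(int64_t expr_size) const {
        return expr_size <= max_hoist_size_ - current_hoist_size_;
    }

private:
    int64_t max_hoist_size_ = 256;
    int64_t current_hoist_size_ = 0;
};

int main() {
    const hoist_mutator_t m{};
    return m.can_hoist(64) ? 0 : 1;
}
```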

src/gpu/intel/jit/pass/send.cpp (+3 -3)

```diff
@@ -27,7 +27,7 @@ namespace jit {
 
 class buffer_offset_lifter_t : public ir_mutator_t {
 public:
-    object_t _mutate(const func_call_t &obj) {
+    object_t _mutate(const func_call_t &obj) override {
         if (!obj.func.is<send_t>()) return ir_mutator_t::_mutate(obj);
 
         auto &mem_buf = send_t::arg_mem_buf(obj);
@@ -56,7 +56,7 @@ class send_injector_t : public ir_mutator_t {
 public:
     send_injector_t(ir_context_t &ir_ctx) : ir_ctx_(ir_ctx) {}
 
-    object_t _mutate(const func_call_t &obj) {
+    object_t _mutate(const func_call_t &obj) override {
         auto *send = obj.func.as_ptr<send_t>();
         if (!send) return ir_mutator_t::_mutate(obj);
 
@@ -176,7 +176,7 @@ class send_2d_header_store_lifter_t : public ir_mutator_t {
                         off, send_t::header_2d_off_x(), send_t::header_2d_off_y())
                 && !is_const(obj.value))
             return obj;
-        stores_[obj.buf].push_back(obj);
+        stores_[obj.buf].emplace_back(obj);
         return stmt_t();
    }
 
```
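
The `override` additions are presumably modernize-use-override: a virtual function that overrides without saying so still compiles, but if the base signature ever drifts, the derived function silently becomes a new, hidden function; `override` turns that drift into a compile error. A skeleton sketch (the class and method names echo the diff, but the bodies are invented):

```cpp
struct ir_mutator_t {
    virtual ~ir_mutator_t() = default;
    virtual int _mutate(int x) { return x; }
};

struct lifter_t : ir_mutator_t {
    // "override" makes the compiler reject this method if the base
    // signature ever changes, instead of silently hiding it.
    int _mutate(int x) override { return x + 1; }
};

int main() {
    lifter_t l;
    return l._mutate(1) == 2 ? 0 : 1;
}
```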

src/gpu/intel/jit/pass/simplify.cpp (+2 -1)

```diff
@@ -1565,6 +1565,7 @@ expr_t simplify_with_nary(const expr_t &_e, const constraint_set_t &cset) {
     return e;
 }
 
+// NOLINTNEXTLINE(readability-identifier-naming)
 class _64_bit_add_optimizer_t : public nary_op_mutator_t {
 public:
     object_t _mutate(const nary_op_t &obj) override {
@@ -1770,7 +1771,7 @@ struct op_traits_t {};
     static auto compute(T a, T b) -> decltype(a op b) { \
         return a op b; \
     } \
-    template <op_kind_t dummy_op = name, \
+    template <op_kind_t dummy_op = (name), \
             typename = typename std::enable_if<dummy_op == op_kind_t::_and \
                     || dummy_op == op_kind_t::_or>::type> \
     static bool compute(bool a, bool b) { \
```
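
The `(name)` change looks like bugprone-macro-parentheses: a macro parameter used as a default template argument gets parenthesized so that an expression argument cannot be reparsed at the substitution site. A compilable sketch with a hypothetical `DEFINE_CHECK` macro; without the parentheses, the `>` in an argument like `2 > 1` would be parsed as closing the template parameter list:

```cpp
#include <type_traits>

// (name) keeps operators inside the macro argument from interacting
// with the surrounding template syntax.
#define DEFINE_CHECK(name) \
    template <int v = (name), \
            typename = typename std::enable_if<(v > 0)>::type> \
    static bool check() { \
        return true; \
    }

struct traits_t {
    DEFINE_CHECK(2 > 1)
};

int main() {
    return traits_t::check() ? 0 : 1;
}
```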

src/gpu/intel/jit/v2/conv/bridge.hpp (+1 -1)

```diff
@@ -105,7 +105,7 @@ inline jit::layout_t to_conv_layout(const layout_tag_t &_tag,
 inline jit::layout_t to_conv_layout(
         const layout_tag_t &_tag, const pvar_tile_t &shape) {
     int ndims = _tag.desc().ndims();
-    auto tag = _tag.raw_tag();
+    const auto &tag = _tag.raw_tag();
     std::vector<dim_t> dims(ndims);
     for (int i = 0; i < ndims; i++) {
         auto d = _tag.desc().prb_dim(i);
```
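
Assuming `raw_tag()` returns a const reference, `auto tag = ...` deep-copies the returned object, which is the kind of silent copy clang-tidy's performance checks (likely performance-unnecessary-copy-initialization) flag; `const auto &` binds to the existing object instead. A sketch with a stripped-down stand-in for `layout_tag_t`:

```cpp
#include <string>

struct layout_tag_t {
    std::string raw_;
    const std::string &raw_tag() const { return raw_; }
};

int main() {
    layout_tag_t t{"aBx16b"};
    // Before: auto tag = t.raw_tag();   (copies the string)
    const auto &tag = t.raw_tag(); // binds to the existing object
    return tag.size() == 6 ? 0 : 1;
}
```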

src/gpu/intel/jit/v2/conv/debug.hpp (+3 -3)

```diff
@@ -48,9 +48,9 @@ class debug_t {
         *desc_ptr_ = desc;
     }
 
-    kernel_desc_setter_t(kernel_desc_setter_t &&other) {
-        desc_ptr_ = other.desc_ptr_;
-        other.desc_ptr_ = nullptr;
+    kernel_desc_setter_t(kernel_desc_setter_t &&other)
+        : desc_ptr_(nullptr) {
+        std::swap(desc_ptr_, other.desc_ptr_);
     }
     kernel_desc_setter_t &operator=(kernel_desc_setter_t &&other) {
         if (&other != this) std::swap(desc_ptr_, other.desc_ptr_);
```
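
The rewritten move constructor initializes `desc_ptr_` in the initializer list (again satisfying cppcoreguidelines-prefer-member-initializer) and then swaps, which leaves the moved-from object holding `nullptr` and mirrors the swap-based move assignment below it. A self-contained sketch; `desc_setter_t` is a hypothetical stand-in:

```cpp
#include <cassert>
#include <utility>

class desc_setter_t {
public:
    explicit desc_setter_t(int *ptr) : desc_ptr_(ptr) {}

    // Initialize to null in the init list, then take ownership by
    // swapping: the moved-from object ends up holding nullptr.
    desc_setter_t(desc_setter_t &&other) noexcept : desc_ptr_(nullptr) {
        std::swap(desc_ptr_, other.desc_ptr_);
    }

    int *get() const { return desc_ptr_; }

private:
    int *desc_ptr_;
};

int main() {
    int x = 0;
    desc_setter_t a(&x);
    desc_setter_t b(std::move(a));
    assert(b.get() == &x && a.get() == nullptr);
    return 0;
}
```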

src/gpu/intel/jit/v2/conv/kernel_desc.hpp (+1 -1)

```diff
@@ -78,7 +78,7 @@ static auto specialization_mode_names = nstl::to_array({
 GPU_DEFINE_PARSE_ENUM(specialization_mode_t, specialization_mode_names)
 
 struct specialization_t {
-    specialization_mode_t mode;
+    specialization_mode_t mode = specialization_mode_t::none;
     // Dimension values to specialize (e.g. kw1).
     pvar_tile_t dim_values;
     // Dimension modulus to specialize (e.g. oc@64)
```
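
This and the similar one-liners below (problem.hpp, gemm_with_post_ops.hpp) give previously uninitialized members in-class defaults, plausibly addressing cppcoreguidelines-pro-type-member-init: reading an uninitialized enum, bool, or array element is undefined behavior, and a default member initializer guarantees a deterministic default-constructed state. A sketch combining the three kinds of members touched by the commit (member names here are illustrative):

```cpp
#include <array>

enum class specialization_mode_t { none, max };

struct specialization_t {
    // In-class initializers make default construction well-defined.
    specialization_mode_t mode = specialization_mode_t::none;
    bool subbyte_pack = false;
    std::array<int, 3> dhw_map = {0, 0, 0};
};

int main() {
    specialization_t s;
    return s.mode == specialization_mode_t::none ? 0 : 1;
}
```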

src/gpu/intel/jit/v2/conv/plan_registry.hpp (+1 -1)

```diff
@@ -55,7 +55,7 @@ class plan_registry_t {
     void stringify(std::ostream &out) const;
     void parse(std::istream &out);
 
-public:
+private:
     std::vector<entry_t> entries_;
 };
 
```
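
The duplicated `public:` was almost certainly meant to be `private:`; as written, `entries_` sat in the class's public surface (the kind of thing misc-non-private-member-variables-in-classes reports). A sketch of the intended encapsulation; `add()` and `size()` are invented stand-ins for the real interface:

```cpp
#include <cstddef>
#include <vector>

class plan_registry_t {
public:
    void add(int entry) { entries_.push_back(entry); }
    std::size_t size() const { return entries_.size(); }

private: // data stays behind the interface
    std::vector<int> entries_;
};

int main() {
    plan_registry_t r;
    r.add(42);
    return r.size() == 1 ? 0 : 1;
}
```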

src/gpu/intel/jit/v2/conv/planner/bench.hpp (+10 -9)

```diff
@@ -71,15 +71,16 @@ struct bench_input_params_t {
     bench_input_params_t() = default;
     bench_input_params_t(const kernel_desc_t &kernel_desc, const hw_t &hw,
             int nprbs = default_nprbs)
-        : hw(hw), nprbs(nprbs) {
-        prop = kernel_desc.prop;
-        src_tag = kernel_desc.src_tag;
-        wei_tag = kernel_desc.wei_tag;
-        dst_tag = kernel_desc.dst_tag;
-        reqs = kernel_desc.reqs();
-        is_dw = kernel_desc.is_dw;
-        bias_type = kernel_desc.bias_type;
-        tile = kernel_desc.iter_tile;
+        : hw(hw)
+        , prop(kernel_desc.prop)
+        , src_tag(kernel_desc.src_tag)
+        , wei_tag(kernel_desc.wei_tag)
+        , dst_tag(kernel_desc.dst_tag)
+        , reqs(kernel_desc.reqs())
+        , is_dw(kernel_desc.is_dw)
+        , bias_type(kernel_desc.bias_type)
+        , tile(kernel_desc.iter_tile)
+        , nprbs(nprbs) {
         for (auto &d : kernel_desc.thread_group_tile) {
             tile[d] = tile.get(d, 1) * kernel_desc.thread_group_tile[d];
         }
```

src/gpu/intel/jit/v2/conv/planner/search.cpp (+1 -0)

```diff
@@ -447,6 +447,7 @@ class kernel_search_manager_t {
             }
         }
         std::vector<search_kernel_desc_group_t> ret;
+        ret.reserve(desc_groups.size());
         for (auto &kv : desc_groups) {
             ret.push_back(kv.second);
         }
```

src/gpu/intel/jit/v2/conv/problem.hpp (+1 -1)

```diff
@@ -104,7 +104,7 @@ class problem_t {
     layout_tag_t dst_tag_;
     type_t bias_type_;
     pvar_tile_t shape_;
-    std::array<int, 3> dhw_map_;
+    std::array<int, 3> dhw_map_ = {0};
     bool with_groups_ = false;
     bool with_scales_ = false;
     bool with_post_ops_ = false;
```

src/gpu/intel/jit/v2/conv/tensor_utils.cpp (+2 -2)

```diff
@@ -188,7 +188,7 @@ std::string blocked_to_str_tag(const memory_desc_t &md) {
     for (int i = blk.inner_nblks - 1; i >= 0; i--) {
         dim_idx_t idx = into<dim_idx_t>(blk.inner_idxs[i]);
         dim_t block = blk.inner_blks[i];
-        parts.push_back(std::string(1, dim_idx::as_tag(idx)));
+        parts.emplace_back(1, dim_idx::as_tag(idx));
         parts.push_back(std::to_string(block));
         full_inner_blks[idx] *= block;
         stride *= block;
@@ -211,7 +211,7 @@ std::string blocked_to_str_tag(const memory_desc_t &md) {
         // Size-one blocks have to be added first.
         if (min_dim == 1 && rem_dims[j] != min_dim) continue;
         bool is_blocked = (full_inner_blks[j] != 1);
-        parts.push_back(std::string(1, dim_idx::as_tag(j, is_blocked)));
+        parts.emplace_back(1, dim_idx::as_tag(j, is_blocked));
         stride *= rem_dims[j];
         seen[j] = true;
         found = true;
```
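
These two hunks are the clearest modernize-use-emplace case in the commit: `emplace_back` forwards the arguments to the `std::string(count, char)` constructor and builds the element in place, skipping the temporary that `push_back(std::string(1, c))` constructs and then moves. A minimal sketch:

```cpp
#include <string>
#include <vector>

int main() {
    std::vector<std::string> parts;
    char tag = 'b';
    // Before: parts.push_back(std::string(1, tag));  temporary + move
    parts.emplace_back(1, tag); // constructs the string in the vector
    return parts[0] == "b" ? 0 : 1;
}
```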

src/gpu/intel/ocl/gemm/gemm_with_post_ops.hpp (+1 -1)

```diff
@@ -48,7 +48,7 @@ struct gemm_with_post_ops_t : public gpu_gemm_t {
         bool use_reorder = false;
         compute::dispatch_t dispatch_;
         attr_info_t attr_info_;
-        bool subbyte_pack_;
+        bool subbyte_pack_ = false;
     };
 
     status_t init(impl::engine_t *engine) override {
```

src/gpu/intel/ocl/reduction/combined_reduction.cpp (+1 -0)

```diff
@@ -85,6 +85,7 @@ reduction_phase_conf_t::reduction_phase_conf_t(
     gpu_assert(reduction_block.block != 0) << "Reducing over 0 elements";
     if (outer_block.block == 0 || inner_block.block == 0) {
         nd_range = compute::nd_range_t({0}, {into<size_t>(subgroup_size)});
+        with_block_reads = false;
         return;
     }
     with_block_reads = can_use_block_reads();
```
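
This one fixes an early-return path: the constructor set `with_block_reads` only on the normal path, so the degenerate-shape exit left it uninitialized. Every exit path from a constructor must initialize the member (or the member needs an in-class default). A sketch with a hypothetical `reduction_phase_t`:

```cpp
struct reduction_phase_t {
    bool with_block_reads;

    explicit reduction_phase_t(int block) {
        if (block == 0) {
            // The early-return path must also set the member, or a
            // later read of with_block_reads is undefined behavior.
            with_block_reads = false;
            return;
        }
        with_block_reads = (block % 16 == 0);
    }
};

int main() {
    reduction_phase_t p(0);
    return p.with_block_reads ? 1 : 0;
}
```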
