
Commit aa14cc0

[GPU] Add NMS_Gather ops
1 parent 1a5776e commit aa14cc0

11 files changed: +427 -2 lines changed


src/plugins/intel_gpu/include/intel_gpu/primitives/non_max_suppression.hpp

+25
@@ -156,4 +156,29 @@ struct non_max_suppression : public primitive_base<non_max_suppression> {
         ib >> make_data(&rotation, sizeof(rotation));
     }
 };
+
+struct non_max_suppression_gather : primitive_base<non_max_suppression_gather> {
+    CLDNN_DECLARE_PRIMITIVE(non_max_suppression_gather)
+
+    /// @brief Constructs non_max_suppression_gather primitive.
+    /// @param id This primitive id.
+    /// @param inputs Input primitives ids.
+    non_max_suppression_gather(const primitive_id& id,
+                               const std::vector<input_info>& inputs,
+                               const size_t num_outputs = 1)
+        : primitive_base(id, inputs, {padding()}, {optional_data_type()}, num_outputs) {}
+
+    size_t hash() const override {
+        size_t seed = primitive::hash();
+        return seed;
+    }
+
+    bool operator==(const primitive& rhs) const override {
+        if (!compare_common_params(rhs)) {
+            return false;
+        }
+
+        return true;
+    }
+};
 } // namespace cldnn
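For reference, a minimal sketch of how this new primitive could be instantiated, mirroring the constructor added above and the wiring used later in the plugin code; the "nms" layer id and the standalone context are illustrative assumptions, not part of this commit:

// Hypothetical usage sketch: feed the three NMS outputs into the gather primitive.
// "nms" is an assumed id of an existing non_max_suppression primitive whose outputs
// are (0) selected indices, (1) selected scores, (2) valid outputs count.
std::vector<cldnn::input_info> gather_inputs = {
    cldnn::input_info("nms", 0),
    cldnn::input_info("nms", 1),
    cldnn::input_info("nms", 2)
};
auto gather_prim = cldnn::non_max_suppression_gather("nms_gather", gather_inputs, 3 /*num_outputs*/);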

src/plugins/intel_gpu/src/graph/impls/cpu/non_max_suppression.cpp

+153
@@ -440,6 +440,159 @@ attach_non_max_suppression_impl::attach_non_max_suppression_impl() {
 }
 
 } // namespace detail
+
+namespace {
+
+template <typename T>
+std::vector<T> get_nms_gather_input(stream& stream, memory::ptr mem) {
+    auto dep_mem_layout = mem->get_layout();
+    auto dep_mem_batch = static_cast<size_t>(dep_mem_layout.batch());
+
+    mem_lock<T, mem_lock_type::read> dep_mem_lock(mem, stream);
+    auto dep_mem_ptr = dep_mem_lock.data();
+
+    size_t actual_valid_num = dep_mem_batch;
+    size_t idx = 0;
+    for (size_t i = 0; i < dep_mem_batch; i++) {
+        idx = i * 3;
+        if (dep_mem_ptr[idx] == -1) {
+            actual_valid_num = i;
+            break;
+        }
+    }
+
+    std::vector<T> result;
+    for (size_t i = 0; i < actual_valid_num; i++) {
+        idx = i * 3;
+        result.push_back(dep_mem_ptr[idx + 0]);
+        result.push_back(dep_mem_ptr[idx + 1]);
+        result.push_back(dep_mem_ptr[idx + 2]);
+    }
+
+    return result;
+}
+
+template <typename T>
+void store_nms_gather_output0(stream& stream, memory::ptr mem, std::vector<T> valid_input) {
+    auto valid_input_size = valid_input.size() / 3;
+
+    mem_lock<T, mem_lock_type::write> lock(mem, stream);
+    auto ptr = lock.data();
+
+    auto output_batch = static_cast<size_t>(mem->get_layout().batch());
+    for (size_t si = 0; si < std::min(valid_input_size, output_batch); ++si) {
+        auto offset = si * 3;
+        // batch_index, class_index, box_index
+        ptr[offset + 0] = static_cast<T>(valid_input[offset + 0]);
+        ptr[offset + 1] = static_cast<T>(valid_input[offset + 1]);
+        ptr[offset + 2] = static_cast<T>(valid_input[offset + 2]);
+    }
+}
+
+template <typename T>
+void store_nms_gather_output1(stream& stream, memory::ptr mem, std::vector<T> valid_input) {
+    auto valid_input_size = valid_input.size() / 3;
+
+    mem_lock<T, mem_lock_type::write> lock(mem, stream);
+    auto ptr = lock.data();
+
+    auto output_batch = static_cast<size_t>(mem->get_layout().batch());
+    for (size_t si = 0; si < std::min(valid_input_size, output_batch); ++si) {
+        auto offset = si * 3;
+        // batch_index, class_index, score
+        ptr[offset + 0] = static_cast<T>(valid_input[offset + 0]);
+        ptr[offset + 1] = static_cast<T>(valid_input[offset + 1]);
+        ptr[offset + 2] = static_cast<T>(valid_input[offset + 2]);
+    }
+}
+
+template <typename T>
+void store_nms_gather_output2(stream& stream, memory::ptr mem, std::vector<int32_t> valid_input) {
+    auto valid_input_size = valid_input.size() / 3;
+
+    mem_lock<T, mem_lock_type::write> lock(mem, stream);
+    auto ptr = lock.data();
+    ptr[0] = static_cast<T>(valid_input_size);
+}
+
+void run_nms_gather(non_max_suppression_gather_inst& instance) {
+    auto& stream = instance.get_network().get_stream();
+
+    auto valid_input0 = get_nms_gather_input<ov::element_type_traits<data_types::i32>::value_type>(stream, instance.dep_memory_ptr(0));
+    store_nms_gather_output0<ov::element_type_traits<data_types::i32>::value_type>(stream, instance.output_memory_ptr(0), valid_input0);
+
+    if (instance.outputs_memory_count() >= 2) {
+        auto data_type = instance.dep_memory_ptr(1)->get_layout().data_type;
+
+        if (data_type == cldnn::data_types::f16) {
+            auto valid_input_f16 = get_nms_gather_input<ov::element_type_traits<data_types::f16>::value_type>(stream, instance.dep_memory_ptr(1));
+            store_nms_gather_output1<ov::element_type_traits<data_types::f16>::value_type>(stream, instance.output_memory_ptr(1), valid_input_f16);
+        } else if (data_type == cldnn::data_types::f32) {
+            auto valid_input_f32 = get_nms_gather_input<ov::element_type_traits<data_types::f32>::value_type>(stream, instance.dep_memory_ptr(1));
+            store_nms_gather_output1<ov::element_type_traits<data_types::f32>::value_type>(stream, instance.output_memory_ptr(1), valid_input_f32);
+        } else {
+            throw std::runtime_error("Non max suppression gather - unsupported second output data type");
+        }
+
+        if (instance.outputs_memory_count() == 3) {
+            store_nms_gather_output2<ov::element_type_traits<data_types::i32>::value_type>(stream, instance.output_memory_ptr(2), valid_input0);
+        }
+    }
+}
+} // namespace
+struct non_max_suppression_gather_impl : typed_primitive_impl<non_max_suppression_gather> {
+    using parent = typed_primitive_impl<non_max_suppression_gather>;
+
+    DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::cpu::non_max_suppression_gather_impl)
+
+    std::unique_ptr<primitive_impl> clone() const override {
+        return make_unique<non_max_suppression_gather_impl>(*this);
+    }
+
+    non_max_suppression_gather_impl() : parent("non_max_suppression_gather_impl") {}
+
+    event::ptr execute_impl(const std::vector<event::ptr>& events, typed_primitive_inst<non_max_suppression_gather>& instance) override {
+        auto& stream = instance.get_network().get_stream();
+
+        const bool pass_through_events = (stream.get_queue_type() == QueueTypes::out_of_order) && instance.get_node().is_in_shape_of_subgraph();
+
+        if (!pass_through_events) {
+            for (auto e : events) {
+                e->wait();
+            }
+        }
+
+        run_nms_gather(instance);
+
+        if (pass_through_events) {
+            if (events.size() > 1) {
+                return stream.group_events(events);
+            } else if (events.size() == 1) {
+                return events[0];
+            }
+        }
+
+        return stream.create_user_event(true);
+    }
+
+    static std::unique_ptr<primitive_impl> create(const non_max_suppression_gather_node&, const kernel_impl_params&) {
+        return make_unique<non_max_suppression_gather_impl>();
+    }
+    void init_kernels(const kernels_cache&, const kernel_impl_params&) override {}
+};
+
+namespace detail {
+
+attach_non_max_suppression_gather_impl::attach_non_max_suppression_gather_impl() {
+    implementation_map<non_max_suppression_gather>::add(impl_types::cpu, non_max_suppression_gather_impl::create, {
+        std::make_tuple(data_types::i32, format::bfyx),
+        std::make_tuple(data_types::f16, format::bfyx),
+        std::make_tuple(data_types::f32, format::bfyx),
+    });
+}
+
+} // namespace detail
+
 } // namespace cpu
 } // namespace cldnn
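The helpers above all follow the same pattern: scan the padded NMS output for the first row whose leading value is the -1 padding sentinel, then copy only the rows before it into the dynamically shaped output. A standalone sketch of that compaction step on plain std::vector data, using standard C++ only (the function name and signature are illustrative, not part of the cldnn API):

#include <cstdint>
#include <vector>

// Count the valid [batch_index, class_index, box_index] triples before the -1 padding,
// then return a compacted copy (mirrors get_nms_gather_input / store_nms_gather_output0).
std::vector<int32_t> compact_nms_rows(const std::vector<int32_t>& padded, size_t num_rows) {
    size_t valid = num_rows;
    for (size_t i = 0; i < num_rows; ++i) {
        if (padded[i * 3] == -1) {   // first padded row marks the end of valid data
            valid = i;
            break;
        }
    }
    return std::vector<int32_t>(padded.begin(), padded.begin() + valid * 3);
}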

src/plugins/intel_gpu/src/graph/impls/cpu/register.cpp

+1
@@ -16,6 +16,7 @@ void register_implementations() {
     REGISTER_CPU(proposal);
     REGISTER_CPU(read_value);
     REGISTER_CPU(non_max_suppression);
+    REGISTER_CPU(non_max_suppression_gather);
     REGISTER_CPU(shape_of);
     REGISTER_CPU(concatenation);
     REGISTER_CPU(gather);

src/plugins/intel_gpu/src/graph/impls/cpu/register.hpp

+1
@@ -39,6 +39,7 @@ REGISTER_CPU(assign);
 REGISTER_CPU(proposal);
 REGISTER_CPU(read_value);
 REGISTER_CPU(non_max_suppression);
+REGISTER_CPU(non_max_suppression_gather);
 REGISTER_CPU(detection_output);
 REGISTER_CPU(shape_of);
 REGISTER_CPU(concatenation);

src/plugins/intel_gpu/src/graph/include/non_max_suppression_inst.h

+28
@@ -186,4 +186,32 @@ class typed_primitive_inst<non_max_suppression> : public typed_primitive_inst_ba
 
 using non_max_suppression_inst = typed_primitive_inst<non_max_suppression>;
 
+template <>
+struct typed_program_node<non_max_suppression_gather> : typed_program_node_base<non_max_suppression_gather> {
+    using parent = typed_program_node_base<non_max_suppression_gather>;
+    using parent::parent;
+
+    bool generates_dynamic_output() const override {
+        return true;
+    }
+
+    std::vector<size_t> get_shape_infer_dependencies() const override { return {0, 1, 2}; }
+};
+
+using non_max_suppression_gather_node = typed_program_node<non_max_suppression_gather>;
+
+template <>
+class typed_primitive_inst<non_max_suppression_gather> : public typed_primitive_inst_base<non_max_suppression_gather> {
+public:
+    using parent = typed_primitive_inst_base<non_max_suppression_gather>;
+    using parent::parent;
+
+    static layout calc_output_layout(const non_max_suppression_gather_node& node, const kernel_impl_params& impl_param);
+    template <typename ShapeType>
+    static std::vector<layout> calc_output_layouts(const non_max_suppression_gather_node& node, const kernel_impl_params& impl_param);
+    static std::string to_string(const non_max_suppression_gather_node& node);
+};
+
+using non_max_suppression_gather_inst = typed_primitive_inst<non_max_suppression_gather>;
+
 } // namespace cldnn

src/plugins/intel_gpu/src/graph/layout_optimizer.cpp

+2
@@ -1575,6 +1575,8 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node, format
                 }
             }
         }
+    } else if (node.is_type<non_max_suppression_gather>()) {
+        return impl_types::cpu;
     } else if (node.is_type<reorder>()) {
        if (!_optimization_attributes.use_onednn_impls)
            return impl_types::ocl;

src/plugins/intel_gpu/src/graph/non_max_suppression.cpp

+71
@@ -11,6 +11,10 @@
 #include "nms_shape_inference.hpp"
 
 namespace cldnn {
+
+// -----------------------------------------------
+// non_max_suppression
+// -----------------------------------------------
 GPU_DEFINE_PRIMITIVE_TYPE_ID(non_max_suppression)
 
 layout non_max_suppression_inst::calc_output_layout(non_max_suppression_node const& node, kernel_impl_params const& impl_param) {
@@ -81,4 +85,71 @@ std::string non_max_suppression_inst::to_string(non_max_suppression_node const&
     return description.str();
 }
 
+// -----------------------------------------------
+// non_max_suppression_gather
+// -----------------------------------------------
+GPU_DEFINE_PRIMITIVE_TYPE_ID(non_max_suppression_gather)
+
+layout non_max_suppression_gather_inst::calc_output_layout(non_max_suppression_gather_node const& node, kernel_impl_params const& impl_param) {
+    OPENVINO_THROW("Only calc_output_layouts should be used!");
+}
+
+template<typename ShapeType>
+std::vector<layout> non_max_suppression_gather_inst::calc_output_layouts(non_max_suppression_gather_node const& /*node*/,
+                                                                         const kernel_impl_params& impl_param) {
+    std::vector<layout> layouts;
+
+    auto desc = impl_param.typed_desc<non_max_suppression_gather>();
+    std::vector<ShapeType> output_shapes = { ShapeType{}, ShapeType{}, ShapeType{} };
+
+    auto& memory_deps = impl_param.memory_deps;
+    if (memory_deps.count(0)) {
+        auto actual_output = memory_deps.at(0);
+        cldnn::mem_lock<int32_t, mem_lock_type::read> actual_output_lock(actual_output, impl_param.get_stream());
+
+        auto output_ps = actual_output->get_layout().get_partial_shape();
+        auto b = output_ps[0].get_length();
+        auto f = output_ps[1].get_length();
+
+        // find valid data size
+        auto output_data = actual_output_lock.data();
+        int64_t actual_valid_num = b;
+        for (int64_t i = 0; i < b; i += 1) {
+            if (output_data[i * f] == -1) {
+                actual_valid_num = i;
+                break;
+            }
+        }
+
+        output_shapes[0] = output_shapes[1] = ShapeType{actual_valid_num, f};
+        output_shapes[2] = ShapeType{1};
+    } else {
+        output_shapes[0] = output_shapes[1] = ShapeType{ov::Dimension::dynamic(), 3};
+        output_shapes[2] = ShapeType{1};
+    }
+
+    for (size_t i = 0; i < desc->num_outputs; ++i) {
+        layouts.push_back({output_shapes[i],
+                           impl_param.get_input_layout(i).data_type,
+                           format::get_default_format(output_shapes[i].size())});
+    }
+    return layouts;
+}
+
+template std::vector<layout> non_max_suppression_gather_inst::calc_output_layouts<ov::PartialShape>(non_max_suppression_gather_node const& node,
+                                                                                                    const kernel_impl_params& impl_param);
+
+std::string non_max_suppression_gather_inst::to_string(non_max_suppression_gather_node const& node) {
+    auto desc = node.get_primitive();
+    auto node_info = node.desc_to_json();
+
+    json_composite info;
+
+    node_info->add("non max suppression gather info", info);
+
+    std::stringstream description;
+    node_info->dump(description);
+    return description.str();
+}
+
 } // namespace cldnn
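calc_output_layouts reads the first NMS output on the host to turn the padded, fixed-size NMS result into exact dynamic shapes: {n, 3} for the first two outputs and {1} for the valid-count scalar, where n is the number of rows before the -1 padding. A simplified standalone sketch of that computation in plain C++ (the function name and the shape representation as vectors are illustrative assumptions):

#include <array>
#include <cstdint>
#include <vector>

// Given the first NMS output buffer of shape [b, f] (f == 3), return the inferred
// output shapes: {n, f} for indices and scores, {1} for the valid-count scalar,
// where n counts the rows before the -1 padding sentinel.
std::array<std::vector<int64_t>, 3> infer_gather_shapes(const std::vector<int32_t>& data,
                                                        int64_t b, int64_t f) {
    int64_t n = b;
    for (int64_t i = 0; i < b; ++i) {
        if (data[i * f] == -1) {
            n = i;
            break;
        }
    }
    return {std::vector<int64_t>{n, f}, std::vector<int64_t>{n, f}, std::vector<int64_t>{1}};
}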

src/plugins/intel_gpu/src/graph/program.cpp

+2
@@ -1494,6 +1494,7 @@ void program::set_layout_optimizer_attributes(layout_optimizer& lo) {
             prim.type() != cldnn::broadcast::type_id() &&
             prim.type() != cldnn::ctc_loss::type_id() &&
             prim.type() != cldnn::non_max_suppression::type_id() &&
+            prim.type() != cldnn::non_max_suppression_gather::type_id() &&
             prim.type() != cldnn::roi_align::type_id() &&
             prim.type() != cldnn::matrix_nms::type_id() &&
             prim.type() != cldnn::adaptive_pooling::type_id() &&
@@ -1546,6 +1547,7 @@ void program::set_layout_optimizer_attributes(layout_optimizer& lo) {
             prim.type() != cldnn::quantize::type_id() &&
             prim.type() != cldnn::ctc_loss::type_id() &&
             prim.type() != cldnn::non_max_suppression::type_id() &&
+            prim.type() != cldnn::non_max_suppression_gather::type_id() &&
             prim.type() != cldnn::roi_align::type_id() &&
             prim.type() != cldnn::matrix_nms::type_id() &&
             prim.type() != cldnn::adaptive_pooling::type_id() &&

src/plugins/intel_gpu/src/kernel_selector/common_types.h

+1
@@ -77,6 +77,7 @@ enum class KernelType {
     EXTRACT_IMAGE_PATCHES,
     LOOP,
     NON_MAX_SUPPRESSION,
+    NON_MAX_SUPPRESSION_GATHER,
    DETECTION_OUTPUT,
    EXPERIMENTAL_DETECTRON_DETECTION_OUTPUT,
    EXPERIMENTAL_DETECTRON_GENERATE_PROPOSALS_SINGLE_IMAGE,

src/plugins/intel_gpu/src/plugin/ops/non_max_suppression.cpp

+20 -2
@@ -54,9 +54,9 @@ static void CreateNonMaxSuppressionIEInternalOp(ProgramBuilder& p, const std::sh
     auto boxesShape = op->get_input_partial_shape(0);
     size_t num_outputs = op->get_output_size();
     if (p.use_new_shape_infer()) {
-        auto nonMaxSuppressionLayerName = layer_type_name_ID(op);
+        auto NMSLayerName = layer_type_name_ID(op);
         auto prim = cldnn::non_max_suppression(
-            nonMaxSuppressionLayerName,
+            NMSLayerName,
             reordered_inputs[0],
             reordered_inputs[1],
             0,
@@ -78,6 +78,24 @@
         }
 
         p.add_primitive(*op, prim);
+
+        auto NMSGatherLayerName = layer_type_name_ID(op) + "_NMSGather";
+        std::vector<cldnn::input_info> nms_gather_inputs;
+        const std::vector<cldnn::input_info> nms_gather_input_list = {
+            cldnn::input_info(NMSLayerName, 0),
+            cldnn::input_info(NMSLayerName, 1),
+            cldnn::input_info(NMSLayerName, 2)
+        };
+        for (size_t i = 0; i < std::max(num_outputs, nms_gather_input_list.size()); i++) {
+            nms_gather_inputs.push_back(nms_gather_input_list[i]);
+        }
+
+        auto nms_gather_prim = cldnn::non_max_suppression_gather(
+            NMSGatherLayerName,
+            nms_gather_inputs,
+            num_outputs);
+
+        p.add_primitive(*op, nms_gather_prim);
     } else {
         auto outputIndices = op->get_output_partial_shape(0)[0].get_length();
 
