Skip to content

Commit abc99f1

Browse files
[GPU] Add NMS_Gather ops
1 parent ea09d6b commit abc99f1

File tree

11 files changed

+389
-2
lines changed

11 files changed

+389
-2
lines changed

src/plugins/intel_gpu/include/intel_gpu/primitives/non_max_suppression.hpp

+25
Original file line numberDiff line numberDiff line change
@@ -156,4 +156,29 @@ struct non_max_suppression : public primitive_base<non_max_suppression> {
156156
ib >> make_data(&rotation, sizeof(rotation));
157157
}
158158
};
159+
160+
/// @brief Gathers the valid results of a non_max_suppression primitive into
/// densely packed outputs (invalid trailing entries are dropped at runtime).
struct non_max_suppression_gather : primitive_base<non_max_suppression_gather> {
    CLDNN_DECLARE_PRIMITIVE(non_max_suppression_gather)

    /// @brief Constructs non_max_suppression_gather primitive.
    /// @param id This primitive id.
    /// @param inputs Input primitives ids.
    /// @param num_outputs Number of outputs this primitive produces.
    non_max_suppression_gather(const primitive_id& id,
                               const std::vector<input_info>& inputs,
                               const size_t num_outputs = 1)
        : primitive_base(id, inputs, {padding()}, {optional_data_type()}, num_outputs) {}

    /// @brief No primitive-specific fields exist, so the base hash is sufficient.
    size_t hash() const override {
        return primitive::hash();
    }

    /// @brief Equality reduces to the common-parameter comparison: this
    /// primitive carries no state beyond what the base class holds.
    bool operator==(const primitive& rhs) const override {
        return compare_common_params(rhs);
    }
};
159184
} // namespace cldnn

src/plugins/intel_gpu/src/graph/impls/cpu/non_max_suppression.cpp

+115
Original file line numberDiff line numberDiff line change
@@ -440,6 +440,121 @@ attach_non_max_suppression_impl::attach_non_max_suppression_impl() {
440440
}
441441

442442
} // namespace detail
443+
444+
namespace {
445+
446+
template <typename T>
447+
size_t get_nms_gather_valid_size(stream& stream, memory::ptr mem) {
448+
auto dep_mem_layout = mem->get_layout();
449+
auto dep_mem_batch = static_cast<size_t>(dep_mem_layout.batch());
450+
451+
mem_lock<T, mem_lock_type::read> dep_mem_lock(mem, stream);
452+
auto dep_mem_ptr = dep_mem_lock.data();
453+
454+
size_t actual_valid_num = dep_mem_batch;
455+
size_t idx = 0;
456+
for (size_t i = 0; i < dep_mem_batch; i++) {
457+
idx = i * 3;
458+
if (dep_mem_ptr[idx] == -1) {
459+
actual_valid_num = i;
460+
break;
461+
}
462+
}
463+
464+
return actual_valid_num;
465+
}
466+
467+
template <typename T>
468+
void store_nms_gather_output(non_max_suppression_gather_inst& instance, size_t idx, size_t valid_size) {
469+
auto input_mem = instance.dep_memory_ptr(idx);
470+
layout dep_layout = input_mem->get_layout();
471+
auto output_ps = dep_layout.get_partial_shape();
472+
473+
output_ps[0] = valid_size; // update valid batch size
474+
auto output_layout = layout(output_ps, dep_layout.data_type, dep_layout.format);
475+
auto new_output_mem = instance.get_network().get_engine().reinterpret_buffer(*input_mem, output_layout);
476+
477+
instance.set_output_memory(new_output_mem, true, idx);
478+
}
479+
480+
// Shrinks every output of the gather primitive to the number of valid NMS
// results, and — when a third output exists — stores that count into it.
void run_nms_gather(non_max_suppression_gather_inst& instance) {
    auto& stream = instance.get_network().get_stream();

    using idx_t = ov::element_type_traits<data_types::i32>::value_type;

    // Input 0 (i32 triplets) determines how many rows are valid.
    const auto valid_input_batch = get_nms_gather_valid_size<idx_t>(stream, instance.dep_memory_ptr(0));
    store_nms_gather_output<idx_t>(instance, 0, valid_input_batch);

    if (instance.outputs_memory_count() < 2) {
        return;
    }

    // Input/output 1 may be half or single precision.
    switch (instance.dep_memory_ptr(1)->get_layout().data_type) {
    case cldnn::data_types::f16:
        store_nms_gather_output<ov::element_type_traits<data_types::f16>::value_type>(instance, 1, valid_input_batch);
        break;
    case cldnn::data_types::f32:
        store_nms_gather_output<ov::element_type_traits<data_types::f32>::value_type>(instance, 1, valid_input_batch);
        break;
    default:
        throw std::runtime_error("Non max suppression gather - unsupported second output data type");
    }

    // Output 2 is a single i32 scalar holding the valid-row count.
    if (instance.outputs_memory_count() == 3) {
        mem_lock<idx_t, mem_lock_type::write> lock(instance.output_memory_ptr(2), stream);
        lock.data()[0] = static_cast<idx_t>(valid_input_batch);
    }
}
504+
} // namespace
505+
// CPU implementation of non_max_suppression_gather.
// All real work happens in run_nms_gather(): outputs are produced by
// reinterpreting the dependency buffers, so no GPU kernels are involved.
struct non_max_suppression_gather_impl : typed_primitive_impl<non_max_suppression_gather> {
    using parent = typed_primitive_impl<non_max_suppression_gather>;

    DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::cpu::non_max_suppression_gather_impl)

    std::unique_ptr<primitive_impl> clone() const override {
        return make_unique<non_max_suppression_gather_impl>(*this);
    }

    non_max_suppression_gather_impl() : parent("non_max_suppression_gather_impl") {}

    // Waits for (or forwards) the dependency events, runs the gather on the
    // host, and returns an event describing completion.
    event::ptr execute_impl(const std::vector<event::ptr>& events, typed_primitive_inst<non_max_suppression_gather>& instance) override {
        auto& stream = instance.get_network().get_stream();

        // On an out-of-order queue inside a shape_of subgraph the incoming
        // events are passed through to the caller instead of being waited on.
        const bool pass_through_events = (stream.get_queue_type() == QueueTypes::out_of_order) && instance.get_node().is_in_shape_of_subgraph();

        if (!pass_through_events) {
            for (auto e : events) {
                e->wait();
            }
        }

        run_nms_gather(instance);

        if (pass_through_events) {
            if (events.size() > 1) {
                return stream.group_events(events);
            } else if (events.size() == 1) {
                return events[0];
            }
        }

        // Fallback: a pre-completed user event (work already finished on host).
        return stream.create_user_event(true);
    }

    static std::unique_ptr<primitive_impl> create(const non_max_suppression_gather_node&, const kernel_impl_params&) {
        return make_unique<non_max_suppression_gather_impl>();
    }
    // No kernels to compile for this host-side implementation.
    void init_kernels(const kernels_cache&, const kernel_impl_params&) override {}
};
545+
546+
namespace detail {
547+
548+
attach_non_max_suppression_gather_impl::attach_non_max_suppression_gather_impl() {
    // Register the CPU implementation for the data-type/format combinations
    // this primitive's outputs may use (i32 indices/count, f16/f32 scores).
    implementation_map<non_max_suppression_gather>::add(impl_types::cpu, non_max_suppression_gather_impl::create, {
        std::make_tuple(data_types::i32, format::bfyx),
        std::make_tuple(data_types::f16, format::bfyx),
        std::make_tuple(data_types::f32, format::bfyx),
    });
}
555+
556+
} // namespace detail
557+
443558
} // namespace cpu
444559
} // namespace cldnn
445560

src/plugins/intel_gpu/src/graph/impls/cpu/register.cpp

+1
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ void register_implementations() {
1616
REGISTER_CPU(proposal);
1717
REGISTER_CPU(read_value);
1818
REGISTER_CPU(non_max_suppression);
19+
REGISTER_CPU(non_max_suppression_gather);
1920
REGISTER_CPU(shape_of);
2021
REGISTER_CPU(concatenation);
2122
REGISTER_CPU(gather);

src/plugins/intel_gpu/src/graph/impls/cpu/register.hpp

+1
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ REGISTER_CPU(assign);
4040
REGISTER_CPU(proposal);
4141
REGISTER_CPU(read_value);
4242
REGISTER_CPU(non_max_suppression);
43+
REGISTER_CPU(non_max_suppression_gather);
4344
REGISTER_CPU(detection_output);
4445
REGISTER_CPU(shape_of);
4546
REGISTER_CPU(concatenation);

src/plugins/intel_gpu/src/graph/include/non_max_suppression_inst.h

+28
Original file line numberDiff line numberDiff line change
@@ -186,4 +186,32 @@ class typed_primitive_inst<non_max_suppression> : public typed_primitive_inst_ba
186186

187187
using non_max_suppression_inst = typed_primitive_inst<non_max_suppression>;
188188

189+
// Program-node specialization for non_max_suppression_gather.
template <>
struct typed_program_node<non_max_suppression_gather> : typed_program_node_base<non_max_suppression_gather> {
    using parent = typed_program_node_base<non_max_suppression_gather>;
    using parent::parent;

    // Output shape depends on runtime data (how many NMS results are valid),
    // so it can only be resolved at execution time.
    bool generates_dynamic_output() const override {
        return true;
    }

    // Shape inference needs the contents of all three inputs.
    std::vector<size_t> get_shape_infer_dependencies() const override { return {0, 1, 2}; }
};
200+
201+
using non_max_suppression_gather_node = typed_program_node<non_max_suppression_gather>;
202+
203+
// Instance specialization for non_max_suppression_gather.
template <>
class typed_primitive_inst<non_max_suppression_gather> : public typed_primitive_inst_base<non_max_suppression_gather> {
public:
    using parent = typed_primitive_inst_base<non_max_suppression_gather>;
    using parent::parent;

    // Static-shape path is unsupported (the implementation throws);
    // calc_output_layouts below is the only valid shape-inference entry point.
    static layout calc_output_layout(const non_max_suppression_gather_node& node, const kernel_impl_params& impl_param);
    template <typename ShapeType>
    static std::vector<layout> calc_output_layouts(const non_max_suppression_gather_node& node, const kernel_impl_params& impl_param);
    static std::string to_string(const non_max_suppression_gather_node& node);
};
214+
215+
using non_max_suppression_gather_inst = typed_primitive_inst<non_max_suppression_gather>;
216+
189217
} // namespace cldnn

src/plugins/intel_gpu/src/graph/layout_optimizer.cpp

+2
Original file line numberDiff line numberDiff line change
@@ -1554,6 +1554,8 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node, format
15541554
}
15551555
}
15561556
}
1557+
} else if (node.is_type<non_max_suppression_gather>()) {
1558+
return impl_types::cpu;
15571559
} else if (node.is_type<reorder>()) {
15581560
if (!_optimization_attributes.use_onednn_impls)
15591561
return impl_types::ocl;

src/plugins/intel_gpu/src/graph/non_max_suppression.cpp

+71
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,10 @@
1111
#include "nms_shape_inference.hpp"
1212

1313
namespace cldnn {
14+
15+
// -----------------------------------------------
16+
// non_max_suppression
17+
// -----------------------------------------------
1418
GPU_DEFINE_PRIMITIVE_TYPE_ID(non_max_suppression)
1519

1620
layout non_max_suppression_inst::calc_output_layout(non_max_suppression_node const& node, kernel_impl_params const& impl_param) {
@@ -81,4 +85,71 @@ std::string non_max_suppression_inst::to_string(non_max_suppression_node const&
8185
return description.str();
8286
}
8387

88+
// -----------------------------------------------
89+
// non_max_suppression_gather
90+
// -----------------------------------------------
91+
GPU_DEFINE_PRIMITIVE_TYPE_ID(non_max_suppression_gather)
92+
93+
// Static-shape inference is deliberately unsupported: the output shape depends
// on runtime data, so only the calc_output_layouts() path below is valid.
layout non_max_suppression_gather_inst::calc_output_layout(non_max_suppression_gather_node const& node, kernel_impl_params const& impl_param) {
    OPENVINO_THROW("Only calc_output_layouts should be used!");
}
96+
97+
template<typename ShapeType>
98+
std::vector<layout> non_max_suppression_gather_inst::calc_output_layouts(non_max_suppression_gather_node const& /*node*/,
99+
const kernel_impl_params& impl_param) {
100+
std::vector<layout> layouts;
101+
102+
auto desc = impl_param.typed_desc<non_max_suppression_gather>();
103+
std::vector<ShapeType> output_shapes = { ShapeType{}, ShapeType{}, ShapeType{} };
104+
105+
auto& memory_deps = impl_param.memory_deps;
106+
if (memory_deps.count(0)) {
107+
auto actual_output = memory_deps.at(0);
108+
cldnn::mem_lock<int32_t, mem_lock_type::read> actual_output_lock(actual_output, impl_param.get_stream());
109+
110+
auto output_ps = actual_output->get_layout().get_partial_shape();
111+
auto b = output_ps[0].get_length();
112+
auto f = output_ps[1].get_length();
113+
114+
// find valid data size
115+
auto output_data = actual_output_lock.data();
116+
int64_t actual_valid_num = b;
117+
for (int64_t i = 0; i < b ; i += 1) {
118+
if (output_data[i * f] == -1) {
119+
actual_valid_num = i;
120+
break;
121+
}
122+
}
123+
124+
output_shapes[0] = output_shapes[1] = ShapeType{actual_valid_num, f};
125+
output_shapes[2] = ShapeType{1};
126+
} else {
127+
output_shapes[0] = output_shapes[1] = ShapeType{ov::Dimension::dynamic(), 3};
128+
output_shapes[2] = ShapeType{1};
129+
}
130+
131+
for (size_t i = 0; i < desc->num_outputs; ++i) {
132+
layouts.push_back({output_shapes[i],
133+
impl_param.get_input_layout(i).data_type,
134+
format::get_default_format(output_shapes[i].size())});
135+
}
136+
return layouts;
137+
}
138+
139+
template std::vector<layout> non_max_suppression_gather_inst::calc_output_layouts<ov::PartialShape>(non_max_suppression_gather_node const& node,
140+
const kernel_impl_params& impl_param);
141+
142+
// Produces a JSON-style textual dump of the node for debug logging.
// (Fix: the original fetched node.get_primitive() into an unused local.)
std::string non_max_suppression_gather_inst::to_string(non_max_suppression_gather_node const& node) {
    auto node_info = node.desc_to_json();

    // No primitive-specific fields yet; attach an empty section so the dump
    // format stays consistent with other primitives.
    json_composite info;
    node_info->add("non max suppression gather info", info);

    std::stringstream description;
    node_info->dump(description);
    return description.str();
}
154+
84155
} // namespace cldnn

src/plugins/intel_gpu/src/graph/program.cpp

+2
Original file line numberDiff line numberDiff line change
@@ -1497,6 +1497,7 @@ void program::set_layout_optimizer_attributes(layout_optimizer& lo) {
14971497
prim.type() != cldnn::broadcast::type_id() &&
14981498
prim.type() != cldnn::ctc_loss::type_id() &&
14991499
prim.type() != cldnn::non_max_suppression::type_id() &&
1500+
prim.type() != cldnn::non_max_suppression_gather::type_id() &&
15001501
prim.type() != cldnn::roi_align::type_id() &&
15011502
prim.type() != cldnn::matrix_nms::type_id() &&
15021503
prim.type() != cldnn::adaptive_pooling::type_id() &&
@@ -1549,6 +1550,7 @@ void program::set_layout_optimizer_attributes(layout_optimizer& lo) {
15491550
prim.type() != cldnn::quantize::type_id() &&
15501551
prim.type() != cldnn::ctc_loss::type_id() &&
15511552
prim.type() != cldnn::non_max_suppression::type_id() &&
1553+
prim.type() != cldnn::non_max_suppression_gather::type_id() &&
15521554
prim.type() != cldnn::roi_align::type_id() &&
15531555
prim.type() != cldnn::matrix_nms::type_id() &&
15541556
prim.type() != cldnn::adaptive_pooling::type_id() &&

src/plugins/intel_gpu/src/kernel_selector/common_types.h

+1
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,7 @@ enum class KernelType {
7777
EXTRACT_IMAGE_PATCHES,
7878
LOOP,
7979
NON_MAX_SUPPRESSION,
80+
NON_MAX_SUPPRESSION_GATHER,
8081
DETECTION_OUTPUT,
8182
EXPERIMENTAL_DETECTRON_DETECTION_OUTPUT,
8283
EXPERIMENTAL_DETECTRON_GENERATE_PROPOSALS_SINGLE_IMAGE,

src/plugins/intel_gpu/src/plugin/ops/non_max_suppression.cpp

+20-2
Original file line numberDiff line numberDiff line change
@@ -54,9 +54,9 @@ static void CreateNonMaxSuppressionIEInternalOp(ProgramBuilder& p, const std::sh
5454
auto boxesShape = op->get_input_partial_shape(0);
5555
size_t num_outputs = op->get_output_size();
5656
if (p.use_new_shape_infer()) {
57-
auto nonMaxSuppressionLayerName = layer_type_name_ID(op);
57+
auto NMSLayerName = layer_type_name_ID(op);
5858
auto prim = cldnn::non_max_suppression(
59-
nonMaxSuppressionLayerName,
59+
NMSLayerName,
6060
reordered_inputs[0],
6161
reordered_inputs[1],
6262
0,
@@ -78,6 +78,24 @@ static void CreateNonMaxSuppressionIEInternalOp(ProgramBuilder& p, const std::sh
7878
}
7979

8080
p.add_primitive(*op, prim);
81+
82+
auto NMSGatherLayerName = layer_type_name_ID(op) + "_NMSGather";
83+
std::vector<cldnn::input_info> nms_gather_inputs;
84+
const std::vector<cldnn::input_info> nms_gather_input_list = {
85+
cldnn::input_info(NMSLayerName, 0),
86+
cldnn::input_info(NMSLayerName, 1),
87+
cldnn::input_info(NMSLayerName, 2)
88+
};
89+
for (size_t i = 0; i < std::max(num_outputs, nms_gather_input_list.size()); i++) {
90+
nms_gather_inputs.push_back(nms_gather_input_list[i]);
91+
}
92+
93+
auto nms_gather_prim = cldnn::non_max_suppression_gather(
94+
NMSGatherLayerName,
95+
nms_gather_inputs,
96+
num_outputs);
97+
98+
p.add_primitive(*op, nms_gather_prim);
8199
} else {
82100
auto outputIndices = op->get_output_partial_shape(0)[0].get_length();
83101

0 commit comments

Comments
 (0)