
Commit 09a388f

cavusmustafa authored Mar 21, 2024 (with github-actions[bot], ynimmaga, mvafin, and suryasidd)
Torch Compile - New Op Support (#23310)
New op support for:
- torch.export updates
- benchmarking model support
- chatglm2 support

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: ynimmaga <yamini.nimmagadda@intel.com>
Co-authored-by: Maxim Vafin <maxim.vafin@intel.com>
Co-authored-by: suryasidd <surya.siddharth.pemmaraju@intel.com>
1 parent 82021a3 commit 09a388f
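
For context, a minimal usage sketch of the feature this commit extends: compiling a model through the OpenVINO torch.compile backend, which is where the op support table changed below is consulted during graph partitioning. The snippet is not part of this diff; it assumes the OpenVINO Python package's documented torch.compile integration (import openvino.torch, backend="openvino") is installed, and the toy model is made up for illustration.

import torch
import openvino.torch  # noqa: F401  -- registers the "openvino" backend for torch.compile

# Hypothetical toy model; its layers lower to aten ops listed in op_support.py
# (e.g. torch.ops.aten.addmm.default, torch.ops.aten.silu.default).
model = torch.nn.Sequential(
    torch.nn.Linear(16, 32),
    torch.nn.SiLU(),
    torch.nn.Linear(32, 4),
)

compiled = torch.compile(model, backend="openvino")
print(compiled(torch.randn(2, 16)).shape)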


57 files changed: +759 −75 lines changed

src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/op_support.py

+122 −9
@@ -29,104 +29,217 @@ class OperatorSupport(OperatorSupport):
     def __init__(self, options):
         support_dict = {
             "_operator.getitem": None,
+            "torch.ops.aten._adaptive_avg_pool1d.default": None,
             "torch.ops.aten._adaptive_avg_pool2d.default": None,
+            "torch.ops.aten._adaptive_avg_pool3d.default": None,
+            "torch.ops.aten._convolution.default": None,
+            "torch.ops.aten._embedding_bag.default": None,
+            "torch.ops.aten._fake_quantize_per_tensor_affine_cachemask_tensor_qparams.default": None,
+            "torch.ops.aten._local_scalar_dense.default": None,
             "torch.ops.aten._log_softmax.default": None,
+            "torch.ops.aten._native_batch_norm_legit.default": None,
+            "torch.ops.aten._native_batch_norm_legit.no_stats": None,
+            "torch.ops.aten._native_batch_norm_legit_functional.default": None,
+            "torch.ops.aten._native_batch_norm_legit_no_training.default": None,
+            "torch.ops.aten._scaled_dot_product_flash_attention.default": None,
+            "torch.ops.aten._scaled_dot_product_flash_attention_for_cpu.default": None,
             "torch.ops.aten._softmax.default": None,
             "torch.ops.aten._to_copy.default": None,
             "torch.ops.aten._unsafe_view.default": None,
-            "torch.ops.aten._unsafe_view.default": None,
+            "torch.ops.aten.abs.default": None,
+            "torch.ops.aten.acos.default": None,
+            "torch.ops.aten.acosh.default": None,
+            "torch.ops.aten.adaptive_max_pool1d.default": None,
+            "torch.ops.aten.adaptive_max_pool2d.default": None,
+            "torch.ops.aten.adaptive_max_pool3d.default": None,
             "torch.ops.aten.add.Scalar": None,
             "torch.ops.aten.add.Tensor": None,
             "torch.ops.aten.add_.Tensor": None,
+            "torch.ops.aten.addcmul.default": None,
             "torch.ops.aten.addmm.default": None,
+            "torch.ops.aten.alias.default": None,
+            "torch.ops.aten.all.default": None,
             "torch.ops.aten.amax.default": None,
-            "torch.ops.aten.arange.start": None,
+            "torch.ops.aten.amin.default": None,
+            "torch.ops.aten.any.default": None,
+            "torch.ops.aten.any.dim": None,
             "torch.ops.aten.arange.default": None,
+            "torch.ops.aten.arange.start": None,
+            "torch.ops.aten.arange.start_step": None,
             "torch.ops.aten.argmax.default": None,
+            "torch.ops.aten.argmin.default": None,
+            "torch.ops.aten.as_strided.default": None,
+            "torch.ops.aten.asin.default": None,
+            "torch.ops.aten.asinh.default": None,
+            "torch.ops.aten.asinh.default": None,
+            "torch.ops.aten.atanh.default": None,
             "torch.ops.aten.avg_pool2d.default": None,
+            "torch.ops.aten.avg_pool3d.default": None,
             "torch.ops.aten.baddbmm.default": None,
             "torch.ops.aten.bitwise_and.Tensor": None,
+            "torch.ops.aten.bitwise_not.default": None,
+            "torch.ops.aten.bitwise_or.Tensor": None,
+            "torch.ops.aten.bitwise_xor.Tensor": None,
             "torch.ops.aten.bmm.default": None,
             "torch.ops.aten.cat.default": None,
+            "torch.ops.aten.ceil.default": None,
+            "torch.ops.aten.clamp.default": None,
+            "torch.ops.aten.clamp_max.default": None,
+            "torch.ops.aten.clamp_max.Tensor": None,
             "torch.ops.aten.clamp_min.default": None,
+            "torch.ops.aten.clamp_min.Tensor": None,
             "torch.ops.aten.clone.default": None,
+            "torch.ops.aten.constant_pad_nd.default": None,
             "torch.ops.aten.convolution.default": None,
+            "torch.ops.aten.copy.default": None,
             "torch.ops.aten.copy_.default": None,
             "torch.ops.aten.cos.default": None,
+            "torch.ops.aten.cosh.default": None,
             "torch.ops.aten.cumsum.default": None,
             "torch.ops.aten.detach.default": None,
+            "torch.ops.aten.detach_.default": None,
             "torch.ops.aten.div.Scalar": None,
             "torch.ops.aten.div.Tensor": None,
+            "torch.ops.aten.div.Tensor_mode": None,
+            "torch.ops.aten.div_.Tensor": None,
+            "torch.ops.aten.elu.default": None,
+            "torch.ops.aten.elu_.default": None,
             "torch.ops.aten.embedding.default": None,
             "torch.ops.aten.empty.memory_format": None,
-            "torch.ops.aten.erf.default": None,
             "torch.ops.aten.eq.Scalar": None,
             "torch.ops.aten.eq.Tensor": None,
+            "torch.ops.aten.erf.default": None,
             "torch.ops.aten.exp.default": None,
             "torch.ops.aten.expand.default": None,
+            "torch.ops.aten.fake_quantize_per_channel_affine_cachemask.default": None,
             "torch.ops.aten.fill.Scalar": None,
+            "torch.ops.aten.fill_.Scalar": None,
+            "torch.ops.aten.fill.Tensor": None,
+            "torch.ops.aten.fill_.Tensor": None,
+            "torch.ops.aten.flip.default": None,
+            "torch.ops.aten.floor.default": None,
+            "torch.ops.aten.floor.default": None,
+            "torch.ops.aten.fmod.Scalar": None,
+            "torch.ops.aten.fmod.Tensor": None,
             "torch.ops.aten.full.default": None,
+            "torch.ops.aten.full.names": None,
+            "torch.ops.aten.full_like.default": None,
             "torch.ops.aten.gather.default": None,
+            "torch.ops.aten.ge.Scalar": None,
+            "torch.ops.aten.ge.Tensor": None,
             "torch.ops.aten.gelu.default": None,
+            "torch.ops.aten.glu.default": None,
+            "torch.ops.aten.grid_sampler_2d.default": None,
             "torch.ops.aten.gt.Scalar": None,
+            "torch.ops.aten.gt.Tensor": None,
             "torch.ops.aten.hardsigmoid.default": None,
+            "torch.ops.aten.hardswish.default": None,
             "torch.ops.aten.hardswish_.default": None,
+            "torch.ops.aten.hardtanh.default": None,
             "torch.ops.aten.hardtanh_.default": None,
             "torch.ops.aten.index.Tensor": None,
+            "torch.ops.aten.index_select.default": None,
+            "torch.ops.aten.isfinite.default": None,
+            "torch.ops.aten.isinf.default": None,
+            "torch.ops.aten.isnan.default": None,
+            "torch.ops.aten.le.Scalar": None,
+            "torch.ops.aten.le.Tensor": None,
+            "torch.ops.aten.leaky_relu.default": None,
             "torch.ops.aten.leaky_relu_.default": None,
             "torch.ops.aten.lift_fresh_copy.default": None,
             "torch.ops.aten.linalg_vector_norm.default": None,
-            "torch.ops.aten.lt.Tensor": None,
             "torch.ops.aten.log.default": None,
             "torch.ops.aten.log_sigmoid_forward.default": None,
+            "torch.ops.aten.log10.default": None,
+            "torch.ops.aten.log1p.default": None,
+            "torch.ops.aten.log2.default": None,
+            "torch.ops.aten.logical_not.default": None,
             "torch.ops.aten.logsumexp.default": None,
-            "torch.ops.aten.masked_fill_.Scalar": None,
+            "torch.ops.aten.lt.Scalar": None,
+            "torch.ops.aten.lt.Tensor": None,
+            "torch.ops.aten.masked_fill.Scalar": None,
             "torch.ops.aten.masked_fill.Tensor": None,
+            "torch.ops.aten.masked_fill_.Scalar": None,
+            "torch.ops.aten.masked_fill_.Tensor": None,
+            "torch.ops.aten.max.default": None,
             "torch.ops.aten.max.dim": None,
             "torch.ops.aten.max_pool2d_with_indices.default": None,
+            "torch.ops.aten.max_pool3d_with_indices.default": None,
+            "torch.ops.aten.maximum.default": None,
+            "torch.ops.aten.mean.default": None,
             "torch.ops.aten.mean.dim": None,
+            "torch.ops.aten.min.default": None,
+            "torch.ops.aten.min.dim": None,
+            "torch.ops.aten.minimum.default": None,
             "torch.ops.aten.mm.default": None,
             "torch.ops.aten.mul.Scalar": None,
             "torch.ops.aten.mul.Tensor": None,
             "torch.ops.aten.native_batch_norm.default": None,
-            "torch.ops.aten._native_batch_norm_legit.default": None,
-            "torch.ops.aten._native_batch_norm_legit_no_training.default": None,
+            "torch.ops.aten.native_dropout.default": None,
             "torch.ops.aten.native_group_norm.default": None,
             "torch.ops.aten.native_layer_norm.default": None,
-            "torch.ops.aten.new_full.default": None,
+            "torch.ops.aten.ne.Scalar": None,
+            "torch.ops.aten.ne.Tensor": None,
             "torch.ops.aten.neg.default": None,
+            "torch.ops.aten.new_full.default": None,
             "torch.ops.aten.new_ones.default": None,
+            "torch.ops.aten.new_zeros.default": None,
+            "torch.ops.aten.ones.default": None,
             "torch.ops.aten.permute.default": None,
+            "torch.ops.aten.pow.Scalar": None,
             "torch.ops.aten.pow.Tensor_Scalar": None,
+            "torch.ops.aten.pow.Tensor_Tensor": None,
+            "torch.ops.aten.rand.default": None,
+            "torch.ops.aten.reciprocal.default": None,
             "torch.ops.aten.relu.default": None,
             "torch.ops.aten.relu_.default": None,
+            "torch.ops.aten.repeat.default": None,
+            "torch.ops.aten.roll.default": None,
             "torch.ops.aten.rsqrt.default": None,
             "torch.ops.aten.rsub.Scalar": None,
-            "torch.ops.aten._scaled_dot_product_flash_attention.default": None,
+            "torch.ops.aten.rsub.Tensor": None,
             "torch.ops.aten.scalar_tensor.default": None,
+            "torch.ops.aten.scatter.src": None,
+            "torch.ops.aten.scatter.value": None,
             "torch.ops.aten.select.int": None,
+            "torch.ops.aten.select_scatter.default": None,
             "torch.ops.aten.sigmoid.default": None,
+            "torch.ops.aten.sign.default": None,
             "torch.ops.aten.silu.default": None,
             "torch.ops.aten.silu_.default": None,
             "torch.ops.aten.sin.default": None,
+            "torch.ops.aten.sinh.default": None,
             "torch.ops.aten.slice.Tensor": None,
+            "torch.ops.aten.slice_scatter.default": None,
+            "torch.ops.aten.sort.default": None,
             "torch.ops.aten.split.Tensor": None,
+            "torch.ops.aten.split_with_sizes.default": None,
+            "torch.ops.aten.sqrt.default": None,
             "torch.ops.aten.squeeze.dim": None,
             "torch.ops.aten.squeeze.dims": None,
             "torch.ops.aten.stack.default": None,
             "torch.ops.aten.sub.default": None,
             "torch.ops.aten.sub.Tensor": None,
+            "torch.ops.aten.sum.default": None,
             "torch.ops.aten.sum.dim_IntList": None,
             "torch.ops.aten.t.default": None,
+            "torch.ops.aten.tan.default": None,
             "torch.ops.aten.tanh.default": None,
+            "torch.ops.aten.topk.default": None,
             "torch.ops.aten.transpose.int": None,
+            "torch.ops.aten.tril.default": None,
+            "torch.ops.aten.tril_.default": None,
             "torch.ops.aten.unbind.int": None,
+            "torch.ops.aten.unfold.default": None,
             "torch.ops.aten.unsqueeze.default": None,
             "torch.ops.aten.upsample_nearest2d.default": None,
+            "torch.ops.aten.var.correction": None,
             "torch.ops.aten.var_mean.correction": None,
             "torch.ops.aten.view.default": None,
             "torch.ops.aten.where.self": None,
             "torch.ops.aten.zeros_like.default": None,
+            "torch.ops.torchvision.deform_conv2d.default": None,
+            "torch.ops.torchvision.roi_align.default": None,
         }
 
         for op in _get_disabled_ops(options):
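
The dictionary above maps qualified aten op names to dtype constraints, where a value of None means the op is accepted for any dtype, apparently following the convention of the torch.fx OperatorSupport base class that this class extends. Below is a minimal sketch (not the OpenVINO source) of how such a table is typically consulted per FX node during partitioning; demo_support_dict, DemoOperatorSupport, and the hand-built graph are illustrative assumptions, and the real class additionally filters out ops disabled via _get_disabled_ops(options), as the loop above suggests.

import torch
from torch.fx.passes.operator_support import OperatorSupport

# Hypothetical, trimmed-down table in the same shape as the support_dict above.
demo_support_dict = {
    "_operator.getitem": None,
    "torch.ops.aten.add.Tensor": None,
    "torch.ops.aten.topk.default": None,  # one of the ops this commit adds
}

class DemoOperatorSupport(OperatorSupport):
    def __init__(self):
        # None values mean "no dtype restriction" in torch.fx's convention.
        super().__init__(demo_support_dict)

# Build a tiny aten-level FX graph by hand and query support node by node.
graph = torch.fx.Graph()
x = graph.placeholder("x")
added = graph.call_function(torch.ops.aten.add.Tensor, (x, x))
values = graph.call_function(torch.ops.aten.topk.default, (added, 3))
graph.output(values)

support = DemoOperatorSupport()
for node in graph.nodes:
    if node.op == "call_function":
        # True when the node's qualified target name is a key of the table.
        print(node.target, support.is_node_supported({}, node))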

src/frontends/pytorch/src/input_model.cpp

+1 −1
@@ -24,7 +24,7 @@ InputModel::InputModel(const std::shared_ptr<TorchDecoder>& model_decoder) : m_m
     const auto& outputs = m_model_decoder->outputs();
     for (size_t i = 0; i < outputs.size(); ++i) {
         auto out_place = std::make_shared<pytorch::Place>(*this, outputs[i]);
-        m_name_to_place.emplace(std::to_string(inputs[i]), std::dynamic_pointer_cast<frontend::Place>(out_place));
+        m_name_to_place.emplace(std::to_string(outputs[i]), std::dynamic_pointer_cast<frontend::Place>(out_place));
         for (const auto& name : out_place->get_names()) {
             m_name_to_place.emplace(name, std::dynamic_pointer_cast<frontend::Place>(out_place));
         }
