-
Notifications
You must be signed in to change notification settings - Fork 2.5k
/
Copy pathtest_einsum.py
103 lines (69 loc) · 2.98 KB
/
test_einsum.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
# Copyright (C) 2018-2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from pytorch_layer_test_class import PytorchLayerTest
class TestEinsumBatchMatMul(PytorchLayerTest):
    """Check conversion of aten::einsum used as a batched matrix multiplication."""

    def _prepare_input(self):
        import numpy as np
        # Two random float32 operands with a shared batch dim (5) and a
        # shared contraction dim (3): (5, 2, 3) x (5, 3, 4) -> (5, 2, 4).
        lhs = np.random.randn(5, 2, 3).astype(np.float32)
        rhs = np.random.randn(5, 3, 4).astype(np.float32)
        return (lhs, rhs)

    def create_model(self):
        import torch

        class EinsumModelBatchMatmul(torch.nn.Module):
            def forward(self, x, y):
                # Equivalent to torch.bmm(x, y), expressed through einsum.
                eqn = "bij, bjk -> bik"
                return torch.einsum(eqn, x, y)

        # No reference network: comparison is done against PyTorch itself.
        return EinsumModelBatchMatmul(), None, "aten::einsum"

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_einsum_batch_matmul(self, ie_device, precision, ir_version):
        self._test(*self.create_model(), ie_device, precision, ir_version)
class TestEinsumBatchDiagonal(PytorchLayerTest):
    """Check conversion of aten::einsum used as a batched diagonal extraction."""

    def _prepare_input(self):
        import numpy as np
        # A batch of 3 square 5x5 matrices; the model extracts each diagonal.
        return (np.random.randn(3, 5, 5).astype(np.float32),)

    def create_model(self):
        import torch

        class EinsumModelBatchDiagonal(torch.nn.Module):
            def forward(self, x):
                # Repeated index 'i' selects the diagonal of each batch entry:
                # output shape (k, i) from input shape (k, i, i).
                eqn = "kii -> ki"
                return torch.einsum(eqn, x)

        # No reference network: comparison is done against PyTorch itself.
        return EinsumModelBatchDiagonal(), None, "aten::einsum"

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.xfail(reason='OpenVINO CPU plugin does not support einsum diagonal')
    def test_einsum_batch_diagonal(self, ie_device, precision, ir_version):
        self._test(*self.create_model(), ie_device, precision, ir_version, dynamic_shapes=False)
class TestEinsumInnerProd(PytorchLayerTest):
    """Check conversion of aten::einsum used as a vector inner product."""

    def _prepare_input(self):
        import numpy as np
        # Two random float32 vectors of matching length 5.
        vec_a = np.random.randn(5).astype(np.float32)
        vec_b = np.random.randn(5).astype(np.float32)
        return (vec_a, vec_b)

    def create_model(self):
        import torch

        class EinsumModelInnerProd(torch.nn.Module):
            def forward(self, x, y):
                # No output subscripts: einsum sums over the shared index,
                # yielding a scalar dot product.
                eqn = "i,i"
                return torch.einsum(eqn, x, y)

        # No reference network: comparison is done against PyTorch itself.
        return EinsumModelInnerProd(), None, "aten::einsum"

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_einsum_inner_prod(self, ie_device, precision, ir_version):
        self._test(*self.create_model(), ie_device, precision, ir_version)
class TestEinsumTranspose(PytorchLayerTest):
    """Check conversion of aten::einsum used as a 2-D transpose."""

    def _prepare_input(self):
        import numpy as np
        # A single random 3x5 float32 matrix.
        return (np.random.randn(3, 5).astype(np.float32),)

    def create_model(self):
        import torch

        class EinsumModelTranspose(torch.nn.Module):
            def forward(self, x):
                # Swapping the output subscripts transposes the matrix:
                # (i, j) -> (j, i).
                eqn = "ij->ji"
                return torch.einsum(eqn, x)

        # No reference network: comparison is done against PyTorch itself.
        return EinsumModelTranspose(), None, "aten::einsum"

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_einsum_transpose(self, ie_device, precision, ir_version):
        self._test(*self.create_model(), ie_device, precision, ir_version)