test_cross.py
# Copyright (C) 2018-2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
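
"""PyTorch layer tests for cross-product operators.

Checks conversion of torch.linalg.cross (aten::linalg_cross) and torch.cross
(aten::cross), with and without an explicit dim and an out tensor, in float32
and float64.
"""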
import pytest

from pytorch_layer_test_class import PytorchLayerTest


class TestLinalgCross(PytorchLayerTest):
    def _prepare_input(self, x_shape, y_shape, out, dtype):
        import numpy as np
        x = np.random.randn(*x_shape).astype(dtype)
        y = np.random.randn(*y_shape).astype(dtype)
        if not out:
            return (x, y)
        # The out tensor takes the broadcast shape of x and y; an elementwise
        # maximum works here because the test shapes only mix equal dims and 1s.
        return (x, y, np.zeros(np.maximum(np.array(x_shape), np.array(y_shape)).tolist(), dtype=dtype))

    def create_model(self, dim, out):
        import torch

        class aten_linalg_cross(torch.nn.Module):
            def __init__(self, dim, out):
                super().__init__()
                # Select the forward variant matching the tested signature:
                # with/without an explicit dim and with/without an out tensor.
                if dim is None:
                    self.forward = self.forward_no_dim_no_out if not out else self.forward_no_dim_out
                elif out:
                    self.forward = self.forward_out
                self.dim = dim

            def forward(self, x, y):
                return torch.linalg.cross(x, y, dim=self.dim)

            def forward_out(self, x, y, out):
                return torch.linalg.cross(x, y, dim=self.dim, out=out), out

            def forward_no_dim_out(self, x, y, out):
                return torch.linalg.cross(x, y, out=out), out

            def forward_no_dim_no_out(self, x, y):
                return torch.linalg.cross(x, y)

        ref_net = None
        return aten_linalg_cross(dim, out), ref_net, "aten::linalg_cross"

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.parametrize("x_shape,y_shape,dim", [
        ((4, 3), (4, 3), None),
        ((1, 3), (4, 3), -1),
        ((4, 3), (1, 3), 1),
        ((3, 5), (3, 5), 0),
        ((2, 3, 4), (2, 3, 4), 1),
    ])
    @pytest.mark.parametrize("dtype", ["float32", "float64"])
    @pytest.mark.parametrize("out", [True, False])
    def test_linalg_cross(self, x_shape, y_shape, dim, out, dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(dim, out), ie_device, precision, ir_version,
                   use_convert_model=True,
                   kwargs_to_prepare_input={"x_shape": x_shape,
                                            "y_shape": y_shape,
                                            "out": out,
                                            "dtype": dtype},
                   # Dynamic shapes are not exercised on GPU.
                   dynamic_shapes=ie_device != "GPU")
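
# torch.cross differs from torch.linalg.cross: when dim is not given it uses
# the first dimension of size 3 (legacy behaviour) rather than the last one,
# which is presumably why the dim=None cases below reshape the input to pin
# a static shape for that lookup.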


class TestCross(PytorchLayerTest):
    def _prepare_input(self, x_shape, y_shape, out, dtype):
        import numpy as np
        x = np.random.randn(*x_shape).astype(dtype)
        y = np.random.randn(*y_shape).astype(dtype)
        if not out:
            return (x, y)
        # As above, the out tensor gets the broadcast shape of x and y.
        return (x, y, np.zeros(np.maximum(np.array(x_shape), np.array(y_shape)).tolist(), dtype=dtype))

    def create_model(self, dim, out, shape):
        import torch

        class aten_cross(torch.nn.Module):
            def __init__(self, dim, out, shape):
                super().__init__()
                # Select the forward variant matching the tested signature.
                if dim is None:
                    self.forward = self.forward_no_dim_no_out if not out else self.forward_no_dim_out
                elif out:
                    self.forward = self.forward_out
                self.dim = dim
                self.shape = shape

            def forward(self, x, y):
                return torch.cross(x, y, dim=self.dim)

            def forward_out(self, x, y, out):
                return torch.cross(x, y, dim=self.dim, out=out), out

            def forward_no_dim_out(self, x, y, out):
                # No-op reshape that pins a static shape (see note above).
                x = torch.reshape(x, self.shape)
                return torch.cross(x, y, out=out), out

            def forward_no_dim_no_out(self, x, y):
                x = torch.reshape(x, self.shape)
                return torch.cross(x, y)

        ref_net = None
        return aten_cross(dim, out, shape), ref_net, "aten::cross"

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.parametrize("x_shape,y_shape,dim", [
        ((1, 3), (4, 3), -1),
        ((4, 3), (1, 3), 1),
        ((3, 5), (3, 5), 0),
        ((2, 3, 4), (2, 3, 4), 1),
        ((3, 1), (3, 4), None),
        ((4, 3), (4, 3), None),
        ((2, 3, 4), (2, 3, 4), None),
    ])
    @pytest.mark.parametrize("out", [True, False])
    @pytest.mark.parametrize("dtype", ["float32", "float64"])
    def test_cross(self, x_shape, y_shape, dim, out, dtype, ie_device, precision, ir_version):
        self._test(*self.create_model(dim, out, x_shape), ie_device, precision, ir_version,
                   use_convert_model=True,
                   kwargs_to_prepare_input={"x_shape": x_shape,
                                            "y_shape": y_shape,
                                            "out": out,
                                            "dtype": dtype},
                   dynamic_shapes=ie_device != "GPU")
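
# A minimal standalone sketch of the behaviour under test (assumes a local
# PyTorch install; shapes and names here are illustrative, not from the suite):
#
#   import torch
#   a = torch.randn(4, 3)
#   b = torch.randn(1, 3)
#   # linalg.cross defaults to dim=-1 and broadcasts b to (4, 3).
#   c = torch.linalg.cross(a, b)
#   assert c.shape == (4, 3)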