-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest.py
106 lines (86 loc) · 3.29 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import numpy as np
from neural_network.core import Initialization
from neural_network.core.padding import Padding
from neural_network.blocks.block import Block
from neural_network.blocks.kernel import Kernel
from neural_network.configuration import Driver, GlobalConfig
from neural_network import Config
import neural_network.supply as attr
from neural_network.core.image_processor import ImageProcessor
class MockInitializer(Initialization):
    """Deterministic stand-in for a weight initializer.

    Returns fixed, hard-coded weights and biases so the forward/backward
    pass in the smoke test below is fully reproducible. The size arguments
    are ignored; the test always builds 2 filters of shape (2, 2) on one
    channel and a dense layer of 8 inputs x 2 outputs.
    """

    def kernel_filters(self, filter_number: int, filter_shape: tuple[int, int], channels_number: int) -> np.ndarray:
        # Fixed float32 filter bank for the 2-filter (2, 2) conv kernel.
        return np.array([
            [[[0.19, -0.01]],
            [[0.99, 1.21]]
            ],[
            [[0.90, -1.10 ]],
            [[0.73, -0.27]]
            ]], dtype=np.float32)

    def kernel_bias(self, number: int) -> np.ndarray:
        # One bias per filter.
        return np.array([0.1, 0.2], dtype=np.float32)

    def generate_layer_bias(self, size: int) -> np.ndarray:
        # Fixed: annotation said `list` but an ndarray is returned.
        return np.array([0.1, 0.2], dtype=np.float32)

    def generate_layer(self, input_size: int, size: int) -> np.ndarray:
        # Fixed 8x2 dense weight matrix (annotation corrected from `list`).
        return np.array(
            [[-0.19436161, 0.6982436 ],
            [ 0.35940347, 0.15284107],
            [-0.53289363, -0.532931 ],
            [-0.68461392, 0.56727765],
            [ 0.1566467, 0.32234465],
            [-0.74270731, 0.72798121],
            [ 0.51501792, -0.44564233],
            [-0.49291464, -0.49046762]]
            , dtype=np.float32)
def test_forward_pass():
    """Smoke-test one forward and one backward pass through a tiny
    conv -> flatten -> dense(softmax) network built with fixed weights,
    printing the intermediate values at every stage.
    """
    # Single 4x4 one-channel image with values 1..16 in row-major order
    # (identical to spelling the nested literal out by hand).
    image = np.arange(1.0, 17.0, dtype=np.float32).reshape(1, 4, 4, 1)

    config = Config()
    config.padding_type(Padding.SAME)
    config.loss_function(attr.CrossEntropyLoss())

    # Conv stage: 2 filters of shape (2, 2), stride 1, no bias,
    # followed by 2x2 max pooling with stride 2.
    conv = Kernel(number=2, shape=(2, 2), stride=1, bias=False)
    conv.initializer(MockInitializer())
    conv.max_pooling(shape=(2, 2), stride=2)
    conv.padding_type = Padding.SAME
    config.add(conv)

    flatten = config.flatten()
    dense = config.dense().add_layer(size=2)
    dense.activation(attr.Softmax())
    dense.initializer(MockInitializer())

    # One Adam instance shared by both trainable stages.
    optimizer = attr.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999,
                          epsilon=1e-8, weight_decay=False, amsgrad=False)
    conv.global_optimizer = optimizer
    dense.global_optimizer = optimizer

    # --- forward pass ---
    conv_out = conv.forward(image)
    print("kernel output:")
    print(conv_out)
    flat_out = flatten.forward(conv_out)
    print("flatten output:")
    print(flat_out)
    print("Output:")
    dense_out = dense.forward(flat_out)
    print(dense_out)

    # --- backward pass ---
    y_true = np.array([[1, 0]], dtype=np.float32)
    # NOTE(review): gradient is y_true - output here; confirm the sign
    # convention expected by dense.backward for CrossEntropyLoss+Softmax.
    gradient = y_true - dense_out
    print("Gradient:")
    print(gradient)
    grad_dense = dense.backward(flat_out, y_true, gradient)
    print('backpropagation dense output:')
    print(grad_dense)
    grad_flatten = flatten.backward(conv_out, y_true, grad_dense)
    print('backpropagation flatten output:')
    print(grad_flatten)
    grad_conv = conv.backward(image, y_true, grad_flatten)
    print('backpropagation kernel output:')
    print(grad_conv)
# Guard the entry point so importing this module (e.g. by a test collector)
# does not execute the smoke test as a side effect; the CPU driver is still
# configured before the test runs, preserving the original order.
if __name__ == "__main__":
    GlobalConfig().set_driver(Driver['cpu'])
    test_forward_pass()