train.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import time
from segmentation_models import Unet
from segmentation_models.losses import bce_jaccard_loss
from segmentation_models.metrics import iou_score
from keras.layers import Input, Conv2D
from keras.losses import binary_crossentropy
from keras.models import Model
from keras.optimizers import Adam
from data_generator import DataGenerator, retrieve_ids
import utils
# training settings
img_suffix = '.tiff'
mask_suffix = '_segmentation.png'
im_size = (320, 320)
batch_size = 1
n_classes = 2
n_channels = 5
shuffle = True
nb_epochs = 100
training_ratio = 0.8
learning_rate = 2e-4
optimizer = Adam(lr=learning_rate)
loss = bce_jaccard_loss  # binary crossentropy + Jaccard (IoU) loss; alternative: loss = binary_crossentropy
params = {'img_suffix': img_suffix,
          'mask_suffix': mask_suffix,
          'dim': im_size,  # with segmentation_models, dim must be divisible by 32
          'batch_size': batch_size,
          'n_classes': n_classes,
          'n_channels': n_channels,
          'shuffle': shuffle
          }
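# optional sanity check: segmentation_models U-Net backbones downsample by a
# factor of 32, so both spatial dimensions of 'dim' must be divisible by 32
assert all(s % 32 == 0 for s in params['dim']), "'dim' must be divisible by 32"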
infos = {'loss': loss,
         'optimizer': optimizer,
         'learning_rate': learning_rate,
         'training_ratio': training_ratio,
         'batch_size': batch_size,
         'nb_epochs': nb_epochs
         }
# data sets and generators
train_data_path = './ISIC2018_data/train/'
test_data_path = './ISIC2018_data/test/'
IDs_train = retrieve_ids(train_data_path, params['img_suffix'], params['mask_suffix'])
IDs_test = retrieve_ids(test_data_path, params['img_suffix'], params['mask_suffix'])
n_train, n_test = len(IDs_train), len(IDs_test)
partition = {'train': IDs_train,
             'validation': IDs_test
             }
training_generator = DataGenerator(path=train_data_path, list_IDs=partition['train'], **params)
validation_generator = DataGenerator(path=test_data_path, list_IDs=partition['validation'], **params)
print('Number of training batches: {}'.format(len(training_generator)))
print('Number of validation batches: {}'.format(len(validation_generator)))
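# optional sanity check: peek at one batch to confirm the generators yield
# images of shape (batch_size, *dim, n_channels) with matching masks
# (this assumes DataGenerator supports keras.utils.Sequence-style indexing)
X_sample, y_sample = training_generator[0]
print('Sample batch shapes: {} {}'.format(X_sample.shape, y_sample.shape))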
# design model
base_model = Unet()
inp = Input(shape=(*params['dim'], params['n_channels']))
l1 = Conv2D(3, (1, 1))(inp)  # 1x1 convolution maps the n_channels input down to the 3 channels the Unet backbone expects
out = base_model(l1)
model = Model(inp, out, name=base_model.name)
model.compile(optimizer=optimizer, loss=loss, metrics=[iou_score])
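# optional: print the wrapper architecture to check that the 1x1 convolution
# feeds a 3-channel tensor into the pretrained U-Net
model.summary()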
# train model on dataset
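# note: with batch_size = 1 this gives n_train // nb_epochs steps per epoch,
# so each Keras epoch covers only a slice of the training data and the
# nb_epochs epochs together amount to roughly one pass over the full set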
steps_per_epoch = (n_train * batch_size) // nb_epochs
validation_steps = 50
print('\nTraining information\n')
print(infos)
history = model.fit_generator(generator=training_generator,
                              steps_per_epoch=steps_per_epoch,
                              epochs=nb_epochs - 1,
                              validation_data=validation_generator,
                              validation_steps=validation_steps
                              )
# for the last epoch, we evaluate the metrics on the full validation set
t1 = time.time()
history2 = model.fit_generator(generator=training_generator,
                               steps_per_epoch=steps_per_epoch,
                               epochs=1,
                               validation_data=validation_generator,
                               validation_steps=n_test  # validate on the full test set
                               )
t2 = time.time()
print('Last epoch computed in {} seconds.'.format(int(t2 - t1)))
# save the network
networks_name = 'segmentation_models'
utils.save_network(networks_name,
                   model,
                   history,
                   infos
                   )
# qualitative test on a few validation images
some_tests = np.asarray(partition['validation'])[np.random.permutation(n_test)[:9]]
im_names1 = some_tests[:3]
im_names2 = some_tests[3:6]
im_names3 = some_tests[6:9]
images_lists = [im_names1, im_names2, im_names3]
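# note: the threshold presumably binarizes the predicted probability maps
# into the bin_predictions masks returned by display_some_results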
for im_names in images_lists:
    predictions, bin_predictions = utils.display_some_results(model=model,
                                                              im_names=im_names,
                                                              threshold=0.3,
                                                              dim=im_size)
# save the network again, this time with the last-epoch history
utils.save_network(networks_name,
                   model,
                   history2,
                   infos
                   )