pretest.py
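"""Evaluate the pretrained patchPredictor classifier on each cross-validation fold.

For every fold, the script loads the best saved checkpoint, freezes the weights,
runs the held-out test patches through the model, and reports the average
BCE-with-logits loss and classification accuracy.
"""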
from os.path import join
import torch
from torch.utils.data import DataLoader
from utils import k_fold_split_train_val_test, RunningAverage, try_mkdir
from model import patchPredictor
from datasets import patchPredictor_dataset
def main():
    for fold_num in [1, 2, 3, 4, 5]:
        # set directories
        root_dir = "/path/to/root/directory/"                           ## TODO: update path variable here ##
        source_dir = "/path/to/directory/containing/preprocessed/data/" ## TODO: update path variable here ##
        models_dir = join(root_dir, "models/patchPredictor/")
        checkpoint_dir = join(models_dir, f"fold{fold_num}")
        ct_subvolume_dir = join(source_dir, "pretrain_ct_patches/")
        uniform_points_dir = join(source_dir, "pretrain_uniform_points/")

        # Create the model and load the best checkpoint for this fold
        model = patchPredictor()
        model.load_best(checkpoint_dir=checkpoint_dir)
        for param in model.parameters():
            param.requires_grad = False
        model.eval()

        # Create loss function
        LossFn = torch.nn.BCEWithLogitsLoss()

        # put the model on GPU(s)
        device = 'cuda'
        model.to(device)

        # Create dataloaders
        _, _, test_inds = k_fold_split_train_val_test(68, fold_num, seed=220469)
        test_data = patchPredictor_dataset(ct_subvolume_dir=ct_subvolume_dir, uniform_points_dir=uniform_points_dir, samples_per_epoch=8192, inds=test_inds, seed=220469)
        test_loader = DataLoader(dataset=test_data, batch_size=256, shuffle=False)

        # Test this model
        test_losses = RunningAverage()
        test_acc = RunningAverage()
        for batch_idx, sample in enumerate(test_loader):
            # move the batch onto the same device as the model
            patch = sample['patch'].to(device)
            label = sample['label'].to(device)
            # forward pass
            with torch.cuda.amp.autocast():
                soft_pred = model(patch)
                # use BCE_loss
                loss = LossFn(soft_pred, label)
            # get accuracy
            acc = (torch.argmax(soft_pred.detach(), dim=1) == torch.argmax(label, dim=1)).sum() / label.size(0)
            # log
            test_losses.update(loss.item(), patch.size(0))
            test_acc.update(acc.item(), patch.size(0))

        # final results
        print(f"Fold {fold_num} model - test loss: {round(test_losses.avg, ndigits=4)} - test acc: {round(test_acc.avg, ndigits=4)}")

    '''
    # Original results
    Fold 1 model - test loss: 0.2441 - test acc: 0.8973
    Fold 2 model - test loss: 0.257 - test acc: 0.8898
    Fold 3 model - test loss: 0.2916 - test acc: 0.8757
    Fold 4 model - test loss: 0.2546 - test acc: 0.8916
    Fold 5 model - test loss: 0.297 - test acc: 0.8839
    '''
    # Romeo Dunn
    return


if __name__ == '__main__':
    main()