Commit 6cda7ca

Author: denisovg
Committed: Sep 19, 2015

Implemented a script MS_AS_NeuroProofSegmentation2D.py, which performs NeuroProof-style 2D segmentation. Updated the script MS_AS_GalaSegmentation2D.py, which performs GALA-style 2D segmentation. Added or modified a number of utility scripts.

1 parent: 328df66

14 files changed: +710 -64 lines
 
MS_AS_GalaSegmentation2D.py

+192 (new file)

#! /usr/local/python-2.7.6/bin/python
#
# Copyright (C) 2015 by Howard Hughes Medical Institute.
#
# Purpose: perform 2D segmentation using GALA,
#          then post-process the results
#
# ------------------------- imports -------------------------

#from __future__ import absolute_import
#from __future__ import print_function
import os, sys, re, optparse
from gala import imio, classify, features, agglo, evaluate as ev
from six.moves import map
import numpy as np
import h5py
from PIL import Image
from skimage.color import label2rgb

import MS_LIB_Dict
import MS_LIB_Options
import MS_LIB_IO

ms_home = os.environ['MS_HOME']  # where source code is located
ms_data = os.environ['MS_DATA']  # dir for initial input data and final output
ms_temp = os.environ['MS_TEMP']  # dir for intermediate data files
gala_home = os.environ['GALA_HOME']        # where GALA code is located
ilastik_home = os.environ['ILASTIK_HOME']  # where Ilastik code is located
neuroproof_2d_home = os.environ['NEUROPROOF_2D_HOME']

# -----------------------------------------------------------------------------

def perform_segmentation(training_raw, groundtruth_seg_dir, production_raw,
                         options):
    raw_files = os.listdir(os.path.join(ms_data, training_raw))
    labels = os.listdir(os.path.join(ms_data, groundtruth_seg_dir))

    # Assume only one raw and one labels file for now
    command = "MS_UT_CreateH5LabelsForIlastik.py " + groundtruth_seg_dir + \
              " groundtruth.h5"
    print "\nRunning the command:", command
    os.system(command)

    # Generate the Ilastik project file
    command = "python " + os.path.join(ilastik_home, "ilastik", "bin", "train_headless.py") + \
              " training.ilp '" + training_raw + "/*.png' groundtruth.h5/main"
    print "\nRunning the command:", command
    os.system(command)

    # Remove old files, if they exist
    if not os.path.isdir('seg_data'):
        print "\nRunning the command: mkdir seg_data"
        os.mkdir('seg_data')
    else:
        print "\nRunning the command: rm -rf seg_data/*"
        os.system("rm -rf seg_data/*")
    if os.path.isfile('STACKED_prediction.h5'):
        command = "rm -f STACKED_prediction.h5"
        print "\nRunning the command:", command
        os.system(command)

    # Produce the probabilities and oversegmentation files from the inputs
    if not os.path.islink('gala-segmentation-pipeline'):
        os.symlink(os.path.join(gala_home, 'gala', 'bin',
                                'gala-segmentation-pipeline'),
                   'gala-segmentation-pipeline')
    command = "python gala-segmentation-pipeline " + \
              " . -I '" + training_raw + "/*.png' --ilp-file training.ilp " + \
              " --enable-gen-supervoxels --enable-gen-agglomeration " + \
              " --enable-gen-pixel --seed-size 5 --segmentation-thresholds 0.0"
    print "\nRunning the command:", command
    os.system(command)

    # Put the training probabilities in the right format
    command = "MS_UT_ProbsNF2GALA.py STACKED_prediction.h5 probabilities_training.h5"
    print "\nRunning the command:", command
    os.system(command)

    # Produce the training oversegmentation file
    command = "python " + os.path.join(neuroproof_2d_home, "generate_watershed_dan.py") + \
              " probabilities_training.h5 oversegmentation_training.h5 0 "
    print "\nRunning the command:", command
    os.system(command)

    # Create the groundtruth segmentation labels file
    command = "MS_UT_CreateH5Groundtruth.py " + groundtruth_seg_dir + " seg_labels_file.h5"
    print "\nRunning the command:", command
    os.system(command)

    # Read the training data into gala
    print "\nReading training data into gala..."
    gt_train, pr_train, ws_train = (map(imio.read_h5_stack,
        ['seg_labels_file.h5', 'probabilities_training.h5',
         'oversegmentation_training.h5']))
    # print "gt_train=", gt_train

    # Create a feature manager
    fm = features.moments.Manager()
    fh = features.histogram.Manager()
    fc = features.base.Composite(children=[fm, fh])

    # Create a graph and obtain a training dataset
    print "\nRunning the command: g_train = agglo.Rag(ws_train, pr_train, feature_manager=fc) ..."
    g_train = agglo.Rag(ws_train, pr_train, feature_manager=fc)
    print "\nRunning the command: (X, y, w, merges) = g_train.learn_agglomerate(gt_train, fc)[0] ..."
    print "fc=", fc
    (X, y, w, merges) = g_train.learn_agglomerate(gt_train, fc)[0]
    y = y[:, 0]  # gala has 3 truth labeling schemes, pick the first one
    print((X.shape, y.shape))  # standard scikit-learn input format

    # Train a classifier, scikit-learn syntax
    rf = classify.DefaultRandomForest().fit(X, y)
    # A policy is the composition of a feature map and a classifier
    learned_policy = agglo.classifier_probability(fc, rf)

    if not os.path.isdir('seg_data'):
        print "\nRunning the command: mkdir seg_data"
        os.mkdir('seg_data')
    else:
        command = "rm -f seg_data/*"
        print "\nRunning the command:", command
        os.system(command)
    if os.path.isfile('STACKED_prediction.h5'):
        command = "rm -f STACKED_prediction.h5"
        print "\nRunning the command:", command
        os.system(command)

    # Produce the probabilities and oversegmentation files from the inputs
    command = "python " + os.path.join(gala_home, "gala", "bin", "gala-segmentation-pipeline") + \
              " . -I '" + production_raw + "/*.png' --ilp-file training.ilp " + \
              " --enable-gen-supervoxels --enable-gen-agglomeration " + \
              " --enable-gen-pixel --seed-size 5 --segmentation-thresholds 0.0"
    print "\nRunning the command:", command
    os.system(command)

    # Put the production probabilities in the right format
    command = "MS_UT_ProbsNF2GALA.py STACKED_prediction.h5 probabilities_production.h5"
    print "\nRunning the command:", command
    os.system(command)

    # Produce the production oversegmentation file
    command = "python " + os.path.join(neuroproof_2d_home, "generate_watershed_dan.py") + \
              " probabilities_production.h5 oversegmentation_production.h5 0 "
    print "\nRunning the command:", command
    os.system(command)

    # Get the production data and make a RAG with the trained policy
    pr_test, ws_test = (map(imio.read_h5_stack,
        ['probabilities_production.h5', 'oversegmentation_production.h5']))
    g_test = agglo.Rag(ws_test, pr_test, learned_policy, feature_manager=fc)
    g_test.agglomerate(0.5)  # best expected segmentation
    seg_production = g_test.get_segmentation()

    # Output the segmentation file
    print "seg_production=", seg_production
    print "seg_production.shape=", seg_production.shape
    f = h5py.File('segmentation_production.h5', 'w')
    f.create_dataset('stack', seg_production.shape, data=seg_production)
    # outdata = label2rgb(seg_production)
    # img = Image.fromarray(np.uint8(outdata/np.max(outdata)*255.))
    # img.save('segmentation_production_RGB.png')

# -----------------------------------------------------------------------------

def output_results(ws_test, gt_test, seg_test1):
    results = np.vstack((
        ev.split_vi(ws_test, gt_test),
        ev.split_vi(seg_test1, gt_test),
    ))
    print(results)

# -----------------------------------------------------------------------------

if __name__ == "__main__":

    usage = "Usage: \n\
    %prog <raw_train_data> <gt_seg_labels> <raw_data> [options (-h to list)]"

    parser = optparse.OptionParser(usage=usage, version="%prog")
    parser = MS_LIB_Options.GalaSegmentation2D_command_line_parser(parser)
    (options, args) = parser.parse_args()

    if len(args) == 3:
        training_raw = args[0]
        gt_seg_labels = args[1]
        production_raw = args[2]
        perform_segmentation(training_raw, gt_seg_labels,
                             production_raw, options)
    else:
        parser.print_usage()
        sys.exit(2)
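
A hypothetical invocation, matching the usage string above (the directory names are placeholders; the script also expects the MS_HOME, MS_DATA, MS_TEMP, GALA_HOME, ILASTIK_HOME and NEUROPROOF_2D_HOME environment variables to be set):

    MS_AS_GalaSegmentation2D.py raw_training_dir groundtruth_labels_dir raw_production_dir
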
MS_AS_NeuroProofSegmentation2D.py

+292 (new file)

#! /usr/local/python-2.7.6/bin/python
#
# Copyright (C) 2015 by Howard Hughes Medical Institute.
#
# Purpose: perform 2D segmentation using NeuroProof,
#          then post-process the results
#
# ------------------------- imports -------------------------

import os, sys, re, optparse
from gala import imio, classify, features, agglo, evaluate as ev
from six.moves import map
import numpy as np
import h5py
from PIL import Image
from skimage.color import label2rgb
import ntpath

import MS_LIB_Dict
import MS_LIB_Options
import MS_LIB_IO

ms_home = os.environ['MS_HOME']  # where source code is located
ms_data = os.environ['MS_DATA']  # dir for initial input data and final output
ms_temp = os.environ['MS_TEMP']  # dir for intermediate data files
gala_home = os.environ['GALA_HOME']        # where GALA code is located
ilastik_home = os.environ['ILASTIK_HOME']  # where Ilastik code is located
ilastik_home2 = os.environ['ILASTIK_HOME2']
neuroproof_2d_home = os.environ['NEUROPROOF_2D_HOME']

# -----------------------------------------------------------------------------

def extract_file_list_from_directory(input_dir, ind_min, ind_max):
    file_list = []
    print "input_dir=", input_dir
    files = sorted(os.listdir(input_dir))
    for i in range(0, len(files)):
        if i >= ind_min and i <= min(ind_max, len(files)):
            file_path = os.path.join(ms_data, input_dir, files[i])
            file_list.append(file_path)
    return file_list

# -----------------------------------------------------------------------------

def create_dir(dir_path):
    if os.path.isdir(dir_path):
        command = "rm -rf " + dir_path + "/*"
        print "\nRunning the command:", command, "\n"
        os.system(command)
    else:
        os.mkdir(dir_path)

# -----------------------------------------------------------------------------

def remove_dir(dir_path):
    command = "rm -rf " + dir_path
    print "\nRunning the command:", command, "\n"
    os.system(command)

# -----------------------------------------------------------------------------

def create_mosaic_file(image_file_list, mosaic_file):
    list_len = len(image_file_list)
    for i in range(0, list_len):
        data1 = np.asarray(Image.open(image_file_list[i]))
        if i == 0:
            shape1 = data1.shape
            mosaic_data = np.zeros([shape1[0]*list_len, shape1[1]], dtype=data1.dtype)
        mosaic_data[(i*shape1[0]):((i+1)*shape1[0]), :] = data1
    img = Image.fromarray(mosaic_data)
    img.save(mosaic_file)
    print "image_file_list=", image_file_list, " mosaic_file=", mosaic_file

# -----------------------------------------------------------------------------

def create_ilastik_training_file(raw_file_list, gt_file_list):
    create_dir("tmp_raw_mosaic_dir")
    create_dir("tmp_gt_mosaic_dir")
    create_mosaic_file(raw_file_list, os.path.join("tmp_raw_mosaic_dir", "mosaic_raw.png"))
    create_mosaic_file( gt_file_list, os.path.join("tmp_gt_mosaic_dir",  "mosaic_gt.png"))

    # Create only one raw and one labels file
    command = "MS_UT_Im2H5.py " + os.path.join("tmp_gt_mosaic_dir", "mosaic_gt.png") + \
              " groundtruth.h5"
    print "\nRunning the command:", command, "\n"
    os.system(command)

    # Generate the Ilastik project file
    training_ilp = "training.ilp"
    python_script = os.path.join(ilastik_home, "ilastik", "bin", "train_headless.py")
    # python_script = os.path.join(ilastik_home2, "src", "ilastik", "ilastik", "bin", "train_headless.py")
    command = "python " + python_script + \
              " " + training_ilp + " 'tmp_raw_mosaic_dir/*.png' groundtruth.h5/stack"
    print "\nRunning the command:", command, "\n"
    os.system(command)

    # remove_dir("tmp_raw_mosaic_dir")
    # remove_dir("tmp_gt_mosaic_dir")

    return training_ilp

# -----------------------------------------------------------------------------

def populate_one_file(training_ilp, raw_file, target_dir):
    # Populate temporary dirs
    create_dir("tmp_raw")
    command = "cp " + raw_file + " tmp_raw"
    print "\nRunning the command:", command, "\n"
    os.system(command)

    # Remove old files, if they exist
    if not os.path.isdir('seg_data'):
        print "\nRunning the command: mkdir seg_data"
        os.mkdir('seg_data')
    else:
        print "\nRunning the command: rm -rf seg_data/*"
        os.system("rm -rf seg_data/*")
    if os.path.isfile('STACKED_prediction.h5'):
        command = "rm -f STACKED_prediction.h5"
        print "\nRunning the command:", command, "\n"
        os.system(command)

    # Produce the probabilities and oversegmentation files from the inputs
    if not os.path.islink('gala-segmentation-pipeline'):
        os.symlink(os.path.join(gala_home, 'gala', 'bin',
                                'gala-segmentation-pipeline'),
                   'gala-segmentation-pipeline')
    command = "python gala-segmentation-pipeline " + \
              " . -I 'tmp_raw/*.png' --ilp-file " + training_ilp + " " + \
              " --enable-gen-supervoxels --enable-gen-agglomeration " + \
              " --enable-gen-pixel --seed-size 5 --segmentation-thresholds 0.0"
    print "\nRunning the command:", command, "\n"
    os.system(command)
    # remove_dir("tmp_raw")

    # Populate the image file
    base_name = ntpath.basename(raw_file).split(".")[0]
    suffix = ntpath.basename(raw_file).split(".")[1]
    img_file = os.path.join(target_dir, "img." + base_name[1:] + "." + suffix)
    command = "cp " + raw_file + " " + img_file
    print "\nRunning the command:", command, "\n"
    os.system(command)

    # Populate the predictions file
    base_name = ntpath.basename(raw_file).split(".")[0]
    print "target_dir=", target_dir, " basename=", base_name

    probs_h5 = os.path.join(target_dir, "img." + base_name[1:] + "_prediction.h5")
    command = "MS_UT_ProbsNF2GALA.py STACKED_prediction.h5 " + probs_h5
    print "\nRunning the command:", command, "\n"
    os.system(command)

    # Populate the watershed file
    base_name = ntpath.basename(raw_file).split(".")[0]
    watershed_h5 = os.path.join(target_dir, "img." + base_name[1:] + "_watershed.h5")
    command = "python " + os.path.join(neuroproof_2d_home, "generate_watershed_dan.py") + \
              " " + probs_h5 + " " + watershed_h5 + " 0"
    print "\nRunning the command:", command, "\n"
    os.system(command)

# -----------------------------------------------------------------------------

def populate_training_dir(training_raw, groundtruth_labels_dir, options):
    # Create the training dir
    create_dir(options.training_dir)

    # Create a list of training files
    raw_file_list = extract_file_list_from_directory(training_raw,
                        int(options.tmin), int(options.tmax))
    groundtruth_labels_file_list = extract_file_list_from_directory(groundtruth_labels_dir,
                        int(options.tmin), int(options.tmax))
    if not len(raw_file_list) == len(groundtruth_labels_file_list):
        sys.exit("# files in raw and groundtruth training lists are not the same")

    print "groundtruth_labels_file_list=", groundtruth_labels_file_list
    # Create the Ilastik training file
    training_ilp = create_ilastik_training_file(raw_file_list, groundtruth_labels_file_list)

    # Populate training_dir
    print "options.training_dir=", options.training_dir
    for i in range(0, len(raw_file_list)):
        populate_one_file(training_ilp, raw_file_list[i], options.training_dir)
        base_name = ntpath.basename(raw_file_list[i]).split(".")[0]
        groundtruth_seg_file = os.path.join(options.training_dir, "img." + base_name[1:] + "_groundtruth.h5")
        prediction_file = os.path.join(options.training_dir, "img." + base_name[1:] + "_prediction.h5")
        command = "python " + os.path.join(neuroproof_2d_home, "generate_seg_groundtruth.py") \
                  + " " + groundtruth_labels_file_list[i] + " " + prediction_file \
                  + " " + groundtruth_seg_file
        print "\nRunning the command:", command, "\n"
        os.system(command)
    return training_ilp

# -----------------------------------------------------------------------------

def populate_production_dir(training_ilp, production_raw, options):
    # Create the production dir
    create_dir(options.production_dir)

    # Create a list of production files
    raw_file_list = extract_file_list_from_directory(production_raw,
                        int(options.zmin), int(options.zmax))

    # Populate the production dir
    for i in range(0, len(raw_file_list)):
        populate_one_file(training_ilp, raw_file_list[i], options.production_dir)
    return

# -----------------------------------------------------------------------------

def learn_classifier(classifier_h5, training_dir, options):
    command = os.path.join(neuroproof_2d_home, "NeuroProof_stack_learn") + \
              " -train_dir " + training_dir + " -mito_thd 0.35 " + \
              " -classifier " + classifier_h5 + " -iteration 1 -strategy 2 "
    print "\nRunning the command:", command, "\n"
    os.system(command)
    return

# -----------------------------------------------------------------------------

def perform_segmentation(output_dir, classifier_h5, production_dir, options):
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    command = os.path.join(neuroproof_2d_home, "NeuroProof_stack") + \
              " -image_dir " + production_dir + " -mito_thd 0.35 " + \
              " -classifier " + classifier_h5 + " -algorithm 1 -threshold 0.2"
    print "\nRunning the command:", command, "\n"
    os.system(command)
    return

# -----------------------------------------------------------------------------

def create_segmentation_stack(output_dir, options):
    result_files = []
    print "output_dir=", output_dir
    for file in os.listdir(output_dir):
        if re.search("_m.h5", file):
            result_files.append(os.path.join(output_dir, file))
    result_files = sorted(result_files)
    for i in range(0, len(result_files)):
        f = h5py.File(result_files[i], 'r')
        data1 = f["stack"]
        if i == 0:
            data_stack = np.zeros([len(result_files), data1.shape[1], data1.shape[2]])
            print "data1.shape=", data1.shape
        data_stack[i,:,:] = data1
    if len(result_files) > 0:
        f = h5py.File(os.path.join(output_dir, "resulting_stack.h5"), 'w')
        f.create_dataset('stack', data_stack.shape, data=data_stack)

# -----------------------------------------------------------------------------

def process_data(raw_dir, groundtruth_labels_dir, options):
    training_ilp = "training.ilp"  # Ilastik training file
    classifier_h5 = "np_classifier.h5"
    output_dir = os.path.join("production_dir", "output")

    if int(options.processing_start) == 1 and int(options.processing_end) >= 1:
        training_ilp = populate_training_dir(raw_dir,
                           groundtruth_labels_dir, options)

    if int(options.processing_start) <= 2 and int(options.processing_end) >= 2:
        populate_production_dir(training_ilp, raw_dir, options)

    if int(options.processing_start) <= 3 and int(options.processing_end) >= 3:
        learn_classifier(classifier_h5, options.training_dir, options)

    if int(options.processing_start) <= 4 and int(options.processing_end) >= 4:
        perform_segmentation(output_dir, classifier_h5, options.production_dir, options)

    if int(options.processing_start) <= 5 and int(options.processing_end) >= 5:
        create_segmentation_stack(output_dir, options)

# -----------------------------------------------------------------------------

if __name__ == "__main__":

    usage = "Usage: \n\
    %prog <raw_data_dir> <gt_labels_dir> [options (-h to list)]"

    parser = optparse.OptionParser(usage=usage, version="%prog")
    parser = MS_LIB_Options.NeuroProofSegmentation2D_command_line_parser(parser)
    (options, args) = parser.parse_args()

    if len(args) == 2:
        raw_dir = args[0]
        gt_labels_dir = args[1]
        process_data(raw_dir, gt_labels_dir, options)
    else:
        parser.print_usage()
        sys.exit(2)
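
A hypothetical invocation, based on the usage string and the NeuroProofSegmentation2D option parser below (directory names are placeholders):

    MS_AS_NeuroProofSegmentation2D.py raw_dir gt_labels_dir -t 0 -T 9 -p 1 -P 5

The -p/-P options select which of the five stages of process_data() to run: 1 = populate the training dir, 2 = populate the production dir, 3 = learn the classifier, 4 = segment, 5 = stack the per-layer results. The -t/-T and -z/-Z options bound the training and production z-layers, respectively.
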

Lib/MS_LIB_Options.py

+64 -20

@@ -18,8 +18,12 @@ def Segmentation2D_command_line_parser(parser):
     parser.add_option("-f", "--fracBlack", dest="fracBlack", help="fracBlack for detecting neurons (default=automatic)",metavar="fracBlack", default=None)
     parser.add_option("-F", "--fracBlack2",dest="fracBlack2",help="fracBlack for detect. dark str.",metavar="fracBlack2",default=None)
     parser.add_option("-i","--uint_type",dest="uint",help="8,16,32 or 64",metavar="uint",default="16")
-    parser.add_option("-l", "--nlen", dest="nlen", help="# of subsections for processing a fragment in y (length) direction",metavar="nlen", default="1")
-    parser.add_option("-m", "--maxsize",dest="msize", help="# of subsections for processing a fragment in y (length) direction",metavar="nlen",default=sys.maxint)
+    parser.add_option("-l", "--nlen", dest="ysecit", help="# of subsections for adaptive thresholding in y (length) direction",metavar="nlen", default="1")
+    parser.add_option("-M", "--memb_prob",dest="memb_prob", help="HDF5 file or a folder of files containing membrane probabilities",\
+                      metavar="memb_prob",default="")
+    parser.add_option("-m", "--mito_prob",dest="mito_prob",help="HDF5 file or a folder of files containing mitoch. probabilities",\
+                      metavar="mito_prob",default="")
+    parser.add_option("-N", "--maxsize",dest="msize", help="# of subsections for processing a fragment in y (length) direction",metavar="nlen",default=sys.maxint)
     parser.add_option("-n", "--node", dest="node", help="id of the cluster node to be used", metavar="node", default=0)
     parser.add_option("-o", "--output_folder",dest="output_folder",help="output folder",metavar="output_folder",default=ms_data)
     parser.add_option("-p", "--processing_start",dest="processing_start",help="start processing from segm(=1),xmerg(=2),ymerg(=3),or epilog(=4) ",metavar="processing_start",default=1)
@@ -29,7 +33,7 @@ def Segmentation2D_command_line_parser(parser):
     parser.add_option("-S", "--slots",dest="num_slots", help="# of cluster slots per 1 job (default=1)", metavar="num_slots", default=1)
     parser.add_option("-U", "--unprocessed",action="store_true",dest="unprocessed", help="reprocess only the data for which an output file does not exist",default=False)
     parser.add_option("-v", "--verbose",action="store_true",dest="verbose",help="increase the verbosity level of output",default=False)
-    parser.add_option("-w", "--nwid", dest="nwid", help="# of subsections for processing a fragment in x (width) direction", metavar="nwid", default="1")
+    parser.add_option("-w", "--nwid", dest="nwid", help="# of subsections for adaptive thresholding in x (width) direction", metavar="nwid", default="1")
     parser.add_option("-X", "--nx", dest="nx", help="# of image fragments in x direction", metavar="nx", default=1)
     parser.add_option("-Y", "--ny", dest="ny", help="# of image fragments in y direction", metavar="ny", default=1)
     parser.add_option("-x", "--dx", dest="dx", help="# of scans for fragment overlap in x direction", metavar="dx", default=50)
@@ -190,17 +194,17 @@ def CreateH5Stack_command_line_parser(parser):
 def RhoanaSegmentation2D_command_line_parser(parser):
     parser.add_option("-A", "--project",dest="project_code",help="code to be used with qsub",metavar="project_code", default="flyTEM")
     parser.add_option("-c", "--classifier",dest="classifier",help="classifier file",metavar="classifier", \
-                      default=os.path.join(rh_home, "rhoana", "ClassifyMembranes", "GB_classifier.txt"))
+                      default=os.path.join(ms_home, "RhoanaSegmentation", "GB_classifier_membranes_150324_pedunculus.txt"))
     parser.add_option("-D", "--debug",dest="debug",help="don't delete intermediate outputs", action="store_true", default=False)
     parser.add_option("-e", "--executable",dest="executable",help="executable",metavar="executable",\
                       default=os.path.join(rh_home, "rhoana", "ClassifyMembranes", "classify_image"))
-    parser.add_option("-i", "--inverse_probabilities", action="store_true",dest="inverse_probabilities",help="output membrane probabilities", default=False)
-    parser.add_option("-m", "--memprob",dest="memprobs",help="membrane probabilities dir name",metavar="membprobs",default="membranes")
-    parser.add_option("-o", "--output_folder",dest="output_folder",help="output folder",metavar="output_folder",default=ms_data)
+    parser.add_option("-o", "--output_folder",dest="output_folder",help="output folder",metavar="output_folder",\
+                      default=os.path.join(ms_data,"membrane_bw2d_150324_pedunculus"))
     parser.add_option("-p", "--processing_start",dest="processing_start",help="start processing from probs(=1) or segm(=2)",\
                       metavar="processing_start",default=1)
     parser.add_option("-P", "--processing_end",dest="processing_end",help="complete processing at step probs(=1) or segm(=2) ",\
                       metavar="processing_end",default=2)
+    parser.add_option("-T", "--seg_type", dest="seg_type", help="segmentation type: memb, mito, or mitomemb", default="memb")
     parser.add_option("-t", "--output_tag",dest="output_tag",help="output tag, default=input_data",metavar="output_tag",default="")
     parser.add_option("-v", "--verbose",action="store_true",dest="verbose",help="increase the verbosity level of output",default=False)
     parser.add_option("-z", "--zmin",dest="zmin",help="min z-layer to be processed", metavar="zmin", default=0)
@@ -211,26 +215,66 @@ def RhoanaSegmentation2D_command_line_parser(parser):
 
 def RhoanaTrainClassifier_command_line_parser(parser):
     parser.add_option("-A", "--project",dest="project_code",help="code to be used with qsub",metavar="project_code", default="flyTEM")
-    parser.add_option("-c", "--classifier_type",dest="classifier_type",help="GB(=gradient boosting) or RF(=random forest)",metavar="classifier_type",default="GB")
+    parser.add_option("-c", "--classifier_type",dest="classifier_type",help="GB(=gradient boosting) or RF(=random forest)",\
+                      metavar="classifier_type",default="GB")
+    parser.add_option("-d", "--training_dir",dest="training_dir",help="output dir for training images",
+                      metavar="training_dir",default="")
     parser.add_option("-D", "--debug",dest="debug",help="don't delete intermediate outputs", action="store_true", default=False)
-    parser.add_option("-f", "--features",dest="features",help="name of a folder containing computed features",metavar="features",
-                      default=os.path.join(ms_data,"features_150324_pedunculus"))
-    parser.add_option("-M", "--membrane_labels",dest="membrane_labels",help="folder containing membrane training labels",\
-                      metavar="membrane_labels",\
-                      default=os.path.join(ms_data,"membrane_labels_150324_pedunculus"))
-    parser.add_option("-m", "--mitochondria_labels",dest="mitochondria_labels",help="folder containing mitochondria training labels",\
-                      metavar="mitochondria_labels",\
-                      default=os.path.join(ms_data,"mitochonria_labels_150324_pedunculus"))
-    parser.add_option("-o", "--output_file",dest="output_file",help="output file",metavar="output_file",
-                      default=os.path.join(ms_home, "RhoanaSegmentation", "GB_classifier.txt"))
+    parser.add_option("-f", "--features_dir",dest="features_dir",help="name of a folder containing computed features",\
+                      metavar="features_dir", default="")
+    parser.add_option("-l", "--pos_labels_dir",dest="pos_labels_dir",help="folder containing positive training labels",\
+                      metavar="pos_labels_dir", default="")
+    parser.add_option("-L", "--neg_labels_dir",dest="neg_labels_dir",help="folder containing negative training labels",\
+                      metavar="neg_labels_dir", default="")
+    parser.add_option("-o", "--output_file",dest="output_file",help="output file",metavar="output_file", default="")
     parser.add_option("-p", "--processing_start",dest="processing_start",help="start processing from probs(=1) or segm(=2)",\
                       metavar="processing_start",default=1)
     parser.add_option("-P", "--processing_end",dest="processing_end",help="complete processing at step probs(=1) or segm(=2) ",\
                       metavar="processing_end",default=3)
-    parser.add_option("-r", "--raw_images",dest="raw_images",help="folder containing the raw images for training",\
-                      metavar="raw_images", default=os.path.join(ms_data,"raw_150324_pedunculus"))
+    parser.add_option("-r", "--raw_dir",dest="raw_dir",help="folder containing the raw images for training",\
+                      metavar="raw_dir", default="")
+    parser.add_option("-T", "--training_type", dest="training_type", \
+                      help="training type: memb_vs_rest, mito_vs_rest, mitomemb_vs_rest or memb_vs_mito", default="memb_vs_rest")
     parser.add_option("-v", "--verbose",action="store_true",dest="verbose",help="increase the verbosity level of output",default=False)
     parser.add_option("-z", "--zmin",dest="zmin",help="min z-layer to be processed", metavar="zmin", default=0)
     parser.add_option("-Z", "--zmax",dest="zmax",help="max z-layer to be processed", metavar="zmax", default=sys.maxint)
     return parser
 
+# -----------------------------------------------------------------------
+
+def GalaSegmentation2D_command_line_parser(parser):
+    parser.add_option("-D", "--debug",dest="debug",help="don't delete intermediate outputs", action="store_true", default=False)
+    parser.add_option("-i", "--input_raw",dest="input_dir",help="folder of raw/grayscale production images",metavar="input_raw_dir", default="")
+    parser.add_option("-o", "--output_seg",dest="output_dir",help="output folder",metavar="output_folder",\
+                      default=os.path.join(ms_data,"membrane_bw2d_150324_pedunculus"))
+    parser.add_option("-p", "--processing_start",dest="processing_start",help="start processing from probs(=1) or segm(=2)",\
+                      metavar="processing_start",default=1)
+    parser.add_option("-P", "--processing_end",dest="processing_end",help="complete processing at step probs(=1) or segm(=2) ",\
+                      metavar="processing_end",default=2)
+    parser.add_option("-T", "--training_labels", dest="training_labels_dir", help="folder of groundtruth labels images", default="")
+    parser.add_option("-t", "--training_raw",dest="training_raw_dir",help="folder of raw/grayscale training images",metavar="training_raw_dir", default="")
+    parser.add_option("-v", "--verbose",action="store_true",dest="verbose",help="increase the verbosity level of output",default=False)
+    parser.add_option("-z", "--zmin",dest="zmin",help="min z-layer for training", metavar="zmin", default=0)
+    parser.add_option("-Z", "--zmax",dest="zmax",help="max z-layer for training", metavar="zmax", default=sys.maxint)
+
+    return parser
+
+# -----------------------------------------------------------------------
+
+def NeuroProofSegmentation2D_command_line_parser(parser):
+    parser.add_option("-d", "--training_dir",dest="training_dir",help="folder for training data",\
+                      metavar="training_dir", default = "training_dir")
+    parser.add_option("-D", "--production_dir",dest="production_dir",help="folder for production data",\
+                      metavar="production_dir", default = "production_dir")
+    parser.add_option("-p", "--processing_start",dest="processing_start",help="1=train_dir, 2=prod_dir, 3=learn, 4=segm ",\
+                      metavar="processing_start",default=1)
+    parser.add_option("-P", "--processing_end",dest="processing_end",help="1=train_dir, 2=prod_dir, 3=learn, 4=segm ",\
+                      metavar="processing_end",default=5)
+    parser.add_option("-t", "--tmin",dest="tmin",help="min z-layer for training", metavar="zmin", default=0)
+    parser.add_option("-T", "--tmax",dest="tmax",help="max z-layer for training", metavar="zmax", default=sys.maxint)
+    parser.add_option("-v", "--verbose",action="store_true",dest="verbose",help="increase the verbosity level of output",default=False)
+    parser.add_option("-z", "--zmin",dest="zmin",help="min z-layer for production", metavar="tmin", default=0)
+    parser.add_option("-Z", "--zmax",dest="zmax",help="max z-layer for production", metavar="tmax", default=sys.maxint)
+
+    return parser
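
All of these factories follow the same pattern: they take an optparse.OptionParser and return it with options attached. A minimal sketch of how the two new parsers are consumed, mirroring the __main__ blocks of the scripts above:

    import optparse
    import MS_LIB_Options

    parser = optparse.OptionParser(usage="%prog <raw_dir> <gt_labels_dir> [options]")
    parser = MS_LIB_Options.NeuroProofSegmentation2D_command_line_parser(parser)
    (options, args) = parser.parse_args()
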

Utilities/MS_UT_BW2RGB.py

-1

@@ -3,7 +3,6 @@
 import re, sys
 import h5py
 from scipy import misc
-import tifffile as tiff
 import numpy
 from PIL import Image

Utilities/MS_UT_CombineBinaryLabels.py

+19 -8

@@ -4,6 +4,7 @@
 #
 
 from PIL import Image
+from skimage.morphology import disk, binary_erosion
 import numpy
 import sys, os
 
@@ -16,17 +17,27 @@
     if not os.path.isdir(outdir):
         os.mkdir(outdir)
 else:
-    sys.exit("usage: MS_UT_CombineBinaryLabels.py <dir_labels1> <dir_labels2> <dir_out> \n")
+    sys.exit("usage: MS_UT_CombineBinaryLabels.py <dir_memb_labels> <dir_mito_labels> <dir_comb_labels> \n")
 
 for file in os.listdir(indir1):
     inpath1 = os.path.join(indir1, file)
-    inpath2 = os.path.join(indir2, file.split(".")[0] + ".png")
+    inpath2 = os.path.join(indir2, file)
     outpath = os.path.join(outdir, file.split(".")[0] + ".png")
-    data1 = numpy.asarray(Image.open(inpath1, 'r'))
-    data2 = numpy.invert(numpy.asarray(Image.open(inpath2, 'r'))) # mito membranes = now black
-    data = numpy.zeros(data1.shape, dtype="int")
-    data[:] = data1[:]
-    data[data2 == 0] = 0.
-    img = Image.fromarray(numpy.uint8(data/numpy.max(data)*255.))
+
+    data_memb = numpy.asarray(Image.open(inpath1, 'r')) # membranes = black, the rest = white
+    data_mito = numpy.asarray(Image.open(inpath2, 'r')) # mitochondria = white, the rest = black
+    strel = disk(3)
+    data_mito_1 = numpy.zeros(data_mito.shape, dtype=numpy.uint8)
+    data_mito_1[data_mito > 0] = 1 # having 1's instead of 255's is necessary for applying the binary_erosion operation
+    data_mito_e = binary_erosion(data_mito_1, strel)
+    data_mito_memb = data_mito_1.copy()
+    data_mito_memb[data_mito_e > 0] = 0
+    print "(data_mito_1 == 1).sum()=", (data_mito_1 == 1).sum()
+    print "(data_mito_e == 1).sum()=", (data_mito_e == 1).sum()
+    data = numpy.zeros(data_memb.shape, dtype=numpy.uint8)
+    data[data_memb == 0] = 1 # cellular membranes
+    data[data_mito_e > 0] = 2 # mitochondria-interior
+    data[data_mito_memb > 0] = 3 # mito-membranes
+    img = Image.fromarray(numpy.uint8(data))
     img.save(outpath)
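
The rim-extraction idea used above — erode the binary mitochondria mask and subtract, leaving a boundary band whose width is set by the disk radius — in a minimal self-contained sketch (toy data, not part of the commit):

    import numpy
    from skimage.morphology import disk, binary_erosion

    mask = numpy.zeros((20, 20), dtype=numpy.uint8)
    mask[5:15, 5:15] = 1                    # a toy mitochondrion
    eroded = binary_erosion(mask, disk(3))  # interior after erosion
    rim = mask.copy()
    rim[eroded > 0] = 0                     # boundary ("mito membrane") pixels only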

Utilities/MS_UT_CreateH5Groundtruth.py

+3 -14

@@ -38,22 +38,11 @@
 
 for i in range(0, len(input_files1)):
     data = copy.copy(numpy.asarray(Image.open(input_files1[i], 'r')))
-    # if i == 0:
-    #     my_shape = [len(input_files1), data.shape[0], data.shape[1]]
-    #     data_stack = numpy.zeros(my_shape, dtype = numpy.uint32)
-    # data_stack[i,:,:] = data
     if i == 0:
-        my_shape = [data.shape[0], data.shape[1], len(input_files1)]
-        data_stack = numpy.zeros(my_shape, dtype = numpy.uint32)
-    data_stack[:,:, i] = data
-    print "my_shape=", my_shape
-max_label = numpy.max(data_stack)
-my_shape2 = [max_label+1, 2]
-transforms = numpy.zeros(my_shape2, dtype = numpy.uint64)
-for i in range(0, max_label+1):
-    transforms[i,0:2] = i
+        my_shape = [len(input_files1), data.shape[0], data.shape[1]]
+        data_stack = numpy.zeros(my_shape, dtype = numpy.int32)
+    data_stack[i, :,:] = data
 f.create_dataset('stack', my_shape, data = data_stack)
-f.create_dataset('transforms', my_shape2, data = transforms)

Utilities/MS_UT_CreateTrainingImage.py

+2 -5

@@ -19,7 +19,7 @@
 pos_BW_shape = pos_BW_data.shape
 RGB_shape = (pos_BW_shape[0],pos_BW_shape[1],3)
 RGB_data = numpy.zeros(RGB_shape, 'uint8')
-if re.search("membrane", pos_BW_image_file) and len(neg_BW_image_file) == 0:
+if re.search("memb", pos_BW_image_file) and len(neg_BW_image_file) == 0:
     # train membranes vs the rest: membranes = black, the rest = white
     RGB_data[pos_BW_data== 0,1] = 255 # positive examples: black -> green
     RGB_data[pos_BW_data==255,0] = 255 # negative examples: white -> red
@@ -33,9 +33,6 @@
     RGB_data[pos_BW_data== 0,1] = 255 # positive examples: white -> green
     RGB_data[neg_BW_data==255,0] = 255 # negative examples: white -> red
 RGB_image = Image.fromarray(RGB_data)
-parent_dir = os.path.dirname(os.path.normpath(out_RGB_image_file))
-print "out_RGB_image_file=", out_RGB_image_file, " parent_dir=", parent_dir
-if not os.path.isdir(parent_dir):
-    os.mkdir(parent_dir)
+print "out_RGB_image_file=", out_RGB_image_file
 RGB_image.save(out_RGB_image_file)

Utilities/MS_UT_ExploreImageFile.py

+10 -2

@@ -22,7 +22,15 @@
     image_data = numpy.array(f[key])
 else:
     sys.exit("Unsupported file format")
-print "max=", image_data.max(), " min=", image_data.min()
-print "dtype=", image_data.dtype
 print "Image shape=", image_data.shape
+print "dtype=", image_data.dtype
+max_value = image_data.max()
+min_value = image_data.min()
+print "max=", max_value, " min=", min_value
+if round(max_value) == max_value and round(min_value) == min_value and max_value - min_value > 1:
+    num_values = 0
+    for i in range(min_value, max_value + 1):
+        if (image_data == i).sum() > 0:
+            num_values += 1
+    print "num_values=", num_values
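
For integer-valued data, the value count computed by the loop added above can also be obtained in one call (an equivalent formulation, not part of the commit):

    num_values = len(numpy.unique(image_data))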

Utilities/MS_UT_Im2H5.py

+8 -2

@@ -22,9 +22,15 @@
 else:
     sys.exit("\nusage: MS_UT_Im2H5.py <image_file> <hdf5_file> \n")
 
-data_stack = numpy.array(Image.open(image_file))
+data = numpy.array(Image.open(image_file))
+if 1:
+    data_stack = numpy.zeros([1, data.shape[0], data.shape[1]], dtype = numpy.uint32)
+    data_stack[0,:,:] = data
+else:
+    data_stack = numpy.zeros([data.shape[0], data.shape[1]], dtype = numpy.uint32)
+    data_stack[:,:] = data
 f = h5py.File(h5_file_name, 'w')
-f.create_dataset('stack', data_stack.shape, data = numpy.transpose(data_stack))
+f.create_dataset('stack', data_stack.shape, data = data_stack, dtype = numpy.uint32)

Utilities/MS_UT_ProbsNF2GALA.py

+4 -2

@@ -7,6 +7,7 @@
 import h5py
 import numpy
 import sys, os, copy
+from numpy import squeeze
 
 # ----------------------------------------------------------------------
 
@@ -24,8 +25,9 @@
 
 fin = h5py.File(NF_probs_file, 'r')
 data_stack = numpy.array(fin['/volume']['predictions'])
+data_stack1 = numpy.zeros([data_stack.shape[2], data_stack.shape[1], data_stack.shape[0], data_stack.shape[3]])
+data_stack1 = data_stack.transpose((2,1,0,3))
 fout = h5py.File(GALA_probs_file, 'w')
-fout.create_dataset('stack', data_stack.shape, data = data_stack)
-
+fout.create_dataset('stack', data_stack1.shape, data = data_stack1)
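
The two added lines above reorder the axes of the NeuroProof prediction volume for GALA; note that the numpy.zeros allocation is immediately overwritten by the transpose on the next line, so only the axis permutation matters. A minimal sketch of the reordering (the (x, y, z, channel) axis semantics are an assumption):

    import numpy as np

    a = np.arange(24).reshape((2, 3, 4, 1))  # e.g. (x, y, z, channels)
    b = a.transpose((2, 1, 0, 3))            # -> (z, y, x, channels)
    assert b.shape == (4, 3, 2, 1)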

Utilities/MS_UT_SegStack2RGB.py

+65 (new file)

#! /usr/local/python-2.7.6/bin/python
#
# Copyright (C) 2015 by Howard Hughes Medical Institute.
#

import os, sys, re, h5py
import tifffile as tiff
import numpy as np
from skimage.color import label2rgb
from PIL import Image

def read_input(stack_file_name, layer_id):
    if re.search(".h5", stack_file_name):
        f = h5py.File(stack_file_name, 'r')
        key = f.keys()[0]
        data = f[key]
    elif re.search(".tif", stack_file_name):
        data = tiff.imread(stack_file_name)
    else:
        sys.exit("Unrecognized stack type")
    print "data.shape=", data.shape, " layer_id=", layer_id
    return data[layer_id,:,:]

# -------------------------------------------------------------------------------

if __name__ == "__main__":

    print "len(sys.argv)=", len(sys.argv)
    if len(sys.argv) in [3,4]:
        stack_file_name = sys.argv[1]
        RGB_name = sys.argv[2]
        layer_id = 0
        if len(sys.argv) == 4:
            layer_id = int(sys.argv[3])
    else:
        sys.exit("\nusage: MS_UT_SegStack2RGB.py input_stack_file output_RGB_file [layer_id]\n")

    data = np.squeeze(read_input(stack_file_name, layer_id))
    print "data.shape=", data.shape
    max_value = data.max()
    min_value = data.min()
    print "data.max=", max_value, " data.min=", min_value

    if round(max_value) == max_value and round(min_value) == min_value and max_value - min_value > 1:
        num_values = 0
        for i in range(min_value, max_value + 1):
            if (data == i).sum() > 0:
                num_values += 1
        print "num_values=", num_values

    outdata = label2rgb(data)
    print "\noutdata.shape=", outdata.shape
    max_value = outdata.max()
    min_value = outdata.min()
    print "outdata.max=", max_value, " outdata.min=", min_value

    outdata2 = np.zeros([outdata.shape[0], outdata.shape[1], 3], 'uint8')
    for i in [0,1,2]:
        outdata2[:,:,i] = np.uint8(outdata[:,:,i]/np.max(outdata[:,:,i])*255.)
    img = Image.fromarray(outdata2)
    img.save(RGB_name)

Utilities/MS_UT_ShowH5StackLabels.py

+11 -9

@@ -19,17 +19,19 @@
 else:
     sys.exit("usage: MS_UT_ShowH5StackLabels.py h5file layer_id \n")
 
-f = h5py.File(h5_file_name, 'r')
-data = f['main']
+f = h5py.File(h5_file_name, 'r')
+key = f.keys()[0]
+data = numpy.transpose(f[key])
 print "type=", type(data)
 print "type=", type(numpy.array(data))
-print "shape=", numpy.array(data).shape
-data1 = data[0,:,:,layer_id]
-data2 = data[1,:,:,layer_id]
-data3 = data[2,:,:,layer_id]
-print "shape1=", data1.shape
+print "data.shape=", numpy.array(data).shape
+# data1 = data[0,:,:,layer_id]
+# data2 = data[1,:,:,layer_id]
+# data3 = data[2,:,:,layer_id]
+print "data=", data
+print "max=", numpy.max(data), " min=", numpy.min(data)
 # data1_new = data1;
-data1 == data2
+# data1 == data2
 # data1
 #:red
-data1_new(data1 == data2) = "."
+# data1_new(data1 == data2) = "."

Utilities/MS_UT_ShowImage.py

+37 (new file)

#! /usr/local/python-2.7.6/bin/python
#
# Copyright (C) 2015 by Howard Hughes Medical Institute.
#

import os, sys, numpy
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.pyplot import figure, ion, show, draw
ion()

if __name__ == "__main__":

    if len(sys.argv) == 2:
        image_file = sys.argv[1]
    else:
        sys.exit("\nusage: MS_UT_ShowImage image_file\n")

    image_data = numpy.asarray(Image.open(image_file))
    if len(image_data.shape) == 2:
        plt.imshow(image_data, cmap = cm.Greys_r)
        plt.show()
    else:
        plt.imshow(image_data)
        show()
    try:
        id = input("Press enter to remove image")
        i = int(id)
        if i == 0:
            sys.exit("Exiting ...")
    except SyntaxError:
        pass

Utilities/MS_UT_ShowImageStack.py

+3 -1

@@ -30,9 +30,11 @@
         key = "segmentations"
     data0 = f[key]
     print "data0.shape=", data0.shape
-    # data = numpy.transpose(f[key])
     data = f[key]
     print "data.shape=", data.shape
+    # Layer id is supposed to be the last dimension:
+    if data.shape[0] < min(data.shape[1], data.shape[2]):
+        data = numpy.transpose(f[key])
 elif re.search(".tif", stack_file_name):
     data = numpy.transpose(tiff.imread(stack_file_name))
 else:
