# DataStorage.py
"""Provides a convenient way to store all data into first and foremost raw files and also into a datastore so the
data can be analyzed and displayed on screen"""
__copyright__ = "Copyright 2015 - 2017, Justin Scholz"
__author__ = "Justin Scholz"
import cmath
import pickle
import copy
import time
import queue
from threading import Thread
import os
import math
import UserInput
import _version
class Database:
def __init__(self, name="Run1", pickle_path=".{0}".format(os.sep), experimenter="Tron", room="Dream World",
comment="I fight for the User!", creation_time=time.strftime("%d.%m.%Y %H:%M:%S")):
self.db = {} # {p1:[point1, point2], c1:[point1, point2]}
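# More concretely (illustrative sketch): identifier components are nested integer keys, e.g.
# self.db = {0: {"type": ..., "Datapoints": [...], 0: {...}}}, where the inner 0 is a deeper sub-task level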
self.tasks_of_a_run = [] # this is where we store the task list for a run
self.name = name
self.experimenter = experimenter
self.room = room
self.comment = comment
self.pickle_path = pickle_path
self.creation_time = creation_time or time.strftime("%d.%m.%Y %H:%M:%S")  # default to "now", not import time
self.version = _version.__version__
self.task_input = []  # Helps with setting up a template
def change_to_passed_db(self, unpickled_db):
"""
:param unpickled_db:
"""
self.db = unpickled_db.db
self.tasks_of_a_run = unpickled_db.tasks_of_a_run
try:
self.name = unpickled_db.name
except AttributeError:
self.name = "Run1"
try:
self.pickle_path = unpickled_db.pickle_path
except AttributeError:
self.pickle_path = ".{0}".format(os.sep)
try:
self.version = unpickled_db.version
except AttributeError:
self.version = "0"
try:
self.experimenter = unpickled_db.experimenter
except AttributeError:
self.experimenter = "Tron"
try:
self.room = unpickled_db.room
except AttributeError:
self.room = "Dream World"
try:
self.comment = unpickled_db.comment
except AttributeError:
self.comment = "I fight for the User!"
def start_fresh(self, name="Run1", pickle_path=".{0}".format(os.sep), experimenter="Tron", room="Dream World",
comment="I fight for the User!", creation_time=None):
""" You may want to make multiple measurement runs. This means though that the database should be cleared. This
wipes all data and starts clean.
:param pickle_path:
:param name:
"""
self.db = {}
self.tasks_of_a_run = []
self.name = name
self.pickle_path = pickle_path
self.version = _version.__version__
self.experimenter = experimenter
self.room = room
self.comment = comment
self.creation_time = creation_time or time.strftime("%d.%m.%Y %H:%M:%S")
def measurement_finished(self):
# Database should be pickled NOW
self.pickle_database()
def pickle_database(self, suffix=""):
if not os.path.isdir(self.pickle_path):
os.makedirs(self.pickle_path)
filename = "{0}{1}{2}{3}.JUMP".format(self.pickle_path, os.sep, self.name, suffix)
with open(filename, 'wb') as output:
pickle.dump(self, output, -1)
def new_pickle_path(self, new_pickle_path: str):
if not new_pickle_path.endswith(os.sep):
new_pickle_path += os.sep
self.pickle_path = new_pickle_path
def start_post_processing(self, path_to_opened_db: str):
database_to_manipulate = copy.deepcopy(self) # type: Database
path = os.path.dirname(path_to_opened_db)
if database_to_manipulate.pickle_path == ".{0}".format(os.sep):
database_to_manipulate.pickle_path = os.getcwd() + os.sep
# TODO: Consider a more useful path display
question = {"question_title": "Output directory",
"question_text": "The current output directory is '{0}' . "
"Do you want to change it?".format(path),
"default_answer": True,
"optiontype": "yes_no"}
user_wants_to_change_path = UserInput.ask_user_for_input(question)["answer"]
if user_wants_to_change_path:
question = {"question_title": "Output directory",
"question_text": "Please enter a working directory for the following processing session.",
"default_answer": "C:\Data\JUMP",
"optiontype": "free_text"}
new_path = UserInput.ask_user_for_input(question)["answer"]
path = new_path
database_to_manipulate.new_pickle_path(path)
UserInput.post_status("")
UserInput.post_status("-------------Choose template-------------")
UserInput.post_status("Choose whether you want to customize your post-processing, or want to use an existing"
"template")
question = {"question_title": "Choose template",
"question_text": "Please choose a template",
"default_answer": "Custom",
"optiontype": "multi_choice",
"valid_options": ["Custom","S001"]}
chosen_template = UserInput.ask_user_for_input(question)["answer"]
if chosen_template == 0:
database_to_manipulate._post_process(True)
elif chosen_template == 1:
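# S001 is a canned answer list: _get_input() pops one entry per question, in the
# order the questions are asked, instead of prompting the user.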
template = [False, False, True, [1, 2], False, True, [0, 1], False, [0], False, False, [0]]
database_to_manipulate._post_process(False, template)
def _post_process(self, custom: bool = True, template=None):
""" The post-processing workflow follows the steps outlined in post_processing_steps.
"""
if template is None:
template = []
post_processing_steps = ["1. Ask user whether he wants geometry based calculations and calculate all possible"
" values",
"2. Merge *all* <<same>> level datapoints/DataAcquisitions",
"3. Merge *all* <<different>> level datapoints (usually ParamControllers or Triggers "
"together with DataAcquisitions",
"4. Integrate *all* <<multi-level>> tasks (temperature+frequencies)",
"5. Specify first output list",
"6. If needed, transpose datapoints (temp-> freq)",
"7. Define file naming, header and columns for first list",
"8. Define file naming, header and columns for second list",
"9. Start file-output",
"10. Be happy"]
processing_log = []
current_log_index = 0
UserInput.post_status("You are now in post processing mode. You are post-processing the database: " + self.name)
# ------ Helper methods for the post_processing_workflow
def print_following_steps(start_index):
"""Little method to print the current + all following steps of the post-processing-workflow
:param start_index:
"""
UserInput.post_status("These are the steps to the finish line, starting with the current one:")
for index, item in enumerate(post_processing_steps):
if index >= start_index:
UserInput.post_status(item)
def print_current_processing_log(start_index=0):
"""Little method to print the log of the processing workflow
:param start_index:
"""
for index, item in enumerate(processing_log):
if index >= start_index:
UserInput.post_status(item)
def print_task_list_with_indeces():
for index, item in enumerate(self.tasks):
UserInput.post_status(str(index) + ": " + item)
def get_task_id_from_task_list_index(index_in_task_list):
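# Parses the identifier out of the human-readable task string,
# e.g. "Some task [0, 1, 2]" -> [0, 1, 2] (the task name here is illustrative)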
task = self.tasks[index_in_task_list]
identifier_str = task.split("]")[0].split("[")[1].split(",")
identifier = []
for item in identifier_str:
identifier.append(int(item))
return identifier
def setup_postprocessing():
pass
# -------------------------------------------------------------------------------------------------------------
if True:
# Step 1: Ask user whether he wants geometry-based calculations and calculate all possible values
print_following_steps(0)
UserInput.post_status("")
UserInput.post_status("-------------Step 1: Geometry-------------")
UserInput.post_status("You now have the chance to enter a geometry so all the possible quantities can be "
"calculated for you")
question = {"question_title": "Sample geometry",
"question_text": "You can enter the sample geometry in mm (Millimeter!), do you want that?",
"default_answer": True,
"optiontype": "yes_no"}
user_wants_geometry = self._get_input(custom, question, template)
if user_wants_geometry:
question = {"question_title": "Sample Thickness",
"question_text": "Please enter the sample's thickness in mm. Valid values range from 0 to "
"9999999, maximum accuracy is capped at 0.0000001",
"default_answer": 1.0,
"optiontype": "free_choice",
"valid_options_lower_limit": 0.0,
"valid_options_upper_limit": 9999999,
"valid_options_steplength": 1e7}
thickness = self._get_input(custom, question, template)
question = {"question_title": "Sample area",
"question_text": "Please enter the sample's area in mm^2. Valid values range from 0 to 9999999,"
" maximum accuracy is capped at 0.0000001",
"default_answer": 1.0,
"optiontype": "free_choice",
"valid_options_lower_limit": 0.0,
"valid_options_upper_limit": 9999999,
"valid_options_steplength": 1e7}
area = self._get_input(custom, question, template)
self.geometry = {"thickness": thickness,
"area": area}
UserInput.post_status("Successfully gathered the geometry info. All values will be calculated. This can "
"take a moment.")
processing_log.append(time.strftime("%c") + ": User entered geometry. Starting value calculation.")
self.calculate_all_values(self.geometry)
processing_log.append(time.strftime("%c") + ": All values calculated.")
else:
UserInput.post_status("Values will be calculated without geometry input, but calculation could nevertheless"
" take a moment.")
processing_log.append(time.strftime("%c") + ": User didn't enter geometry. Starting value calculation.")
self.calculate_all_values()
self.geometry = {"Info": "no geometry given"}
processing_log.append(time.strftime("%c") + ": All values calculated.")
UserInput.post_status("All values are now calculated. Successfully finished step 1. New step is step 2.")
# -------------------------------------------------------------------------------------------------------------
# Step 2: Merge *all* same level datapoints/DataAcquisitions
UserInput.post_status("####################################")
UserInput.post_status("-------------Step 2: Same level merge-------------")
processing_log.append(time.strftime("%c") + ": Entering step 2")
print_following_steps(1)
current_log_index = len(processing_log) - 1
UserInput.confirm_warning(
"Now please merge all same-level tasks. Same level means that all but the very last index"
" components may differ. For example: \n\n [0,0,0,0] and [0,0,0,1] can be merged. \n\n"
"!!! [0,0,0] and [0,0,0,0] can't be merged in this step because that's a level 3 and "
"a level 4 merge. \n"
"!!! [0,0,0,0] and [1,0,0,0] can't be merged because they differ on the first index "
"component. \n\n"
"This merging process will only be needed in rare cases where you for example recorded "
"both sample AND control temperature in DataAcquisitons or have additional sensors.",custom)
step2_is_finished = False
while not step2_is_finished:
UserInput.post_status("°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°")
UserInput.post_status("Processing log for step 2:")
print_current_processing_log(current_log_index)
UserInput.post_status("°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°")
question = {"question_title": "Merge same level tasks",
"question_text": "Do you want to merge (another) two tasks?",
"default_answer": False,
"optiontype": "yes_no"}
user_wants_new_merge = self._get_input(custom, question, template)
if user_wants_new_merge:
print_task_list_with_indeces()
question = {"question_title": "Same-level-merge selection",
"question_text": "Please enter the two indeces, (you will get two input prompts) for the "
"two which are to be merged. The result is that the second one is "
"<<integrated>> into the <<first>> one.",
"default_answer": "0",
"optiontype": "2_indeces"}
index_list = self._get_input(custom, question, template)
identifier1 = get_task_id_from_task_list_index(index_list[0])
identifier2 = get_task_id_from_task_list_index(index_list[1])
self.merge_same_level_datapoints(identifier1, identifier2)
processing_log.append(time.strftime("%c") + ": Merged " + str(identifier2) + " into -> " +
str(identifier1))
else:
step2_is_finished = True
# -------------------------------------------------------------------------------------------------------------
# Step 3: Merge *all* different level datapoints/DataAcquisitions
UserInput.post_status("####################################")
UserInput.post_status("-------------Step 3: Different level merge-------------")
processing_log.append(time.strftime("%c") + ": Entering step 3")
# We are at step 3; since the step list counts from 0, we print from index 2 onward
print_following_steps(2)
current_log_index = len(processing_log) - 1
UserInput.confirm_warning(
"Now please merge all different-level tasks. Different level means that all compinents "
"of the identifier are the same, just the second one has one more component. This process"
" is almost always needed, if onyl to merge ParamController with DataAcquisitions",custom)
step3_is_finished = False
while not step3_is_finished:
UserInput.post_status("°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°")
UserInput.post_status("Processing log for step 3:")
print_current_processing_log(current_log_index)
UserInput.post_status("°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°")
question = {"question_title": "Merge different level tasks",
"question_text": "Do you want to merge (another) two tasks?",
"default_answer": False,
"optiontype": "yes_no"}
user_wants_new_merge = self._get_input(custom, question, template)
if user_wants_new_merge:
print_task_list_with_indeces()
question = {"question_title": "Different-level-merge selection",
"question_text": "Please enter the two indeces, (you will get two input prompts) for the "
"two which are to be merged. The result is that the second one is "
"<<integrated>> into the <<first>> one.",
"default_answer": "0",
"optiontype": "2_indeces"}
index_list = self._get_input(custom, question, template)
identifier1 = get_task_id_from_task_list_index(index_list[0])
identifier2 = get_task_id_from_task_list_index(index_list[1])
self.merge_diff_level_datapoints(identifier1, identifier2)
processing_log.append(time.strftime("%c") + ": Merged " + str(identifier2) + " into -> " +
str(identifier1))
else:
step3_is_finished = True
# -------------------------------------------------------------------------------------------------------------
# Step 4: Integrate *all* multi-level connections (temperature+frequencies)
UserInput.post_status("####################################")
UserInput.post_status("-------------Step 4: Multi-level integration-------------")
processing_log.append(time.strftime("%c") + ": Entering step 4")
print_following_steps(3)
current_log_index = len(processing_log) - 1
UserInput.confirm_warning(
"Now please integrate the tasks. This usually means integrating already merged sub_tasks."
" (As with merging, integrating means that the result will be in the first entered"
" one). In a classical dielectric measurement, you usually have 30 frequencies at every "
"temperature. This step integrates the 30 frequencies' data into the temperature "
"datapoints. This is the reason that the first task must contain equal or fewer "
"datapoints than the second task.",custom)
step4_is_finished = False
while not step4_is_finished:
UserInput.post_status("°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°")
UserInput.post_status("Processing log for all steps:")
print_current_processing_log(0)
UserInput.post_status("°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°")
question = {"question_title": "Integrate datarows",
"question_text": "Do you want to integrate (another) two tasks?",
"default_answer": False,
"optiontype": "yes_no"}
user_wants_new_merge = self._get_input(custom, question, template)
if user_wants_new_merge:
print_task_list_with_indeces()
question = {"question_title": "Same-level-merge selection",
"question_text": "Please enter the two indeces, (you will get two input prompts) for the "
"two which are to be merged. The result is that the second one is "
"<<integrated>> into the <<first>> one.",
"default_answer": "0",
"optiontype": "2_indeces"}
index_list = self._get_input(custom, question, template)
identifier1 = get_task_id_from_task_list_index(index_list[0])
identifier2 = get_task_id_from_task_list_index(index_list[1])
self.insert_sub_datapoints_into_parent_datapoint(identifier1, identifier2)
processing_log.append(time.strftime("%c") + ": Integrated " + str(identifier2) + " into -> "
+ str(identifier1))
else:
step4_is_finished = True
# -------------------------------------------------------------------------------------------------------------
# Step 5: Specify first output list
UserInput.post_status("-------------Step 5: Tasks for first output file-------------")
processing_log.append(time.strftime("%c") + ": Entering step 5")
print_following_steps(4)
current_log_index = len(processing_log) - 1
datapoint_list_1 = []
UserInput.confirm_warning(
"Now, specify the first round of the now-massaged tasks that you want output from. This is "
"the first output round. As you can see, you can transpose the data as needed in step 6. "
"In steps 7 and 8, you'll be able to configure how exactly they should be printed out into "
"the files.",custom)
print_task_list_with_indeces()
question = {"question_title": "First output selection",
"question_text": "Please enter one or more indeces separated only by a comma to be "
"selected for file-output",
"default_answer": "0,4,8,12",
"optiontype": "multi_indeces"}
indeces = self._get_input(custom, question, template)
processing_log.append(time.strftime("%c") + ": Selected tasks at indeces :" + str(indeces) + " for output 1")
for index in indeces:
task_identifier = get_task_id_from_task_list_index(index)
linked_datapoints = self._get_datapoint_list_at_identifier(task_identifier)
copied_datapoints = copy.deepcopy(linked_datapoints)
datapoint_list_1.append(copied_datapoints)
# -------------------------------------------------------------------------------------------------------------
# Step 6: If needed, transpose datapoints (temp-> freq) (automatically selected for output in second list)
UserInput.post_status("-------------Step 6: Optionally transpose datapoints-------------")
processing_log.append(time.strftime("%c") + ": Entering step 6")
user_wants_transposed = False
print_following_steps(5)
current_log_index = len(processing_log) - 1
datapoint_list_2 = []
UserInput.confirm_warning("This step is to make it easy to also get your data into files in the other "
"dependency. You essentially transpose the result matrix. In other terms, you are "
"able to output per frequency and not just per temperature.",custom)
question = {"question_title": "Do you want transposed data?",
"question_text": "In a sweep, you won't, in a regular measurementa, you almost surely will.",
"default_answer": True,
"optiontype": "yes_no"}
user_wants_transposed = self._get_input(custom, question, template)
if user_wants_transposed:
UserInput.post_status("Here comes a nice time saver! Beware!")
question = {"question_title": "Use tasks selected in step 5?",
"question_text": "Usually/Always, you want just the tasks you merged, integrated and selected for "
"file output to also be transposed so you get the temperature dependency as well. "
"If you select yes, the previously selected tasks will be the ones for output "
"here.",
"default_answer": True,
"optiontype": "yes_no"}
user_wants_tasks_from_step5 = self._get_input(custom, question, template)
if user_wants_tasks_from_step5:
processing_log.append(time.strftime("%c") + ": Selected same tasks as in step 5 for file output 2.")
elif not user_wants_tasks_from_step5:
question = {"question_title": "Second output selection",
"question_text": "Please enter one or more indeces separated only by a comma to be "
"selected for file-output. Everything you select is automatically "
"selected for 2nd file output.",
"default_answer": "0,4,8,12",
"optiontype": "multi_indeces"}
indeces = self._get_input(custom, question, template)
processing_log.append(time.strftime("%c") + ": Selected tasks at indeces :" + str(indeces) +
" for output 2")
# Add the datapoints to the output list 2
for index in indeces:
task_identifier = get_task_id_from_task_list_index(index)
datapoint_list_2.append(self.get_transposed_parent_child_datapoints(task_identifier))
# -------------------------------------------------------------------------------------------------------------
# Step 7: Define file naming, header and columns for first list
UserInput.post_status("####################################")
UserInput.post_status("-------------1st OutputFile: Define file naming, header-------------")
processing_log.append(time.strftime("%c") + ": Entering step 7")
print_following_steps(6)
current_log_index = len(processing_log) - 1
directory_name = ""
UserInput.confirm_warning("Now to the easier of the two lists. The first one is usually the one where you"
"create one file per temperature.",custom)
question = {"question_title": "Directory name",
"question_text": "Usually you want the directory named \"Temperatures\", do you want to use the "
"default?",
"default_answer": True,
"optiontype": "yes_no"}
user_is_fine_with_temperatures = self._get_input(custom, question, template)
if user_is_fine_with_temperatures:
directory_name = os.path.join(self.pickle_path, "Temperatures{0}".format(os.sep))
elif not user_is_fine_with_temperatures:
question = {"question_title": "name for directory",
"question_text": "Please choose a working directory for output of list 1",
"default_answer": "Temperatures",
"optiontype": "free_text"}
directory_name = UserInput.ask_user_for_input(question)["answer"]
directory_name = os.path.join(self.pickle_path, directory_name + os.sep)
file_handler_1 = FileHandler(directory_name)
file_number = 1
file_number_str = "%05d" % (file_number,) # we want leading 0s in the file name so file explorers sort them
# correctly
# Now iterate over all main tasks in the list:
for number, main_task in enumerate(datapoint_list_1):
# We need to ask the user what he wants as the base name for the files. For this, we show the user the top level
# descriptors that are available, eg "Sample Sensor"
main_task_keys_without_subtasks = []
UserInput.post_status("-----------------------")
UserInput.post_status(
"At task {0}, what do you want as the attribute used inside the file name?".format(number))
key_for_file_name = None
for key in main_task[0].keys():
if key != "sub_task_datapoints":
main_task_keys_without_subtasks.append(key)
main_task_keys_without_subtasks.sort()
# Now ask the user which key's value he wants to see in the file name
for index, key in enumerate(main_task_keys_without_subtasks):
UserInput.post_status(str(index) + ": " + key)
question = {"question_title": "What attributes' values should be used to put in the file name?",
"question_text": "Please only enter the 1 corresponding number",
"default_answer": "0",
"optiontype": "multi_indeces"}
index_chosen = self._get_input(custom, question, template)
key_for_file_name = main_task_keys_without_subtasks[index_chosen[0]]
keys_for_sub_task_datapoints = []
# I want all keys that are in sub_task datapoints in a list so I can more easily work with them
for key in main_task[0]["sub_task_datapoints"][0].keys():
keys_for_sub_task_datapoints.append(key)
keys_for_sub_task_datapoints.sort(key=str.lower)
# File Header
first_line = ""
for key in keys_for_sub_task_datapoints:
first_line = first_line + str(key) + "\t"
second_line = "Name: " + self.name
third_line = "Operator: " + self.experimenter + "\t"
fourth_line = "Created at: " + self.creation_time
fifth_line = "Comment: " + str(self.comment)
sixth_line = "Geometry: " + str(self.geometry)
seventh_line = "-------------------------------------------------------\n"
ninth_line = seventh_line
header_lines = [first_line, second_line, third_line, fourth_line, fifth_line, sixth_line, seventh_line]
# Now we access each datarow we have
for main_task_data_point in main_task:
processing_log.append(time.strftime("%c") + ": Processing task {0}".format(str(number)))
# now we have a dictionary at hand of our datapoints and each datapoint of the main_task gets its
# own file
file_name = "{0}_Task{1}_{2}_{3}".format(self.name, str(number), file_number_str,
str(main_task_data_point[key_for_file_name]))
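# e.g. "Run1_Task0_00001_300.0" if the chosen key's value is a temperature (values illustrative)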
# We need to count up the file number and ready the str of it
file_number += 1
file_number_str = "%05d" % (
file_number,) # we want leading 0s in the name so file explorers sort them correctly
outputfile = file_handler_1.create_file(file_name)
# Write the header
for line in header_lines:
outputfile.write_string(line)
# Write main task datapoint data
main_task_data_str = ""
for key in main_task_keys_without_subtasks:
main_task_data_str += "\t{0} {1}".format(str(key), str(main_task_data_point[key]))
outputfile.write_string(main_task_data_str)
outputfile.write_string(ninth_line)
# now gather every sub_task_datapoint (one line in the output file)
for sub_task_datapoint in main_task_data_point["sub_task_datapoints"]:
line_of_data = ""
# then iterate over every key so we can generate the one line
for key in keys_for_sub_task_datapoints:
line_of_data += "{0}\t".format(sub_task_datapoint[key])
outputfile.write_string(line_of_data)
UserInput.post_status("Export is in progress. You should shortly see the files appearing.")
# -------------------------------------------------------------------------------------------------------------
# Step 8: Define file naming, header and columns for second list
if user_wants_transposed:
UserInput.post_status("####################################")
UserInput.post_status("-------------2nd OutputFile: Define file naming, header and columns-------------")
processing_log.append(time.strftime("%c") + ": Entering step 8")
print_following_steps(7)
current_log_index = len(processing_log) - 1
directory_name = ""
UserInput.confirm_warning("Now to the hard part. The second list. This is the list containing your transposed "
"entries, so in Dielectrics parlance, the frequency files.",custom)
question = {"question_title": "Directory name",
"question_text": "Usually you want the directory named \"Frequencies\", do you want to use the "
"default?",
"default_answer": True,
"optiontype": "yes_no"}
user_is_fine_with_temperatures = self._get_input(custom, question, template)
if user_is_fine_with_temperatures:
directory_name = os.path.join(self.pickle_path, "Frequencies{0}".format(os.sep))
elif not user_is_fine_with_temperatures:
question = {"question_title": "name for directory",
"question_text": "Please choose a working directory for output of the list containing your "
"transposed task data",
"default_answer": "Frequencies",
"optiontype": "free_text"}
directory_name = self._get_input(custom, question, template)
directory_name = os.path.join(self.pickle_path, directory_name + os.sep)
file_handler_2 = FileHandler(directory_name)
# The file number for list 2 starts over at 1
file_number = 1
file_number_str = "%05d" % (file_number,) # we want leading 0s in the file name so file explorers sort them
# correctly
# Now iterate over all main tasks in the list:
for number, main_task in enumerate(datapoint_list_2):
# We need to ask the user what he wants as the base name for the files. For this, we show the user the top level
# descriptors that are available, eg "Sample Sensor"
main_task_keys_without_subtasks = []
UserInput.post_status("-----------------------")
UserInput.post_status(
"At task {0}, what do you want as the attribute used inside the file name?".format(number))
key_for_file_name = None
for key in main_task[0].keys():
if key != "sub_task_datapoints":
main_task_keys_without_subtasks.append(key)
main_task_keys_without_subtasks.sort()
# Now ask the user which key's value he wants to see in the file name
for index, key in enumerate(main_task_keys_without_subtasks):
UserInput.post_status(str(index) + ": " + key)
question = {"question_title": "File name",
"question_text": "What attributes' values should be used to put in the file name? Please "
"only enter the 1 corresponding number",
"default_answer": "0",
"optiontype": "multi_indeces"}
index_chosen = self._get_input(custom, question, template)
key_for_file_name = main_task_keys_without_subtasks[index_chosen[0]]
keys_for_sub_task_datapoints = []
# I want all keys that are in sub_task datapoints in a list so I can more easily work with them
for key in main_task[0]["sub_task_datapoints"][0].keys():
# In the transposed case, we will have another set of sub_tasks that we don't want
if key != "sub_task_datapoints":
keys_for_sub_task_datapoints.append(key)
keys_for_sub_task_datapoints.sort(key=str.lower)
# File Header
first_line = ""
for key in keys_for_sub_task_datapoints:
first_line = first_line + str(key) + "\t"
second_line = "Name: " + self.name
third_line = "Operator: " + self.experimenter + "\t"
fourth_line = "Created at: " + self.creation_time
fifth_line = "Comment: " + str(self.comment)
sixth_line = "Geometry: " + str(self.geometry)
seventh_line = "-------------------------------------------------------\n"
ninth_line = seventh_line
header_lines = [first_line, second_line, third_line, fourth_line, fifth_line, sixth_line, seventh_line]
# We want to ask the user what is the controlled bit of the transposed task data. In Dielectrics, this
# usually is the applied frequency or frequency.
for index, key in enumerate(main_task_keys_without_subtasks):
UserInput.post_status(str(index) + ": " + key)
question = {"question_title": "Attributes for file header",
"question_text": "Enter the numbers corresponding to the constants in the task. We are "
"working with transposed data, so this usually means that you want to "
"select frequency and maybe applied_frequency as keys. Those 2 should "
"suffice for the header. The numbers should only be separated by a comma.",
"default_answer": "0,3",
"optiontype": "multi_indeces"}
indeces_chosen = self._get_input(custom, question, template)
keys_for_file_header2 = []
for index in indeces_chosen:
keys_for_file_header2.append(main_task_keys_without_subtasks[index])
keys_for_file_header2.sort()
# Now we access each datarow we have
for main_task_data_point in main_task:
processing_log.append(time.strftime("%c") + ": Processing task {0}".format(str(number)))
# now we have a dictionary at hand of our datapoints and each datapoint of the main_task gets its
# own file
file_name = "{0}_Task{1}_{2}_{3}".format(self.name, str(number), file_number_str,
str(main_task_data_point[key_for_file_name]))
# We need to count up the file number and ready the str of it
file_number += 1
file_number_str = "%05d" % (file_number,) # we want leading 0s in the name so file explorers sort
# them correctly
outputfile = file_handler_2.create_file(file_name)
# Write the header
for line in header_lines:
outputfile.write_string(line)
# Write main task datapoint data (in this case restricted to only the needed keys)
main_task_data_str = ""
for key in keys_for_file_header2:
main_task_data_str += "\t{0} {1}".format(str(key), str(main_task_data_point[key]))
outputfile.write_string(main_task_data_str)
outputfile.write_string(ninth_line)
# now gather every sub_task_datapoint (one line in the output file)
for sub_task_datapoint in main_task_data_point["sub_task_datapoints"]:
line_of_data = ""
# then iterate over every key so we can generate the one line
for key in keys_for_sub_task_datapoints:
line_of_data += "{0}\t".format(sub_task_datapoint[key])
outputfile.write_string(line_of_data)
UserInput.post_status("Export is in progress. You should shortly see the files appearing.")
# -------------------------------------------------------------------------------------------------------------
# Step 9: Start File Output - this involves the output of the processing log as well as the task list and
# closing all files
UserInput.post_status("####################################")
UserInput.post_status("-------------Making files-------------")
processing_log.append(time.strftime("%c") + ": Entering step 9")
print_following_steps(8)
current_log_index = len(processing_log) - 1
# We need to print out the processing log, the modified database itself and the task list.
#TODO
UserInput.post_status("Now Pickling the modified database. This could take some time!")
processing_log.append(time.strftime("%c") + ": Starting pickling of processed database.")
self.pickle_database("_processed")
processing_log.append(time.strftime("%c") + ": Finished pickling.")
processing_log.append(time.strftime("%c") + ": Starting file output for task list.")
filehandler_for_task_list = FileHandler(self.pickle_path)
task_list_file = filehandler_for_task_list.create_file("{0}_tasks".format(self.name))
UserInput.post_status("Now reticulating splines. This could take some time.")
for task in self.tasks:
# Write all tasks into the buffer of the file
task_list_file.write_string(str(task))
# Now start the file handler so it writes the buffered task list to disk
filehandler_for_task_list.start()
processing_log.append(time.strftime("%c") + ": Starting file output for first list.")
# when we call start, we make a new Thread for the file handler which itself handles one file after the other
file_handler_1.start()
if user_wants_transposed:
processing_log.append(time.strftime("%c") + ": Starting file output for second list.")
file_handler_2.start()
UserInput.post_status("Waiting on output1 to finish")
file_handler_1.join()
UserInput.post_status("Forgot some splines. Remedying that!")
if user_wants_transposed:
UserInput.post_status("Waiting on output2 to finish")
# We can only wait on it if the user requested it and wants it
file_handler_2.join()
UserInput.post_status("Waiting on task_list_output to finish")
filehandler_for_task_list.join()
UserInput.post_status("Now saving processing log.")
UserInput.post_status("I sincerely hope your time with JUMP was good!")
filehandler_for_log = FileHandler(self.pickle_path)
processing_log_file = filehandler_for_log.create_file("{0}_processing_log".format(self.name))
for entry in processing_log:
processing_log_file.write_string(str(entry))
filehandler_for_log.start()
filehandler_for_log.join()
UserInput.post_status("Closed the log file. Bye bye!")
def _get_input(self, custom, question, template=None):
"""
Private helper implementing templated data export. A list of all input parameters
serves as the template.
Parameters
----------
custom : bool
Decide whether to customize your measurement or use a template instead.
question :
The question dict to pose.
template :
The template answer list, used when custom is False.
Returns
-------
The chosen answer.
"""
if template is None:
template = []
if custom:
answer = UserInput.ask_user_for_input(question)["answer"]
self.task_input.append(answer)
return answer
else:
return template.pop(0)
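# Illustrative example: with custom=False and template=[True, 1.0, 1.0], three successive
# _get_input calls return True, 1.0 and 1.0 in that order, without prompting the user.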
@property
def tasks(self):
return self.tasks_of_a_run
@tasks.setter
def tasks(self, task_list):
self.tasks_of_a_run = task_list
def add_point(self, identifier: list, datapoint):
# The logic to use the FileHandler to manage the file output for raw data should probably also go in this method
# A datapoint looks like: {"R": 3, "X": 4}
recursive_db = self.db
for sub_part in identifier:
recursive_db = recursive_db[sub_part]
recursive_db["Datapoints"].append(datapoint)
def make_storage(self, identifier: list, data_source, human_readable_taskname: str):  # identifier: [0, 1, 3, 2]
data_level_dict = {"type": data_source, "Datapoints": [], "human_readable_task": human_readable_taskname}
recursive_db_level = self.db
level_depth = len(identifier) - 1
# We start off at the top level db and then go deeper and deeper
for index, temp_id in enumerate(identifier):
# temp_id is a piece of the identifier.
if temp_id not in recursive_db_level:
if index == level_depth:
recursive_db_level[temp_id] = data_level_dict
else:
recursive_db_level[temp_id] = {}
recursive_db_level = recursive_db_level[temp_id]
else:
recursive_db_level = recursive_db_level[temp_id]
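# e.g. make_storage([0, 1], source, "IV sweep") creates self.db[0] = {} if missing, then
# self.db[0][1] = {"type": source, "Datapoints": [], "human_readable_task": "IV sweep"}
# (source and "IV sweep" are illustrative placeholders)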
def calculate_all_values(self, geometry=None):
"""Traverses the database and calculates all calculatable values as implemented in DataManipulator
:type geometry: dict
:param geometry: dictionary containing thickness and area in mm as a float value respectively
"""
for key in list(self.db.keys()):
self._recursive_value_calc(self.db[key], geometry)
def _recursive_value_calc(self, db_slice: dict, geometry=None):
# first calculate all datapoints in the current depth
for index, datapoint in enumerate(db_slice["Datapoints"]):
db_slice["Datapoints"][index] = DataManipulator.calculate_non_geometry_dependent_values(datapoint)
if geometry:
db_slice["Datapoints"][index] = DataManipulator.calculate_geometry_dependent_values(
db_slice["Datapoints"][index], geometry)
# then go deeper in all the other deeper lying subtasks
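# (integer keys denote nested sub-task levels; string keys such as "type" and
# "Datapoints" hold this level's own data)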
for key in list(db_slice.keys()):
if type(key) is int:
self._recursive_value_calc(db_slice[key], geometry)
def _get_datapoint_list_at_identifier(self, identifier):
"""returns the datapoint list with the specified identifier. If you manipulate this directly, you will directly
manipulate the main_db as well
:param identifier: a list in the usual identifier type
:return: datapoints list at the specified identifier. Raises an IndexError if index isn't present
"""
db_slice = self.db
did_find_identifier = False
# we need to match every component of the identifier
for identifier_slice in identifier:
did_find_identifier = False
# then go over every key of the current db slice to see whether it matches
for key in list(db_slice.keys()):
# now check whether it matches
if key == identifier_slice:
# if it matches, we go one level deeper with the respective key
db_slice = db_slice[key]
# and we did find the identifier_slice, so we can set that to true
did_find_identifier = True
break
# if we finish the for loop without having found the identifier, then it's not present in the database,
# therefore we raise an error
if not did_find_identifier:
raise IndexError("Didn't find the identifier in the database")
return db_slice["Datapoints"]
def _merge_two_datapoint_lists(self, datapointlist1, datapointlist2):
"""Takes two lists that are directly in the database and updates the first list with the content of the
second one in place
:param datapointlist1:
:param datapointlist2:
:return:
"""
# we update the first item of datapoints1 with the first item of datapoints2. This 1-by-1 pairing of both
# lists is achieved by "zip", which makes sure that we're always at the same index in both lists
for point1, point2 in zip(datapointlist1, datapointlist2): # type: dict
# in case data with the exact same key is already present, it gets overwritten as expected and documented
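# e.g. {"R": 3, "X": 4} updated with {"X": 7, "f": 1e3} becomes {"R": 3, "X": 7, "f": 1e3}
# (values illustrative)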