Skip to content

Commit b046559

Browse files
committed
[DATALAD RUNCMD] Do interactive fixing of some ambiguous typos
=== Do not change lines below === { "chain": [], "cmd": "codespell -w -i 3 -C 2", "exit": 0, "extra_inputs": [], "inputs": [], "outputs": [], "pwd": "." } ^^^ Do not change lines above ^^^
1 parent 4790182 commit b046559

File tree

5 files changed

+9
-9
lines changed

5 files changed

+9
-9
lines changed

nnunetv2/evaluation/evaluate_predictions.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -227,7 +227,7 @@ def evaluate_folder_entry_point():
227227
help='Output file. Optional. Default: pred_folder/summary.json')
228228
parser.add_argument('-np', type=int, required=False, default=default_num_processes,
229229
help=f'number of processes used. Optional. Default: {default_num_processes}')
230-
parser.add_argument('--chill', action='store_true', help='dont crash if folder_pred doesnt have all files that are present in folder_gt')
230+
parser.add_argument('--chill', action='store_true', help='dont crash if folder_pred does not have all files that are present in folder_gt')
231231
args = parser.parse_args()
232232
compute_metrics_on_folder2(args.gt_folder, args.pred_folder, args.djfile, args.pfile, args.o, args.np, chill=args.chill)
233233

@@ -245,7 +245,7 @@ def evaluate_simple_entry_point():
245245
help='Output file. Optional. Default: pred_folder/summary.json')
246246
parser.add_argument('-np', type=int, required=False, default=default_num_processes,
247247
help=f'number of processes used. Optional. Default: {default_num_processes}')
248-
parser.add_argument('--chill', action='store_true', help='dont crash if folder_pred doesnt have all files that are present in folder_gt')
248+
parser.add_argument('--chill', action='store_true', help='dont crash if folder_pred does not have all files that are present in folder_gt')
249249

250250
args = parser.parse_args()
251251
compute_metrics_on_folder_simple(args.gt_folder, args.pred_folder, args.l, args.o, args.np, args.il, chill=args.chill)

nnunetv2/inference/export_prediction.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ def convert_predicted_logits_to_segmentation_with_correct_shape(predicted_logits
3131
properties_dict['shape_after_cropping_and_before_resampling'],
3232
current_spacing,
3333
properties_dict['spacing'])
34-
# return value of resampling_fn_probabilities can be ndarray or Tensor but that doesnt matter because
34+
# return value of resampling_fn_probabilities can be ndarray or Tensor but that does not matter because
3535
# apply_inference_nonlin will convert to torch
3636
predicted_probabilities = label_manager.apply_inference_nonlin(predicted_logits)
3737
del predicted_logits

nnunetv2/postprocessing/remove_connected_components.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ def determine_postprocessing(folder_predictions: str,
7171
if plans_file_or_dict is None:
7272
expected_plans_file = join(folder_predictions, 'plans.json')
7373
if not isfile(expected_plans_file):
74-
raise RuntimeError(f"Expected plans file missing: {expected_plans_file}. The plans fils should have been "
74+
raise RuntimeError(f"Expected plans file missing: {expected_plans_file}. The plans files should have been "
7575
f"created while running nnUNetv2_predict. Sadge.")
7676
plans_file_or_dict = load_json(expected_plans_file)
7777
plans_manager = PlansManager(plans_file_or_dict)
@@ -80,7 +80,7 @@ def determine_postprocessing(folder_predictions: str,
8080
expected_dataset_json_file = join(folder_predictions, 'dataset.json')
8181
if not isfile(expected_dataset_json_file):
8282
raise RuntimeError(
83-
f"Expected plans file missing: {expected_dataset_json_file}. The plans fils should have been "
83+
f"Expected plans file missing: {expected_dataset_json_file}. The plans files should have been "
8484
f"created while running nnUNetv2_predict. Sadge.")
8585
dataset_json_file_or_dict = load_json(expected_dataset_json_file)
8686

nnunetv2/training/dataloading/nnunet_dataset.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@ def load_case(self, key):
123123

124124
# this should have the properties
125125
ds = nnUNetDataset(folder, num_images_properties_loading_threshold=1000)
126-
# now rename the properties file so that it doesnt exist anymore
126+
# now rename the properties file so that it does not exist anymore
127127
shutil.move(join(folder, 'liver_0.pkl'), join(folder, 'liver_XXX.pkl'))
128128
# now we should still be able to access the properties because they have already been loaded
129129
ks = ds['liver_0'].keys()
@@ -133,7 +133,7 @@ def load_case(self, key):
133133

134134
# this should not have the properties
135135
ds = nnUNetDataset(folder, num_images_properties_loading_threshold=0)
136-
# now rename the properties file so that it doesnt exist anymore
136+
# now rename the properties file so that it does not exist anymore
137137
shutil.move(join(folder, 'liver_0.pkl'), join(folder, 'liver_XXX.pkl'))
138138
# now this should crash
139139
try:

nnunetv2/utilities/dataset_name_id_conversion.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -70,5 +70,5 @@ def maybe_convert_to_dataset_name(dataset_name_or_id: Union[int, str]) -> str:
7070
except ValueError:
7171
raise ValueError("dataset_name_or_id was a string and did not start with 'Dataset' so we tried to "
7272
"convert it to a dataset ID (int). That failed, however. Please give an integer number "
73-
"('1', '2', etc) or a correct tast name. Your input: %s" % dataset_name_or_id)
74-
return convert_id_to_dataset_name(dataset_name_or_id)
73+
"('1', '2', etc) or a correct dataset name. Your input: %s" % dataset_name_or_id)
74+
return convert_id_to_dataset_name(dataset_name_or_id)

0 commit comments

Comments
 (0)