
Commit de48541

Merge remote-tracking branch 'yarikoptic/enh-codespell'
2 parents: ef4e3da + b046559

24 files changed (+66 -39 lines)

.github/workflows/codespell.yml (+22)

@@ -0,0 +1,22 @@
+---
+name: Codespell
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+
+permissions:
+  contents: read
+
+jobs:
+  codespell:
+    name: Check for spelling errors
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Codespell
+        uses: codespell-project/actions-codespell@v2
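
The workflow above wires codespell into CI for pushes and pull requests against master. For a local pre-check before pushing, a minimal sketch (assumes codespell is installed, e.g. via pip install codespell; the target path "." is an example):

import subprocess

# Run the same spell check the CI performs, here over the current directory.
# codespell exits with a non-zero code when it finds misspellings.
result = subprocess.run(["codespell", "."], capture_output=True, text=True)
print(result.stdout or "no spelling issues found")
print("exit code:", result.returncode)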

documentation/dataset_format.md (+1 -1)

@@ -21,7 +21,7 @@ images). So these images could for example be a T1 and a T2 MRI (or whatever els
 channels MUST have the same geometry (same shape, spacing (if applicable) etc.) and
 must be co-registered (if applicable). Input channels are identified by nnU-Net by their FILE_ENDING: a four-digit integer at the end
 of the filename. Image files must therefore follow the following naming convention: {CASE_IDENTIFIER}_{XXXX}.{FILE_ENDING}.
-Hereby, XXXX is the 4-digit modality/channel identifier (should be unique for each modality/chanel, e.g., “0000” for T1, “0001” for
+Hereby, XXXX is the 4-digit modality/channel identifier (should be unique for each modality/channel, e.g., “0000” for T1, “0001” for
 T2 MRI, …) and FILE_ENDING is the file extension used by your image format (.png, .nii.gz, ...). See below for concrete examples.
 The dataset.json file connects channel names with the channel identifiers in the 'channel_names' key (see below for details).
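
The naming convention fixed in this hunk is mechanical enough to demonstrate. A hedged sketch (case identifier, channels and file ending are invented for illustration):

# Hypothetical illustration of {CASE_IDENTIFIER}_{XXXX}.{FILE_ENDING}:
case_identifier = "BraTS_001"
file_ending = ".nii.gz"
channels = {"0000": "T1", "0001": "T2"}  # 4-digit channel identifiers

for channel_id, modality in channels.items():
    # e.g. T1 -> BraTS_001_0000.nii.gz
    print(modality, "->", f"{case_identifier}_{channel_id}{file_ending}")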

documentation/how_to_use_nnunet.md (+1 -1)

@@ -189,7 +189,7 @@ wait
 **Important: The first time a training is run nnU-Net will extract the preprocessed data into uncompressed numpy
 arrays for speed reasons! This operation must be completed before starting more than one training of the same
 configuration! Wait with starting subsequent folds until the first training is using the GPU! Depending on the
-dataset size and your System this should oly take a couple of minutes at most.**
+dataset size and your System this should only take a couple of minutes at most.**

 If you insist on running DDP multi-GPU training, we got you covered:

documentation/set_environment_variables.md (+1 -1)

@@ -3,7 +3,7 @@
 nnU-Net requires some environment variables so that it always knows where the raw data, preprocessed data and trained
 models are. Depending on the operating system, these environment variables need to be set in different ways.

-Variables can either be set permanently (recommended!) or you can decide to set them everytime you call nnU-Net.
+Variables can either be set permanently (recommended!) or you can decide to set them every time you call nnU-Net.

 # Linux & MacOS
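
For context, these are the three locations nnU-Net needs to know about. A minimal sketch of setting them for a single Python session (variable names follow the nnU-Net v2 documentation; the paths are placeholders):

import os

# Assumed nnU-Net v2 variable names; adjust the paths to your system.
os.environ["nnUNet_raw"] = "/data/nnUNet_raw"
os.environ["nnUNet_preprocessed"] = "/data/nnUNet_preprocessed"
os.environ["nnUNet_results"] = "/data/nnUNet_results"

for var in ("nnUNet_raw", "nnUNet_preprocessed", "nnUNet_results"):
    print(var, "=", os.environ.get(var, "<not set>"))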

nnunetv2/dataset_conversion/generate_dataset_json.py (+1 -1)

@@ -76,7 +76,7 @@ def generate_dataset_json(output_folder: str,
         labels[l] = int(labels[l])

     dataset_json = {
-        'channel_names': channel_names,  # previously this was called 'modality'. I didnt like this so this is
+        'channel_names': channel_names,  # previously this was called 'modality'. I didn't like this so this is
         # channel_names now. Live with it.
         'labels': labels,
         'numTraining': num_training_cases,
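
The keys visible in this hunk hint at what a minimal dataset.json contains. A hedged sketch (all values invented; 'file_ending' is an assumed additional key not shown in the hunk):

import json

dataset_json = {
    'channel_names': {'0': 'T1', '1': 'T2'},  # was 'modality' in nnU-Net v1
    'labels': {'background': 0, 'tumor': 1},  # label name -> integer id
    'numTraining': 100,                       # number of training cases
    'file_ending': '.nii.gz',                 # assumed key, for illustration
}
print(json.dumps(dataset_json, indent=2))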

nnunetv2/ensembling/ensemble.py (+1 -1)

@@ -144,7 +144,7 @@ def ensemble_crossvalidations(list_of_trained_model_folders: List[str],
         for f in folds:
             if not isdir(join(tr, f'fold_{f}', 'validation')):
                 raise RuntimeError(f'Expected model output directory does not exist. You must train all requested '
-                                   f'folds of the speficied model.\nModel: {tr}\nFold: {f}')
+                                   f'folds of the specified model.\nModel: {tr}\nFold: {f}')
             files_here = subfiles(join(tr, f'fold_{f}', 'validation'), suffix='.npz', join=False)
             if len(files_here) == 0:
                 raise RuntimeError(f"No .npz files found in folder {join(tr, f'fold_{f}', 'validation')}. Rerun your "

nnunetv2/evaluation/evaluate_predictions.py (+4 -4)

@@ -27,8 +27,8 @@ def key_to_label_or_region(key: str):
     except ValueError:
         key = key.replace('(', '')
         key = key.replace(')', '')
-        splitted = key.split(',')
-        return tuple([int(i) for i in splitted if len(i) > 0])
+        split = key.split(',')
+        return tuple([int(i) for i in split if len(i) > 0])


 def save_summary_json(results: dict, output_file: str):
@@ -227,7 +227,7 @@ def evaluate_folder_entry_point():
                         help='Output file. Optional. Default: pred_folder/summary.json')
     parser.add_argument('-np', type=int, required=False, default=default_num_processes,
                         help=f'number of processes used. Optional. Default: {default_num_processes}')
-    parser.add_argument('--chill', action='store_true', help='dont crash if folder_pred doesnt have all files that are present in folder_gt')
+    parser.add_argument('--chill', action='store_true', help='dont crash if folder_pred does not have all files that are present in folder_gt')
     args = parser.parse_args()
     compute_metrics_on_folder2(args.gt_folder, args.pred_folder, args.djfile, args.pfile, args.o, args.np, chill=args.chill)

@@ -245,7 +245,7 @@ def evaluate_simple_entry_point():
                         help='Output file. Optional. Default: pred_folder/summary.json')
     parser.add_argument('-np', type=int, required=False, default=default_num_processes,
                         help=f'number of processes used. Optional. Default: {default_num_processes}')
-    parser.add_argument('--chill', action='store_true', help='dont crash if folder_pred doesnt have all files that are present in folder_gt')
+    parser.add_argument('--chill', action='store_true', help='dont crash if folder_pred does not have all files that are present in folder_gt')

     args = parser.parse_args()
     compute_metrics_on_folder_simple(args.gt_folder, args.pred_folder, args.l, args.o, args.np, args.il, chill=args.chill)
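
The first hunk renames a local variable in key_to_label_or_region, which turns a summary-json key into either a plain label or a region tuple. A hedged standalone re-sketch of that logic, to make the intent visible (the try/int branch is inferred from the 'except ValueError' context shown above):

def key_to_label_or_region(key: str):
    # A key is either a single integer label ("2") or a region tuple
    # rendered as a string ("(1, 2)").
    try:
        return int(key)
    except ValueError:
        key = key.replace('(', '').replace(')', '')
        split = key.split(',')
        return tuple(int(i) for i in split if len(i) > 0)

print(key_to_label_or_region("2"))       # -> 2
print(key_to_label_or_region("(1, 2)"))  # -> (1, 2)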

nnunetv2/evaluation/find_best_configuration.py (+1 -1)

@@ -285,7 +285,7 @@ def find_best_configuration_entry_point():
                         help='Set this flag to disable ensembling')
     parser.add_argument('--no_overwrite', action='store_true',
                         help='If set we will not overwrite already ensembled files etc. May speed up concecutive '
-                             'runs of this command (why would oyu want to do that?) at the risk of not updating '
+                             'runs of this command (why would you want to do that?) at the risk of not updating '
                              'outdated results.')
     args = parser.parse_args()

nnunetv2/experiment_planning/experiment_planners/default_experiment_planner.py (+2 -2)

@@ -520,8 +520,8 @@ def save_plans(self, plans):

     def generate_data_identifier(self, configuration_name: str) -> str:
         """
-        configurations are unique within each plans file but differnet plans file can have configurations with the
-        same name. In order to distinguish the assiciated data we need a data identifier that reflects not just the
+        configurations are unique within each plans file but different plans file can have configurations with the
+        same name. In order to distinguish the associated data we need a data identifier that reflects not just the
         config but also the plans it originates from
         """
         return self.plans_identifier + '_' + configuration_name
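
The corrected docstring describes a simple composition rule: the data identifier is the plans identifier joined with the configuration name. A quick illustration (both names assumed, not taken from the commit):

plans_identifier = "nnUNetPlans"   # assumed default plans name
configuration_name = "3d_fullres"
print(plans_identifier + '_' + configuration_name)  # nnUNetPlans_3d_fullres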

nnunetv2/experiment_planning/plan_and_preprocess_entrypoints.py (+3 -3)

@@ -21,7 +21,7 @@ def extract_fingerprint_entry():
                         help='[OPTIONAL] Set this flag to overwrite existing fingerprints. If this flag is not set and a '
                              'fingerprint already exists, the fingerprint extractor will not run.')
     parser.add_argument('--verbose', required=False, action='store_true',
-                        help='Set this to print a lot of stuff. Useful for debugging. Will disable progrewss bar! '
+                        help='Set this to print a lot of stuff. Useful for debugging. Will disable progress bar! '
                              'Recommended for cluster environments')
     args, unrecognized_args = parser.parse_known_args()
     extract_fingerprints(args.d, args.fpe, args.np, args.verify_dataset_integrity, args.clean, args.verbose)
@@ -91,7 +91,7 @@ def preprocess_entry():
                         "DECREASE -np IF YOUR RAM FILLS UP TOO MUCH!. Default: 8 processes for 2d, 4 "
                         "for 3d_fullres, 8 for 3d_lowres and 4 for everything else")
     parser.add_argument('--verbose', required=False, action='store_true',
-                        help='Set this to print a lot of stuff. Useful for debugging. Will disable progrewss bar! '
+                        help='Set this to print a lot of stuff. Useful for debugging. Will disable progress bar! '
                              'Recommended for cluster environments')
     args, unrecognized_args = parser.parse_known_args()
     if args.np is None:
@@ -173,7 +173,7 @@ def plan_and_preprocess_entry():
                         "DECREASE -np IF YOUR RAM FILLS UP TOO MUCH!. Default: 8 processes for 2d, 4 "
                         "for 3d_fullres, 8 for 3d_lowres and 4 for everything else")
     parser.add_argument('--verbose', required=False, action='store_true',
-                        help='Set this to print a lot of stuff. Useful for debugging. Will disable progrewss bar! '
+                        help='Set this to print a lot of stuff. Useful for debugging. Will disable progress bar! '
                              'Recommended for cluster environments')
     args = parser.parse_args()

nnunetv2/experiment_planning/verify_dataset_integrity.py (+1 -1)

@@ -172,7 +172,7 @@ def verify_dataset_integrity(folder: str, num_processes: int = 8) -> None:
             missing_labels.append(dataset[k]['label'])
             ok = False
     if not ok:
-        raise FileNotFoundError(f"Some expeted files were missing. Make sure you are properly referencing them "
+        raise FileNotFoundError(f"Some expected files were missing. Make sure you are properly referencing them "
                                 f"in the dataset.json. Or use imagesTr & labelsTr folders!\nMissing images:"
                                 f"\n{missing_images}\n\nMissing labels:\n{missing_labels}")
     else:

nnunetv2/imageio/base_reader_writer.py (+2 -2)

@@ -61,7 +61,7 @@ def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[
         :return:
         1) a np.ndarray of shape (c, x, y, z) where c is the number of image channels (can be 1) and x, y, z are
         the spatial dimensions (set x=1 for 2D! Example: (3, 1, 224, 224) for RGB image).
-        2) a dictionary with metadata. This can be anything. BUT it HAS to inclue a {'spacing': (a, b, c)} where a
+        2) a dictionary with metadata. This can be anything. BUT it HAS to include a {'spacing': (a, b, c)} where a
         is the spacing of x, b of y and c of z! If an image doesn't have spacing, just set this to 1. For 2D, set
         a=999 (largest spacing value! Make it larger than b and c)
@@ -79,7 +79,7 @@ def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
         :return:
         1) a np.ndarray of shape (1, x, y, z) where x, y, z are
         the spatial dimensions (set x=1 for 2D! Example: (1, 1, 224, 224) for 2D segmentation).
-        2) a dictionary with metadata. This can be anything. BUT it HAS to inclue a {'spacing': (a, b, c)} where a
+        2) a dictionary with metadata. This can be anything. BUT it HAS to include a {'spacing': (a, b, c)} where a
         is the spacing of x, b of y and c of z! If an image doesn't have spacing, just set this to 1. For 2D, set
         a=999 (largest spacing value! Make it larger than b and c)
         """

nnunetv2/inference/export_prediction.py (+2 -2)

@@ -31,8 +31,8 @@ def convert_predicted_logits_to_segmentation_with_correct_shape(predicted_logits
                                                  properties_dict['shape_after_cropping_and_before_resampling'],
                                                  current_spacing,
                                                  properties_dict['spacing'])
-    # return value of resampling_fn_probabilities can be ndarray or Tensor but that doesnt matter because
-    # apply_inference_nonlin will covnert to torch
+    # return value of resampling_fn_probabilities can be ndarray or Tensor but that does not matter because
+    # apply_inference_nonlin will convert to torch
     predicted_probabilities = label_manager.apply_inference_nonlin(predicted_logits)
     del predicted_logits
     segmentation = label_manager.convert_probabilities_to_segmentation(predicted_probabilities)
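
The fixed comment describes the tail of the export pipeline: resampled logits go through a nonlinearity and are then collapsed into a label map. A hedged sketch of those two steps with plain torch calls (not the actual label_manager implementation; shapes invented):

import torch

logits = torch.randn(3, 16, 64, 64)           # (classes, x, y, z), made up
probabilities = torch.softmax(logits, dim=0)  # analogue of apply_inference_nonlin
segmentation = probabilities.argmax(dim=0)    # analogue of convert_probabilities_to_segmentation
print(segmentation.shape)                     # torch.Size([16, 64, 64])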

nnunetv2/inference/predict_from_raw_data.py (+1 -1)

@@ -805,7 +805,7 @@ def predict_entry_point():
     if not isdir(args.o):
         maybe_mkdir_p(args.o)

-    # slightly passive agressive haha
+    # slightly passive aggressive haha
     assert args.part_id < args.num_parts, 'Do you even read the documentation? See nnUNetv2_predict -h.'

     assert args.device in ['cpu', 'cuda',
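
The assertion enforces the 0-based convention that part_id must be smaller than num_parts when prediction work is split across workers. A hedged illustration of one common sharding scheme (case names invented; the interleaved split is an assumption, not necessarily nnU-Net's exact strategy):

cases = [f"case_{i:03d}" for i in range(10)]   # made-up case list
num_parts = 3
for part_id in range(num_parts):               # valid ids: 0 .. num_parts-1
    print(part_id, cases[part_id::num_parts])  # one shard per worker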

nnunetv2/inference/readme.md (+1 -1)

@@ -82,7 +82,7 @@ need for the _0000 suffix anymore! This can be useful in situations where you ha
 Remember that the files must be given as 'list of lists' where each entry in the outer list is a case to be predicted
 and the inner list contains all the files belonging to that case. There is just one file for datasets with just one
 input modality (such as CT) but may be more files for others (such as MRI where there is sometimes T1, T2, Flair etc).
-IMPORTANT: the order in wich the files for each case are given must match the order of the channels as defined in the
+IMPORTANT: the order in which the files for each case are given must match the order of the channels as defined in the
 dataset.json!

 If you give files as input, you need to give individual output files as output!
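
The 'list of lists' convention described here is easiest to see in code. A hedged sketch (file names invented; the channel order must match dataset.json, e.g. 0000=T1, 0001=T2):

# One inner list per case, files ordered like the channels in dataset.json.
input_files = [
    ["case1_0000.nii.gz", "case1_0001.nii.gz"],  # case 1: T1, T2
    ["case2_0000.nii.gz", "case2_0001.nii.gz"],  # case 2: T1, T2
]
# When giving files as input, give individual output files as output.
output_files = ["case1_pred.nii.gz", "case2_pred.nii.gz"]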

nnunetv2/postprocessing/remove_connected_components.py (+2 -2)

@@ -71,7 +71,7 @@ def determine_postprocessing(folder_predictions: str,
     if plans_file_or_dict is None:
         expected_plans_file = join(folder_predictions, 'plans.json')
         if not isfile(expected_plans_file):
-            raise RuntimeError(f"Expected plans file missing: {expected_plans_file}. The plans fils should have been "
+            raise RuntimeError(f"Expected plans file missing: {expected_plans_file}. The plans files should have been "
                                f"created while running nnUNetv2_predict. Sadge.")
         plans_file_or_dict = load_json(expected_plans_file)
     plans_manager = PlansManager(plans_file_or_dict)
@@ -80,7 +80,7 @@ def determine_postprocessing(folder_predictions: str,
         expected_dataset_json_file = join(folder_predictions, 'dataset.json')
         if not isfile(expected_dataset_json_file):
             raise RuntimeError(
-                f"Expected plans file missing: {expected_dataset_json_file}. The plans fils should have been "
+                f"Expected plans file missing: {expected_dataset_json_file}. The plans files should have been "
                 f"created while running nnUNetv2_predict. Sadge.")
         dataset_json_file_or_dict = load_json(expected_dataset_json_file)

nnunetv2/run/load_pretrained_weights.py (+1 -1)

@@ -9,7 +9,7 @@ def load_pretrained_weights(network, fname, verbose=False):
     shape is also the same. Segmentation layers (the 1x1(x1) layers that produce the segmentation maps)
     identified by keys ending with '.seg_layers') are not transferred!

-    If the pretrained weights were optained with a training outside nnU-Net and DDP or torch.optimize was used,
+    If the pretrained weights were obtained with a training outside nnU-Net and DDP or torch.optimize was used,
     you need to change the keys of the pretrained state_dict. DDP adds a 'module.' prefix and torch.optim adds
     '_orig_mod'. You DO NOT need to worry about this if pretraining was done with nnU-Net as
     nnUNetTrainer.save_checkpoint takes care of that!
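
The corrected docstring mentions two wrapper prefixes that can appear in state_dict keys: 'module.' from DDP and '_orig_mod' from compiled models. A hedged sketch of the kind of key normalization it alludes to (not nnU-Net's own code):

def strip_wrapper_prefixes(state_dict: dict) -> dict:
    # Drop 'module.' (DDP) and '_orig_mod.' (compiled model) key prefixes,
    # as described in the docstring above. Sketch only.
    cleaned = {}
    for key, value in state_dict.items():
        for prefix in ('module.', '_orig_mod.'):
            if key.startswith(prefix):
                key = key[len(prefix):]
        cleaned[key] = value
    return cleaned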

nnunetv2/training/dataloading/nnunet_dataset.py (+2 -2)

@@ -123,7 +123,7 @@ def load_case(self, key):

     # this should have the properties
     ds = nnUNetDataset(folder, num_images_properties_loading_threshold=1000)
-    # now rename the properties file so that it doesnt exist anymore
+    # now rename the properties file so that it does not exist anymore
     shutil.move(join(folder, 'liver_0.pkl'), join(folder, 'liver_XXX.pkl'))
     # now we should still be able to access the properties because they have already been loaded
     ks = ds['liver_0'].keys()
@@ -133,7 +133,7 @@ def load_case(self, key):

     # this should not have the properties
     ds = nnUNetDataset(folder, num_images_properties_loading_threshold=0)
-    # now rename the properties file so that it doesnt exist anymore
+    # now rename the properties file so that it does not exist anymore
     shutil.move(join(folder, 'liver_0.pkl'), join(folder, 'liver_XXX.pkl'))
     # now this should crash
     try:

nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerBN.py (+1 -1)

@@ -45,7 +45,7 @@ def build_network_architecture(plans_manager: PlansManager,
                                                           'is non-standard (maybe your own?). Yo\'ll have to dive ' \
                                                           'into either this ' \
                                                           'function (get_network_from_plans) or ' \
-                                                          'the init of your nnUNetModule to accomodate that.'
+                                                          'the init of your nnUNetModule to accommodate that.'
     network_class = mapping[segmentation_network_class_name]

     conv_or_blocks_per_stage = {

nnunetv2/utilities/dataset_name_id_conversion.py (+2 -2)

@@ -70,5 +70,5 @@ def maybe_convert_to_dataset_name(dataset_name_or_id: Union[int, str]) -> str:
     except ValueError:
         raise ValueError("dataset_name_or_id was a string and did not start with 'Dataset' so we tried to "
                          "convert it to a dataset ID (int). That failed, however. Please give an integer number "
-                         "('1', '2', etc) or a correct tast name. Your input: %s" % dataset_name_or_id)
-    return convert_id_to_dataset_name(dataset_name_or_id)
+                         "('1', '2', etc) or a correct dataset name. Your input: %s" % dataset_name_or_id)
+    return convert_id_to_dataset_name(dataset_name_or_id)
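
For orientation, the function resolves numeric IDs to full dataset folder names and passes proper names through. A hedged example of the intended behavior (the dataset name is invented; the resolved name depends on what exists on disk):

maybe_convert_to_dataset_name(2)                    # -> 'Dataset002_Heart' (assumed)
maybe_convert_to_dataset_name('Dataset002_Heart')   # returned unchanged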

nnunetv2/utilities/file_path_utilities.py (+6 -6)

@@ -39,10 +39,10 @@ def parse_dataset_trainer_plans_configuration_from_path(path: str):
     assert len(folders[:idx]) >= 2, 'Bad path, cannot extract what I need. Your path needs to be at least ' \
                                     'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work'
     if folders[idx - 2].startswith('Dataset'):
-        splitted = folders[idx - 1].split('__')
-        assert len(splitted) == 3, 'Bad path, cannot extract what I need. Your path needs to be at least ' \
+        split = folders[idx - 1].split('__')
+        assert len(split) == 3, 'Bad path, cannot extract what I need. Your path needs to be at least ' \
                                    'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work'
-        return folders[idx - 2], *splitted
+        return folders[idx - 2], *split
     else:
         # we can only check for dataset followed by a string that is separable into three strings by splitting with '__'
         # look for DatasetXXX
@@ -51,10 +51,10 @@ def parse_dataset_trainer_plans_configuration_from_path(path: str):
         idx = dataset_folder.index(True)
         assert len(folders) >= (idx + 1), 'Bad path, cannot extract what I need. Your path needs to be at least ' \
                                           'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work'
-        splitted = folders[idx + 1].split('__')
-        assert len(splitted) == 3, 'Bad path, cannot extract what I need. Your path needs to be at least ' \
+        split = folders[idx + 1].split('__')
+        assert len(split) == 3, 'Bad path, cannot extract what I need. Your path needs to be at least ' \
                                    'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work'
-        return folders[idx], *splitted
+        return folders[idx], *split


 def get_ensemble_name(model1_folder, model2_folder, folds: Tuple[int, ...]):
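
The path layout this function expects, DatasetXXX/MODULE__PLANS__CONFIGURATION, is easy to demonstrate. A hedged sketch with an invented path:

# Hypothetical path following the expected layout.
folder = "Dataset002_Heart/nnUNetTrainer__nnUNetPlans__3d_fullres"
dataset, rest = folder.split('/')
trainer, plans, configuration = rest.split('__')
print(dataset, trainer, plans, configuration)
# Dataset002_Heart nnUNetTrainer nnUNetPlans 3d_fullres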
