From a50247ce3c52bc4b40b148fb62d10f48763b0e44 Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Tue, 14 Jan 2025 09:15:19 -0500
Subject: [PATCH] Run ruff.

---
 docs/conf.py                    |  2 +-
 xcp_d/__about__.py              |  3 +--
 xcp_d/_warnings.py              |  2 +-
 xcp_d/cli/parser.py             | 19 +++++++++----------
 xcp_d/cli/workflow.py           |  6 +++---
 xcp_d/config.py                 |  4 ++--
 xcp_d/interfaces/censoring.py   | 21 +++++++++------------
 xcp_d/interfaces/execsummary.py |  2 +-
 xcp_d/interfaces/report.py      |  8 ++++----
 xcp_d/interfaces/workbench.py   |  6 ++----
 xcp_d/tests/run_local_tests.py  |  6 ++----
 xcp_d/tests/utils.py            |  4 ++--
 xcp_d/utils/atlas.py            |  2 +-
 xcp_d/utils/bids.py             | 24 ++++++++++--------------
 xcp_d/utils/boilerplate.py      | 12 ++++++------
 xcp_d/utils/modified_data.py    |  3 +--
 xcp_d/utils/plotting.py         | 12 ++++++------
 xcp_d/utils/utils.py            |  2 +-
 xcp_d/workflows/base.py         |  4 ++--
 xcp_d/workflows/parcellation.py |  2 +-
 20 files changed, 65 insertions(+), 79 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index c60065d7d..8021708f7 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -369,7 +369,7 @@
 linkcode_resolve = make_linkcode_resolve(
     'xcp_d',
     (
-        'https://github.com/pennlinc/xcp_d/blob/' '{revision}/{package}/{path}#L{lineno}'  # noqa: FS003
+        'https://github.com/pennlinc/xcp_d/blob/{revision}/{package}/{path}#L{lineno}'  # noqa: FS003
     ),
 )

diff --git a/xcp_d/__about__.py b/xcp_d/__about__.py
index 3cea2d156..bc9e87d94 100644
--- a/xcp_d/__about__.py
+++ b/xcp_d/__about__.py
@@ -10,8 +10,7 @@
 __packagename__ = 'xcp_d'
 __copyright__ = 'Copyright 2020, PennLINC and DCAN labs'
 __credits__ = (
-    'Contributors: please check the ``.zenodo.json`` file at the top-level folder'
-    'of the repository'
+    'Contributors: please check the ``.zenodo.json`` file at the top-level folder of the repository'
 )
 
 __url__ = 'https://github.com/PennLINC/xcp_d'
diff --git a/xcp_d/_warnings.py b/xcp_d/_warnings.py
index ef4677f04..7b71b1672 100644
--- a/xcp_d/_warnings.py
+++ b/xcp_d/_warnings.py
@@ -13,7 +13,7 @@ def _warn(message, category=None, stacklevel=1, source=None):
         category = type(category).__name__
         category = category.replace('type', 'WARNING')
 
-    logging.getLogger('py.warnings').warning(f"{category or 'WARNING'}: {message}")
+    logging.getLogger('py.warnings').warning(f'{category or "WARNING"}: {message}')
 
 
 def _showwarning(message, category, filename, lineno, file=None, line=None):
diff --git a/xcp_d/cli/parser.py b/xcp_d/cli/parser.py
index 4623ab275..02f9c2826 100644
--- a/xcp_d/cli/parser.py
+++ b/xcp_d/cli/parser.py
@@ -119,11 +119,11 @@ def _build_parser():
         default=None,
         metavar='FILE',
         help=(
-            "A JSON file describing custom BIDS input filters using PyBIDS. "
-            "For further details, please check out "
-            "https://xcp-d.readthedocs.io/en/"
-            f"{currentv.base_version if is_release else 'latest'}/usage.html#"
-            "filtering-inputs-with-bids-filter-files"
+            'A JSON file describing custom BIDS input filters using PyBIDS. '
+            'For further details, please check out '
+            'https://xcp-d.readthedocs.io/en/'
+            f'{currentv.base_version if is_release else "latest"}/usage.html#'
+            'filtering-inputs-with-bids-filter-files'
         ),
     )
     g_bids.add_argument(
@@ -868,7 +868,7 @@ def parse_args(args=None, namespace=None):
 
     # Ensure input and output folders are not the same
     if output_dir == fmri_dir:
-        rec_path = fmri_dir / 'derivatives' / f"xcp_d-{version.split('+')[0]}"
+        rec_path = fmri_dir / 'derivatives' / f'xcp_d-{version.split("+")[0]}'
         parser.error(
             'The selected output folder is the same as the input BIDS folder. '
             f'Please modify the output path (suggestion: {rec_path}).'
@@ -897,8 +897,8 @@ def parse_args(args=None, namespace=None):
     missing_subjects = participant_label - set(all_subjects)
     if missing_subjects:
         parser.error(
-            "One or more participant labels were not found in the BIDS directory: "
-            f"{', '.join(missing_subjects)}."
+            'One or more participant labels were not found in the BIDS directory: '
+            f'{", ".join(missing_subjects)}.'
         )
 
     config.execution.participant_label = sorted(participant_label)
@@ -1105,8 +1105,7 @@ def _validate_parameters(opts, build_log, parser):
         and (opts.high_pass > 0 and opts.low_pass > 0)
     ):
         parser.error(
-            f"'--lower-bpf' ({opts.high_pass}) must be lower than "
-            f"'--upper-bpf' ({opts.low_pass})."
+            f"'--lower-bpf' ({opts.high_pass}) must be lower than '--upper-bpf' ({opts.low_pass})."
         )
     elif not opts.bandpass_filter:
         build_log.warning('Bandpass filtering is disabled. ALFF outputs will not be generated.')
diff --git a/xcp_d/cli/workflow.py b/xcp_d/cli/workflow.py
index 750d32a07..dabbe5154 100644
--- a/xcp_d/cli/workflow.py
+++ b/xcp_d/cli/workflow.py
@@ -30,11 +30,11 @@ def build_workflow(config_file, retval):
     notice_path = data.load.readable('NOTICE')
     if notice_path.exists():
         banner[0] += '\n'
-        banner += [f"License NOTICE {'#' * 50}"]
+        banner += [f'License NOTICE {"#" * 50}']
         banner += [f'XCP-D {version}']
         banner += notice_path.read_text().splitlines(keepends=False)[1:]
         banner += ['#' * len(banner[1])]
-    build_log.log(25, f"\n{' ' * 9}".join(banner))
+    build_log.log(25, f'\n{" " * 9}'.join(banner))
 
     # warn if older results exist: check for dataset_description.json in output folder
     msg = check_pipeline_version(
@@ -95,7 +95,7 @@ def build_workflow(config_file, retval):
     if config.execution.datasets:
         init_msg += [f'Searching for derivatives and atlases: {config.execution.datasets}.']
 
-    build_log.log(25, f"\n{' ' * 11}* ".join(init_msg))
+    build_log.log(25, f'\n{" " * 11}* '.join(init_msg))
 
     retval['workflow'] = init_xcpd_wf()

diff --git a/xcp_d/config.py b/xcp_d/config.py
index 66e122319..d00e811cf 100644
--- a/xcp_d/config.py
+++ b/xcp_d/config.py
@@ -201,7 +201,7 @@
     if _proc_oc_kbytes.exists():
         _oc_limit = _proc_oc_kbytes.read_text().strip()
         if _oc_limit in ('0', 'n/a') and Path('/proc/sys/vm/overcommit_ratio').exists():
-            _oc_limit = f"{Path('/proc/sys/vm/overcommit_ratio').read_text().strip()}%"
+            _oc_limit = f'{Path("/proc/sys/vm/overcommit_ratio").read_text().strip()}%'
 except Exception:  # noqa: S110, BLE001
     pass

@@ -416,7 +416,7 @@ class execution(_Config):
     """Folder where derivatives will be stored."""
     atlases = []
     """Selection of atlases to apply to the data."""
-    run_uuid = f"{strftime('%Y%m%d-%H%M%S')}_{uuid4()}"
+    run_uuid = f'{strftime("%Y%m%d-%H%M%S")}_{uuid4()}'
     """Unique identifier of this particular run."""
     participant_label = None
     """List of participant identifiers that are to be preprocessed."""
diff --git a/xcp_d/interfaces/censoring.py b/xcp_d/interfaces/censoring.py
index c619a3696..7d3c9c1f6 100644
--- a/xcp_d/interfaces/censoring.py
+++ b/xcp_d/interfaces/censoring.py
@@ -39,8 +39,7 @@ class _RemoveDummyVolumesInputSpec(BaseInterfaceInputSpec):
         None,
         mandatory=False,
         desc=(
-            'TSV file with selected confounds for denoising. '
-            'May be None if denoising is disabled.'
+            'TSV file with selected confounds for denoising. May be None if denoising is disabled.'
         ),
     )
     confounds_images = traits.Either(
@@ -71,8 +70,7 @@ class _RemoveDummyVolumesOutputSpec(TraitedSpec):
         File(exists=True),
         None,
         desc=(
-            'TSV file with selected confounds for denoising. '
-            'May be None if denoising is disabled.'
+            'TSV file with selected confounds for denoising. May be None if denoising is disabled.'
         ),
     )
     confounds_images_dropped_TR = traits.Either(
@@ -386,8 +384,7 @@ def _run_interface(self, runtime):
             censoring_df.loc[random_censor, column_name] = 0
             temporal_mask_metadata[column_name] = {
                 'Description': (
-                    f'Randomly selected low-motion volumes to retain exactly {exact_scan} '
-                    'volumes.'
+                    f'Randomly selected low-motion volumes to retain exactly {exact_scan} volumes.'
                 ),
                 'Levels': {
                     '0': 'Retained or high-motion volume',
@@ -905,9 +902,9 @@ def _run_interface(self, runtime):
         signal_regressors = [c for c in new_confound_df.columns if c.startswith('signal__')]
         if signal_regressors:
             LOGGER.warning(
-                "Signal regressors detected. "
-                "Orthogonalizing nuisance regressors w.r.t. the following signal regressors: "
-                f"{', '.join(signal_regressors)}"
+                'Signal regressors detected. '
+                'Orthogonalizing nuisance regressors w.r.t. the following signal regressors: '
+                f'{", ".join(signal_regressors)}'
             )
 
             noise_regressors = [c for c in new_confound_df.columns if not c.startswith('signal__')]
@@ -936,15 +933,15 @@ def _run_interface(self, runtime):
             for col in noise_regressors:
                 desc_str = (
                     "This regressor is orthogonalized with respect to the 'signal' regressors "
-                    f"({', '.join(signal_regressors)}) after dummy scan removal, "
-                    "but prior to any censoring."
+                    f'({", ".join(signal_regressors)}) after dummy scan removal, '
+                    'but prior to any censoring.'
                 )
 
                 col_metadata = {}
                 if col in confounds_metadata.keys():
                     col_metadata = confounds_metadata.pop(col)
                 if 'Description' in col_metadata.keys():
-                    desc_str = f"{col_metadata['Description']} {desc_str}"
+                    desc_str = f'{col_metadata["Description"]} {desc_str}'
 
                 col_metadata['Description'] = desc_str
                 confounds_metadata[f'{col}_orth'] = col_metadata
diff --git a/xcp_d/interfaces/execsummary.py b/xcp_d/interfaces/execsummary.py
index c92e4bdc8..c19bd0f54 100644
--- a/xcp_d/interfaces/execsummary.py
+++ b/xcp_d/interfaces/execsummary.py
@@ -328,7 +328,7 @@ def generate_report(self, out_file=None):
                     'LaTeX',
                     f"""
 {text}
 
 Bibliography
 
-{load_data("boilerplate.bib").read_text()}
+{load_data('boilerplate.bib').read_text()}
""", ) ) diff --git a/xcp_d/interfaces/report.py b/xcp_d/interfaces/report.py index c2923a008..3790ad368 100644 --- a/xcp_d/interfaces/report.py +++ b/xcp_d/interfaces/report.py @@ -150,12 +150,12 @@ def _generate_segment(self): mean_relative_rms = str(round(qcfile['mean_relative_rms'][0], 4)) max_relative_rms = str(round(qcfile['max_relative_rms'][0], 4)) dvars = ( - f"{round(qcfile['mean_dvars_initial'][0], 4)}, " - f"{round(qcfile['mean_dvars_final'][0], 4)}" + f'{round(qcfile["mean_dvars_initial"][0], 4)}, ' + f'{round(qcfile["mean_dvars_final"][0], 4)}' ) fd_dvars_correlation = ( - f"{round(qcfile['fd_dvars_correlation_initial'][0], 4)}, " - f"{round(qcfile['fd_dvars_correlation_final'][0], 4)}" + f'{round(qcfile["fd_dvars_correlation_initial"][0], 4)}, ' + f'{round(qcfile["fd_dvars_correlation_final"][0], 4)}' ) num_vols_censored = str(round(qcfile['num_censored_volumes'][0], 4)) diff --git a/xcp_d/interfaces/workbench.py b/xcp_d/interfaces/workbench.py index 761e70d03..8b04190fe 100644 --- a/xcp_d/interfaces/workbench.py +++ b/xcp_d/interfaces/workbench.py @@ -1444,8 +1444,7 @@ class _CiftiSmoothInputSpec(_WBCommandInputSpec): exists=True, position=6, argstr='-left-corrected-areas %s', - desc='vertex areas (as a metric) to use instead of computing them from ' - 'the left surface.', + desc='vertex areas (as a metric) to use instead of computing them from the left surface.', ) right_surf = File( exists=True, @@ -1458,8 +1457,7 @@ class _CiftiSmoothInputSpec(_WBCommandInputSpec): exists=True, position=8, argstr='-right-corrected-areas %s', - desc='vertex areas (as a metric) to use instead of computing them from ' - 'the right surface', + desc='vertex areas (as a metric) to use instead of computing them from the right surface', ) cerebellum_surf = File( exists=True, diff --git a/xcp_d/tests/run_local_tests.py b/xcp_d/tests/run_local_tests.py index 023986eaa..c9077ad0a 100755 --- a/xcp_d/tests/run_local_tests.py +++ b/xcp_d/tests/run_local_tests.py @@ -60,7 +60,7 @@ def run_command(command, env=None): if process.returncode != 0: raise RuntimeError( - f'Non zero return code: {process.returncode}\n' f'{command}\n\n{process.stdout.read()}' + f'Non zero return code: {process.returncode}\n{command}\n\n{process.stdout.read()}' ) @@ -71,9 +71,7 @@ def run_tests(test_regex, test_mark, check_path): if check_path: run_str = ( - 'docker run --rm -ti ' - '--entrypoint /bin/ls ' - f'pennlinc/xcp_d:unstable {mounted_code}' + f'docker run --rm -ti --entrypoint /bin/ls pennlinc/xcp_d:unstable {mounted_code}' ) try: run_command(run_str) diff --git a/xcp_d/tests/utils.py b/xcp_d/tests/utils.py index 34c33bbbf..f4c7b0c66 100644 --- a/xcp_d/tests/utils.py +++ b/xcp_d/tests/utils.py @@ -77,7 +77,7 @@ def download_test_data(dset, data_dir=None): return if dset not in URLS: - raise ValueError(f"dset ({dset}) must be one of: {', '.join(URLS.keys())}") + raise ValueError(f'dset ({dset}) must be one of: {", ".join(URLS.keys())}') if not data_dir: data_dir = os.path.join(os.path.dirname(get_test_data_path()), 'test_data') @@ -238,7 +238,7 @@ def run_command(command, env=None): if process.returncode != 0: raise RuntimeError( - f'Non zero return code: {process.returncode}\n' f'{command}\n\n{process.stdout.read()}' + f'Non zero return code: {process.returncode}\n{command}\n\n{process.stdout.read()}' ) diff --git a/xcp_d/utils/atlas.py b/xcp_d/utils/atlas.py index 2c003681f..57992a4d6 100644 --- a/xcp_d/utils/atlas.py +++ b/xcp_d/utils/atlas.py @@ -169,7 +169,7 @@ def collect_atlases(datasets, atlases, file_format, 
     for _atlas, atlas_info in atlas_cache.items():
         if not atlas_info['labels']:
-            raise FileNotFoundError(f"No TSV file found for {atlas_info['image']}")
+            raise FileNotFoundError(f'No TSV file found for {atlas_info["image"]}')
 
         # Check the contents of the labels file
         df = pd.read_table(atlas_info['labels'])

diff --git a/xcp_d/utils/bids.py b/xcp_d/utils/bids.py
index 258cea1cf..082eff823 100644
--- a/xcp_d/utils/bids.py
+++ b/xcp_d/utils/bids.py
@@ -64,12 +64,10 @@ class BIDSError(ValueError):
     def __init__(self, message, bids_root):
         indent = 10
         header = (
-            f'{"".join(["-"] * indent)} BIDS root folder: "{bids_root}" '
-            f'{"".join(["-"] * indent)}'
+            f'{"".join(["-"] * indent)} BIDS root folder: "{bids_root}" {"".join(["-"] * indent)}'
         )
         self.msg = (
-            f"\n{header}\n{''.join([' '] * (indent + 1))}{message}\n"
-            f"{''.join(['-'] * len(header))}"
+            f'\n{header}\n{"".join([" "] * (indent + 1))}{message}\n{"".join(["-"] * len(header))}'
         )
         super().__init__(self.msg)
         self.bids_root = bids_root
@@ -132,13 +130,13 @@ def collect_participants(layout, participant_label=None, strict=False):
     found_label = sorted(set(participant_label) & all_participants)
     if not found_label:
         raise BIDSError(
-            f"Could not find participants [{', '.join(participant_label)}]",
+            f'Could not find participants [{", ".join(participant_label)}]',
             layout,
         )
 
     if notfound_label := sorted(set(participant_label) - all_participants):
         exc = BIDSError(
-            f"Some participants were not found: {', '.join(notfound_label)}",
+            f'Some participants were not found: {", ".join(notfound_label)}',
             layout,
         )
         if strict:
@@ -210,9 +208,9 @@ def collect_data(
             [f.path for f in layout.get(extension=['.nii.gz', '.dtseries.nii'])]
         )
         raise FileNotFoundError(
-            f"No BOLD data found in allowed spaces ({', '.join(allowed_spaces)}).\n\n"
-            f"Query: {queries['bold']}\n\n"
-            f"Found files:\n\n{filenames}"
+            f'No BOLD data found in allowed spaces ({", ".join(allowed_spaces)}).\n\n'
+            f'Query: {queries["bold"]}\n\n'
+            f'Found files:\n\n{filenames}'
         )
 
     if file_format == 'cifti':
@@ -521,9 +519,7 @@ def collect_morphometry_data(layout, participant_label, bids_filters):
         elif len(files) > 1:
             surface_str = '\n\t'.join(files)
             raise ValueError(
-                f'More than one {name} found.\n'
-                f'Surfaces found:\n\t{surface_str}\n'
-                f'Query: {query}'
+                f'More than one {name} found.\nSurfaces found:\n\t{surface_str}\nQuery: {query}'
             )
         else:
             morphometry_files[name] = None
@@ -726,8 +722,8 @@ def collect_confounds(
     for confound_name, confound_def in confound_spec['confounds'].items():
         if confound_def['dataset'] not in layout_dict.keys():
             raise ValueError(
-                f"Missing dataset required by confound spec: *{confound_def['dataset']}*. "
-                "Did you provide it with the `--datasets` flag?"
+                f'Missing dataset required by confound spec: *{confound_def["dataset"]}*. '
+                'Did you provide it with the `--datasets` flag?'
             )
 
         layout = layout_dict[confound_def['dataset']]
diff --git a/xcp_d/utils/boilerplate.py b/xcp_d/utils/boilerplate.py
index 3ebdc8945..78492b3fb 100644
--- a/xcp_d/utils/boilerplate.py
+++ b/xcp_d/utils/boilerplate.py
@@ -104,12 +104,12 @@ def describe_censoring(*, motion_filter_type, head_radius, fd_thresh, exact_scan
     desc = ''
     if fd_thresh > 0:
         desc += (
-            "Framewise displacement was calculated from the "
-            f"{'filtered ' if motion_filter_type else ''}motion parameters using the formula from "
-            f"@power_fd_dvars, with a head radius of {head_radius} mm. "
" - f"Volumes with {'filtered ' if motion_filter_type else ''}framewise displacement " - f"greater than {fd_thresh} mm were flagged as high-motion outliers for the sake of " - "later censoring [@power_fd_dvars]." + 'Framewise displacement was calculated from the ' + f'{"filtered " if motion_filter_type else ""}motion parameters using the formula from ' + f'@power_fd_dvars, with a head radius of {head_radius} mm. ' + f'Volumes with {"filtered " if motion_filter_type else ""}framewise displacement ' + f'greater than {fd_thresh} mm were flagged as high-motion outliers for the sake of ' + 'later censoring [@power_fd_dvars].' ) if exact_scans and (fd_thresh > 0): diff --git a/xcp_d/utils/modified_data.py b/xcp_d/utils/modified_data.py index 99db7331f..cba5027ba 100644 --- a/xcp_d/utils/modified_data.py +++ b/xcp_d/utils/modified_data.py @@ -252,8 +252,7 @@ def calculate_exact_scans(exact_times, scan_length, t_r, bold_file): if non_float_times: LOGGER.warning( - f'Non-float values {non_float_times} in {os.path.basename(bold_file)} ' - 'will be ignored.' + f'Non-float values {non_float_times} in {os.path.basename(bold_file)} will be ignored.' ) exact_scans = [int(t // t_r) for t in retained_exact_times] diff --git a/xcp_d/utils/plotting.py b/xcp_d/utils/plotting.py index d7cc0f74d..c1ec08e93 100644 --- a/xcp_d/utils/plotting.py +++ b/xcp_d/utils/plotting.py @@ -866,9 +866,9 @@ def plot_carpet( img = nb.load(func) if isinstance(img, nb.Cifti2Image): # CIFTI - assert ( - img.nifti_header.get_intent()[0] == 'ConnDenseSeries' - ), f'Not a dense timeseries: {img.nifti_header.get_intent()[0]}, {func}' + assert img.nifti_header.get_intent()[0] == 'ConnDenseSeries', ( + f'Not a dense timeseries: {img.nifti_header.get_intent()[0]}, {func}' + ) # Get required information data = img.get_fdata().T @@ -917,9 +917,9 @@ def plot_carpet( order = seg_data.argsort(kind='stable') # Get color maps cmap = ListedColormap([plt.get_cmap('Paired').colors[i] for i in (1, 0, 7, 3)]) - assert len(cmap.colors) == len( - struct_map - ), 'Mismatch between expected # of structures and colors' + assert len(cmap.colors) == len(struct_map), ( + 'Mismatch between expected # of structures and colors' + ) else: # Order following segmentation labels order = np.argsort(seg_data)[::-1] diff --git a/xcp_d/utils/utils.py b/xcp_d/utils/utils.py index 887be0097..4e30d0fd3 100644 --- a/xcp_d/utils/utils.py +++ b/xcp_d/utils/utils.py @@ -575,7 +575,7 @@ def list_to_str(lst): elif len(lst_str) == 2: return ' and '.join(lst_str) else: - return f"{', '.join(lst_str[:-1])}, and {lst_str[-1]}" + return f'{", ".join(lst_str[:-1])}, and {lst_str[-1]}' def _transpose_lol(lol): diff --git a/xcp_d/workflows/base.py b/xcp_d/workflows/base.py index e97617ce7..bb82bc9d7 100644 --- a/xcp_d/workflows/base.py +++ b/xcp_d/workflows/base.py @@ -214,8 +214,8 @@ def init_single_subject_wf(subject_id: str): The eXtensible Connectivity Pipeline- DCAN (XCP-D) [@mehta2024xcp;@mitigating_2018;@satterthwaite_2013] -was used to post-process the outputs of *{info_dict["name"]}* version {info_dict["version"]} -{info_dict["references"]}. +was used to post-process the outputs of *{info_dict['name']}* version {info_dict['version']} +{info_dict['references']}. XCP-D was built with *Nipype* version {nipype_ver} [@nipype1, RRID:SCR_002502]. 
""" diff --git a/xcp_d/workflows/parcellation.py b/xcp_d/workflows/parcellation.py index 7e5190d74..dcf5a559e 100644 --- a/xcp_d/workflows/parcellation.py +++ b/xcp_d/workflows/parcellation.py @@ -382,7 +382,7 @@ def init_parcellate_cifti_wf( CiftiParcellateWorkbench( direction='COLUMN', only_numeric=True, - out_file=f"parcellated_data.{'ptseries' if compute_mask else 'pscalar'}.nii", + out_file=f'parcellated_data.{"ptseries" if compute_mask else "pscalar"}.nii', num_threads=config.nipype.omp_nthreads, ), name='parcellate_data',