Run ruff #1357

Merged (1 commit), Jan 14, 2025
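This PR is a mechanical style pass over the repository, presumably produced by ruff (for example, `ruff check --fix` followed by `ruff format` with `quote-style = "single"`; the exact configuration and invocation are assumptions, as the PR does not show them). Two patterns account for nearly every hunk below: strings and f-strings flip from double to single quotes, with quotes inside f-string expressions flipping the other way so the delimiters never collide, and implicitly concatenated string literals are joined into one literal wherever the result fits the line-length limit. A minimal, self-contained sketch of both patterns, using hypothetical values:

from pathlib import Path

version = '0.10.0+5.gabc123'  # hypothetical version string, for illustration only
fmri_dir = Path('/data/bids')  # hypothetical BIDS directory, for illustration only

# Before the pass: double-quoted f-string, single quotes inside the expression.
rec_path = fmri_dir / 'derivatives' / f"xcp_d-{version.split('+')[0]}"

# After the pass: single-quoted f-string; the quotes inside the expression
# flip to double, so the two delimiters still never collide. Same value.
rec_path = fmri_dir / 'derivatives' / f'xcp_d-{version.split("+")[0]}'
assert rec_path == Path('/data/bids/derivatives/xcp_d-0.10.0')

# Before: one string literal implicitly concatenated across two lines.
desc = (
    'TSV file with selected confounds for denoising. '
    'May be None if denoising is disabled.'
)
# After: a single literal, since the joined string fits the line limit.
desc = 'TSV file with selected confounds for denoising. May be None if denoising is disabled.'

The remaining hunks, the asserts in xcp_d/utils/plotting.py, appear to be the formatter's standard wrapping for long assert messages: the condition stays on the assert line and the message moves into parentheses.
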
2 changes: 1 addition & 1 deletion docs/conf.py
@@ -369,7 +369,7 @@
 linkcode_resolve = make_linkcode_resolve(
     'xcp_d',
     (
-        'https://github.com/pennlinc/xcp_d/blob/' '{revision}/{package}/{path}#L{lineno}'  # noqa: FS003
+        'https://github.com/pennlinc/xcp_d/blob/{revision}/{package}/{path}#L{lineno}'  # noqa: FS003
     ),
 )

3 changes: 1 addition & 2 deletions xcp_d/__about__.py
@@ -10,8 +10,7 @@
 __packagename__ = 'xcp_d'
 __copyright__ = 'Copyright 2020, PennLINC and DCAN labs'
 __credits__ = (
-    'Contributors: please check the ``.zenodo.json`` file at the top-level folder'
-    'of the repository'
+    'Contributors: please check the ``.zenodo.json`` file at the top-level folder of the repository'
 )
 __url__ = 'https://github.com/PennLINC/xcp_d'

2 changes: 1 addition & 1 deletion xcp_d/_warnings.py
@@ -13,7 +13,7 @@ def _warn(message, category=None, stacklevel=1, source=None):
         category = type(category).__name__
         category = category.replace('type', 'WARNING')
 
-    logging.getLogger('py.warnings').warning(f"{category or 'WARNING'}: {message}")
+    logging.getLogger('py.warnings').warning(f'{category or "WARNING"}: {message}')
 
 
 def _showwarning(message, category, filename, lineno, file=None, line=None):

19 changes: 9 additions & 10 deletions xcp_d/cli/parser.py
@@ -119,11 +119,11 @@ def _build_parser():
         default=None,
         metavar='FILE',
         help=(
-            "A JSON file describing custom BIDS input filters using PyBIDS. "
-            "For further details, please check out "
-            "https://xcp-d.readthedocs.io/en/"
-            f"{currentv.base_version if is_release else 'latest'}/usage.html#"
-            "filtering-inputs-with-bids-filter-files"
+            'A JSON file describing custom BIDS input filters using PyBIDS. '
+            'For further details, please check out '
+            'https://xcp-d.readthedocs.io/en/'
+            f'{currentv.base_version if is_release else "latest"}/usage.html#'
+            'filtering-inputs-with-bids-filter-files'
         ),
     )
     g_bids.add_argument(
@@ -868,7 +868,7 @@ def parse_args(args=None, namespace=None):
 
     # Ensure input and output folders are not the same
     if output_dir == fmri_dir:
-        rec_path = fmri_dir / 'derivatives' / f"xcp_d-{version.split('+')[0]}"
+        rec_path = fmri_dir / 'derivatives' / f'xcp_d-{version.split("+")[0]}'
         parser.error(
             'The selected output folder is the same as the input BIDS folder. '
             f'Please modify the output path (suggestion: {rec_path}).'
@@ -897,8 +897,8 @@ def parse_args(args=None, namespace=None):
     missing_subjects = participant_label - set(all_subjects)
     if missing_subjects:
         parser.error(
-            "One or more participant labels were not found in the BIDS directory: "
-            f"{', '.join(missing_subjects)}."
+            'One or more participant labels were not found in the BIDS directory: '
+            f'{", ".join(missing_subjects)}.'
         )
 
     config.execution.participant_label = sorted(participant_label)
@@ -1105,8 +1105,7 @@ def _validate_parameters(opts, build_log, parser):
         and (opts.high_pass > 0 and opts.low_pass > 0)
     ):
         parser.error(
-            f"'--lower-bpf' ({opts.high_pass}) must be lower than "
-            f"'--upper-bpf' ({opts.low_pass})."
+            f"'--lower-bpf' ({opts.high_pass}) must be lower than '--upper-bpf' ({opts.low_pass})."
         )
     elif not opts.bandpass_filter:
         build_log.warning('Bandpass filtering is disabled. ALFF outputs will not be generated.')

6 changes: 3 additions & 3 deletions xcp_d/cli/workflow.py
@@ -30,11 +30,11 @@ def build_workflow(config_file, retval):
     notice_path = data.load.readable('NOTICE')
     if notice_path.exists():
         banner[0] += '\n'
-        banner += [f"License NOTICE {'#' * 50}"]
+        banner += [f'License NOTICE {"#" * 50}']
         banner += [f'XCP-D {version}']
         banner += notice_path.read_text().splitlines(keepends=False)[1:]
         banner += ['#' * len(banner[1])]
-    build_log.log(25, f"\n{' ' * 9}".join(banner))
+    build_log.log(25, f'\n{" " * 9}'.join(banner))
 
     # warn if older results exist: check for dataset_description.json in output folder
     msg = check_pipeline_version(
@@ -95,7 +95,7 @@ def build_workflow(config_file, retval):
     if config.execution.datasets:
         init_msg += [f'Searching for derivatives and atlases: {config.execution.datasets}.']
 
-    build_log.log(25, f"\n{' ' * 11}* ".join(init_msg))
+    build_log.log(25, f'\n{" " * 11}* '.join(init_msg))
 
     retval['workflow'] = init_xcpd_wf()

4 changes: 2 additions & 2 deletions xcp_d/config.py
@@ -201,7 +201,7 @@
     if _proc_oc_kbytes.exists():
         _oc_limit = _proc_oc_kbytes.read_text().strip()
         if _oc_limit in ('0', 'n/a') and Path('/proc/sys/vm/overcommit_ratio').exists():
-            _oc_limit = f"{Path('/proc/sys/vm/overcommit_ratio').read_text().strip()}%"
+            _oc_limit = f'{Path("/proc/sys/vm/overcommit_ratio").read_text().strip()}%'
 except Exception:  # noqa: S110, BLE001
     pass
 
@@ -416,7 +416,7 @@ class execution(_Config):
     """Folder where derivatives will be stored."""
     atlases = []
     """Selection of atlases to apply to the data."""
-    run_uuid = f"{strftime('%Y%m%d-%H%M%S')}_{uuid4()}"
+    run_uuid = f'{strftime("%Y%m%d-%H%M%S")}_{uuid4()}'
     """Unique identifier of this particular run."""
     participant_label = None
     """List of participant identifiers that are to be preprocessed."""

21 changes: 9 additions & 12 deletions xcp_d/interfaces/censoring.py
@@ -39,8 +39,7 @@ class _RemoveDummyVolumesInputSpec(BaseInterfaceInputSpec):
         None,
         mandatory=False,
         desc=(
-            'TSV file with selected confounds for denoising. '
-            'May be None if denoising is disabled.'
+            'TSV file with selected confounds for denoising. May be None if denoising is disabled.'
         ),
     )
     confounds_images = traits.Either(
@@ -71,8 +70,7 @@ class _RemoveDummyVolumesOutputSpec(TraitedSpec):
         File(exists=True),
         None,
         desc=(
-            'TSV file with selected confounds for denoising. '
-            'May be None if denoising is disabled.'
+            'TSV file with selected confounds for denoising. May be None if denoising is disabled.'
         ),
     )
     confounds_images_dropped_TR = traits.Either(
@@ -386,8 +384,7 @@ def _run_interface(self, runtime):
             censoring_df.loc[random_censor, column_name] = 0
             temporal_mask_metadata[column_name] = {
                 'Description': (
-                    f'Randomly selected low-motion volumes to retain exactly {exact_scan} '
-                    'volumes.'
+                    f'Randomly selected low-motion volumes to retain exactly {exact_scan} volumes.'
                 ),
                 'Levels': {
                     '0': 'Retained or high-motion volume',
@@ -905,9 +902,7 @@ def _run_interface(self, runtime):
         signal_regressors = [c for c in new_confound_df.columns if c.startswith('signal__')]
         if signal_regressors:
             LOGGER.warning(
-                "Signal regressors detected. "
-                "Orthogonalizing nuisance regressors w.r.t. the following signal regressors: "
-                f"{', '.join(signal_regressors)}"
+                'Signal regressors detected. '
+                'Orthogonalizing nuisance regressors w.r.t. the following signal regressors: '
+                f'{", ".join(signal_regressors)}'
             )
             noise_regressors = [c for c in new_confound_df.columns if not c.startswith('signal__')]
 
@@ -936,15 +933,15 @@ def _run_interface(self, runtime):
         for col in noise_regressors:
             desc_str = (
                 "This regressor is orthogonalized with respect to the 'signal' regressors "
-                f"({', '.join(signal_regressors)}) after dummy scan removal, "
-                "but prior to any censoring."
+                f'({", ".join(signal_regressors)}) after dummy scan removal, '
+                'but prior to any censoring.'
             )
 
             col_metadata = {}
             if col in confounds_metadata.keys():
                 col_metadata = confounds_metadata.pop(col)
             if 'Description' in col_metadata.keys():
-                desc_str = f"{col_metadata['Description']} {desc_str}"
+                desc_str = f'{col_metadata["Description"]} {desc_str}'
 
             col_metadata['Description'] = desc_str
             confounds_metadata[f'{col}_orth'] = col_metadata

2 changes: 1 addition & 1 deletion xcp_d/interfaces/execsummary.py
@@ -328,7 +328,7 @@ def generate_report(self, out_file=None):
                 'LaTeX',
                 f"""<pre>{text}</pre>
 <h3>Bibliography</h3>
-<pre>{load_data("boilerplate.bib").read_text()}</pre>
+<pre>{load_data('boilerplate.bib').read_text()}</pre>
 """,
             )
         )

8 changes: 4 additions & 4 deletions xcp_d/interfaces/report.py
@@ -150,12 +150,12 @@ def _generate_segment(self):
         mean_relative_rms = str(round(qcfile['mean_relative_rms'][0], 4))
         max_relative_rms = str(round(qcfile['max_relative_rms'][0], 4))
         dvars = (
-            f"{round(qcfile['mean_dvars_initial'][0], 4)}, "
-            f"{round(qcfile['mean_dvars_final'][0], 4)}"
+            f'{round(qcfile["mean_dvars_initial"][0], 4)}, '
+            f'{round(qcfile["mean_dvars_final"][0], 4)}'
         )
         fd_dvars_correlation = (
-            f"{round(qcfile['fd_dvars_correlation_initial'][0], 4)}, "
-            f"{round(qcfile['fd_dvars_correlation_final'][0], 4)}"
+            f'{round(qcfile["fd_dvars_correlation_initial"][0], 4)}, '
+            f'{round(qcfile["fd_dvars_correlation_final"][0], 4)}'
         )
         num_vols_censored = str(round(qcfile['num_censored_volumes'][0], 4))

6 changes: 2 additions & 4 deletions xcp_d/interfaces/workbench.py
@@ -1444,8 +1444,7 @@ class _CiftiSmoothInputSpec(_WBCommandInputSpec):
         exists=True,
         position=6,
         argstr='-left-corrected-areas %s',
-        desc='vertex areas (as a metric) to use instead of computing them from '
-        'the left surface.',
+        desc='vertex areas (as a metric) to use instead of computing them from the left surface.',
     )
     right_surf = File(
         exists=True,
@@ -1458,8 +1457,7 @@ class _CiftiSmoothInputSpec(_WBCommandInputSpec):
         exists=True,
         position=8,
         argstr='-right-corrected-areas %s',
-        desc='vertex areas (as a metric) to use instead of computing them from '
-        'the right surface',
+        desc='vertex areas (as a metric) to use instead of computing them from the right surface',
     )
     cerebellum_surf = File(
         exists=True,

6 changes: 2 additions & 4 deletions xcp_d/tests/run_local_tests.py
@@ -60,7 +60,7 @@ def run_command(command, env=None):
 
     if process.returncode != 0:
         raise RuntimeError(
-            f'Non zero return code: {process.returncode}\n' f'{command}\n\n{process.stdout.read()}'
+            f'Non zero return code: {process.returncode}\n{command}\n\n{process.stdout.read()}'
         )
 
 
@@ -71,9 +71,7 @@ def run_tests(test_regex, test_mark, check_path):
 
     if check_path:
         run_str = (
-            'docker run --rm -ti '
-            '--entrypoint /bin/ls '
-            f'pennlinc/xcp_d:unstable {mounted_code}'
+            f'docker run --rm -ti --entrypoint /bin/ls pennlinc/xcp_d:unstable {mounted_code}'
         )
         try:
             run_command(run_str)

4 changes: 2 additions & 2 deletions xcp_d/tests/utils.py
@@ -77,7 +77,7 @@ def download_test_data(dset, data_dir=None):
         return
 
     if dset not in URLS:
-        raise ValueError(f"dset ({dset}) must be one of: {', '.join(URLS.keys())}")
+        raise ValueError(f'dset ({dset}) must be one of: {", ".join(URLS.keys())}')
 
     if not data_dir:
         data_dir = os.path.join(os.path.dirname(get_test_data_path()), 'test_data')
@@ -238,7 +238,7 @@ def run_command(command, env=None):
 
     if process.returncode != 0:
         raise RuntimeError(
-            f'Non zero return code: {process.returncode}\n' f'{command}\n\n{process.stdout.read()}'
+            f'Non zero return code: {process.returncode}\n{command}\n\n{process.stdout.read()}'
         )

2 changes: 1 addition & 1 deletion xcp_d/utils/atlas.py
@@ -169,7 +169,7 @@ def collect_atlases(datasets, atlases, file_format, bids_filters=None):
 
     for _atlas, atlas_info in atlas_cache.items():
         if not atlas_info['labels']:
-            raise FileNotFoundError(f"No TSV file found for {atlas_info['image']}")
+            raise FileNotFoundError(f'No TSV file found for {atlas_info["image"]}')
 
         # Check the contents of the labels file
         df = pd.read_table(atlas_info['labels'])

24 changes: 10 additions & 14 deletions xcp_d/utils/bids.py
@@ -64,12 +64,10 @@ class BIDSError(ValueError):
     def __init__(self, message, bids_root):
         indent = 10
         header = (
-            f'{"".join(["-"] * indent)} BIDS root folder: "{bids_root}" '
-            f'{"".join(["-"] * indent)}'
+            f'{"".join(["-"] * indent)} BIDS root folder: "{bids_root}" {"".join(["-"] * indent)}'
         )
         self.msg = (
-            f"\n{header}\n{''.join([' '] * (indent + 1))}{message}\n"
-            f"{''.join(['-'] * len(header))}"
+            f'\n{header}\n{"".join([" "] * (indent + 1))}{message}\n{"".join(["-"] * len(header))}'
         )
         super().__init__(self.msg)
         self.bids_root = bids_root
@@ -132,13 +130,13 @@ def collect_participants(layout, participant_label=None, strict=False):
     found_label = sorted(set(participant_label) & all_participants)
     if not found_label:
         raise BIDSError(
-            f"Could not find participants [{', '.join(participant_label)}]",
+            f'Could not find participants [{", ".join(participant_label)}]',
             layout,
         )
 
     if notfound_label := sorted(set(participant_label) - all_participants):
         exc = BIDSError(
-            f"Some participants were not found: {', '.join(notfound_label)}",
+            f'Some participants were not found: {", ".join(notfound_label)}',
             layout,
         )
         if strict:
@@ -210,9 +208,9 @@ def collect_data(
             [f.path for f in layout.get(extension=['.nii.gz', '.dtseries.nii'])]
         )
         raise FileNotFoundError(
-            f"No BOLD data found in allowed spaces ({', '.join(allowed_spaces)}).\n\n"
-            f"Query: {queries['bold']}\n\n"
-            f"Found files:\n\n{filenames}"
+            f'No BOLD data found in allowed spaces ({", ".join(allowed_spaces)}).\n\n'
+            f'Query: {queries["bold"]}\n\n'
+            f'Found files:\n\n{filenames}'
         )
 
     if file_format == 'cifti':
@@ -521,9 +519,7 @@ def collect_morphometry_data(layout, participant_label, bids_filters):
         elif len(files) > 1:
             surface_str = '\n\t'.join(files)
             raise ValueError(
-                f'More than one {name} found.\n'
-                f'Surfaces found:\n\t{surface_str}\n'
-                f'Query: {query}'
+                f'More than one {name} found.\nSurfaces found:\n\t{surface_str}\nQuery: {query}'
             )
         else:
             morphometry_files[name] = None
@@ -726,8 +722,8 @@ def collect_confounds(
     for confound_name, confound_def in confound_spec['confounds'].items():
         if confound_def['dataset'] not in layout_dict.keys():
             raise ValueError(
-                f"Missing dataset required by confound spec: *{confound_def['dataset']}*. "
-                "Did you provide it with the `--datasets` flag?"
+                f'Missing dataset required by confound spec: *{confound_def["dataset"]}*. '
+                'Did you provide it with the `--datasets` flag?'
             )
 
         layout = layout_dict[confound_def['dataset']]

12 changes: 6 additions & 6 deletions xcp_d/utils/boilerplate.py
@@ -104,12 +104,12 @@ def describe_censoring(*, motion_filter_type, head_radius, fd_thresh, exact_scan
     desc = ''
     if fd_thresh > 0:
         desc += (
-            "Framewise displacement was calculated from the "
-            f"{'filtered ' if motion_filter_type else ''}motion parameters using the formula from "
-            f"@power_fd_dvars, with a head radius of {head_radius} mm. "
-            f"Volumes with {'filtered ' if motion_filter_type else ''}framewise displacement "
-            f"greater than {fd_thresh} mm were flagged as high-motion outliers for the sake of "
-            "later censoring [@power_fd_dvars]."
+            'Framewise displacement was calculated from the '
+            f'{"filtered " if motion_filter_type else ""}motion parameters using the formula from '
+            f'@power_fd_dvars, with a head radius of {head_radius} mm. '
+            f'Volumes with {"filtered " if motion_filter_type else ""}framewise displacement '
+            f'greater than {fd_thresh} mm were flagged as high-motion outliers for the sake of '
+            'later censoring [@power_fd_dvars].'
         )
 
     if exact_scans and (fd_thresh > 0):

3 changes: 1 addition & 2 deletions xcp_d/utils/modified_data.py
@@ -252,8 +252,7 @@ def calculate_exact_scans(exact_times, scan_length, t_r, bold_file):
 
     if non_float_times:
         LOGGER.warning(
-            f'Non-float values {non_float_times} in {os.path.basename(bold_file)} '
-            'will be ignored.'
+            f'Non-float values {non_float_times} in {os.path.basename(bold_file)} will be ignored.'
        )
 
     exact_scans = [int(t // t_r) for t in retained_exact_times]

12 changes: 6 additions & 6 deletions xcp_d/utils/plotting.py
@@ -866,9 +866,9 @@ def plot_carpet(
     img = nb.load(func)
 
     if isinstance(img, nb.Cifti2Image):  # CIFTI
-        assert (
-            img.nifti_header.get_intent()[0] == 'ConnDenseSeries'
-        ), f'Not a dense timeseries: {img.nifti_header.get_intent()[0]}, {func}'
+        assert img.nifti_header.get_intent()[0] == 'ConnDenseSeries', (
+            f'Not a dense timeseries: {img.nifti_header.get_intent()[0]}, {func}'
+        )
 
         # Get required information
         data = img.get_fdata().T
@@ -917,9 +917,9 @@ def plot_carpet(
         order = seg_data.argsort(kind='stable')
         # Get color maps
        cmap = ListedColormap([plt.get_cmap('Paired').colors[i] for i in (1, 0, 7, 3)])
-        assert len(cmap.colors) == len(
-            struct_map
-        ), 'Mismatch between expected # of structures and colors'
+        assert len(cmap.colors) == len(struct_map), (
+            'Mismatch between expected # of structures and colors'
+        )
     else:
         # Order following segmentation labels
         order = np.argsort(seg_data)[::-1]