add CBPE MC AUROC class checks
nikml committed Jul 9, 2024
1 parent 03c580c commit b441a07
Showing 4 changed files with 100 additions and 37 deletions.
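
For context, a minimal sketch of the user-visible behaviour this commit adds, mirroring the new tests below (the top-level CBPE import is an assumption; the dataset loader and constructor arguments are taken from the test file):

from nannyml import CBPE  # assumed top-level export
from nannyml.datasets import load_synthetic_multiclass_classification_dataset
from nannyml.exceptions import InvalidArgumentsException

# Add a probability column for a class that never occurs in the reference targets.
reference, _, _ = load_synthetic_multiclass_classification_dataset()
reference['y_pred_proba_clazz'] = reference['y_pred_proba_upmarket_card']

estimator = CBPE(
    y_pred_proba={
        'prepaid_card': 'y_pred_proba_prepaid_card',
        'highstreet_card': 'y_pred_proba_highstreet_card',
        'upmarket_card': 'y_pred_proba_upmarket_card',
        'clazz': 'y_pred_proba_clazz',  # no 'clazz' labels exist in the reference targets
    },
    y_pred='y_pred',
    y_true='y_true',
    metrics=['roc_auc'],
    problem_type='classification_multiclass',
)

# With the new check, fitting raises InvalidArgumentsException because the
# y_pred_proba mapping does not match the classes observed in the reference targets.
estimator.fit(reference)
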
@@ -85,17 +85,8 @@ def __init__(
components=[("ROC AUC", "roc_auc")],
)
self.y_pred_proba: Dict[str, str]
# Move check here, since we have all the info we need for checking.
if not isinstance(self.y_pred_proba, Dict):
raise InvalidArgumentsException(
f"'y_pred_proba' is of type {type(self.y_pred_proba)}\n"
"multiclass use cases require 'y_pred_proba' to be a dictionary mapping classes to columns."
)
# classes and class probability columns
self.classes: List[str] = [""]
self.class_probability_columns: List[str]

# sampling error
self._sampling_error_components: List[Tuple] = []

def __str__(self):
@@ -134,6 +125,13 @@ def _fit(self, reference_data: pd.DataFrame):
)

def _calculate(self, data: pd.DataFrame):
if not isinstance(self.y_pred_proba, Dict):
raise InvalidArgumentsException(
f"'y_pred_proba' is of type {type(self.y_pred_proba)}\n"
f"multiclass use cases require 'y_pred_proba' to "
"be a dictionary mapping classes to columns."
)

_list_missing([self.y_true] + self.class_probability_columns, data)
data, empty = common_nan_removal(
data[[self.y_true] + self.class_probability_columns], [self.y_true] + self.class_probability_columns
7 changes: 4 additions & 3 deletions nannyml/performance_estimation/confidence_based/cbpe.py
@@ -541,11 +541,12 @@ def _fit_calibrators(
noop_calibrator = NoopCalibrator()

for clazz, y_true, y_pred_proba in _get_class_splits(reference_data, y_true_col, y_pred_proba_col):
_calibrator = copy.deepcopy(calibrator)
if not needs_calibration(np.asarray(y_true), np.asarray(y_pred_proba), calibrator):
calibrator = noop_calibrator
_calibrator = noop_calibrator

calibrator.fit(y_pred_proba, y_true)
fitted_calibrators[clazz] = copy.deepcopy(calibrator)
_calibrator.fit(y_pred_proba, y_true)
fitted_calibrators[clazz] = copy.deepcopy(_calibrator)

return fitted_calibrators

58 changes: 33 additions & 25 deletions nannyml/performance_estimation/confidence_based/metrics.py
@@ -2327,36 +2327,43 @@ def __init__(
threshold=threshold,
components=[('ROC AUC', 'roc_auc')],
)
# FIXME: Should we check the y_pred_proba argument here to ensure it's a dict?
self.y_pred_proba: Dict[str, str]

# sampling error
self.classes: List[str] = [""]
self.class_probability_columns: List[str]
self.class_uncalibrated_y_pred_proba_columns: List[str]
self._sampling_error_components: List[Tuple] = []

def _fit(self, reference_data: pd.DataFrame):
classes = class_labels(self.y_pred_proba)
class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba)
class_uncalibrated_y_pred_proba_columns = ['uncalibrated_' + el for el in class_y_pred_proba_columns]
_list_missing([self.y_true] + class_uncalibrated_y_pred_proba_columns, list(reference_data.columns))
self.classes = class_labels(self.y_pred_proba)
self.class_probability_columns = [self.y_pred_proba[clazz] for clazz in self.classes]
self.class_uncalibrated_y_pred_proba_columns = ['uncalibrated_' + el for el in self.class_probability_columns]
_list_missing([self.y_true] + self.class_uncalibrated_y_pred_proba_columns, list(reference_data.columns))
# filter nans here
reference_data, empty = common_nan_removal(
reference_data[[self.y_true] + class_uncalibrated_y_pred_proba_columns],
[self.y_true] + class_uncalibrated_y_pred_proba_columns,
reference_data[[self.y_true] + self.class_uncalibrated_y_pred_proba_columns],
[self.y_true] + self.class_uncalibrated_y_pred_proba_columns,
)
if empty:
self._sampling_error_components = [(np.NaN, 0) for class_col in class_y_pred_proba_columns]
self._sampling_error_components = [(np.NaN, 0) for clasz in self.classes]
else:
# test if reference data are represented correctly
observed_classes = set(reference_data[self.y_true].unique())
if not observed_classes == set(self.classes):
self._logger.error(
"The specified classification classes are not the same as the classes observed in the reference "
"targets."
)
raise InvalidArgumentsException(
"y_pred_proba class and class probabilities dictionary does not match reference data.")
# sampling error
binarized_y_true = list(label_binarize(reference_data[self.y_true], classes=classes).T)
y_pred_proba = [reference_data['uncalibrated_' + self.y_pred_proba[clazz]].T for clazz in classes]
binarized_y_true = list(label_binarize(reference_data[self.y_true], classes=self.classes).T)
y_pred_proba = [reference_data['uncalibrated_' + self.y_pred_proba[clazz]].T for clazz in self.classes]
self._sampling_error_components = mse.auroc_sampling_error_components(
y_true_reference=binarized_y_true, y_pred_proba_reference=y_pred_proba
)

def _estimate(self, data: pd.DataFrame):
class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba)
class_uncalibrated_y_pred_proba_columns = ['uncalibrated_' + el for el in class_y_pred_proba_columns]
needed_columns = class_y_pred_proba_columns + class_uncalibrated_y_pred_proba_columns
needed_columns = self.class_probability_columns + self.class_uncalibrated_y_pred_proba_columns
try:
_list_missing(needed_columns, list(data.columns))
except InvalidArgumentsException as ex:
@@ -2390,9 +2397,7 @@ def _estimate(self, data: pd.DataFrame):
return multiclass_roc_auc

def _sampling_error(self, data: pd.DataFrame) -> float:
class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba)
class_uncalibrated_y_pred_proba_columns = ['uncalibrated_' + el for el in class_y_pred_proba_columns]
needed_columns = class_y_pred_proba_columns + class_uncalibrated_y_pred_proba_columns
needed_columns = self.class_probability_columns + self.class_uncalibrated_y_pred_proba_columns
_list_missing(needed_columns, data)
data, empty = common_nan_removal(data[needed_columns], needed_columns)
if empty:
@@ -2404,25 +2409,28 @@ def _sampling_error(self, data: pd.DataFrame) -> float:
return mse.auroc_sampling_error(self._sampling_error_components, data)

def _realized_performance(self, data: pd.DataFrame) -> float:
class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba)
class_uncalibrated_y_pred_proba_columns = ['uncalibrated_' + el for el in class_y_pred_proba_columns]
try:
_list_missing([self.y_true] + class_uncalibrated_y_pred_proba_columns, data)
_list_missing([self.y_true] + self.class_uncalibrated_y_pred_proba_columns, data)
except InvalidArgumentsException as ex:
if "missing required columns" in str(ex):
self._logger.debug(str(ex))
return np.NaN
else:
raise ex

data, empty = common_nan_removal(data, [self.y_true] + class_uncalibrated_y_pred_proba_columns)
data, empty = common_nan_removal(data, [self.y_true] + self.class_uncalibrated_y_pred_proba_columns)
if empty:
warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.")
return np.NaN

y_true = data[self.y_true]
if y_true.nunique() <= 1:
warnings.warn("Too few unique values present in 'y_true', returning NaN as realized ROC-AUC.")
if set(y_true.unique()) != set(self.classes):
_message = (
f"'{self.y_true}' does not contain all reported classes, cannot calculate {self.display_name}. "
"Returning NaN."
)
warnings.warn(_message)
self._logger.warning(_message)
return np.NaN

_, y_pred_probas, labels = _get_multiclass_uncalibrated_predictions(data, self.y_pred, self.y_pred_proba)
@@ -3158,7 +3166,7 @@ def _multi_class_confusion_matrix_realized_performance(self, data: pd.DataFrame)
warnings.warn(
f"Too few unique values present in 'y_pred', returning NaN as realized {self.display_name} score."
)
return nan_array
return nan_array

cm = confusion_matrix(
data[self.y_true], data[self.y_pred], labels=self.classes, normalize=self.normalize_confusion_matrix
56 changes: 56 additions & 0 deletions tests/performance_estimation/CBPE/test_cbpe_metrics.py
@@ -3,6 +3,7 @@
import pandas as pd
import numpy as np
import pytest
from logging import getLogger

from nannyml.chunk import DefaultChunker, SizeBasedChunker
from nannyml.datasets import (
@@ -21,6 +22,9 @@
BinaryClassificationSpecificity,
)
from nannyml.thresholds import ConstantThreshold
from nannyml.exceptions import InvalidArgumentsException

LOGGER = getLogger(__name__)


@pytest.mark.parametrize(
@@ -3580,3 +3584,55 @@ def test_cbpe_for_multiclass_classification_cm_with_nans(calculator_opts, realiz
'realized_true_upmarket_card_pred_upmarket_card',
]
pd.testing.assert_frame_equal(realized, sut)


def test_auroc_errors_out_when_not_all_classes_are_represented_reference():
reference, _, _ = load_synthetic_multiclass_classification_dataset()
reference['y_pred_proba_clazz'] = reference['y_pred_proba_upmarket_card']
calc = CBPE(
y_pred_proba={
'prepaid_card': 'y_pred_proba_prepaid_card',
'highstreet_card': 'y_pred_proba_highstreet_card',
'upmarket_card': 'y_pred_proba_upmarket_card',
'clazz': 'y_pred_proba_clazz'
},
y_pred='y_pred',
y_true='y_true',
metrics=['roc_auc'],
problem_type='classification_multiclass',
)
expected_exc_test = "y_pred_proba class and class probabilities dictionary does not match reference data."
with pytest.raises(InvalidArgumentsException, match=expected_exc_test):
calc.fit(reference)


def test_auroc_errors_out_when_not_all_classes_are_represented_chunk(caplog):
LOGGER.info("testing test_auroc_errors_out_when_not_all_classes_are_represented_chunk")
reference, monitored, targets = load_synthetic_multiclass_classification_dataset()
monitored = monitored.merge(targets)
# Uncalibrated probabilities need to sum up to 1 per row.
reference['y_pred_proba_clazz'] = 0.1
reference['y_pred_proba_prepaid_card'] = 0.9 * reference['y_pred_proba_prepaid_card']
reference['y_pred_proba_highstreet_card'] = 0.9 * reference['y_pred_proba_highstreet_card']
reference['y_pred_proba_upmarket_card'] = 0.9 * reference['y_pred_proba_upmarket_card']
monitored['y_pred_proba_clazz'] = 0.1
monitored['y_pred_proba_prepaid_card'] = 0.9 * monitored['y_pred_proba_prepaid_card']
monitored['y_pred_proba_highstreet_card'] = 0.9 * monitored['y_pred_proba_highstreet_card']
monitored['y_pred_proba_upmarket_card'] = 0.9 * monitored['y_pred_proba_upmarket_card']
reference['y_true'].iloc[-1000:] = 'clazz'
calc = CBPE(
y_pred_proba={
'prepaid_card': 'y_pred_proba_prepaid_card',
'highstreet_card': 'y_pred_proba_highstreet_card',
'upmarket_card': 'y_pred_proba_upmarket_card',
'clazz': 'y_pred_proba_clazz'
},
y_pred='y_pred',
y_true='y_true',
metrics=['roc_auc'],
problem_type='classification_multiclass',
)
calc.fit(reference)
_ = calc.estimate(monitored)
expected_exc_test = "does not contain all reported classes, cannot calculate"
assert expected_exc_test in caplog.text
