diff --git a/requirements.txt b/requirements.txt
index 7b472cdd..dc655937 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,8 +6,9 @@ matplotlib==3.8.4
 numpy==1.26.4
 opencv-contrib-python==4.9.0.80
 opencv-python==4.9.0.80
-pandas==2.2.1
-rich==13.7.1
+pandas==2.2.2
+rich==13.8.0
+rich-tools==0.5.1
 scipy==1.12.0
 screeninfo==0.8.1
 shapely==2.0.3
\ No newline at end of file
diff --git a/samples/2-omr-marker/evaluation.json b/samples/2-omr-marker/evaluation.json
index 0e5fe84e..3702f5f9 100644
--- a/samples/2-omr-marker/evaluation.json
+++ b/samples/2-omr-marker/evaluation.json
@@ -90,6 +90,7 @@
     }
   },
   "outputs_configuration": {
-    "should_explain_scoring": true
+    "should_explain_scoring": true,
+    "should_export_explanation_csv": true
   }
 }
diff --git a/src/algorithm/evaluation/config.py b/src/algorithm/evaluation/config.py
index 127b140f..c29abe96 100644
--- a/src/algorithm/evaluation/config.py
+++ b/src/algorithm/evaluation/config.py
@@ -458,6 +458,7 @@ def __init__(
             self.draw_question_verdicts,
             self.draw_score,
             self.should_explain_scoring,
+            self.should_export_explanation_csv,
         ) = map(
             outputs_configuration.get,
             [
@@ -466,6 +467,7 @@ def __init__(
                 "draw_question_verdicts",
                 "draw_score",
                 "should_explain_scoring",
+                "should_export_explanation_csv",
             ],
         )
         if self.draw_question_verdicts["enabled"]:
@@ -1014,6 +1016,12 @@ def conditionally_print_explanation(self):
     def get_should_explain_scoring(self):
         return self.should_explain_scoring
 
+    def get_should_export_explanation_csv(self):
+        return self.should_export_explanation_csv
+
+    def get_explanation_table(self):
+        return self.explanation_table
+
     def get_formatted_answers_summary(self, answers_summary_format_string=None):
         if answers_summary_format_string is None:
             answers_summary_format_string = self.draw_answers_summary[
diff --git a/src/algorithm/template/template.py b/src/algorithm/template/template.py
index fd92b16f..5267b06f 100644
--- a/src/algorithm/template/template.py
+++ b/src/algorithm/template/template.py
@@ -122,6 +122,9 @@ def get_multi_marked_dir(self):
     def get_errors_dir(self):
         return self.directory_handler.path_utils.errors_dir
 
+    def get_evaluations_dir(self):
+        return self.directory_handler.path_utils.evaluations_dir
+
     def read_omr_response(self, input_gray_image, colored_image, file_path):
         # Convert posix path to string
         file_path = str(file_path)
diff --git a/src/entry.py b/src/entry.py
index e43e1aac..2a3fcde7 100644
--- a/src/entry.py
+++ b/src/entry.py
@@ -5,6 +5,7 @@
 
 import pandas as pd
 from rich.table import Table
+from rich_tools import table_to_df
 
 from src.algorithm.evaluation.config import EvaluationConfig
 from src.algorithm.evaluation.evaluation import evaluate_concatenated_response
@@ -326,6 +327,15 @@ def process_directory_files(
             logger.info(
                 f"(/{files_counter}) Graded with score: {round(score, 2)}\t {default_answers_summary} \t file: '{file_id}'"
             )
+            if evaluation_config_for_response.get_should_export_explanation_csv():
+                explanation_table = evaluation_config_for_response.get_explanation_table()
+                explanation_table = table_to_df(explanation_table)
+                explanation_table.to_csv(
+                    template.get_evaluations_dir().joinpath(file_name + ".csv"),
+                    quoting=QUOTE_NONNUMERIC,
+                    index=False,
+                )
+
         else:
             logger.info(f"(/{files_counter}) Processed file: '{file_id}'")
 
diff --git a/src/schemas/defaults/evaluation.py b/src/schemas/defaults/evaluation.py
index 677b2225..063108c3 100644
--- a/src/schemas/defaults/evaluation.py
+++ b/src/schemas/defaults/evaluation.py
@@ -9,6 +9,7 @@
     "conditional_sets": [],
     "outputs_configuration": {
         "should_explain_scoring": False,
+        "should_export_explanation_csv": False,
         "draw_score": {
             "enabled": False,
             "position": [200, 200],
diff --git a/src/schemas/evaluation_schema.py b/src/schemas/evaluation_schema.py
index 716067a5..a518c83a 100644
--- a/src/schemas/evaluation_schema.py
+++ b/src/schemas/evaluation_schema.py
@@ -220,6 +220,10 @@
                 "description": "Whether to print the table explaining question-wise verdicts",
                 "type": "boolean",
             },
+            "should_export_explanation_csv": {
+                "description": "Whether to export the explanation of evaluation results as a CSV file",
+                "type": "boolean",
+            },
             "draw_score": {
                 "description": "The configuration for drawing the final score",
                 "type": "object",
diff --git a/src/utils/file.py b/src/utils/file.py
index 1ced89f2..7425c0e5 100644
--- a/src/utils/file.py
+++ b/src/utils/file.py
@@ -62,6 +62,7 @@ def __init__(self, output_dir):
         self.manual_dir = output_dir.joinpath("Manual")
         self.errors_dir = self.manual_dir.joinpath("ErrorFiles")
         self.multi_marked_dir = self.manual_dir.joinpath("MultiMarkedFiles")
+        self.evaluations_dir = output_dir.joinpath("Evaluations")
         self.debug_dir = output_dir.joinpath("Debug")
 
     def create_output_directories(self):
@@ -96,6 +97,7 @@ def create_output_directories(self):
         for save_output_dir in [
             self.results_dir,
             self.image_metrics_dir,
+            self.evaluations_dir,
         ]:
            if not os.path.exists(save_output_dir):
                 logger.info(f"Created : {save_output_dir}")