+import json
+import re
+from typing import Dict, List, Tuple, Any
+
 from codedog.actors.reporters.base import Reporter
 from codedog.localization import Localization
 from codedog.models.code_review import CodeReview
@@ -7,6 +11,7 @@ class CodeReviewMarkdownReporter(Reporter, Localization):
     def __init__(self, code_reviews: list[CodeReview], language="en"):
         self._code_reviews: list[CodeReview] = code_reviews
         self._markdown: str = ""
+        self._scores: List[Dict] = []
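+        # One entry per reviewed file, shaped {"file": <name>, "scores": {...}};
+        # see _extract_scores below.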

         super().__init__(language=language)
@@ -16,17 +21,169 @@ def report(self) -> str:
         return self._markdown

+    def _extract_scores(self, review_text: str, file_name: str) -> Dict[str, Any]:
+        """Extract scores from the review text using a simple format."""
+        # Default empty score data
+        default_scores = {
+            "file": file_name,
+            "scores": {
+                "correctness": 0,
+                "readability": 0,
+                "maintainability": 0,
+                "standards_compliance": 0,
+                "performance": 0,
+                "security": 0,
+                "overall": 0
+            }
+        }
+
+        try:
+            # Look for the scores section
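+            # (the capture runs until the next markdown heading or the end of the text)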
+            scores_section = re.search(r'#{1,3}\s*SCORES:\s*([\s\S]*?)(?=#{1,3}|$)', review_text)
+            if not scores_section:
+                print(f"No scores section found for {file_name}")
+                return default_scores
+
+            scores_text = scores_section.group(1)
+
+            # Extract individual scores
+            correctness = self._extract_score(scores_text, "Correctness")
+            readability = self._extract_score(scores_text, "Readability")
+            maintainability = self._extract_score(scores_text, "Maintainability")
+            standards = self._extract_score(scores_text, "Standards Compliance")
+            performance = self._extract_score(scores_text, "Performance")
+            security = self._extract_score(scores_text, "Security")
+            overall = self._extract_score(scores_text, "Overall")
+
+            # Update scores if found
+            if any([correctness, readability, maintainability, standards, performance, security, overall]):
+                return {
+                    "file": file_name,
+                    "scores": {
+                        "correctness": correctness or 0,
+                        "readability": readability or 0,
+                        "maintainability": maintainability or 0,
+                        "standards_compliance": standards or 0,
+                        "performance": performance or 0,
+                        "security": security or 0,
+                        "overall": overall or 0
+                    }
+                }
+
+        except Exception as e:
+            print(f"Error extracting scores from review for {file_name}: {e}")
+
+        return default_scores
+
+    def _extract_score(self, text: str, dimension: str) -> float:
+        """Extract a score for a specific dimension from text."""
+        try:
+            # Find patterns like "Correctness: 4.5/5" or "- Readability: 3.8/5"
+            pattern = rf'[-\s]*{dimension}:\s*(\d+(?:\.\d+)?)\s*\/?5'
+            match = re.search(pattern, text, re.IGNORECASE)
+            if match:
+                return float(match.group(1))
+        except Exception as e:
+            print(f"Error extracting {dimension} score: {e}")
+        return 0
+
+    def _calculate_average_scores(self) -> Dict:
+        """Calculate the average scores across all files."""
+        if not self._scores:
+            return {
+                "avg_correctness": 0,
+                "avg_readability": 0,
+                "avg_maintainability": 0,
+                "avg_standards": 0,
+                "avg_performance": 0,
+                "avg_security": 0,
+                "avg_overall": 0
+            }
+
+        total_files = len(self._scores)
+        avg_scores = {
+            "avg_correctness": sum(s["scores"]["correctness"] for s in self._scores) / total_files,
+            "avg_readability": sum(s["scores"]["readability"] for s in self._scores) / total_files,
+            "avg_maintainability": sum(s["scores"]["maintainability"] for s in self._scores) / total_files,
+            "avg_standards": sum(s["scores"]["standards_compliance"] for s in self._scores) / total_files,
+            "avg_performance": sum(s["scores"]["performance"] for s in self._scores) / total_files,
+            "avg_security": sum(s["scores"]["security"] for s in self._scores) / total_files,
+            "avg_overall": sum(s["scores"]["overall"] for s in self._scores) / total_files
+        }
+
+        return avg_scores
+
+    def _get_quality_assessment(self, avg_overall: float) -> str:
+        """Generate a quality assessment based on the average overall score."""
+        if avg_overall >= 4.5:
+            return "Excellent code quality. The PR demonstrates outstanding adherence to best practices and coding standards."
+        elif avg_overall >= 4.0:
+            return "Very good code quality. The PR shows strong adherence to standards with only minor improvement opportunities."
+        elif avg_overall >= 3.5:
+            return "Good code quality. The PR meets most standards but has some areas for improvement."
+        elif avg_overall >= 3.0:
+            return "Satisfactory code quality. The PR is acceptable but has several areas that could be improved."
+        elif avg_overall >= 2.0:
+            return "Needs improvement. The PR has significant issues that should be addressed before merging."
+        else:
+            return "Poor code quality. The PR has major issues that must be fixed before it can be accepted."
+
+    def _generate_summary_table(self) -> str:
+        """Generate a summary table of all file scores."""
+        if not self._scores:
+            return ""
+
+        file_score_rows = []
+        for score in self._scores:
+            file_name = score["file"]
+            s = score["scores"]
+            file_score_rows.append(
+                f"| {file_name} | {s['correctness']:.2f} | {s['readability']:.2f} | {s['maintainability']:.2f} | "
+                f"{s['standards_compliance']:.2f} | {s['performance']:.2f} | {s['security']:.2f} | {s['overall']:.2f} |"
+            )
+
+        avg_scores = self._calculate_average_scores()
+        quality_assessment = self._get_quality_assessment(avg_scores["avg_overall"])
+
+        return self.template.PR_REVIEW_SUMMARY_TABLE.format(
+            file_scores="\n".join(file_score_rows),
+            avg_correctness=avg_scores["avg_correctness"],
+            avg_readability=avg_scores["avg_readability"],
+            avg_maintainability=avg_scores["avg_maintainability"],
+            avg_standards=avg_scores["avg_standards"],
+            avg_performance=avg_scores["avg_performance"],
+            avg_security=avg_scores["avg_security"],
+            avg_overall=avg_scores["avg_overall"],
+            quality_assessment=quality_assessment
+        )
+
     def _generate_report(self):
         code_review_segs = []
+
         for code_review in self._code_reviews:
+            # Extract scores if the review is not empty
+            if hasattr(code_review, 'review') and code_review.review.strip():
+                file_name = code_review.file.full_name if hasattr(code_review, 'file') and hasattr(code_review.file, 'full_name') else "Unknown"
+                score_data = self._extract_scores(code_review.review, file_name)
+                self._scores.append(score_data)
+
+            # Add the review text (without modification)
             code_review_segs.append(
                 self.template.REPORT_CODE_REVIEW_SEGMENT.format(
-                    full_name=code_review.file.full_name,
-                    url=code_review.file.diff_url,
-                    review=code_review.review,
+                    full_name=code_review.file.full_name if hasattr(code_review, 'file') and hasattr(code_review.file, 'full_name') else "Unknown",
+                    url=code_review.file.diff_url if hasattr(code_review, 'file') and hasattr(code_review.file, 'diff_url') else "#",
+                    review=code_review.review if hasattr(code_review, 'review') else "",
                 )
             )

-        return self.template.REPORT_CODE_REVIEW.format(
+        # Generate review content
+        review_content = self.template.REPORT_CODE_REVIEW.format(
             feedback="\n".join(code_review_segs) if code_review_segs else self.template.REPORT_CODE_REVIEW_NO_FEEDBACK,
         )
+
+        # Add summary table at the end if we have scores
+        summary_table = self._generate_summary_table()
+        if summary_table:
+            review_content += "\n\n" + summary_table
+
+        return review_content