From f266cf75622f1ca12868000e5bc82f5d899556d8 Mon Sep 17 00:00:00 2001
From: fidoriel <49869342+fidoriel@users.noreply.github.com>
Date: Mon, 29 Jul 2024 21:06:53 +0200
Subject: [PATCH] Fix type annotation in ResultsExporter class

---
 evap/results/exporters.py | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/evap/results/exporters.py b/evap/results/exporters.py
index 9f054ff18..7f2ec7e0e 100644
--- a/evap/results/exporters.py
+++ b/evap/results/exporters.py
@@ -156,7 +156,7 @@ def filter_evaluations(
                 ):
                     results.setdefault(questionnaire_result.questionnaire.id, []).extend(question_results)
                     used_questionnaires.add(questionnaire_result.questionnaire)
-            annotated_evaluation: Any = evaluation
+            annotated_evaluation: AnnotatedEvaluation = evaluation
             annotated_evaluation.course_evaluations_count = annotated_evaluation.course.evaluations.count()
             if annotated_evaluation.course_evaluations_count > 1:
                 course_results_exist = True
@@ -181,7 +181,7 @@ def write_headings_and_evaluation_info(
         contributor: UserProfile | None,
         degrees: Iterable[int],
         course_types: Iterable[int],
-        verbose_heading: Any,
+        verbose_heading: bool,
     ) -> None:
         export_name = _("Evaluation")
         if contributor:
@@ -225,27 +225,26 @@ def write_overall_results(
         evaluations_with_results: list[tuple[AnnotatedEvaluation, OrderedDict[int, list[QuestionResult]]]],
         course_results_exist: bool,
     ) -> None:
-        evaluations = [e for e, __ in evaluations_with_results]
+        annotated_evaluations = [e for e, __ in evaluations_with_results]
 
         self.write_cell(_("Overall Average Grade"), "bold")
-        averages = (distribution_to_grade(calculate_average_distribution(e)) for e in evaluations)
+        averages = (distribution_to_grade(calculate_average_distribution(e)) for e in annotated_evaluations)
         self.write_row(averages, lambda avg: self.grade_to_style(avg) if avg else "border_left_right")
 
         self.write_cell(_("Total voters/Total participants"), "bold")
-        voter_ratios = (f"{e.num_voters}/{e.num_participants}" for e in evaluations)
+        voter_ratios = (f"{e.num_voters}/{e.num_participants}" for e in annotated_evaluations)
         self.write_row(voter_ratios, style="total_voters")
 
         self.write_cell(_("Evaluation rate"), "bold")
         # round down like in progress bar
         participant_percentages = (
-            f"{int((e.num_voters / e.num_participants) * 100) if e.num_participants > 0 else 0}%" for e in evaluations
+            f"{int((e.num_voters / e.num_participants) * 100) if e.num_participants > 0 else 0}%" for e in annotated_evaluations
         )
         self.write_row(participant_percentages, style="evaluation_rate")
 
         if course_results_exist:
-            evaluations_as_any: list[Any] = evaluations
             # Only query the number of evaluations once and keep track of it here.
-            count_gt_1: list[bool] = [e.course_evaluations_count > 1 for e in evaluations_as_any]
+            count_gt_1: list[bool] = [e.course_evaluations_count > 1 for e in annotated_evaluations]
 
             # Borders only if there is a course grade below. Offset by one column
             self.write_empty_row_with_styles(
@@ -255,12 +254,12 @@ def write_overall_results(
             self.write_cell(_("Evaluation weight"), "bold")
             weight_percentages = (
                 f"{e.weight_percentage}%" if gt1 else None
-                for e, gt1 in zip(evaluations_as_any, count_gt_1, strict=True)
+                for e, gt1 in zip(annotated_evaluations, count_gt_1, strict=True)
             )
             self.write_row(weight_percentages, lambda s: "evaluation_weight" if s is not None else "default")
 
             self.write_cell(_("Course Grade"), "bold")
-            for evaluation, gt1 in zip(evaluations_as_any, count_gt_1, strict=True):
+            for evaluation, gt1 in zip(annotated_evaluations, count_gt_1, strict=True):
                 if not gt1:
                     self.write_cell()
                     continue
@@ -330,7 +329,7 @@ def export_impl(
         include_not_enough_voters: bool = False,
         include_unpublished: bool = False,
         contributor: UserProfile | None = None,
-        verbose_heading: bool = False,
+        verbose_heading: bool = True,
     ):
         # We want to throw early here, since workbook.save() will throw an IndexError otherwise.
         assert len(selection_list) > 0