diff --git a/evap/evaluation/migrations/0137_use_more_database_constraints.py b/evap/evaluation/migrations/0137_use_more_database_constraints.py new file mode 100644 index 0000000000..6b57aa9913 --- /dev/null +++ b/evap/evaluation/migrations/0137_use_more_database_constraints.py @@ -0,0 +1,55 @@ +# Generated by Django 4.2.2 on 2023-07-03 22:18 + +from django.db import migrations, models +import django.db.models.functions.datetime + + +class Migration(migrations.Migration): + dependencies = [ + ("evaluation", "0136_alter_userprofile_first_name_chosen_and_more"), + ] + + operations = [ + migrations.AddConstraint( + model_name="evaluation", + constraint=models.CheckConstraint( + check=models.Q( + ( + "vote_end_date__gte", + django.db.models.functions.datetime.TruncDate(models.F("vote_start_datetime")), + ) + ), + name="check_evaluation_start_before_end", + ), + ), + migrations.AddConstraint( + model_name="evaluation", + constraint=models.CheckConstraint( + check=models.Q( + ("_participant_count__isnull", True), + ("_voter_count__isnull", True), + _connector="XOR", + _negated=True, + ), + name="check_evaluation_participant_count_and_voter_count_both_set_or_not_set", + ), + ), + migrations.AddConstraint( + model_name="question", + constraint=models.CheckConstraint( + check=models.Q( + models.Q(("type", 0), ("type", 5), _connector="OR", _negated=True), + models.Q(("allows_additional_textanswers", True), _negated=True), + _connector="OR", + ), + name="check_evaluation_textanswer_or_heading_question_has_no_additional_textanswers", + ), + ), + migrations.AddConstraint( + model_name="textanswer", + constraint=models.CheckConstraint( + check=models.Q(("answer", models.F("original_answer")), _negated=True), + name="check_evaluation_text_answer_is_modified", + ), + ), + ] diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py index eb3f759004..564517dbda 100644 --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -16,8 +16,8 @@ from django.core.exceptions import ValidationError from django.core.mail import EmailMultiAlternatives from django.db import IntegrityError, models, transaction -from django.db.models import CheckConstraint, Count, Manager, OuterRef, Q, Subquery, Value -from django.db.models.functions import Coalesce, Lower, NullIf +from django.db.models import CheckConstraint, Count, F, Manager, OuterRef, Q, Subquery, Value +from django.db.models.functions import Coalesce, Lower, NullIf, TruncDate from django.dispatch import Signal, receiver from django.template import Context, Template from django.template.defaultfilters import linebreaksbr @@ -457,6 +457,16 @@ class Meta: ] verbose_name = _("evaluation") verbose_name_plural = _("evaluations") + constraints = [ + CheckConstraint( + check=Q(vote_end_date__gte=TruncDate(F("vote_start_datetime"))), + name="check_evaluation_start_before_end", + ), + CheckConstraint( + check=~(Q(_participant_count__isnull=True) ^ Q(_voter_count__isnull=True)), + name="check_evaluation_participant_count_and_voter_count_both_set_or_not_set", + ), + ] def __str__(self): return self.full_name @@ -469,8 +479,6 @@ def save(self, *args, **kw): self.contributions.create(contributor=None) del self.general_contribution # invalidate cached property - assert self.vote_end_date >= self.vote_start_datetime.date() - if hasattr(self, "state_change_source"): def state_changed_to(self, state_set): @@ -1077,9 +1085,7 @@ def remove_answers_to_questionnaires(self, questionnaires): RatingAnswerCounter.objects.filter(contribution=self, 
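The constraints in this migration mirror the declarations added to Evaluation.Meta in the same diff and take over the assert that used to live in Evaluation.save(). A minimal sketch of what they enforce at the database level once applied, assuming the models from this branch; the test class, dates and counts below are illustrative and not part of the change:

from datetime import date, datetime

from django.db import IntegrityError
from django.test import TestCase
from model_bakery import baker

from evap.evaluation.models import Evaluation


class EvaluationConstraintSketch(TestCase):  # illustrative only
    def test_end_date_must_not_precede_start(self):
        # check_evaluation_start_before_end: vote_end_date >= date(vote_start_datetime)
        with self.assertRaises(IntegrityError):
            baker.make(
                Evaluation,
                vote_start_datetime=datetime(2023, 7, 2, 8, 0),
                vote_end_date=date(2023, 7, 1),
            )

    def test_cached_counts_are_set_together(self):
        # check_evaluation_participant_count_and_voter_count_both_set_or_not_set:
        # NOT (_participant_count IS NULL XOR _voter_count IS NULL), i.e. the two
        # cached counts are either both NULL or both filled in.
        with self.assertRaises(IntegrityError):
            baker.make(Evaluation, _participant_count=10, _voter_count=None)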
question__questionnaire__in=questionnaires).delete() -class Question(models.Model): - """A question including a type.""" - +class QuestionType: TEXT = 0 LIKERT = 1 GRADE = 2 @@ -1092,29 +1098,34 @@ class Question(models.Model): POSITIVE_YES_NO = 3 NEGATIVE_YES_NO = 4 HEADING = 5 + + +class Question(models.Model): + """A question including a type.""" + QUESTION_TYPES = ( - (_("Text"), ((TEXT, _("Text question")),)), - (_("Unipolar Likert"), ((LIKERT, _("Agreement question")),)), - (_("Grade"), ((GRADE, _("Grade question")),)), + (_("Text"), ((QuestionType.TEXT, _("Text question")),)), + (_("Unipolar Likert"), ((QuestionType.LIKERT, _("Agreement question")),)), + (_("Grade"), ((QuestionType.GRADE, _("Grade question")),)), ( _("Bipolar Likert"), ( - (EASY_DIFFICULT, _("Easy-difficult question")), - (FEW_MANY, _("Few-many question")), - (LITTLE_MUCH, _("Little-much question")), - (SMALL_LARGE, _("Small-large question")), - (SLOW_FAST, _("Slow-fast question")), - (SHORT_LONG, _("Short-long question")), + (QuestionType.EASY_DIFFICULT, _("Easy-difficult question")), + (QuestionType.FEW_MANY, _("Few-many question")), + (QuestionType.LITTLE_MUCH, _("Little-much question")), + (QuestionType.SMALL_LARGE, _("Small-large question")), + (QuestionType.SLOW_FAST, _("Slow-fast question")), + (QuestionType.SHORT_LONG, _("Short-long question")), ), ), ( _("Yes-no"), ( - (POSITIVE_YES_NO, _("Positive yes-no question")), - (NEGATIVE_YES_NO, _("Negative yes-no question")), + (QuestionType.POSITIVE_YES_NO, _("Positive yes-no question")), + (QuestionType.NEGATIVE_YES_NO, _("Negative yes-no question")), ), ), - (_("Layout"), ((HEADING, _("Heading")),)), + (_("Layout"), ((QuestionType.HEADING, _("Heading")),)), ) order = models.IntegerField(verbose_name=_("question order"), default=-1) @@ -1130,9 +1141,16 @@ class Meta: ordering = ["order"] verbose_name = _("question") verbose_name_plural = _("questions") + constraints = [ + CheckConstraint( + check=~(Q(type=QuestionType.TEXT) | Q(type=QuestionType.HEADING)) + | ~Q(allows_additional_textanswers=True), + name="check_evaluation_textanswer_or_heading_question_has_no_additional_textanswers", + ) + ] def save(self, *args, **kwargs): - if self.type in [Question.TEXT, Question.HEADING]: + if self.type in [QuestionType.TEXT, QuestionType.HEADING]: self.allows_additional_textanswers = False if "update_fields" in kwargs: kwargs["update_fields"] = {"allows_additional_textanswers"}.union(kwargs["update_fields"]) @@ -1150,34 +1168,34 @@ def answer_class(self): @property def is_likert_question(self): - return self.type == self.LIKERT + return self.type == QuestionType.LIKERT @property def is_bipolar_likert_question(self): return self.type in ( - self.EASY_DIFFICULT, - self.FEW_MANY, - self.LITTLE_MUCH, - self.SLOW_FAST, - self.SMALL_LARGE, - self.SHORT_LONG, + QuestionType.EASY_DIFFICULT, + QuestionType.FEW_MANY, + QuestionType.LITTLE_MUCH, + QuestionType.SLOW_FAST, + QuestionType.SMALL_LARGE, + QuestionType.SHORT_LONG, ) @property def is_text_question(self): - return self.type == self.TEXT + return self.type == QuestionType.TEXT @property def is_grade_question(self): - return self.type == self.GRADE + return self.type == QuestionType.GRADE @property def is_positive_yes_no_question(self): - return self.type == self.POSITIVE_YES_NO + return self.type == QuestionType.POSITIVE_YES_NO @property def is_negative_yes_no_question(self): - return self.type == self.NEGATIVE_YES_NO + return self.type == QuestionType.NEGATIVE_YES_NO @property def is_yes_no_question(self): @@ -1198,7 +1216,7 
@@ def is_non_grade_rating_question(self): @property def is_heading_question(self): - return self.type == self.HEADING + return self.type == QuestionType.HEADING @property def can_have_textanswers(self): @@ -1252,7 +1270,7 @@ def can_have_textanswers(self): } CHOICES: Dict[int, Union[Choices, BipolarChoices]] = { - Question.LIKERT: Choices( + QuestionType.LIKERT: Choices( names=[ _("Strongly\nagree"), _("Agree"), @@ -1263,7 +1281,7 @@ def can_have_textanswers(self): ], **BASE_UNIPOLAR_CHOICES, # type: ignore ), - Question.GRADE: Choices( + QuestionType.GRADE: Choices( names=[ "1", "2", @@ -1274,7 +1292,7 @@ def can_have_textanswers(self): ], **BASE_UNIPOLAR_CHOICES, # type: ignore ), - Question.EASY_DIFFICULT: BipolarChoices( + QuestionType.EASY_DIFFICULT: BipolarChoices( minus_name=_("Easy"), plus_name=_("Difficult"), names=[ @@ -1289,7 +1307,7 @@ def can_have_textanswers(self): ], **BASE_BIPOLAR_CHOICES, # type: ignore ), - Question.FEW_MANY: BipolarChoices( + QuestionType.FEW_MANY: BipolarChoices( minus_name=_("Few"), plus_name=_("Many"), names=[ @@ -1304,7 +1322,7 @@ def can_have_textanswers(self): ], **BASE_BIPOLAR_CHOICES, # type: ignore ), - Question.LITTLE_MUCH: BipolarChoices( + QuestionType.LITTLE_MUCH: BipolarChoices( minus_name=_("Little"), plus_name=_("Much"), names=[ @@ -1319,7 +1337,7 @@ def can_have_textanswers(self): ], **BASE_BIPOLAR_CHOICES, # type: ignore ), - Question.SMALL_LARGE: BipolarChoices( + QuestionType.SMALL_LARGE: BipolarChoices( minus_name=_("Small"), plus_name=_("Large"), names=[ @@ -1334,7 +1352,7 @@ def can_have_textanswers(self): ], **BASE_BIPOLAR_CHOICES, # type: ignore ), - Question.SLOW_FAST: BipolarChoices( + QuestionType.SLOW_FAST: BipolarChoices( minus_name=_("Slow"), plus_name=_("Fast"), names=[ @@ -1349,7 +1367,7 @@ def can_have_textanswers(self): ], **BASE_BIPOLAR_CHOICES, # type: ignore ), - Question.SHORT_LONG: BipolarChoices( + QuestionType.SHORT_LONG: BipolarChoices( minus_name=_("Short"), plus_name=_("Long"), names=[ @@ -1364,7 +1382,7 @@ def can_have_textanswers(self): ], **BASE_BIPOLAR_CHOICES, # type: ignore ), - Question.POSITIVE_YES_NO: Choices( + QuestionType.POSITIVE_YES_NO: Choices( names=[ _("Yes"), _("No"), @@ -1372,7 +1390,7 @@ def can_have_textanswers(self): ], **BASE_YES_NO_CHOICES, # type: ignore ), - Question.NEGATIVE_YES_NO: Choices( + QuestionType.NEGATIVE_YES_NO: Choices( names=[ _("No"), _("Yes"), @@ -1454,6 +1472,9 @@ class Meta: ordering = ["id"] verbose_name = _("text answer") verbose_name_plural = _("text answers") + constraints = [ + CheckConstraint(check=~Q(answer=F("original_answer")), name="check_evaluation_text_answer_is_modified") + ] @property def will_be_deleted(self): @@ -1483,7 +1504,6 @@ def is_reviewed(self): def save(self, *args, **kwargs): super().save(*args, **kwargs) - assert self.answer != self.original_answer class FaqSection(models.Model): diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py index 5d206d97a0..bc3a726242 100644 --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -18,6 +18,7 @@ NotArchiveable, Question, Questionnaire, + QuestionType, RatingAnswerCounter, Semester, TextAnswer, @@ -296,7 +297,7 @@ def test_second_vote_sets_can_publish_text_results_to_true(self): ) evaluation.save() top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - baker.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) + baker.make(Question, questionnaire=top_general_questionnaire, 
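The assert removed from TextAnswer.save() is taken over by check_evaluation_text_answer_is_modified. Note that a SQL CHECK only rejects rows where the expression evaluates to FALSE, so rows whose original_answer is NULL (answers that were never edited) still pass, which matches what the old assert tolerated. A rough sketch of the resulting behaviour; the test code is illustrative and not part of this diff:

from django.db import IntegrityError
from django.test import TestCase
from model_bakery import baker

from evap.evaluation.models import QuestionType, TextAnswer


class TextAnswerConstraintSketch(TestCase):  # illustrative only
    def test_answer_must_differ_from_original(self):
        # NOT (answer = original_answer) is unknown when original_answer is NULL,
        # so unedited answers are still accepted ...
        baker.make(TextAnswer, question__type=QuestionType.TEXT, answer="unchanged")

        # ... while an edited answer that equals its original is rejected.
        with self.assertRaises(IntegrityError):
            baker.make(
                TextAnswer,
                question__type=QuestionType.TEXT,
                answer="same text",
                original_answer="same text",
            )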
type=QuestionType.LIKERT) evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) self.assertFalse(evaluation.can_publish_text_results) @@ -316,7 +317,7 @@ def test_textanswers_get_deleted_if_they_cannot_be_published(self): can_publish_text_results=False, ) questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - question = baker.make(Question, type=Question.TEXT, questionnaire=questionnaire) + question = baker.make(Question, type=QuestionType.TEXT, questionnaire=questionnaire) evaluation.general_contribution.questionnaires.set([questionnaire]) baker.make(TextAnswer, question=question, contribution=evaluation.general_contribution) @@ -335,7 +336,7 @@ def test_textanswers_do_not_get_deleted_if_they_can_be_published(self): can_publish_text_results=True, ) questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - question = baker.make(Question, type=Question.TEXT, questionnaire=questionnaire) + question = baker.make(Question, type=QuestionType.TEXT, questionnaire=questionnaire) evaluation.general_contribution.questionnaires.set([questionnaire]) baker.make(TextAnswer, question=question, contribution=evaluation.general_contribution) @@ -354,7 +355,7 @@ def test_textanswers_to_delete_get_deleted_on_publish(self): can_publish_text_results=True, ) questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - question = baker.make(Question, type=Question.TEXT, questionnaire=questionnaire) + question = baker.make(Question, type=QuestionType.TEXT, questionnaire=questionnaire) evaluation.general_contribution.questionnaires.set([questionnaire]) baker.make( TextAnswer, @@ -388,7 +389,7 @@ def test_original_textanswers_get_deleted_on_publish(self): can_publish_text_results=True, ) questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - question = baker.make(Question, type=Question.TEXT, questionnaire=questionnaire) + question = baker.make(Question, type=QuestionType.TEXT, questionnaire=questionnaire) evaluation.general_contribution.questionnaires.set([questionnaire]) baker.make( TextAnswer, diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index aaa36bab6f..5d5bafc815 100644 --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -14,6 +14,7 @@ Evaluation, Question, Questionnaire, + QuestionType, Semester, TextAnswer, UserProfile, @@ -55,10 +56,10 @@ def test_questionnaire_ordering(self): questionnaire_3 = baker.make(Questionnaire, order=1, type=Questionnaire.Type.BOTTOM) questionnaire_4 = baker.make(Questionnaire, order=4, type=Questionnaire.Type.BOTTOM) - question_1 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_1) - question_2 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_2) - question_3 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_3) - question_4 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_4) + question_1 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire_1) + question_2 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire_2) + question_3 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire_3) + question_4 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire_4) evaluation.general_contribution.questionnaires.set( [questionnaire_1, questionnaire_2, questionnaire_3, questionnaire_4] @@ -107,10 +108,10 @@ def test_heading_question_filtering(self): 
evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)]) questionnaire = baker.make(Questionnaire) - baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=0) - heading_question = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=1) - likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2) - baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=3) + baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=0) + heading_question = baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=1) + likert_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire, order=2) + baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=3) contribution = baker.make( Contribution, evaluation=evaluation, questionnaires=[questionnaire], contributor=contributor @@ -201,7 +202,7 @@ def test_course_type_ordering(self): cache_results(evaluation_2) questionnaire = baker.make(Questionnaire) - question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire) + question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire) evaluation_1.general_contribution.questionnaires.set([questionnaire]) make_rating_answer_counters(question, evaluation_1.general_contribution) @@ -358,9 +359,9 @@ def test_exclude_used_but_unanswered_questionnaires(self): course__degrees=[degree], ) used_questionnaire = baker.make(Questionnaire) - used_question = baker.make(Question, type=Question.LIKERT, questionnaire=used_questionnaire) + used_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=used_questionnaire) unused_questionnaire = baker.make(Questionnaire) - unused_question = baker.make(Question, type=Question.LIKERT, questionnaire=unused_questionnaire) + unused_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=unused_questionnaire) evaluation.general_contribution.questionnaires.set([used_questionnaire, unused_questionnaire]) make_rating_answer_counters(used_question, evaluation.general_contribution) @@ -412,8 +413,8 @@ def test_correct_grades_and_bottom_numbers(self): ) questionnaire1 = baker.make(Questionnaire, order=1) questionnaire2 = baker.make(Questionnaire, order=2) - question1 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire1) - question2 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire2) + question1 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire1) + question2 = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire2) make_rating_answer_counters(question1, evaluation.general_contribution, [1, 0, 1, 0, 0]) make_rating_answer_counters(question2, evaluation.general_contribution, [0, 1, 0, 1, 0]) @@ -447,7 +448,7 @@ def test_course_grade(self): expected_average = 2.0 questionnaire = baker.make(Questionnaire) - question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire) + question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire) for grades, e in zip(grades_per_eval, evaluations): make_rating_answer_counters(question, e.general_contribution, grades) e.general_contribution.questionnaires.set([questionnaire]) @@ -469,7 +470,7 @@ def test_yes_no_question_result(self): state=Evaluation.State.PUBLISHED, ) questionnaire = baker.make(Questionnaire) - question = 
baker.make(Question, type=Question.POSITIVE_YES_NO, questionnaire=questionnaire) + question = baker.make(Question, type=QuestionType.POSITIVE_YES_NO, questionnaire=questionnaire) make_rating_answer_counters(question, evaluation.general_contribution, [4, 2]) @@ -503,8 +504,8 @@ def test_contributor_result_export(self): general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR) - general_question = baker.make(Question, type=Question.LIKERT, questionnaire=general_questionnaire) - contributor_question = baker.make(Question, type=Question.LIKERT, questionnaire=contributor_questionnaire) + general_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=general_questionnaire) + contributor_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=contributor_questionnaire) evaluation_1.general_contribution.questionnaires.set([general_questionnaire]) make_rating_answer_counters(general_question, evaluation_1.general_contribution, [2, 0, 0, 0, 0]) @@ -547,9 +548,10 @@ def test_text_answer_export(self): questions = baker.make( Question, questionnaire__type=iter(Questionnaire.Type.values), - type=Question.TEXT, + type=QuestionType.TEXT, _quantity=len(Questionnaire.Type.values), _bulk_create=True, + allows_additional_textanswers=False, ) baker.make( diff --git a/evap/results/tests/test_tools.py b/evap/results/tests/test_tools.py index b2b95a3c70..bc14153134 100644 --- a/evap/results/tests/test_tools.py +++ b/evap/results/tests/test_tools.py @@ -12,6 +12,7 @@ Evaluation, Question, Questionnaire, + QuestionType, RatingAnswerCounter, TextAnswer, UserProfile, @@ -82,7 +83,7 @@ def test_calculation_unipolar_results(self): voters=[student, contributor1], ) questionnaire = baker.make(Questionnaire) - question = baker.make(Question, questionnaire=questionnaire, type=Question.GRADE) + question = baker.make(Question, questionnaire=questionnaire, type=QuestionType.GRADE) contribution1 = baker.make( Contribution, contributor=contributor1, evaluation=evaluation, questionnaires=[questionnaire] ) @@ -112,7 +113,7 @@ def test_calculation_bipolar_results(self): voters=[student, contributor1], ) questionnaire = baker.make(Questionnaire) - question = baker.make(Question, questionnaire=questionnaire, type=Question.EASY_DIFFICULT) + question = baker.make(Question, questionnaire=questionnaire, type=QuestionType.EASY_DIFFICULT) contribution1 = baker.make( Contribution, contributor=contributor1, evaluation=evaluation, questionnaires=[questionnaire] ) @@ -148,7 +149,7 @@ def test_results_cache_after_user_merge(self): evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED, participants=[student]) questionnaire = baker.make(Questionnaire) - baker.make(Question, questionnaire=questionnaire, type=Question.GRADE) + baker.make(Question, questionnaire=questionnaire, type=QuestionType.GRADE) baker.make(Contribution, contributor=contributor, evaluation=evaluation, questionnaires=[questionnaire]) cache_results(evaluation) @@ -176,11 +177,11 @@ def setUpTestData(cls): voters=[cls.student1, cls.student2], ) cls.questionnaire = baker.make(Questionnaire) - cls.question_grade = baker.make(Question, questionnaire=cls.questionnaire, type=Question.GRADE) - cls.question_likert = baker.make(Question, questionnaire=cls.questionnaire, type=Question.LIKERT) - cls.question_likert_2 = baker.make(Question, questionnaire=cls.questionnaire, type=Question.LIKERT) - cls.question_bipolar = 
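The allows_additional_textanswers=False added to this bulk-created batch of text questions (and to the similar staff test further down) follows from the new Question constraint: Question.save() clears the flag for text and heading questions, but _bulk_create=True goes through bulk_create() and skips save(), so the model default would trip the check at INSERT time. A hypothetical illustration, not taken from the diff:

from model_bakery import baker

from evap.evaluation.models import Question, QuestionType

# bulk_create() bypasses Question.save(), leaving allows_additional_textanswers at
# its model default; for TEXT questions that would violate
# check_evaluation_textanswer_or_heading_question_has_no_additional_textanswers:
# baker.make(Question, type=QuestionType.TEXT, _quantity=3, _bulk_create=True)

# Setting the flag explicitly keeps bulk-created text questions valid:
baker.make(
    Question,
    type=QuestionType.TEXT,
    allows_additional_textanswers=False,
    _quantity=3,
    _bulk_create=True,
)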
baker.make(Question, questionnaire=cls.questionnaire, type=Question.FEW_MANY) - cls.question_bipolar_2 = baker.make(Question, questionnaire=cls.questionnaire, type=Question.LITTLE_MUCH) + cls.question_grade = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.GRADE) + cls.question_likert = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.LIKERT) + cls.question_likert_2 = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.LIKERT) + cls.question_bipolar = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.FEW_MANY) + cls.question_bipolar_2 = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.LITTLE_MUCH) cls.general_contribution = cls.evaluation.general_contribution cls.general_contribution.questionnaires.set([cls.questionnaire]) cls.contribution1 = baker.make( @@ -204,7 +205,7 @@ def setUpTestData(cls): GENERAL_NON_GRADE_QUESTIONS_WEIGHT=5, ) def test_average_grade(self): - question_grade2 = baker.make(Question, questionnaire=self.questionnaire, type=Question.GRADE) + question_grade2 = baker.make(Question, questionnaire=self.questionnaire, type=QuestionType.GRADE) counters = [ *make_rating_answer_counters(self.question_grade, self.contribution1, [0, 1, 0, 0, 0], False), @@ -344,7 +345,7 @@ def test_result_calculation_with_no_contributor_rating_question(self): voters=[self.student1, self.student2], ) questionnaire_text = baker.make(Questionnaire) - baker.make(Question, questionnaire=questionnaire_text, type=Question.TEXT) + baker.make(Question, questionnaire=questionnaire_text, type=QuestionType.TEXT) baker.make( Contribution, contributor=baker.make(UserProfile), @@ -384,7 +385,7 @@ def test_unipolarized_bipolar(self): self.assertAlmostEqual(distribution[4], 0.15) def test_unipolarized_yesno(self): - question_yesno = baker.make(Question, questionnaire=self.questionnaire, type=Question.POSITIVE_YES_NO) + question_yesno = baker.make(Question, questionnaire=self.questionnaire, type=QuestionType.POSITIVE_YES_NO) answer_counters = make_rating_answer_counters(question_yesno, self.general_contribution, [57, 43]) result = RatingResult(question_yesno, answer_counters) @@ -462,8 +463,8 @@ def setUpTestData(cls): can_publish_text_results=True, ) cls.questionnaire = baker.make(Questionnaire) - cls.question = baker.make(Question, questionnaire=cls.questionnaire, type=Question.TEXT) - cls.question_likert = baker.make(Question, questionnaire=cls.questionnaire, type=Question.LIKERT) + cls.question = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.TEXT) + cls.question_likert = baker.make(Question, questionnaire=cls.questionnaire, type=QuestionType.LIKERT) cls.general_contribution = cls.evaluation.general_contribution cls.general_contribution.questionnaires.set([cls.questionnaire]) cls.responsible1_contribution = baker.make( diff --git a/evap/results/tests/test_views.py b/evap/results/tests/test_views.py index 9cdb6ed444..48a77b8be4 100644 --- a/evap/results/tests/test_views.py +++ b/evap/results/tests/test_views.py @@ -19,6 +19,7 @@ Evaluation, Question, Questionnaire, + QuestionType, RatingAnswerCounter, Semester, UserProfile, @@ -232,6 +233,7 @@ def make_course_with_evaluations(unique_suffix): name_de="foo" + unique_suffix, state=Evaluation.State.PUBLISHED, _voter_count=0, + _participant_count=0, ) baker.make( Evaluation, @@ -240,6 +242,7 @@ def make_course_with_evaluations(unique_suffix): name_de="bar" + unique_suffix, state=Evaluation.State.PUBLISHED, _voter_count=0, + 
_participant_count=0, ) # first measure the number of queries with two courses @@ -344,7 +347,7 @@ def setUpTestData(cls): questionnaires=[questionnaire], contributor=contributor, ) - cls.likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2) + cls.likert_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire, order=2) cls.url = f"/results/semester/{cls.semester.id}/evaluation/{cls.evaluation.id}" def test_many_answers_evaluation_no_warning(self): @@ -402,17 +405,19 @@ def test_questionnaire_ordering(self): contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR) bottom_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.BOTTOM) - top_heading_question = baker.make(Question, type=Question.HEADING, questionnaire=top_questionnaire, order=0) - top_likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=top_questionnaire, order=1) + top_heading_question = baker.make(Question, type=QuestionType.HEADING, questionnaire=top_questionnaire, order=0) + top_likert_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=top_questionnaire, order=1) contributor_likert_question = baker.make( - Question, type=Question.LIKERT, questionnaire=contributor_questionnaire + Question, type=QuestionType.LIKERT, questionnaire=contributor_questionnaire ) bottom_heading_question = baker.make( - Question, type=Question.HEADING, questionnaire=bottom_questionnaire, order=0 + Question, type=QuestionType.HEADING, questionnaire=bottom_questionnaire, order=0 + ) + bottom_likert_question = baker.make( + Question, type=QuestionType.LIKERT, questionnaire=bottom_questionnaire, order=1 ) - bottom_likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=bottom_questionnaire, order=1) self.evaluation.general_contribution.questionnaires.set([top_questionnaire, bottom_questionnaire]) self.contribution.questionnaires.set([contributor_questionnaire]) @@ -439,10 +444,10 @@ def test_heading_question_filtering(self): contributor = baker.make(UserProfile) questionnaire = baker.make(Questionnaire) - heading_question_0 = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=0) - heading_question_1 = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=1) - likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2) - heading_question_2 = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=3) + heading_question_0 = baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=0) + heading_question_1 = baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=1) + likert_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire, order=2) + heading_question_2 = baker.make(Question, type=QuestionType.HEADING, questionnaire=questionnaire, order=3) contribution = baker.make( Contribution, evaluation=self.evaluation, questionnaires=[questionnaire], contributor=contributor @@ -499,7 +504,7 @@ def test_preview_with_rating_answers(self): Evaluation, state=Evaluation.State.EVALUATED, course=baker.make(Course, semester=self.semester) ) questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=1) + likert_question = baker.make(Question, type=QuestionType.LIKERT, 
questionnaire=questionnaire, order=1) evaluation.general_contribution.questionnaires.set([questionnaire]) participants = baker.make(UserProfile, _bulk_create=True, _quantity=20) evaluation.participants.set(participants) @@ -522,7 +527,7 @@ def test_unpublished_single_results_show_results(self) -> None: voters=participants, ) questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=1) + likert_question = baker.make(Question, type=QuestionType.LIKERT, questionnaire=questionnaire, order=1) evaluation.general_contribution.questionnaires.set([questionnaire]) make_rating_answer_counters(likert_question, evaluation.general_contribution) @@ -573,8 +578,8 @@ def setUpTestData(cls): cls.url = f"/results/semester/{cls.evaluation.course.semester.pk}/evaluation/{cls.evaluation.pk}" questionnaire = baker.make(Questionnaire) - cls.question_grade = baker.make(Question, questionnaire=questionnaire, type=Question.GRADE) - baker.make(Question, questionnaire=questionnaire, type=Question.LIKERT) + cls.question_grade = baker.make(Question, questionnaire=questionnaire, type=QuestionType.GRADE) + baker.make(Question, questionnaire=questionnaire, type=QuestionType.LIKERT) cls.evaluation.general_contribution.questionnaires.set([questionnaire]) cls.responsible_contribution = baker.make( Contribution, contributor=responsible, evaluation=cls.evaluation, questionnaires=[questionnaire] @@ -909,7 +914,7 @@ def setUpTestData(cls): cls.url = f"/results/semester/{evaluation.course.semester.id}/evaluation/{evaluation.id}?view=export" questionnaire = baker.make(Questionnaire) - baker.make(Question, questionnaire=questionnaire, type=Question.LIKERT) + baker.make(Question, questionnaire=questionnaire, type=QuestionType.LIKERT) evaluation.general_contribution.questionnaires.set([questionnaire]) baker.make( diff --git a/evap/rewards/migrations/0005_alter_rewardpoint_minvalue.py b/evap/rewards/migrations/0005_alter_rewardpoint_minvalue.py new file mode 100644 index 0000000000..85ab7d2c01 --- /dev/null +++ b/evap/rewards/migrations/0005_alter_rewardpoint_minvalue.py @@ -0,0 +1,23 @@ +# Generated by Django 4.1.8 on 2023-05-22 18:49 + +import django.core.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("rewards", "0004_make_granting_semester_non_null"), + ] + + operations = [ + migrations.AlterField( + model_name="rewardpointredemption", + name="value", + field=models.IntegerField(validators=[django.core.validators.MinValueValidator(1)], verbose_name="value"), + ), + migrations.AlterField( + model_name="rewardpointgranting", + name="value", + field=models.IntegerField(validators=[django.core.validators.MinValueValidator(1)], verbose_name="value"), + ), + ] diff --git a/evap/rewards/models.py b/evap/rewards/models.py index beb3065438..14198243fd 100644 --- a/evap/rewards/models.py +++ b/evap/rewards/models.py @@ -1,5 +1,6 @@ from collections import OrderedDict +from django.core.validators import MinValueValidator from django.db import models from django.dispatch import Signal from django.utils.translation import gettext_lazy as _ @@ -51,7 +52,7 @@ class RewardPointGranting(models.Model): user_profile = models.ForeignKey(UserProfile, models.CASCADE, related_name="reward_point_grantings") semester = models.ForeignKey(Semester, models.PROTECT, related_name="reward_point_grantings") granting_time = models.DateTimeField(verbose_name=_("granting time"), 
auto_now_add=True) - value = models.IntegerField(verbose_name=_("value"), default=0) + value = models.IntegerField(verbose_name=_("value"), validators=[MinValueValidator(1)]) granted_by_removal = Signal() @@ -64,7 +65,7 @@ class RewardPointRedemption(models.Model): user_profile = models.ForeignKey(UserProfile, models.CASCADE, related_name="reward_point_redemptions") redemption_time = models.DateTimeField(verbose_name=_("redemption time"), auto_now_add=True) - value = models.IntegerField(verbose_name=_("value"), default=0) + value = models.IntegerField(verbose_name=_("value"), validators=[MinValueValidator(1)]) event = models.ForeignKey(RewardPointRedemptionEvent, models.PROTECT, related_name="reward_point_redemptions") diff --git a/evap/rewards/tests/test_tools.py b/evap/rewards/tests/test_tools.py index bebe8911a1..aef456b729 100644 --- a/evap/rewards/tests/test_tools.py +++ b/evap/rewards/tests/test_tools.py @@ -2,7 +2,7 @@ from django.urls import reverse from model_bakery import baker -from evap.evaluation.models import NO_ANSWER, Course, Evaluation, Question, Questionnaire, UserProfile +from evap.evaluation.models import NO_ANSWER, Course, Evaluation, Question, Questionnaire, QuestionType, UserProfile from evap.evaluation.tests.tools import WebTest from evap.rewards.models import RewardPointGranting, SemesterActivation from evap.rewards.tools import reward_points_of_user @@ -22,7 +22,7 @@ def setUpTestData(cls): cls.evaluation = baker.make(Evaluation, state=Evaluation.State.IN_EVALUATION, participants=[cls.student]) questionnaire = baker.make(Questionnaire) - baker.make(Question, questionnaire=questionnaire, type=Question.GRADE) + baker.make(Question, questionnaire=questionnaire, type=QuestionType.GRADE) cls.evaluation.general_contribution.questionnaires.set([questionnaire]) def setUp(self): diff --git a/evap/staff/forms.py b/evap/staff/forms.py index 4c82a9f3a3..28f73910c7 100644 --- a/evap/staff/forms.py +++ b/evap/staff/forms.py @@ -25,6 +25,7 @@ Infotext, Question, Questionnaire, + QuestionType, RatingAnswerCounter, Semester, TextAnswer, @@ -898,12 +899,12 @@ class Meta: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - if self.instance.pk and self.instance.type in [Question.TEXT, Question.HEADING]: + if self.instance.pk and self.instance.type in [QuestionType.TEXT, QuestionType.HEADING]: self.fields["allows_additional_textanswers"].disabled = True def clean(self): super().clean() - if self.cleaned_data.get("type") in [Question.TEXT, Question.HEADING]: + if self.cleaned_data.get("type") in [QuestionType.TEXT, QuestionType.HEADING]: self.cleaned_data["allows_additional_textanswers"] = False return self.cleaned_data diff --git a/evap/staff/tests/test_forms.py b/evap/staff/tests/test_forms.py index 3e5d3d161f..6d7754b8ef 100644 --- a/evap/staff/tests/test_forms.py +++ b/evap/staff/tests/test_forms.py @@ -14,6 +14,7 @@ Evaluation, Question, Questionnaire, + QuestionType, RatingAnswerCounter, Semester, TextAnswer, @@ -591,12 +592,12 @@ def test_prevent_contribution_deletion_with_answers(self): def test_answers_for_removed_questionnaires_deleted(self): # pylint: disable=too-many-locals evaluation = baker.make(Evaluation) - general_question_1 = baker.make(Question, type=Question.LIKERT) - general_question_2 = baker.make(Question, type=Question.LIKERT) + general_question_1 = baker.make(Question, type=QuestionType.LIKERT) + general_question_2 = baker.make(Question, type=QuestionType.LIKERT) general_questionnaire_1 = baker.make(Questionnaire, 
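Swapping default=0 for MinValueValidator(1) on the two value fields changes where the rule is enforced: Django validators run during full_clean() and form validation, not on a plain save(), so this tightens the staff-facing forms rather than adding another database constraint. A minimal sketch of the difference, assuming the updated RewardPointGranting model; user and semester stand in for existing UserProfile and Semester instances and are hypothetical:

from django.core.exceptions import ValidationError

from evap.rewards.models import RewardPointGranting

granting = RewardPointGranting(user_profile=user, semester=semester, value=0)

try:
    granting.full_clean()  # validators run here and reject value < 1
except ValidationError:
    pass

# A bare granting.save() would still be accepted by the database,
# since MinValueValidator is not turned into a CHECK constraint.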
questions=[general_question_1]) general_questionnaire_2 = baker.make(Questionnaire, questions=[general_question_2]) evaluation.general_contribution.questionnaires.set([general_questionnaire_1, general_questionnaire_2]) - contributor_question = baker.make(Question, type=Question.LIKERT) + contributor_question = baker.make(Question, type=QuestionType.LIKERT) contributor_questionnaire = baker.make( Questionnaire, type=Questionnaire.Type.CONTRIBUTOR, @@ -1025,12 +1026,12 @@ def test_unused_questionnaire_visibility(self): def test_answers_for_removed_questionnaires_deleted(self): # pylint: disable=too-many-locals evaluation = baker.make(Evaluation) - general_question_1 = baker.make(Question, type=Question.LIKERT) - general_question_2 = baker.make(Question, type=Question.LIKERT) + general_question_1 = baker.make(Question, type=QuestionType.LIKERT) + general_question_2 = baker.make(Question, type=QuestionType.LIKERT) general_questionnaire_1 = baker.make(Questionnaire, questions=[general_question_1]) general_questionnaire_2 = baker.make(Questionnaire, questions=[general_question_2]) evaluation.general_contribution.questionnaires.set([general_questionnaire_1, general_questionnaire_2]) - contributor_question = baker.make(Question, type=Question.LIKERT) + contributor_question = baker.make(Question, type=QuestionType.LIKERT) contributor_questionnaire = baker.make( Questionnaire, type=Questionnaire.Type.CONTRIBUTOR, diff --git a/evap/staff/tests/test_views.py b/evap/staff/tests/test_views.py index 1f92d2bfbc..36e75291a3 100644 --- a/evap/staff/tests/test_views.py +++ b/evap/staff/tests/test_views.py @@ -30,6 +30,7 @@ Infotext, Question, Questionnaire, + QuestionType, RatingAnswerCounter, Semester, TextAnswer, @@ -2511,11 +2512,11 @@ def setUpTestData(cls): ) cls.url = reverse("staff:evaluation_textanswers", args=[cls.evaluation.pk]) top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - baker.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) + baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.LIKERT) cls.evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) questionnaire = baker.make(Questionnaire) - question = baker.make(Question, questionnaire=questionnaire, type=Question.TEXT) + question = baker.make(Question, questionnaire=questionnaire, type=QuestionType.TEXT) contribution = baker.make( Contribution, evaluation=cls.evaluation, @@ -2642,7 +2643,7 @@ def test_suggested_evaluation_ordering(self): for evaluation, answer_count in zip(evaluations, [1, 2]): contribution = baker.make(Contribution, evaluation=evaluation, _fill_optional=["contributor"]) - baker.make(TextAnswer, contribution=contribution, question__type=Question.TEXT, _quantity=answer_count) + baker.make(TextAnswer, contribution=contribution, question__type=QuestionType.TEXT, _quantity=answer_count) url = reverse("staff:evaluation_textanswers", args=[self.evaluation2.pk]) @@ -2679,7 +2680,13 @@ def test_num_queries_is_constant(self): contributors = baker.make(UserProfile, **kwargs) contributions = baker.make(Contribution, evaluation=self.evaluation, contributor=iter(contributors), **kwargs) questionnaires = baker.make(Questionnaire, **kwargs) - questions = baker.make(Question, questionnaire=iter(questionnaires), type=Question.TEXT, **kwargs) + questions = baker.make( + Question, + questionnaire=iter(questionnaires), + type=QuestionType.TEXT, + allows_additional_textanswers=False, + **kwargs, + ) baker.make(TextAnswer, 
question=iter(questions), contribution=iter(contributions), **kwargs) with run_in_staff_mode(self): @@ -2717,9 +2724,9 @@ def setUpTestData(cls): state=Evaluation.State.IN_EVALUATION, ) top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - baker.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) + baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.LIKERT) cls.evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) - question = baker.make(Question, type=Question.TEXT) + question = baker.make(Question, type=QuestionType.TEXT) contribution = baker.make( Contribution, @@ -2871,7 +2878,7 @@ def test_create_questionnaire(self): questionnaire_form["public_name_en"] = "Public Test Questionnaire" questionnaire_form["questions-0-text_de"] = "Frage 1" questionnaire_form["questions-0-text_en"] = "Question 1" - questionnaire_form["questions-0-type"] = Question.TEXT + questionnaire_form["questions-0-type"] = QuestionType.TEXT questionnaire_form["order"] = 0 questionnaire_form["type"] = Questionnaire.Type.TOP questionnaire_form.submit().follow() @@ -2970,9 +2977,10 @@ def setUpTestData(cls): baker.make( Question, questionnaire=questionnaire, - type=iter([Question.TEXT, Question.GRADE, Question.LIKERT]), + type=iter([QuestionType.TEXT, QuestionType.GRADE, QuestionType.LIKERT]), _quantity=3, _bulk_create=True, + allows_additional_textanswers=False, ) @@ -3224,8 +3232,8 @@ def setUpTestData(cls): state=Evaluation.State.IN_EVALUATION, ) top_general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP) - baker.make(Question, questionnaire=top_general_questionnaire, type=Question.LIKERT) - cls.text_question = baker.make(Question, questionnaire=top_general_questionnaire, type=Question.TEXT) + baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.LIKERT) + cls.text_question = baker.make(Question, questionnaire=top_general_questionnaire, type=QuestionType.TEXT) cls.evaluation.general_contribution.questionnaires.set([top_general_questionnaire]) def assert_transition( diff --git a/evap/student/tests/test_views.py b/evap/student/tests/test_views.py index aa4637a526..155aff09ab 100644 --- a/evap/student/tests/test_views.py +++ b/evap/student/tests/test_views.py @@ -10,6 +10,7 @@ Evaluation, Question, Questionnaire, + QuestionType, RatingAnswerCounter, Semester, TextAnswer, @@ -73,39 +74,39 @@ def setUpTestData(cls): cls.contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR) cls.contributor_heading_question = baker.make( - Question, questionnaire=cls.contributor_questionnaire, order=0, type=Question.HEADING + Question, questionnaire=cls.contributor_questionnaire, order=0, type=QuestionType.HEADING ) cls.contributor_text_question = baker.make( - Question, questionnaire=cls.contributor_questionnaire, order=1, type=Question.TEXT + Question, questionnaire=cls.contributor_questionnaire, order=1, type=QuestionType.TEXT ) cls.contributor_likert_question = baker.make( - Question, questionnaire=cls.contributor_questionnaire, order=2, type=Question.LIKERT + Question, questionnaire=cls.contributor_questionnaire, order=2, type=QuestionType.LIKERT ) cls.top_heading_question = baker.make( - Question, questionnaire=cls.top_general_questionnaire, order=0, type=Question.HEADING + Question, questionnaire=cls.top_general_questionnaire, order=0, type=QuestionType.HEADING ) cls.top_text_question = baker.make( - Question, 
questionnaire=cls.top_general_questionnaire, order=1, type=Question.TEXT + Question, questionnaire=cls.top_general_questionnaire, order=1, type=QuestionType.TEXT ) cls.top_likert_question = baker.make( - Question, questionnaire=cls.top_general_questionnaire, order=2, type=Question.LIKERT + Question, questionnaire=cls.top_general_questionnaire, order=2, type=QuestionType.LIKERT ) cls.top_grade_question = baker.make( - Question, questionnaire=cls.top_general_questionnaire, order=3, type=Question.GRADE + Question, questionnaire=cls.top_general_questionnaire, order=3, type=QuestionType.GRADE ) cls.bottom_heading_question = baker.make( - Question, questionnaire=cls.bottom_general_questionnaire, order=0, type=Question.HEADING + Question, questionnaire=cls.bottom_general_questionnaire, order=0, type=QuestionType.HEADING ) cls.bottom_text_question = baker.make( - Question, questionnaire=cls.bottom_general_questionnaire, order=1, type=Question.TEXT + Question, questionnaire=cls.bottom_general_questionnaire, order=1, type=QuestionType.TEXT ) cls.bottom_likert_question = baker.make( - Question, questionnaire=cls.bottom_general_questionnaire, order=2, type=Question.LIKERT + Question, questionnaire=cls.bottom_general_questionnaire, order=2, type=QuestionType.LIKERT ) cls.bottom_grade_question = baker.make( - Question, questionnaire=cls.bottom_general_questionnaire, order=3, type=Question.GRADE + Question, questionnaire=cls.bottom_general_questionnaire, order=3, type=QuestionType.GRADE ) cls.contribution1 = baker.make(