diff --git a/openassessment/__init__.py b/openassessment/__init__.py
index e0ec6036e3..562d8b5074 100644
--- a/openassessment/__init__.py
+++ b/openassessment/__init__.py
@@ -2,4 +2,4 @@
 Initialization Information for Open Assessment Module
 """
 
-__version__ = '6.0.0'
+__version__ = '6.0.1'
diff --git a/openassessment/xblock/apis/grades_api.py b/openassessment/xblock/apis/grades_api.py
index 8f107a7a1e..24562cbeca 100644
--- a/openassessment/xblock/apis/grades_api.py
+++ b/openassessment/xblock/apis/grades_api.py
@@ -14,6 +14,46 @@ def __init__(self, block):
     def _get_submission_uuid(self):
         return self._block.submission_uuid
 
+    @property
+    def score_overridden(self):
+        """
+        Determine whether the score was overridden by staff.
+        Adapted from grade_mixin._get_assessment_type.
+
+        Returns: True if the score was overridden by staff, False otherwise.
+        """
+        workflow = self._block.get_workflow_info()
+        score = workflow['score']
+
+        complete = score is not None
+        grade_annotation_types = [annotation['annotation_type'] for annotation in (score or {}).get("annotations", [])]
+        if complete and "staff_defined" in grade_annotation_types:
+            return True
+
+        return False
+
+    @property
+    def effective_assessment_type(self):
+        """
+        Determine which assessment step we will use as our "graded" step.
+
+        This follows the order:
+        1) Staff (if a staff assessment was received or the score was overridden)
+        2) Peer (if the peer assessment step is configured)
+        3) Self (if the self assessment step is configured)
+
+        NOTE: Similar logic exists in a few other places with slight differences; this combines the most complete version found.
+        """
+        if self.staff_score is not None or self.score_overridden:
+            return "staff"
+        elif "peer-assessment" in self._block.assessment_steps:
+            return "peer"
+        elif "self-assessment" in self._block.assessment_steps:
+            return "self"
+
+        # To make pylint happy
+        return None
+
     @property
     def self_score(self):
         """
diff --git a/openassessment/xblock/apis/ora_data_accessor.py b/openassessment/xblock/apis/ora_data_accessor.py
index 768c6079e2..a722d11d37 100644
--- a/openassessment/xblock/apis/ora_data_accessor.py
+++ b/openassessment/xblock/apis/ora_data_accessor.py
@@ -1,4 +1,5 @@
 """API Data wrapper for exposed APIs within ORA XBlock"""
+from openassessment.xblock.apis.grades_api import GradesAPI
 from openassessment.xblock.apis.ora_config_api import ORAConfigAPI
 from openassessment.xblock.apis.submissions.submissions_api import SubmissionAPI
 from openassessment.xblock.apis.workflow_api import WorkflowAPI
@@ -28,6 +29,10 @@ def submission_data(self):
     def workflow_data(self):
         return WorkflowAPI(self._block)
 
+    @property
+    def grades_data(self):
+        return GradesAPI(self._block)
+
     @property
     def self_assessment_data(self):
         return SelfAssessmentAPI(self._block)
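
Reviewer aside (not part of the diff): the grading-step priority that effective_assessment_type encodes can be restated as a standalone function. The sketch below is hypothetical and inlines the property's three inputs; real callers reach the value through block.api_data.grades_data.

    # Hedged sketch: a pure-function restatement of GradesAPI.effective_assessment_type.
    # staff_score, score_overridden, and assessment_steps stand in for the real
    # properties on the grades API and the block's configured steps.
    def effective_assessment_type(staff_score, score_overridden, assessment_steps):
        if staff_score is not None or score_overridden:
            return "staff"    # a staff grade or staff override always wins
        if "peer-assessment" in assessment_steps:
            return "peer"     # otherwise a configured peer step
        if "self-assessment" in assessment_steps:
            return "self"     # otherwise a configured self step
        return None           # no graded step applies

    # Expected behavior under the ordering above:
    assert effective_assessment_type(None, True, ["peer-assessment"]) == "staff"
    assert effective_assessment_type(None, False, ["peer-assessment", "self-assessment"]) == "peer"
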
diff --git a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
index e525037001..f37b6d3cd1 100644
--- a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
+++ b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
@@ -114,18 +114,12 @@ class AssessmentGradeSerializer(Serializer):
     }
     """
 
-    effectiveAssessmentType = SerializerMethodField()
+    effectiveAssessmentType = CharField(source="grades_data.effective_assessment_type")
     self = AssessmentStepSerializer(source="self_assessment_data.assessment")
     staff = AssessmentStepSerializer(source="staff_assessment_data.assessment")
    peer = PeerAssessmentsSerializer(source="peer_assessment_data")
     peerUnweighted = UnweightedPeerAssessmentsSerializer(source="peer_assessment_data")
 
-    def get_effectiveAssessmentType(self, instance):  # pylint: disable=unused-argument
-        """
-        Get effective assessment type
-        """
-        return self.context["step"]
-
 
 class AssessmentResponseSerializer(Serializer):
     """
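
Reviewer aside (not part of the diff): the CharField swap works because DRF resolves a dotted source by plain attribute traversal on the serializer's instance, so the serializer no longer needs the "step" context. A minimal sketch with hypothetical stub classes standing in for xblock.api_data:

    from rest_framework.serializers import CharField, Serializer

    class FakeGradesAPI:
        # Stand-in for GradesAPI; the real property derives staff/peer/self.
        effective_assessment_type = "staff"

    class FakeApiData:
        # Stand-in for the object the MFE serializers receive.
        grades_data = FakeGradesAPI()

    class GradeTypeOnlySerializer(Serializer):
        effectiveAssessmentType = CharField(source="grades_data.effective_assessment_type")

    print(GradeTypeOnlySerializer(FakeApiData()).data)
    # -> {'effectiveAssessmentType': 'staff'}
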
{"selectedOption": 4, "feedback": ""}, + {"selectedOption": 2, "feedback": ""}, + ] + }) + self.assertDictEqual(score_details["stepScore"], {"earned": 15, "possible": 20}) @scenario("data/grade_scenario.xml", user_id="Alan") def test_staff_assessment_step(self, xblock): @@ -253,21 +275,33 @@ def test_staff_assessment_step(self, xblock): self.submit_staff_assessment(xblock, submission, STAFF_GOOD_ASSESSMENT) - context = {"response": submission, "step": "staff"} + context = {"response": submission, "step": "done"} + # When I load my response data = AssessmentGradeSerializer(xblock.api_data, context=context).data - # I get the appropriate response - self.assertEqual(context["step"], data["effectiveAssessmentType"]) - self.assertEqual( - data["staff"], - AssessmentStepSerializer( - xblock.api_data.staff_assessment_data.assessment, context=context - ).data, - ) + # Then I get the appropriate assessment data + expected_assessment_type = "staff" + self.assertEqual(expected_assessment_type, data["effectiveAssessmentType"]) + + score_details = data[expected_assessment_type] + self.assertNestedDictEquals(score_details["assessment"], { + "overallFeedback": STAFF_GOOD_ASSESSMENT["overall_feedback"], + "criteria": [ + { + "selectedOption": 0, + "feedback": '', + }, + { + "selectedOption": 1, + "feedback": '', + } + ] + }) + self.assertDictEqual(score_details["stepScore"], {"earned": 5, "possible": 6}) @scenario("data/grade_scenario.xml", user_id="Bernard") - def test_peer_assement_steps(self, xblock): + def test_peer_assessment_steps(self, xblock): # Create a submission from the user student_item = xblock.get_student_item_dict() submission = self.create_test_submission( @@ -292,60 +326,80 @@ def test_peer_assement_steps(self, xblock): graded_by, ) - context = {"response": submission, "step": "peer"} + context = {"response": submission, "step": "done"} # When I load my response data = AssessmentGradeSerializer(xblock.api_data, context=context).data - # I get the appropriate response - self.assertEqual(context["step"], data["effectiveAssessmentType"]) - self.assertEqual(data["peer"], {'stepScore': None, 'assessments': []}) + # Then I get the appropriate assessment data + expected_assessment_type = "peer" + self.assertEqual(expected_assessment_type, data["effectiveAssessmentType"]) + + score_details = data[expected_assessment_type] + self.assertDictEqual(score_details, {'stepScore': None, 'assessments': []}) + self.assertIsNone(data["peerUnweighted"]['stepScore']) self.assertEqual(len(data["peerUnweighted"]['assessments']), len(self.PEERS)) - @scenario("data/grade_scenario.xml", user_id="Alan") - def test_assessment_step_score(self, xblock): - submission_text = ["Foo", "Bar"] + @scenario("data/grade_scenario.xml", user_id="Bernard") + def test_staff_override(self, xblock): + # Create a submission from the user + student_item = xblock.get_student_item_dict() submission = self.create_test_submission( - xblock, submission_text=submission_text + xblock, student_item=student_item, submission_text=self.SUBMISSION ) - self.submit_staff_assessment(xblock, submission, STAFF_GOOD_ASSESSMENT) - - context = {"response": submission, "step": "staff"} - # When I load my response - data = AssessmentGradeSerializer(xblock.api_data, context=context).data - - # I get the appropriate response - self.assertEqual(context["step"], data["effectiveAssessmentType"]) - - step_score = AssessmentScoreSerializer( - xblock.api_data.staff_assessment_data.assessment, context=context - ).data - - 
diff --git a/package-lock.json b/package-lock.json
index 1da5032650..28cc177c87 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,6 +1,6 @@
 {
   "name": "edx-ora2",
-  "version": "6.0.0",
+  "version": "6.0.1",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
diff --git a/package.json b/package.json
index d622df935c..b7d7011a0a 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "edx-ora2",
-  "version": "6.0.0",
+  "version": "6.0.1",
   "repository": "https://github.com/openedx/edx-ora2.git",
   "dependencies": {
     "@edx/frontend-build": "^6.1.1",