Skip to content

Commit

Permalink
feat: Dual UI (#2073)
Browse files Browse the repository at this point in the history
Add support for multiple UIs, currently legacy and a new MFE view. Includes:

* make: legacy template (#2046)

* chore: add lms+cms restart script to install-local-ora

* refactor: move data gathering pieces of mixins into xblock/apis

* feat: new xblock/ui_mixins/legacy to replace existing view/handler behaviors from mixins, leveraging our new APIs.

* refactor: remaining non-core-xblock functionality from xblock to xblock/utils. Updated references to changed file locations.

* feat: add page context MFE endpoint

* refactor: allow peeking a peer assessment

* chore: update translations

* feat: Add logic to display the new ORA UI templates (#2054)

* feat: xblock iframe

* feat: conditional render of new views based on mobile or waffle

* feat: BFF Assessments Data (#2063)

* feat: add url generation to file parsing

* refactor: legacy upload files

* refactor: legacy delete file

* refactor: legacy file upload url generation

* refactor: has any files in upload space

* chore: fix studio base view (#2078)

* docs: update some comments / docstrings

* fix: add missing teams step

* feat: xblock view (#2080)

* feat: disable MFE views for unsupported use-cases (#2090)

* feat: refactor legacy assessment actions and add assessment mfe bff handler (#2074)

* refactor: legacy peer assessment handler and api action

* refactor: legacy self assessment api handler

* refactor: legacy staff assessment handler and api action

* refactor: legacy student training handler and api action

* refactor: legacy ui mixins folder structure

* refactor: peer review uuid matching

* feat: get_active_assessment peer api function

---------

Co-authored-by: Nathan Sprenkle <[email protected]>
Co-authored-by: Jenkins <[email protected]>
Co-authored-by: Ben Warzeski <[email protected]>
Co-authored-by: Leangseu Kim <[email protected]>
  • Loading branch information
4 people authored Nov 7, 2023
2 parents 63ee4fa + c8341c6 commit 92d7a92
Show file tree
Hide file tree
Showing 228 changed files with 13,056 additions and 2,632 deletions.
2 changes: 2 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,8 @@ install-osx-requirements: ## Install OSX specific requirements using Homebrew
install-local-ora: ## installs your local ORA2 code into the LMS and Studio python virtualenvs
docker exec -t edx.devstack.lms bash -c '. /edx/app/edxapp/venvs/edxapp/bin/activate && cd /edx/app/edxapp/edx-platform && pip uninstall -y ora2 && pip install -e /edx/src/edx-ora2 && pip freeze | grep ora2'
docker exec -t edx.devstack.cms bash -c '. /edx/app/edxapp/venvs/edxapp/bin/activate && cd /edx/app/edxapp/edx-platform && pip uninstall -y ora2 && pip install -e /edx/src/edx-ora2 && pip freeze | grep ora2'
docker exec -t edx.devstack.lms bash -c 'kill $$(ps aux | egrep "manage.py ?\w* runserver" | egrep -v "while|grep" | awk "{print \$$2}")'
docker exec -t edx.devstack.cms bash -c 'kill $$(ps aux | egrep "manage.py ?\w* runserver" | egrep -v "while|grep" | awk "{print \$$2}")'

install_transifex_client: ## Install the Transifex client
# Installing client will skip CHANGELOG and LICENSE files from git changes
Expand Down
2 changes: 1 addition & 1 deletion openassessment/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@
Initialization Information for Open Assessment Module
"""

__version__ = '5.5.6'
__version__ = '6.0.0'
45 changes: 39 additions & 6 deletions openassessment/assessment/api/peer.py
Original file line number Diff line number Diff line change
Expand Up @@ -687,7 +687,37 @@ def get_submitted_assessments(submission_uuid, limit=None):
raise PeerAssessmentInternalError(error_message) from ex


def get_submission_to_assess(submission_uuid, graded_by):
def get_active_assessment_submission(submission_uuid):
    """
    Return the submission this learner is currently assessing, if any.

    This only reports an assessment already in progress; it never assigns a
    new peer submission. To pull a new submission to grade, use
    `get_submission_to_assess` instead.

    Args:
        submission_uuid (str): UUID of the learner's own submission, used to
            look up their peer workflow.

    Returns:
        dict: the serialized submission under active assessment, or None when
        the workflow is cancelled or no assessment is in progress.

    Raises:
        PeerAssessmentWorkflowError: if no peer workflow exists for the given
            submission UUID, or the active submission cannot be loaded.
    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if not workflow:
        raise PeerAssessmentWorkflowError(
            f"A Peer Assessment Workflow does not exist for the student "
            f"with submission UUID {submission_uuid}"
        )

    # A cancelled learner has nothing in flight by definition.
    if workflow.is_cancelled:
        return None

    in_progress = workflow.find_active_assessments()
    if not in_progress:
        return None

    try:
        return sub_api.get_submission(in_progress.submission_uuid)
    except sub_api.SubmissionNotFoundError as ex:
        message = "Could not find a submission with the uuid %s for student %s in the peer workflow."
        message_args = (in_progress.submission_uuid, workflow.student_id)
        logger.exception(message, *message_args)
        raise PeerAssessmentWorkflowError(message % message_args) from ex


def get_submission_to_assess(submission_uuid, graded_by, peek=False):
"""Get a submission to peer evaluate.
Retrieves a submission for assessment for the given student. This will
Expand All @@ -705,6 +735,8 @@ def get_submission_to_assess(submission_uuid, graded_by):
associated Peer Workflow.
graded_by (int): The number of assessments a submission
requires before it has completed the peer assessment process.
peek (bool): When True, will verify a submission is available, without
creating a workflow to begin grading.
Returns:
dict: A peer submission for assessment. This contains a 'student_item',
Expand Down Expand Up @@ -754,14 +786,15 @@ def get_submission_to_assess(submission_uuid, graded_by):
if peer_submission_uuid:
try:
submission_data = sub_api.get_submission(peer_submission_uuid)
PeerWorkflow.create_item(workflow, peer_submission_uuid)
_log_workflow(peer_submission_uuid, workflow)
if not peek:
PeerWorkflow.create_item(workflow, peer_submission_uuid)
_log_workflow(peer_submission_uuid, workflow)
return submission_data
except sub_api.SubmissionNotFoundError as ex:
error_message = "Could not find a submission with the uuid %s for student %s in the peer workflow."
error_meesage_args = (peer_submission_uuid, workflow.student_id)
logger.exception(error_message, error_meesage_args[0], error_meesage_args[1])
raise PeerAssessmentWorkflowError(error_message % error_meesage_args) from ex
error_message_args = (peer_submission_uuid, workflow.student_id)
logger.exception(error_message, error_message_args[0], error_message_args[1])
raise PeerAssessmentWorkflowError(error_message % error_message_args) from ex
else:
logger.info(
"No submission found for %s to assess (%s, %s)",
Expand Down
38 changes: 37 additions & 1 deletion openassessment/assessment/api/staff.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,12 @@

from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
from openassessment.assessment.models import Assessment, AssessmentPart, InvalidRubricSelection, StaffWorkflow
from openassessment.assessment.serializers import InvalidRubric, full_assessment_dict, rubric_from_dict
from openassessment.assessment.serializers import (
InvalidRubric,
full_assessment_dict,
rubric_from_dict,
serialize_assessments,
)
from openassessment.assessment.score_type_constants import STAFF_TYPE


Expand Down Expand Up @@ -462,3 +467,34 @@ def bulk_retrieve_workflow_status(course_id, item_id, submission_uuids=None):
return StaffWorkflow.bulk_retrieve_workflow_status(
course_id, item_id, submission_uuids
)


def get_assessment(submission_uuid):
    """
    Fetch the staff assessment, if any, recorded against a submission.

    Args:
        submission_uuid (str): UUID of the submission whose staff assessment
            we want.

    Returns:
        dict: the serialized Assessment, or None when no staff assessment
        exists yet. If several are found, only the most recent is returned.
    """
    # We weakly enforce that there is at most one staff assessment per
    # submission, but not at the database level, so a race between checking
    # the count and creating a new staff assessment could produce duplicates.
    # Guard against that by ordering on scored_at and keeping the newest row.
    newest_first = Assessment.objects.filter(
        score_type=STAFF_TYPE, submission_uuid=submission_uuid
    ).order_by('-scored_at')
    serialized = serialize_assessments(newest_first[:1])

    if not serialized:
        logger.info("No staff-assessment found for submission %s", submission_uuid)
        return None

    logger.info("Retrieved staff-assessment for submission %s", submission_uuid)
    return serialized[0]
31 changes: 24 additions & 7 deletions openassessment/assessment/models/peer.py
Original file line number Diff line number Diff line change
Expand Up @@ -523,6 +523,16 @@ class PeerWorkflowItem(models.Model):
# This WorkflowItem was used to determine the final score for the Workflow.
scored = models.BooleanField(default=False)

@classmethod
def _get_assessments(cls, submission_uuid, scored):
return Assessment.objects.filter(
pk__in=[
item.assessment.pk for item in PeerWorkflowItem.objects.filter(
submission_uuid=submission_uuid, scored=scored
)
]
)

@classmethod
def get_scored_assessments(cls, submission_uuid):
"""
Expand All @@ -533,15 +543,22 @@ def get_scored_assessments(cls, submission_uuid):
Returns:
QuerySet of Assessment objects.
"""
return cls._get_assessments(submission_uuid, True)

@classmethod
def get_unscored_assessments(cls, submission_uuid):
"""
return Assessment.objects.filter(
pk__in=[
item.assessment.pk for item in PeerWorkflowItem.objects.filter(
submission_uuid=submission_uuid, scored=True
)
]
)
Return all unscored assessments for a given submission.
Args:
submission_uuid (str): The UUID of the submission.
Returns:
QuerySet of Assessment objects.
"""
return cls._get_assessments(submission_uuid, False)

@classmethod
def get_bulk_scored_assessments(cls, submission_uuids):
Expand Down
99 changes: 99 additions & 0 deletions openassessment/assessment/test/test_peer.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@

from submissions import api as sub_api
from openassessment.assessment.api import peer as peer_api
from openassessment.assessment.errors.peer import PeerAssessmentWorkflowError
from openassessment.assessment.models import (
Assessment,
AssessmentFeedback,
Expand Down Expand Up @@ -2054,6 +2055,104 @@ def test_flexible_peer_grading_enabled(self, block_setting, course_override, exp
)
assert result == expected_flexible

def test_get_active_assessment(self):
"""
Test for behavior of get_active_assessment
"""
# Three learners and submissions
alice_sub, _ = self._create_student_and_submission('alice', 'alice sub', steps=['peer'])
bob_sub, _ = self._create_student_and_submission('bob', 'bob sub', steps=['peer'])
carlos_sub, _ = self._create_student_and_submission('carlos', 'carlos sub', steps=['peer'])

# No one has any active assessment currently
assert peer_api.get_active_assessment_submission(alice_sub['uuid']) is None
assert peer_api.get_active_assessment_submission(bob_sub['uuid']) is None
assert peer_api.get_active_assessment_submission(carlos_sub['uuid']) is None

# Alice requests a peer to grade and is assigned bob
sub = peer_api.get_submission_to_assess(alice_sub['uuid'], 3)
assert sub['uuid'] == bob_sub['uuid']

# Alice's active assessment is now Bob, and Bob is unaffected
assert peer_api.get_active_assessment_submission(alice_sub['uuid'])['uuid'] == bob_sub['uuid']
assert peer_api.get_active_assessment_submission(bob_sub['uuid']) is None

# Alice assesses Bob and then should have no active assessment
peer_api.create_assessment(
alice_sub['uuid'],
'alice',
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
3
)
assert peer_api.get_active_assessment_submission(alice_sub['uuid']) is None

# Alice requests a new peer to assess and gets Carlos, who is now her active assessment
sub = peer_api.get_submission_to_assess(alice_sub['uuid'], 3)
assert sub['uuid'] == carlos_sub['uuid']
assert peer_api.get_active_assessment_submission(alice_sub['uuid'])['uuid'] == carlos_sub['uuid']

# Assess Carlos, active assessment is now None
peer_api.create_assessment(
alice_sub['uuid'],
'alice',
ASSESSMENT_DICT['options_selected'],
ASSESSMENT_DICT['criterion_feedback'],
ASSESSMENT_DICT['overall_feedback'],
RUBRIC_DICT,
3
)
assert peer_api.get_active_assessment_submission(alice_sub['uuid']) is None

# There are no more peers to assess, and the returned None does not affect the active assessment
assert peer_api.get_submission_to_assess(alice_sub['uuid'], 3) is None
assert peer_api.get_active_assessment_submission(alice_sub['uuid']) is None

def test_get_active_assessment_cancelled(self):
# Two learners and submissions
alice_sub, _ = self._create_student_and_submission('alice', 'alice sub', steps=['peer'])
bob_sub, _ = self._create_student_and_submission('bob', 'bob sub', steps=['peer'])

# Alice requests a peer to grade and is assigned bob
sub = peer_api.get_submission_to_assess(alice_sub['uuid'], 3)
assert sub['uuid'] == bob_sub['uuid']

# Alice is then cancelled, so then her active assessment is None
workflow_api.cancel_workflow(
alice_sub['uuid'], "cancel", "1", STEP_REQUIREMENTS, COURSE_SETTINGS
)
assert peer_api.get_active_assessment_submission(alice_sub['uuid']) is None

def test_get_active_assessment_nonexistant(self):
# If we request the active assessment submission for a nonexistant workflow,
# raise an error
with self.assertRaises(PeerAssessmentWorkflowError):
peer_api.get_active_assessment_submission('nonexistant-uuid')

def test_get_active_assessment_error(self):
# Two learners and submissions
alice_sub, _ = self._create_student_and_submission('alice', 'alice sub', steps=['peer'])
bob_sub, bob_item = self._create_student_and_submission('bob', 'bob sub', steps=['peer'])

# Alice requests a peer to grade and is assigned bob
sub = peer_api.get_submission_to_assess(alice_sub['uuid'], 3)
assert sub['uuid'] == bob_sub['uuid']

# Delete bob's submission to induce an error
sub_api.reset_score(
bob_item['student_id'],
bob_item['course_id'],
bob_item['item_id'],
clear_state=True,
emit_signal=False
)

# Expected error is raised
with self.assertRaises(PeerAssessmentWorkflowError):
peer_api.get_active_assessment_submission(alice_sub['uuid'])


class PeerWorkflowTest(CacheResetTest):
"""
Expand Down
2 changes: 1 addition & 1 deletion openassessment/conf/locale/ar/LC_MESSAGES/djangojs.po
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-09-10 18:36+0000\n"
"POT-Creation-Date: 2023-10-22 18:36+0000\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: Ghassan Maslamani <[email protected]>, 2021\n"
"Language-Team: Arabic (http://app.transifex.com/open-edx/edx-platform/language/ar/)\n"
Expand Down
2 changes: 1 addition & 1 deletion openassessment/conf/locale/ar_SA/LC_MESSAGES/djangojs.po
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-09-10 18:36+0000\n"
"POT-Creation-Date: 2023-10-22 18:36+0000\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: NELC Open edX Translation <[email protected]>, 2020\n"
"Language-Team: Arabic (Saudi Arabia) (http://app.transifex.com/open-edx/edx-platform/language/ar_SA/)\n"
Expand Down
2 changes: 1 addition & 1 deletion openassessment/conf/locale/cs/LC_MESSAGES/django.po
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-09-17 18:36+0000\n"
"POT-Creation-Date: 2023-10-22 18:36+0000\n"
"PO-Revision-Date: 2014-06-11 13:03+0000\n"
"Last-Translator: Jiří Podhorecký, 2023\n"
"Language-Team: Czech (http://app.transifex.com/open-edx/edx-platform/language/cs/)\n"
Expand Down
2 changes: 1 addition & 1 deletion openassessment/conf/locale/cs/LC_MESSAGES/djangojs.po
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ msgid ""
msgstr ""
"Project-Id-Version: edx-platform\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-09-10 18:36+0000\n"
"POT-Creation-Date: 2023-10-22 18:36+0000\n"
"PO-Revision-Date: 2014-06-11 13:04+0000\n"
"Last-Translator: Jiří Podhorecký, 2023\n"
"Language-Team: Czech (http://app.transifex.com/open-edx/edx-platform/language/cs/)\n"
Expand Down
Binary file not shown.
Loading

0 comments on commit 92d7a92

Please sign in to comment.