diff --git a/common/lib/xmodule/xmodule/combined_open_ended_module.py b/common/lib/xmodule/xmodule/combined_open_ended_module.py
index 3211bef9d545..a8d4afaa11f0 100644
--- a/common/lib/xmodule/xmodule/combined_open_ended_module.py
+++ b/common/lib/xmodule/xmodule/combined_open_ended_module.py
@@ -4,7 +4,7 @@
from pkg_resources import resource_string
from xmodule.raw_module import RawDescriptor
-from .x_module import XModule
+from .x_module import XModule, module_attr
from xblock.fields import Integer, Scope, String, List, Float, Boolean
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor
from collections import namedtuple
@@ -510,3 +510,7 @@ def non_editable_metadata_fields(self):
non_editable_fields.extend([CombinedOpenEndedDescriptor.due, CombinedOpenEndedDescriptor.graceperiod,
CombinedOpenEndedDescriptor.markdown, CombinedOpenEndedDescriptor.version, CombinedOpenEndedDescriptor.track_changes])
return non_editable_fields
+
+ # Proxy to CombinedOpenEndedModule so that external callers don't have to know if they're working
+ # with a module or a descriptor
+ child_module = module_attr('child_module')
diff --git a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
index f8fa0ffb4bcf..d23edebf0f3b 100644
--- a/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
+++ b/common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
@@ -363,6 +363,32 @@ def fix_invalid_state(self):
last_completed_child = next((i for i, child in reversed(list(enumerate(children))) if child['child_state'] == self.DONE), 0)
self.current_task_number = min(last_completed_child + 1, len(best_task_states) - 1)
+ def create_task(self, task_state, task_xml):
+ """Create task object for given task state and task xml."""
+
+ tag_name = self.get_tag_name(task_xml)
+ children = self.child_modules()
+ task_descriptor = children['descriptors'][tag_name](self.system)
+ task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(task_xml), self.system)
+ task = children['modules'][tag_name](
+ self.system,
+ self.location,
+ task_parsed_xml,
+ task_descriptor,
+ self.static_data,
+ instance_state=task_state,
+ )
+ return task
+
+ def get_task_number(self, task_number):
+ """Return task object at task_number, or None if out of range."""
+
+ task_states_count = len(self.task_states)
+ if 0 <= task_number < task_states_count:
+ task_state = self.task_states[task_number]
+ task_xml = self.task_xml[task_number]
+ return self.create_task(task_state, task_xml)
+ return None
def reset_task_state(self, message=""):
"""
diff --git a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
index 6498c82c4ab3..1d6493bc79ed 100644
--- a/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
+++ b/common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
@@ -18,6 +18,7 @@
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
+from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
from xmodule.combined_open_ended_module import CombinedOpenEndedModule
@@ -500,6 +501,27 @@ def test_get_last_response(self):
self.assertEqual(response_dict['max_score'], self.max_score)
self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL)
+ def test_create_task(self):
+ combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])
+
+ first_task = combinedoe.create_task(combinedoe.task_states[0], combinedoe.task_xml[0])
+ self.assertIsInstance(first_task, SelfAssessmentModule)
+
+ second_task = combinedoe.create_task(combinedoe.task_states[1], combinedoe.task_xml[1])
+ self.assertIsInstance(second_task, OpenEndedModule)
+
+ def test_get_task_number(self):
+ combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])
+
+ first_task = combinedoe.get_task_number(0)
+ self.assertIsInstance(first_task, SelfAssessmentModule)
+
+ second_task = combinedoe.get_task_number(1)
+ self.assertIsInstance(second_task, OpenEndedModule)
+
+ third_task = combinedoe.get_task_number(2)
+ self.assertIsNone(third_task)
+
def test_update_task_states(self):
"""
See if we can update the task states properly
diff --git a/common/lib/xmodule/xmodule/tests/test_util_open_ended.py b/common/lib/xmodule/xmodule/tests/test_util_open_ended.py
index dac7a2b697e4..3f6b07fa0f9a 100644
--- a/common/lib/xmodule/xmodule/tests/test_util_open_ended.py
+++ b/common/lib/xmodule/xmodule/tests/test_util_open_ended.py
@@ -774,6 +774,153 @@ def serialize_open_ended_instance_state(json_str):
}
""")
+
+# State Initial
+
+STATE_INITIAL = serialize_open_ended_instance_state("""
+{
+ "ready_to_reset": false,
+ "skip_spelling_checks": false,
+ "current_task_number": 0,
+ "old_task_states": [],
+ "weight": 1,
+ "task_states": [
+ {
+ "child_attempts" : 1,
+ "child_created" : false,
+ "child_history" : [],
+ "child_state" : "done",
+ "max_score" : 3,
+ "version" : 1
+ },
+ {
+ "child_created": false,
+ "child_attempts": 0,
+ "stored_answer": "A stored answer.",
+ "version": 1,
+ "child_history": [],
+ "max_score": 3,
+ "child_state": "initial"
+ }
+ ],
+ "graded": true,
+ "student_attempts": 0,
+ "required_peer_grading": 3,
+ "state": "initial",
+ "accept_file_upload": false,
+ "min_to_calibrate": 3,
+ "max_to_calibrate": 6,
+ "display_name": "Open Response Assessment",
+ "peer_grader_count": 3,
+ "max_attempts": 1
+}""")
+
+STATE_ACCESSING = serialize_open_ended_instance_state("""
+{
+ "ready_to_reset": false,
+ "skip_spelling_checks": false,
+ "current_task_number": 0,
+ "old_task_states": [],
+ "weight": 1,
+ "task_states": [
+ {
+ "child_attempts" : 1,
+ "child_created" : false,
+ "child_history": [
+ {
+ "answer": "Here is an answer."
+ }
+ ],
+ "child_state" : "done",
+ "max_score" : 3,
+ "version" : 1
+ },
+ {
+ "child_created": false,
+ "child_attempts": 0,
+ "stored_answer": null,
+ "version": 1,
+ "child_history": [
+ {
+ "answer": "Here is an answer."
+ }
+ ],
+ "max_score": 3,
+ "child_state": "assessing"
+ }
+ ],
+ "graded": true,
+ "student_attempts": 0,
+ "required_peer_grading": 3,
+ "state": "assessing",
+ "accept_file_upload": false,
+ "min_to_calibrate": 3,
+ "max_to_calibrate": 6,
+ "display_name": "Open Response Assessment",
+ "peer_grader_count": 3,
+ "max_attempts": 1
+}""")
+
+STATE_POST_ASSESSMENT = serialize_open_ended_instance_state("""
+{
+ "ready_to_reset": false,
+ "skip_spelling_checks": false,
+ "current_task_number": 0,
+ "old_task_states": [],
+ "weight": 1,
+ "task_states": [
+ {
+ "child_attempts" : 1,
+ "child_created" : false,
+ "child_history": [
+ {
+ "answer": "Here is an answer."
+ }
+ ],
+ "child_state" : "done",
+ "max_score" : 3,
+ "version" : 1
+ },
+ {
+ "child_created": false,
+ "child_attempts": 0,
+ "stored_answer": null,
+ "version": 1,
+ "child_history": [
+ {
+ "answer": "Here is an answer."
+ }
+ ],
+ "max_score": 3,
+ "post_assessment": {
+ "feedback" : {
+ "grammar" : "Grammar: Ok.",
+ "markup-text" : "valid essay",
+ "spelling" : "Spelling: Ok."
+ },
+ "grader_id" : 3237,
+ "grader_type" : "ML",
+ "rubric_scores_complete" : true,
+ "rubric_xml" : "Response Quality3",
+ "score" : 2,
+ "submission_id" : 3099,
+ "success" : true
+ },
+ "child_state": "post_assessment"
+ }
+ ],
+ "graded": true,
+ "student_attempts": 0,
+ "required_peer_grading": 3,
+ "state": "done",
+ "accept_file_upload": false,
+ "min_to_calibrate": 3,
+ "max_to_calibrate": 6,
+ "display_name": "Open Response Assessment",
+ "peer_grader_count": 3,
+ "max_attempts": 1
+}""")
+
# Task state with self assessment only.
TEST_STATE_SA = ["{\"child_created\": false, \"child_attempts\": 1, \"version\": 1, \"child_history\": [{\"answer\": \"Censorship in the Libraries\\r 'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.\", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"Censorship in the Libraries\\r 'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r