6 changes: 5 additions & 1 deletion common/lib/xmodule/xmodule/combined_open_ended_module.py
@@ -4,7 +4,7 @@
from pkg_resources import resource_string

from xmodule.raw_module import RawDescriptor
from .x_module import XModule
from .x_module import XModule, module_attr
from xblock.fields import Integer, Scope, String, List, Float, Boolean
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor
from collections import namedtuple
@@ -510,3 +510,7 @@ def non_editable_metadata_fields(self):
        non_editable_fields.extend([CombinedOpenEndedDescriptor.due, CombinedOpenEndedDescriptor.graceperiod,
                                    CombinedOpenEndedDescriptor.markdown, CombinedOpenEndedDescriptor.version, CombinedOpenEndedDescriptor.track_changes])
        return non_editable_fields

    # Proxy to CombinedOpenEndedModule so that external callers don't have to know if they're working
    # with a module or a descriptor
    child_module = module_attr('child_module')
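For readers unfamiliar with the proxying above: module_attr (imported from x_module) builds a descriptor-side property that forwards attribute lookups to the bound module instance. A minimal sketch of the idea, assuming the backing module is reachable as an attribute like _xmodule (names here are illustrative, not copied from x_module.py):

def module_attr(attr_name):
    """Return a property that forwards `attr_name` to the bound XModule."""
    def getter(descriptor):
        # Forward the lookup to the module object backing this descriptor.
        return getattr(descriptor._xmodule, attr_name)
    return property(getter)

With such a proxy in place, descriptor.child_module resolves to the module's child_module, which is what lets callers like the openended_post command below stay agnostic about whether they hold a module or a descriptor.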
common/lib/xmodule/xmodule/open_ended_grading_classes/combined_open_ended_modulev1.py
@@ -363,6 +363,32 @@ def fix_invalid_state(self):
        last_completed_child = next((i for i, child in reversed(list(enumerate(children))) if child['child_state'] == self.DONE), 0)
        self.current_task_number = min(last_completed_child + 1, len(best_task_states) - 1)

    def create_task(self, task_state, task_xml):
        """Create a task object for the given task state and task XML."""

        tag_name = self.get_tag_name(task_xml)
        children = self.child_modules()
        task_descriptor = children['descriptors'][tag_name](self.system)
        task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(task_xml), self.system)
        task = children['modules'][tag_name](
            self.system,
            self.location,
            task_parsed_xml,
            task_descriptor,
            self.static_data,
            instance_state=task_state,
        )
        return task

    def get_task_number(self, task_number):
        """Return the task object at the given task_number, or None if it does not exist."""

        task_states_count = len(self.task_states)
        if task_states_count > 0 and task_number < task_states_count:
            task_state = self.task_states[task_number]
            task_xml = self.task_xml[task_number]
            return self.create_task(task_state, task_xml)
        return None

Contributor Author:
@cpennington I have replaced get_current_task with get_task_at_index(task_index) and task_index can be specified as an argument to the commands. This will ensure that for multi-step problems the commands only operate on the intended step.
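To make that workflow concrete: assuming the standard manage.py entry point and hypothetical course/location values, the command added in this PR could be invoked roughly like this to re-post only the second task (index 1) for each student listed in the file:

python manage.py lms openended_post MITx/6.002x/2013_Spring i4x://MITx/6.002x/combinedopenended/Problem1 student_ids.txt --task-number=1 --dry-run

Dropping --dry-run sends the stored answer to the grader for every listed student whose task state is ASSESSING.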

    def reset_task_state(self, message=""):
        """
22 changes: 22 additions & 0 deletions common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
@@ -18,6 +18,7 @@

from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
from xmodule.combined_open_ended_module import CombinedOpenEndedModule
@@ -500,6 +501,27 @@ def test_get_last_response(self):
        self.assertEqual(response_dict['max_score'], self.max_score)
        self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL)

    def test_create_task(self):
        combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])

        first_task = combinedoe.create_task(combinedoe.task_states[0], combinedoe.task_xml[0])
        self.assertIsInstance(first_task, SelfAssessmentModule)

        second_task = combinedoe.create_task(combinedoe.task_states[1], combinedoe.task_xml[1])
        self.assertIsInstance(second_task, OpenEndedModule)

    def test_get_task_number(self):
        combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])

        first_task = combinedoe.get_task_number(0)
        self.assertIsInstance(first_task, SelfAssessmentModule)

        second_task = combinedoe.get_task_number(1)
        self.assertIsInstance(second_task, OpenEndedModule)

        third_task = combinedoe.get_task_number(2)
        self.assertIsNone(third_task)

    def test_update_task_states(self):
        """
        See if we can update the task states properly
147 changes: 147 additions & 0 deletions common/lib/xmodule/xmodule/tests/test_util_open_ended.py
@@ -774,6 +774,153 @@ def serialize_open_ended_instance_state(json_str):
}
""")


# State Initial

STATE_INITIAL = serialize_open_ended_instance_state("""
{
    "ready_to_reset": false,
    "skip_spelling_checks": false,
    "current_task_number": 0,
    "old_task_states": [],
    "weight": 1,
    "task_states": [
        {
            "child_attempts": 1,
            "child_created": false,
            "child_history": [],
            "child_state": "done",
            "max_score": 3,
            "version": 1
        },
        {
            "child_created": false,
            "child_attempts": 0,
            "stored_answer": "A stored answer.",
            "version": 1,
            "child_history": [],
            "max_score": 3,
            "child_state": "initial"
        }
    ],
    "graded": true,
    "student_attempts": 0,
    "required_peer_grading": 3,
    "state": "initial",
    "accept_file_upload": false,
    "min_to_calibrate": 3,
    "max_to_calibrate": 6,
    "display_name": "Open Response Assessment",
    "peer_grader_count": 3,
    "max_attempts": 1
}""")

STATE_ACCESSING = serialize_open_ended_instance_state("""
{
    "ready_to_reset": false,
    "skip_spelling_checks": false,
    "current_task_number": 0,
    "old_task_states": [],
    "weight": 1,
    "task_states": [
        {
            "child_attempts": 1,
            "child_created": false,
            "child_history": [
                {
                    "answer": "Here is an answer."
                }
            ],
            "child_state": "done",
            "max_score": 3,
            "version": 1
        },
        {
            "child_created": false,
            "child_attempts": 0,
            "stored_answer": null,
            "version": 1,
            "child_history": [
                {
                    "answer": "Here is an answer."
                }
            ],
            "max_score": 3,
            "child_state": "assessing"
        }
    ],
    "graded": true,
    "student_attempts": 0,
    "required_peer_grading": 3,
    "state": "assessing",
    "accept_file_upload": false,
    "min_to_calibrate": 3,
    "max_to_calibrate": 6,
    "display_name": "Open Response Assessment",
    "peer_grader_count": 3,
    "max_attempts": 1
}""")

STATE_POST_ASSESSMENT = serialize_open_ended_instance_state("""
{
    "ready_to_reset": false,
    "skip_spelling_checks": false,
    "current_task_number": 0,
    "old_task_states": [],
    "weight": 1,
    "task_states": [
        {
            "child_attempts": 1,
            "child_created": false,
            "child_history": [
                {
                    "answer": "Here is an answer."
                }
            ],
            "child_state": "done",
            "max_score": 3,
            "version": 1
        },
        {
            "child_created": false,
            "child_attempts": 0,
            "stored_answer": null,
            "version": 1,
            "child_history": [
                {
                    "answer": "Here is an answer."
                }
            ],
            "max_score": 3,
            "post_assessment": {
                "feedback": {
                    "grammar": "Grammar: Ok.",
                    "markup-text": "valid essay",
                    "spelling": "Spelling: Ok."
                },
                "grader_id": 3237,
                "grader_type": "ML",
                "rubric_scores_complete": true,
                "rubric_xml": "<rubric><category><description>Response Quality</description><score>3</score><option points='0'>Category one description.</option><option points='1'>Category two description.</option><option points='2'>Category three description.</option><option points='3'>Category four description.</option></category></rubric>",
                "score": 2,
                "submission_id": 3099,
                "success": true
            },
            "child_state": "post_assessment"
        }
    ],
    "graded": true,
    "student_attempts": 0,
    "required_peer_grading": 3,
    "state": "done",
    "accept_file_upload": false,
    "min_to_calibrate": 3,
    "max_to_calibrate": 6,
    "display_name": "Open Response Assessment",
    "peer_grader_count": 3,
    "max_attempts": 1
}""")

# Task state with self assessment only.
TEST_STATE_SA = ["{\"child_created\": false, \"child_attempts\": 1, \"version\": 1, \"child_history\": [{\"answer\": \"Censorship in the Libraries\\r<br>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r<br><br>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.\", \"post_assessment\": \"[3, 3, 2, 2, 2]\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"done\"}", "{\"child_created\": false, \"child_attempts\": 0, \"version\": 1, \"child_history\": [{\"answer\": \"Censorship in the Libraries\\r<br>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author\\r<br><br>Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.\", \"post_assessment\": \"{\\\"submission_id\\\": 1461, \\\"score\\\": 12, \\\"feedback\\\": \\\"{\\\\\\\"feedback\\\\\\\": \\\\\\\"\\\\\\\"}\\\", \\\"success\\\": true, \\\"grader_id\\\": 5414, \\\"grader_type\\\": \\\"IN\\\", \\\"rubric_scores_complete\\\": true, \\\"rubric_xml\\\": \\\"<rubric><category><description>\\\\nIdeas\\\\n</description><score>3</score><option points='0'>\\\\nDifficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.\\\\n</option><option points='1'>\\\\nAttempts a main idea. Sometimes loses focus or ineffectively displays focus.\\\\n</option><option points='2'>\\\\nPresents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.\\\\n</option><option points='3'>\\\\nPresents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.\\\\n</option></category><category><description>\\\\nContent\\\\n</description><score>3</score><option points='0'>\\\\nIncludes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.\\\\n</option><option points='1'>\\\\nIncludes little information and few or no details. Explores only one or two facets of the topic.\\\\n</option><option points='2'>\\\\nIncludes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.\\\\n</option><option points='3'>\\\\nIncludes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.\\\\n</option></category><category><description>\\\\nOrganization\\\\n</description><score>2</score><option points='0'>\\\\nIdeas organized illogically, transitions weak, and response difficult to follow.\\\\n</option><option points='1'>\\\\nAttempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.\\\\n</option><option points='2'>\\\\nIdeas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.\\\\n</option></category><category><description>\\\\nStyle\\\\n</description><score>2</score><option points='0'>\\\\nContains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.\\\\n</option><option points='1'>\\\\nContains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).\\\\n</option><option points='2'>\\\\nIncludes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.\\\\n</option></category><category><description>\\\\nVoice\\\\n</description><score>2</score><option points='0'>\\\\nDemonstrates language and tone that may be inappropriate to task and reader.\\\\n</option><option points='1'>\\\\nDemonstrates an attempt to adjust language and tone to task and reader.\\\\n</option><option points='2'>\\\\nDemonstrates effective adjustment of language and tone to task and reader.\\\\n</option></category></rubric>\\\"}\", \"score\": 12}], \"max_score\": 12, \"child_state\": \"post_assessment\"}"]

12 changes: 2 additions & 10 deletions lms/djangoapps/instructor/management/commands/dump_grades.py
@@ -10,6 +10,7 @@
from xmodule.modulestore.django import modulestore

from django.core.management.base import BaseCommand
from instructor.utils import DummyRequest


class Command(BaseCommand):
@@ -37,7 +38,7 @@ def handle(self, *args, **options):
        if len(args) > 2:
            get_raw_scores = args[2].lower() == 'raw'

        request = self.DummyRequest()
        request = DummyRequest()
        try:
            course = get_course_by_id(course_id)
        except Exception:
@@ -63,12 +64,3 @@

        fp.close()
        print "Done: %d records dumped" % len(datatable['data'])

class DummyRequest(object):
    META = {}
    def __init__(self):
        return
    def get_host(self):
        return 'edx.mit.edu'
    def is_secure(self):
        return False
106 changes: 106 additions & 0 deletions lms/djangoapps/instructor/management/commands/openended_post.py
@@ -0,0 +1,106 @@
"""
Command to manually re-post open ended submissions to the grader.
"""
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from optparse import make_option

from xmodule.modulestore.django import modulestore
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule

from courseware.courses import get_course

from instructor.utils import get_module_for_student


class Command(BaseCommand):
"""
Command to manually re-post open ended submissions to the grader.
"""

help = ("Usage: openended_post <course_id> <problem_location> <student_ids.txt> --dry-run --task-number=<task_number>\n"
"The text file should contain a User.id in each line.")

option_list = BaseCommand.option_list + (
make_option('-n', '--dry-run',
action='store_true', dest='dry_run', default=False,
help="Do everything except send the submission to the grader. "),
make_option('--task-number',
type='int', default=0,
help="Task number that needs to be submitted."),
)

def handle(self, *args, **options):

dry_run = options['dry_run']
task_number = options['task_number']

if len(args) == 3:
course_id = args[0]
location = args[1]
students_ids = [line.strip() for line in open(args[2])]
else:
print self.help
return

try:
course = get_course(course_id)
except ValueError as err:
print err
return

descriptor = modulestore().get_instance(course.id, location, depth=0)
if descriptor is None:
print "Location not found in course"
return

if dry_run:
print "Doing a dry run."

students = User.objects.filter(id__in=students_ids).order_by('username')
print "Number of students: {0}".format(students.count())

for student in students:
post_submission_for_student(student, course, location, task_number, dry_run=dry_run)


def post_submission_for_student(student, course, location, task_number, dry_run=True):
"""If the student's task child_state is ASSESSING post submission to grader."""

print "{0}:{1}".format(student.id, student.username)
try:
module = get_module_for_student(student, course, location)
if module is None:
print " WARNING: No state found."
return False

latest_task = module.child_module.get_task_number(task_number)
if latest_task is None:
print " WARNING: No task state found."
return False

if not isinstance(latest_task, OpenEndedModule):
print " ERROR: Not an OpenEndedModule task."
return False

latest_task_state = latest_task.child_state

if latest_task_state == OpenEndedChild.INITIAL:
print " WARNING: No submission."
elif latest_task_state == OpenEndedChild.POST_ASSESSMENT or latest_task_state == OpenEndedChild.DONE:
print " WARNING: Submission already graded."
elif latest_task_state == OpenEndedChild.ASSESSING:
latest_answer = latest_task.latest_answer()
if dry_run:
print " Skipped sending submission to grader: {0!r}".format(latest_answer[:100].encode('utf-8'))
else:
latest_task.send_to_grader(latest_answer, latest_task.system)
print " Sent submission to grader: {0!r}".format(latest_answer[:100].encode('utf-8'))
return True
else:
print "WARNING: Invalid task_state: {0}".format(latest_task_state)
except Exception as err: # pylint: disable=broad-except
print err

return False