Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,3 +5,4 @@
/workbench.*
/dist
/templates
*.iml
Binary file modified doc/img/mrq-3.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
16 changes: 10 additions & 6 deletions problem_builder/answer.py
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,15 @@ def student_view(self, context=None):
""" Normal view of this XBlock, identical to mentoring_view """
return self.mentoring_view(context)

def get_results(self, previous_response=None):
    """
    Return the stored result for this answer.

    `previous_response` is accepted for interface compatibility but ignored:
    the previous result actually lives in a database table.
    """
    outcome = 'correct' if self.status == 'correct' else None
    return {
        'student_input': self.student_input,
        'status': self.status,
        'weight': self.weight,
        'score': 1 if outcome else 0,
    }

def submit(self, submission):
"""
The parent block is handling a student submission, including a new answer for this
Expand All @@ -187,12 +196,7 @@ def submit(self, submission):
self.student_input = submission[0]['value'].strip()
self.save()
log.info(u'Answer submitted for`{}`: "{}"'.format(self.name, self.student_input))
return {
'student_input': self.student_input,
'status': self.status,
'weight': self.weight,
'score': 1 if self.status == 'correct' else 0,
}
return self.get_results()

@property
def status(self):
Expand Down
18 changes: 13 additions & 5 deletions problem_builder/mcq.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,15 +74,15 @@ def describe_choice_correctness(self, choice_value):
return self._(u"Wrong")
return self._(u"Not Acceptable")

def submit(self, submission):
log.debug(u'Received MCQ submission: "%s"', submission)

def calculate_results(self, submission):
correct = submission in self.correct_choices
tips_html = []
for tip in self.get_tips():
if submission in tip.values:
tips_html.append(tip.render('mentoring_view').content)

formatted_tips = None

if tips_html:
formatted_tips = loader.render_template('templates/html/tip_choice_group.html', {
'tips_html': tips_html,
Expand All @@ -94,13 +94,21 @@ def submit(self, submission):
# Also send to the submissions API:
sub_api.create_submission(self.student_item_key, submission)

result = {
return {
'submission': submission,
'status': 'correct' if correct else 'incorrect',
'tips': formatted_tips if tips_html else None,
'tips': formatted_tips,
'weight': self.weight,
'score': 1 if correct else 0,
}

def get_results(self, previous_result):
    """Re-grade a previously stored MCQ submission and return the result dict."""
    stored_submission = previous_result['submission']
    return self.calculate_results(stored_submission)

def submit(self, submission):
    """Grade a new MCQ submission, persist the student's choice, and return the result."""
    log.debug(u'Received MCQ submission: "%s"', submission)
    outcome = self.calculate_results(submission)
    # Record the choice only after grading succeeds.
    self.student_choice = submission
    log.debug(u'MCQ submission result: %s', outcome)
    return outcome

Expand Down
146 changes: 131 additions & 15 deletions problem_builder/mentoring.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
# Imports ###########################################################

import logging
import json

from collections import namedtuple

Expand Down Expand Up @@ -61,6 +62,10 @@ def _(text):

Score = namedtuple("Score", ["raw", "percentage", "correct", "incorrect", "partially_correct"])

CORRECT = 'correct'
INCORRECT = 'incorrect'
PARTIAL = 'partial'


@XBlock.needs("i18n")
@XBlock.wants('settings')
Expand Down Expand Up @@ -160,6 +165,11 @@ class MentoringBlock(XBlock, StepParentMixin, StudioEditableXBlockMixin, StudioC
default=[],
scope=Scope.user_state
)
# Whether to show per-question feedback details once the learner has used up
# all attempts (consumed by show_extended_feedback / the *_json helpers).
extended_feedback = Boolean(
    help=_("Show extended feedback details when all attempts are used up."),
    default=False,
    # Bug fix: was `Scope=Scope.content` — XBlock field keyword is lowercase
    # `scope`, so the capitalized form silently left the field at its
    # default scope instead of Scope.content.
    scope=Scope.content
)

# Global user state
next_step = String(
Expand All @@ -170,7 +180,7 @@ class MentoringBlock(XBlock, StepParentMixin, StudioEditableXBlockMixin, StudioC

editable_fields = (
'display_name', 'mode', 'followed_by', 'max_attempts', 'enforce_dependency',
'display_submit', 'weight',
'display_submit', 'weight', 'extended_feedback'
)
icon_class = 'problem'
has_score = True
Expand Down Expand Up @@ -201,17 +211,39 @@ def get_theme(self):
return xblock_settings[self.theme_key]
return _default_theme_config

def get_question_number(self, question_id):
    """
    Get the step number of the question id
    """
    for child_id in self.children:
        block = self.runtime.get_block(child_id)
        if not isinstance(block, StepMixin):
            continue
        if block.name == question_id:
            return block.step_number
    raise ValueError("Question ID in answer set not a step of this Mentoring Block!")

def answer_mapper(self, answer_status):
    """
    Create a JSON-dumpable object with readable key names from a list of student answers.

    Only answers whose stored status equals `answer_status` are included.
    """
    mapped = []
    for name, details in self.student_results:
        if details['status'] != answer_status:
            continue
        mapped.append({
            'number': self.get_question_number(name),
            'id': name,
            'details': details,
        })
    return mapped

@property
def score(self):
    """Compute the student score taking into account the weight of each step."""
    weights = (float(self.runtime.get_block(step_id).weight) for step_id in self.steps)
    total_child_weight = sum(weights)
    if total_child_weight == 0:
        # No weighted steps: empty score with empty per-status answer lists.
        # (Removed stale pre-change line `return Score(0, 0, 0, 0, 0)` that the
        # diff left as unreachable duplicate code.)
        return Score(0, 0, [], [], [])
    score = sum(r[1]['score'] * r[1]['weight'] for r in self.student_results) / total_child_weight
    # Group answered questions by outcome for extended-feedback display.
    # (Removed three stale `sum(1 for ...)` count lines that were dead stores,
    # immediately overwritten by the answer_mapper calls below.)
    correct = self.answer_mapper(CORRECT)
    incorrect = self.answer_mapper(INCORRECT)
    partially_correct = self.answer_mapper(PARTIAL)

    return Score(score, int(round(score * 100)), correct, incorrect, partially_correct)

Expand Down Expand Up @@ -259,6 +291,7 @@ def student_view(self, context):
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/mentoring.js'))
fragment.add_resource(loader.load_unicode('templates/html/mentoring_attempts.html'), "text/html")
fragment.add_resource(loader.load_unicode('templates/html/mentoring_grade.html'), "text/html")
fragment.add_resource(loader.load_unicode('templates/html/mentoring_review_questions.html'), "text/html")

self.include_theme_files(fragment)
# Workbench doesn't have font awesome, so add it:
Expand Down Expand Up @@ -335,6 +368,87 @@ def publish_event(self, data, suffix=''):

return {'result': 'ok'}

def get_message(self, completed):
    """Return the feedback message HTML matching the block's current state."""
    if self.max_attempts_reached:
        key = 'max_attempts_reached'
    elif completed:
        key = 'completed'
    else:
        key = 'incomplete'
    return self.get_message_html(key)

@property
def assessment_message(self):
    """Review message shown while attempts remain; None once attempts are exhausted."""
    if self.max_attempts_reached:
        return None
    return self.get_message_html('on-assessment-review')

def show_extended_feedback(self):
    """Truthy only when extended feedback is enabled AND no attempts remain."""
    if not self.extended_feedback:
        # Preserve short-circuit `and` semantics: return the falsy operand itself.
        return self.extended_feedback
    return self.max_attempts_reached

def feedback_dispatch(self, target_data, stringify):
    """
    Return `target_data` (JSON-encoded when `stringify` is truthy) if extended
    feedback should be shown; otherwise return None.
    """
    if not self.show_extended_feedback():
        return None
    return json.dumps(target_data) if stringify else target_data

def correct_json(self, stringify=True):
    """Correctly-answered questions, JSON-encoded by default (None unless extended feedback applies)."""
    return self.feedback_dispatch(self.score.correct, stringify)

def incorrect_json(self, stringify=True):
    """Incorrectly-answered questions, JSON-encoded by default (None unless extended feedback applies)."""
    return self.feedback_dispatch(self.score.incorrect, stringify)

def partial_json(self, stringify=True):
    """Partially-correct questions, JSON-encoded by default (None unless extended feedback applies)."""
    return self.feedback_dispatch(self.score.partially_correct, stringify)

@XBlock.json_handler
def get_results(self, queries, suffix=''):
    """
    Gets detailed results in the case of extended feedback.

    It may be a good idea to eventually have this function get results
    in the general case instead of loading them in the template in the future,
    and only using it for extended feedback situations.

    Right now there are two ways to get results-- through the template upon loading up
    the mentoring block, or after submission of an AJAX request like in
    submit or get_results here.
    """
    results = []
    # Guard: extended feedback is only available when enabled AND attempts are used up.
    if not self.show_extended_feedback():
        return {
            'results': [],
            'error': 'Extended feedback results cannot be obtained.'
        }
    completed = True
    # Map child name -> stored result dict from previous submissions.
    choices = dict(self.student_results)
    step = self.step
    # Only one child should ever be of concern with this method.
    for child_id in self.steps:
        child = self.runtime.get_block(child_id)
        if child.name and child.name in queries:
            results = [child.name, child.get_results(choices[child.name])]
            # Children may have their own definition of 'completed' which can vary from the general case
            # of the whole mentoring block being completed. This is because in standard mode, all children
            # must be correct to complete the block. In assessment mode with extended feedback, completion
            # happens when you're out of attempts, no matter how you did.
            # NOTE(review): `completed` is reassigned to the stored *status string*
            # here (e.g. 'correct'/'incorrect'), not a boolean — confirm the
            # front end expects that.
            completed = choices[child.name]['status']
            break

    # The 'completed' message should always be shown in this case, since no more attempts are available.
    message = self.get_message(True)

    return {
        'results': results,
        'completed': completed,
        'attempted': self.attempted,
        'message': message,
        'step': step,
        'max_attempts': self.max_attempts,
        'num_attempts': self.num_attempts,
    }

@XBlock.json_handler
def submit(self, submissions, suffix=''):
log.info(u'Received submissions: {}'.format(submissions))
Expand All @@ -354,12 +468,7 @@ def submit(self, submissions, suffix=''):
child.save()
completed = completed and (child_result['status'] == 'correct')

if self.max_attempts_reached:
message = self.get_message_html('max_attempts_reached')
elif completed:
message = self.get_message_html('completed')
else:
message = self.get_message_html('incomplete')
message = self.get_message(completed)

# Once it has been completed once, keep completion even if user changes values
if self.completed:
Expand Down Expand Up @@ -402,7 +511,7 @@ def submit(self, submissions, suffix=''):
})

return {
'submitResults': submit_results,
'results': submit_results,
'completed': self.completed,
'attempted': self.attempted,
'message': message,
Expand All @@ -416,6 +525,7 @@ def handle_assessment_submit(self, submissions, suffix):
children = [self.runtime.get_block(child_id) for child_id in self.children]
children = [child for child in children if not isinstance(child, MentoringMessageBlock)]
steps = [child for child in children if isinstance(child, StepMixin)] # Faster than the self.steps property
assessment_message = None

for child in children:
if child.name and child.name in submissions:
Expand Down Expand Up @@ -451,6 +561,7 @@ def handle_assessment_submit(self, submissions, suffix):
'score_type': 'proficiency',
})
event_data['final_grade'] = score.raw
assessment_message = self.assessment_message

self.num_attempts += 1
self.completed = True
Expand All @@ -468,9 +579,14 @@ def handle_assessment_submit(self, submissions, suffix):
'num_attempts': self.num_attempts,
'step': self.step,
'score': score.percentage,
'correct_answer': score.correct,
'incorrect_answer': score.incorrect,
'partially_correct_answer': score.partially_correct,
'correct_answer': len(score.correct),
'incorrect_answer': len(score.incorrect),
'partially_correct_answer': len(score.partially_correct),
'correct': self.correct_json(stringify=False),
'incorrect': self.incorrect_json(stringify=False),
'partial': self.partial_json(stringify=False),
'extended_feedback': self.show_extended_feedback() or '',
'assessment_message': assessment_message,
}

@XBlock.json_handler
Expand Down
3 changes: 3 additions & 0 deletions problem_builder/message.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ class MentoringMessageBlock(XBlock, StudioEditableXBlockMixin):
{"display_name": "Completed", "value": "completed"},
{"display_name": "Incompleted", "value": "incomplete"},
{"display_name": "Reached max. # of attemps", "value": "max_attempts_reached"},
{"display_name": "Review with attempts left", "value": "on-assessment-review"}
),
)
editable_fields = ("content", )
Expand Down Expand Up @@ -84,6 +85,8 @@ def display_name_with_default(self):
return self._(u"Message shown when complete")
if self.type == 'incomplete':
return self._(u"Message shown when incomplete")
if self.type == 'on-assessment-review':
return self._(u"Message shown during review when attempts remain")
return u"INVALID MESSAGE"

@classmethod
Expand Down
27 changes: 18 additions & 9 deletions problem_builder/mrq.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,11 +81,25 @@ def describe_choice_correctness(self, choice_value):
return self._(u"Ignored")
return self._(u"Not Acceptable")

def get_results(self, previous_result):
    """
    Get the results a student has already submitted.
    """
    recomputed = self.calculate_results(previous_result['submissions'])
    # A previously stored answer is by definition a completed one.
    recomputed['completed'] = True
    return recomputed

def submit(self, submissions):
    """
    Handle a new MRQ submission: grade it, persist the student's choices,
    and return the result dict produced by calculate_results().
    """
    log.debug(u'Received MRQ submissions: "%s"', submissions)

    # Removed dead `score = 0` left over from the pre-refactor inline grading;
    # scoring now happens entirely inside calculate_results().
    result = self.calculate_results(submissions)
    self.student_choices = submissions

    log.debug(u'MRQ submissions result: %s', result)
    return result

def calculate_results(self, submissions):
score = 0
results = []
for choice in self.custom_choices:
choice_completed = True
Expand All @@ -106,22 +120,20 @@ def submit(self, submissions):
choice_result = {
'value': choice.value,
'selected': choice_selected,
}
}
# Only include tips/results in returned response if we want to display them
if not self.hide_results:
loader = ResourceLoader(__name__)
choice_result['completed'] = choice_completed
choice_result['tips'] = loader.render_template('templates/html/tip_choice_group.html', {
'tips_html': choice_tips_html,
})
})

results.append(choice_result)

self.student_choices = submissions

status = 'incorrect' if score <= 0 else 'correct' if score >= len(results) else 'partial'

result = {
return {
'submissions': submissions,
'status': status,
'choices': results,
Expand All @@ -130,9 +142,6 @@ def submit(self, submissions):
'score': (float(score) / len(results)) if results else 0,
}

log.debug(u'MRQ submissions result: %s', result)
return result

def validate_field_data(self, validation, data):
"""
Validate this block's field data.
Expand Down
Loading