diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index a37508641233..35dfc8ac5bc4 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -112,6 +112,22 @@ client error are correctly passed through to the client.
 LMS: Improve performance of page load and thread list load for discussion tab
 
+Studio: Support targeted feedback, which allows authors to provide explanations for
+incorrect choice selections in multiple choice questions that display automatically.
+These explanations are intended to help steer a student toward the correct answer, so
+they are best used in quizzes that allow multiple attempts. To provide targeted
+feedback, add a <targetedfeedbackset> element immediately before your <solution> or
+<solutionset>, and in this element provide a <targetedfeedback> for each piece of
+feedback. Within <targetedfeedback> you can specify your text explanation. Both the
+<choice> and its <targetedfeedback> should have the same "explanation-id" attribute.
+
+Studio: Added a feature that allows instructors to specify a wait time between
+attempts of the same quiz. In a problem's settings, instructors can specify how many
+seconds students are locked out of submitting another attempt of the same quiz.
+The timer starts as soon as they submit an attempt for grading. Note that this
+does not prevent a student from starting to work on another quiz attempt; it only
+prevents students from submitting many attempts in rapid succession.
+
 LMS: The wiki markup cheatsheet dialog is now accessible to screen readers.
 (LMS-1303)
 
@@ -121,6 +137,18 @@ Studio: Change course overview page, checklists, assets, import, export, and cou
 management page URLs to a RESTful interface. Also removed "\listing", which
 duplicated "\index".
 
+Studio: Support answer pools for multiple choice questions, so authors can provide
+multiple correct and incorrect choices for a question and have 1 correct choice and
+n-1 incorrect choices randomly selected and shuffled before being presented to the
+student. In XML, <choicegroup answer-pool="4"> enables an answer pool of 4 choices:
+1 correct choice and 3 incorrect choices. To provide multiple solution explanations,
+wrap all solution elements within a <solutionset>, and make sure to add an attribute
+called "explanation-id" to both the <choice> tag and its corresponding <solution>
+tag, with the same value on both. Note that this feature is only supported in the
+advanced XML problem editor, not the regular one.
+Also note that if you want your question to have a different set of answers on
+different attempts, be sure to set "Randomization" to "Always" in the problem
+settings in Studio.
+
 LMS: Fixed accessibility bug where users could not tab through wiki (LMS-1307)
 
 Blades: When start time and end time are specified for a video, a visual range
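Editorial note: to make the two changelog entries above concrete, here is a sketch of a problem that uses both authoring features. It is assembled from the descriptions above, not taken from shipped documentation; the tag and attribute spellings (`answer-pool`, `explanation-id`, `<targetedfeedbackset>`) follow the new code, but the question text and ids are invented, and combining both features in one response is shown only for illustration. The XML is embedded in a Python string in the same style the new tests below use.

    # Hypothetical authoring example; every id here is made up.
    import textwrap

    EXAMPLE_PROBLEM_XML = textwrap.dedent("""
        <problem>
          <p>What is 2 + 2?</p>
          <multiplechoiceresponse targeted-feedback="">
            <choicegroup type="MultipleChoice" answer-pool="3">
              <choice correct="false" explanation-id="feedback1">3</choice>
              <choice correct="false" explanation-id="feedback2">5</choice>
              <choice correct="true" explanation-id="feedbackC">4</choice>
            </choicegroup>
          </multiplechoiceresponse>
          <targetedfeedbackset>
            <targetedfeedback explanation-id="feedback1">
              <div class="detailed-targeted-feedback"><p>Off by one: try counting up.</p></div>
            </targetedfeedback>
            <targetedfeedback explanation-id="feedback2">
              <div class="detailed-targeted-feedback"><p>That would be 2 + 3.</p></div>
            </targetedfeedback>
          </targetedfeedbackset>
          <solution explanation-id="feedbackC">
            <div class="detailed-solution"><p>2 + 2 = 4.</p></div>
          </solution>
        </problem>
    """)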
diff --git a/cms/djangoapps/contentstore/features/problem-editor.py b/cms/djangoapps/contentstore/features/problem-editor.py
index 2265b5010e4c..a68e6d1f73e3 100644
--- a/cms/djangoapps/contentstore/features/problem-editor.py
+++ b/cms/djangoapps/contentstore/features/problem-editor.py
@@ -14,7 +14,7 @@
 PROBLEM_WEIGHT = "Problem Weight"
 RANDOMIZATION = 'Randomization'
 SHOW_ANSWER = "Show Answer"
-
+TIMER_BETWEEN_ATTEMPTS = "Timer Between Attempts"
 
 @step('I have created a Blank Common Problem$')
 def i_created_blank_common_problem(step):
@@ -45,6 +45,7 @@ def i_see_advanced_settings_with_values(step):
         [PROBLEM_WEIGHT, "", False],
         [RANDOMIZATION, "Never", False],
         [SHOW_ANSWER, "Finished", False],
+        [TIMER_BETWEEN_ATTEMPTS, "0", False]
     ])
 
diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py
index 08a223f609b8..f728918bac8b 100644
--- a/common/lib/capa/capa/capa_problem.py
+++ b/common/lib/capa/capa/capa_problem.py
@@ -34,6 +34,8 @@
 from pytz import UTC
 
+from random import Random
+
 # dict of tagname, Response Class -- this should come from auto-registering
 response_tag_dict = dict([(x.response_tag, x) for x in responsetypes.__all__])
 
@@ -381,11 +383,198 @@ def get_answer_ids(self):
             answer_ids.append(results.keys())
         return answer_ids
 
+    def sample_from_answer_pool(self, choices, rnd, num_pool):
+        """
+        Takes in:
+        1. list of choices
+        2. random number generator
+        3. the requested size "answer-pool" number, in effect a max
+
+        Returns a tuple with 2 items:
+        1. the solution_id corresponding to the chosen correct answer
+        2. (subset) list of choice nodes with num_pool - 1 incorrect choices and 1 correct choice
+        """
+        correct_choices = []
+        incorrect_choices = []
+
+        for choice in choices:
+            if choice.get('correct') == 'true':
+                correct_choices.append(choice)
+            else:
+                incorrect_choices.append(choice)
+                # TODO: check if we should require correct == "false"
+
+        # We throw an error if the problem is highly ill-formed.
+        # There must be at least one correct and one incorrect choice.
+        # TODO: perhaps this makes more sense for *all* problems, not just down in this corner.
+        if len(correct_choices) < 1 or len(incorrect_choices) < 1:
+            raise responsetypes.LoncapaProblemError("Choicegroup must include at least 1 correct and 1 incorrect choice")
+
+        # Limit the number of incorrect choices to what we actually have
+        num_incorrect = num_pool - 1
+        num_incorrect = min(num_incorrect, len(incorrect_choices))
+
+        # Select the one correct choice
+        index = rnd.randint(0, len(correct_choices) - 1)
+        correct_choice = correct_choices[index]
+        solution_id = correct_choice.get('explanation-id')
+
+        # Put together the result, pushing most of the work onto rnd.shuffle()
+        subset_choices = [correct_choice]
+        rnd.shuffle(incorrect_choices)
+        subset_choices += incorrect_choices[:num_incorrect]
+        rnd.shuffle(subset_choices)
+
+        return (solution_id, subset_choices)
+
+    def do_answer_pool(self, tree):
+        """
+        Implements the answer-pool subsetting operation in-place on the tree.
+        Allows problems with a pool of answers, from which a subset of the answer
+        options is randomly selected and shuffled before being shown to the student,
+        so that there is always 1 correct answer and n-1 incorrect answers, where
+        the author specifies n as the value of the "answer-pool" attribute on
+        <choicegroup>.
+
+        The <choicegroup> tag must have an attribute 'answer-pool' giving the desired
+        pool size. If that attribute is zero or not present, no operation is performed.
+        Calling this a second time does nothing.
+        """
+        # If called a second time, don't do anything, since it's in-place destructive
+        if hasattr(self, 'answerpool_done'):
+            return
+        self.answerpool_done = True
+
+        choicegroups = tree.xpath("//choicegroup[@answer-pool]")
+
+        # Uses self.seed -- but we want a new subset every time the student reaches
+        # this problem, so the problem's "randomization" should be set to "always"
+        rnd = Random(self.seed)
+
+        for choicegroup in choicegroups:
+            num_str = choicegroup.get('answer-pool')
+            try:
+                num_choices = int(num_str)
+            except ValueError:
+                raise responsetypes.LoncapaProblemError("answer-pool value should be an integer")
+            # answer-pool == 0 disables the feature for this choicegroup; use
+            # continue (not break) so any later choicegroups are still processed
+            if num_choices == 0:
+                continue
+
+            choices_list = list(choicegroup.getchildren())
+
+            # Remove all choices in the choices_list (we will add some back in later)
+            for choice in choices_list:
+                choicegroup.remove(choice)
+
+            # Sample from the answer pool to get the subset choices and solution id
+            (solution_id, subset_choices) = self.sample_from_answer_pool(choices_list, rnd, num_choices)
+
+            # Add back in the randomly selected choices
+            for choice in subset_choices:
+                choicegroup.append(choice)
+
+            # Filter out solutions that don't correspond to the correct answer we selected to show
+            # Note that this means that if the user simply provides a <solution> tag, nothing is filtered
+            solutionset = choicegroup.xpath('../following-sibling::solutionset')
+            if len(solutionset) != 0:
+                solutionset = solutionset[0]
+                solutions = solutionset.xpath('./solution')
+                for solution in solutions:
+                    if solution.get('explanation-id') != solution_id:
+                        solutionset.remove(solution)
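Editorial aside: the subsetting above is easier to see on plain data. The following self-contained sketch (not edX code; the dicts stand in for `<choice>` nodes and their keys mirror the XML attributes) performs the same pick-one-correct-plus-n-1-incorrect operation.

    from random import Random

    def sample_pool(choices, seed, num_pool):
        """Return (solution_id, subset): 1 correct choice plus up to
        num_pool - 1 incorrect ones, shuffled, like sample_from_answer_pool."""
        rnd = Random(seed)
        correct = [c for c in choices if c['correct'] == 'true']
        incorrect = [c for c in choices if c['correct'] != 'true']
        if not correct or not incorrect:
            raise ValueError("need at least 1 correct and 1 incorrect choice")
        pick = correct[rnd.randint(0, len(correct) - 1)]
        rnd.shuffle(incorrect)
        subset = [pick] + incorrect[:num_pool - 1]
        rnd.shuffle(subset)
        return pick.get('explanation-id'), subset

    choices = [
        {'correct': 'false', 'text': 'wrong-1'},
        {'correct': 'false', 'text': 'wrong-2'},
        {'correct': 'true', 'explanation-id': 'solution1', 'text': 'correct-1'},
        {'correct': 'false', 'text': 'wrong-3'},
    ]
    print(sample_pool(choices, seed=723, num_pool=3))

Because the generator is seeded from `self.seed`, the subset only changes between attempts when the problem's "Randomization" setting is "Always", which is why the changelog entry above calls that setting out.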
+    def do_targeted_feedback(self, tree):
+        """
+        Allows problems to show targeted feedback, i.e. choice-level explanations.
+        Targeted feedback is automatically visible after a student has submitted their answers.
+
+        The <multiplechoiceresponse> tag must have an attribute 'targeted-feedback':
+        - if so, this method will modify the tree
+        - if not, this method will not modify the tree
+        - if the value is 'alwaysShowCorrectChoiceExplanation', then the correct-choice
+          explanation will also be automatically visible after a student has submitted answers
+
+        Note that if the value is 'alwaysShowCorrectChoiceExplanation', you probably want to set
+        the "Show Answer" setting to "Never": no solution will show up if the student clicks the
+        "Show Answer" button, so there is no need for the button.
+        """
+        # If called a second time, don't do anything, since it's in-place destructive
+        if hasattr(self, 'targeted_done'):
+            return
+        self.targeted_done = True
+
+        for mult_choice_response in tree.xpath('//multiplechoiceresponse[@targeted-feedback]'):
+            show_explanation = mult_choice_response.get('targeted-feedback') == 'alwaysShowCorrectChoiceExplanation'
+
+            # Grab the first choicegroup (there should only be one within each <multiplechoiceresponse> tag)
+            choicegroup = mult_choice_response.xpath('./choicegroup[@type="MultipleChoice"]')[0]
+            choices_list = list(choicegroup.iter('choice'))
+
+            # Find the student answer key that matches our id
+            student_answer = self.student_answers.get(choicegroup.get('id'))
+            expl_id_for_student_answer = None
+
+            # Keep track of the explanation-id that corresponds to the student's answer
+            # Also, keep track of the solution-id
+            solution_id = None
+            for choice in choices_list:
+                if choice.get('name') == student_answer:
+                    expl_id_for_student_answer = choice.get('explanation-id')
+                if choice.get('correct') == 'true':
+                    solution_id = choice.get('explanation-id')
+
+            # Filter out targetedfeedback that doesn't correspond to the answer the student selected
+            # Note: following-sibling will grab all following siblings, so we just want the first in the list
+            targetedfeedbackset = mult_choice_response.xpath('./following-sibling::targetedfeedbackset')
+            if len(targetedfeedbackset) != 0:
+                targetedfeedbackset = targetedfeedbackset[0]
+                targetedfeedbacks = targetedfeedbackset.xpath('./targetedfeedback')
+                for targetedfeedback in targetedfeedbacks:
+                    # Don't show targeted feedback if the student hasn't answered the problem
+                    # or if the targeted feedback doesn't match the student's (incorrect) answer
+                    if not self.done or targetedfeedback.get('explanation-id') != expl_id_for_student_answer:
+                        targetedfeedbackset.remove(targetedfeedback)
+
+            # Do not displace the solution under these circumstances
+            if not show_explanation or not self.done:
+                continue
+
+            # The next element should either be <solution> or <solutionset>
+            next_element = targetedfeedbackset.getnext()
+            parent_element = tree
+            solution_element = None
+            if next_element.tag == 'solution':
+                solution_element = next_element
+            elif next_element.tag == 'solutionset':
+                solutions = next_element.xpath('./solution')
+                for solution in solutions:
+                    if solution.get('explanation-id') == solution_id:
+                        parent_element = next_element
+                        solution_element = solution
+
+            # If we could not find the solution element, then skip the remaining steps below
+            if solution_element is None:
+                continue
+
+            # Change our correct-choice explanation from a "solution explanation" to one within
+            # the set of targeted feedback, which means the explanation will render on the page
+            # without the student clicking "Show Answer" or seeing a checkmark next to the correct choice
+            parent_element.remove(solution_element)
+
+            # Add our solution instead to the targetedfeedbackset and change its tag name
+            solution_element.tag = 'targetedfeedback'
+            targetedfeedbackset.append(solution_element)
+
     def get_html(self):
         '''
         Main method called externally to get the HTML to be rendered for this capa Problem.
         '''
+        self.do_answer_pool(self.tree)
+        self.do_targeted_feedback(self.tree)
         html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
+
         return html
 
     def handle_input_ajax(self, data):
@@ -562,8 +751,7 @@ def _extract_html(self, problemtree):  # private
         # other than to examine .tag to see if it's a string. :(
             return
 
-        if (problemtree.tag == 'script' and problemtree.get('type')
-            and 'javascript' in problemtree.get('type')):
+        if (problemtree.tag == 'script' and problemtree.get('type') and 'javascript' in problemtree.get('type')):
             # leave javascript intact.
             return deepcopy(problemtree)
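Editorial aside: the core of `do_targeted_feedback` is the filtering step, which is easy to reproduce on a toy tree. The sketch below is standalone (it assumes only that lxml is installed; the ids are invented) and keeps just the `<targetedfeedback>` whose explanation-id matches the choice the student picked.

    from lxml import etree

    xml = etree.XML(
        '<problem>'
        '<targetedfeedbackset>'
        '<targetedfeedback explanation-id="feedback1">wrong-1 feedback</targetedfeedback>'
        '<targetedfeedback explanation-id="feedback2">wrong-2 feedback</targetedfeedback>'
        '</targetedfeedbackset>'
        '</problem>'
    )

    student_explanation_id = 'feedback2'  # explanation-id of the student's choice

    feedbackset = xml.find('targetedfeedbackset')
    for feedback in feedbackset.findall('targetedfeedback'):
        if feedback.get('explanation-id') != student_explanation_id:
            feedbackset.remove(feedback)

    print(etree.tostring(xml))  # only the feedback2 element survives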
diff --git a/common/lib/capa/capa/customrender.py b/common/lib/capa/capa/customrender.py
index f7d586c9d55d..e27dc88a9fcc 100644
--- a/common/lib/capa/capa/customrender.py
+++ b/common/lib/capa/capa/customrender.py
@@ -98,3 +98,41 @@ def get_html(self):
         return etree.XML(html)
 
 registry.register(SolutionRenderer)
+
+#-----------------------------------------------------------------------------
+
+
+class TargetedFeedbackRenderer(object):
+    '''
+    A targeted feedback is just a <targetedfeedback>...</targetedfeedback> element that is used
+    for displaying an extended piece of feedback to students if they incorrectly answered a question.
+    '''
+    tags = ['targetedfeedback']
+
+    def __init__(self, system, xml):
+        self.system = system
+        self.xml = xml
+
+    def get_html(self):
+        """
+        Return the contents of this tag, rendered to html, as an etree element.
+        """
+        html = '<section class="targeted-feedback-span"><span>%s</span></section>' % (
+            etree.tostring(self.xml))
+        try:
+            xhtml = etree.XML(html)
+        except Exception as err:
+            if self.system.DEBUG:
+                msg = '<html><div class="inline-error"><p>Error %s</p>' % (
+                    str(err).replace('<', '&lt;'))
+                msg += ('<p>Failed to construct targeted feedback from <pre>%s</pre></p>' %
+                        html.replace('<', '&lt;'))
+                msg += "</div></html>"
+                log.error(msg)
+                return etree.XML(msg)
+            else:
+                raise
+        return xhtml
+
+registry.register(TargetedFeedbackRenderer)
diff --git a/common/lib/capa/capa/tests/test_answer_pool.py b/common/lib/capa/capa/tests/test_answer_pool.py
new file mode 100644
index 000000000000..456ac74d56b6
--- /dev/null
+++ b/common/lib/capa/capa/tests/test_answer_pool.py
@@ -0,0 +1,559 @@
+"""
+Tests the logic of the "answer-pool" attribute, e.g.
+  <choicegroup answer-pool="4">
+"""
+
+import unittest
+import textwrap
+from . import test_system, new_loncapa_problem
+from capa.responsetypes import LoncapaProblemError
+
+
+class CapaAnswerPoolTest(unittest.TestCase):
+    '''
+    Testing class
+    '''
+
+    def setUp(self):
+        super(CapaAnswerPoolTest, self).setUp()
+        self.system = test_system()
+
+    def test_answer_pool_4_choices_1_multiplechoiceresponse_seed1(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="4">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.seed = 723
+        the_html = problem.get_html()
+        # [('choice_3', u'wrong-3'), ('choice_5', u'correct-2'), ('choice_1', u'wrong-2'), ('choice_4', u'wrong-4')]
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_2'.*\}</div>")
+        # Calling get_html multiple times should yield the same thing
+        the_html2 = problem.get_html()
+        self.assertEquals(the_html, the_html2)
+
+    def test_answer_pool_4_choices_1_multiplechoiceresponse_seed2(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="4">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.seed = 9
+        the_html = problem.get_html()
+        # [('choice_0', u'wrong-1'), ('choice_4', u'wrong-4'), ('choice_3', u'wrong-3'), ('choice_2', u'correct-1')]
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-4'.*'wrong-3'.*'correct-1'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*\}</div>")
+
+    def test_no_answer_pool_4_choices_1_multiplechoiceresponse(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*'1_solution_2'.*\}</div>")
+
+    def test_0_answer_pool_4_choices_1_multiplechoiceresponse(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="0">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*'1_solution_2'.*\}</div>")
+
+    def test_invalid_answer_pool(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="2.3">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        with self.assertRaises(LoncapaProblemError):
+            the_html = problem.get_html()
+
+    def test_answer_pool_5_choices_1_multiplechoiceresponse_seed1(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="5">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.seed = 723
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'correct-2'.*'wrong-1'.*'wrong-2'.*.*'wrong-3'.*'wrong-4'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_2'.*\}</div>")
+
+    def test_answer_pool_2_multiplechoiceresponses_seed1(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="4">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="4">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.seed = 723
+        the_html = problem.get_html()
+
+        str1 = r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>"
+        str2 = r"<div>.*\[.*'wrong-2'.*'wrong-1'.*'correct-2'.*\].*</div>"
+        str3 = r"<div>\{.*'1_solution_2'.*\}</div>"
+        str4 = r"<div>\{.*'1_solution_4'.*\}</div>"
+
+        self.assertRegexpMatches(the_html, str1)
+        self.assertRegexpMatches(the_html, str2)
+        self.assertRegexpMatches(the_html, str3)
+        self.assertRegexpMatches(the_html, str4)
+
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
+        self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
+
+    def test_answer_pool_2_multiplechoiceresponses_seed2(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="4">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="4">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.seed = 9
+        the_html = problem.get_html()
+
+        str1 = r"<div>.*\[.*'wrong-4'.*'wrong-3'.*'correct-1'.*\].*</div>"
+        str2 = r"<div>.*\[.*'wrong-2'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>"
+        str3 = r"<div>\{.*'1_solution_1'.*\}</div>"
+        str4 = r"<div>\{.*'1_solution_4'.*\}</div>"
+
+        self.assertRegexpMatches(the_html, str1)
+        self.assertRegexpMatches(the_html, str2)
+        self.assertRegexpMatches(the_html, str3)
+        self.assertRegexpMatches(the_html, str4)
+
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
+        self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
+
+    def test_answer_pool_and_no_answer_pool(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="4">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.seed = 723
+        the_html = problem.get_html()
+
+        str1 = r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*\].*</div>"
+        str2 = r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>"
+        str3 = r"<div>\{.*'1_solution_1'.*\}</div>"
+        str4 = r"<div>\{.*'1_solution_3'.*\}</div>"
+
+        self.assertRegexpMatches(the_html, str1)
+        self.assertRegexpMatches(the_html, str2)
+        self.assertRegexpMatches(the_html, str3)
+        self.assertRegexpMatches(the_html, str4)
+
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
+        self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
+
+    def test_answer_pool_without_solutionset(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="4">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solution explanation-id="solution1">
+        <div class="detailed-solution">
+            <p>Explanation</p>
+            <p>This is the solution</p>
+            <p>Not much to explain here, sorry!</p>
+        </div>
+        </solution>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.seed = 723
+        the_html = problem.get_html()
+
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*\}</div>")
diff --git a/common/lib/capa/capa/tests/test_targeted_feedback.py b/common/lib/capa/capa/tests/test_targeted_feedback.py
new file mode 100644
index 000000000000..204d68baaeb6
--- /dev/null
+++ b/common/lib/capa/capa/tests/test_targeted_feedback.py
@@ -0,0 +1,617 @@
+"""
+Tests the logic of the "targeted-feedback" attribute for MultipleChoice questions,
+i.e. those with the <multiplechoiceresponse> element
+"""
+
+import unittest
+import textwrap
+from . import test_system, new_loncapa_problem
+
+
+class CapaTargetedFeedbackTest(unittest.TestCase):
+    '''
+    Testing class
+    '''
+
+    def setUp(self):
+        super(CapaTargetedFeedbackTest, self).setUp()
+        self.system = test_system()
+
+    def test_no_targeted_feedback(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice">
+            <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+            <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+            <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+            <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <targetedfeedbackset>
+            <targetedfeedback explanation-id="feedback1">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 1st WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback2">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 2nd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback3">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 3rd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedbackC">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on your correct solution...</p>
+            </div>
+            </targetedfeedback>
+        </targetedfeedbackset>
+
+        <solution explanation-id="feedbackC">
+        <div class="detailed-solution">
+            <p>Explanation</p>
+            <p>This is the solution explanation</p>
+            <p>Not much to explain here, sorry!</p>
+        </div>
+        </solution>
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"<div>.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*</div>")
+        self.assertRegexpMatches(without_new_lines, r"feedback1|feedback2|feedback3|feedbackC")
+
+    def test_targeted_feedback_not_finished(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse targeted-feedback="">
+          <choicegroup type="MultipleChoice">
+            <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+            <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+            <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+            <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <targetedfeedbackset>
+            <targetedfeedback explanation-id="feedback1">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 1st WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback2">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 2nd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback3">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 3rd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedbackC">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on your correct solution...</p>
+            </div>
+            </targetedfeedback>
+        </targetedfeedbackset>
+
+        <solution explanation-id="feedbackC">
+        <div class="detailed-solution">
+            <p>Explanation</p>
+            <p>This is the solution explanation</p>
+            <p>Not much to explain here, sorry!</p>
+        </div>
+        </solution>
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"<div>.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*</div>")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedback3|feedbackC")
+        # Check that calling it multiple times yields the same thing
+        the_html2 = problem.get_html()
+        self.assertEquals(the_html, the_html2)
+
+    def test_targeted_feedback_student_answer1(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse targeted-feedback="">
+          <choicegroup type="MultipleChoice">
+            <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+            <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+            <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+            <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <targetedfeedbackset>
+            <targetedfeedback explanation-id="feedback1">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 1st WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback2">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 2nd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback3">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 3rd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedbackC">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on your correct solution...</p>
+            </div>
+            </targetedfeedback>
+        </targetedfeedbackset>
+
+        <solution explanation-id="feedbackC">
+        <div class="detailed-solution">
+            <p>Explanation</p>
+            <p>This is the solution explanation</p>
+            <p>Not much to explain here, sorry!</p>
+        </div>
+        </solution>
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_3'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r".*3rd WRONG solution")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedbackC")
+        # Check that calling it multiple times yields the same thing
+        the_html2 = problem.get_html()
+        self.assertEquals(the_html, the_html2)
+
+    def test_targeted_feedback_student_answer2(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse targeted-feedback="">
+          <choicegroup type="MultipleChoice">
+            <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+            <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+            <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+            <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <targetedfeedbackset>
+            <targetedfeedback explanation-id="feedback1">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 1st WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback2">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 2nd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback3">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 3rd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedbackC">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on your correct solution...</p>
+            </div>
+            </targetedfeedback>
+        </targetedfeedbackset>
+
+        <solution explanation-id="feedbackC">
+        <div class="detailed-solution">
+            <p>Explanation</p>
+            <p>This is the solution explanation</p>
+            <p>Not much to explain here, sorry!</p>
+        </div>
+        </solution>
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_0'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution")
+        self.assertRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3|feedbackC")
+
+    def test_targeted_feedback_show_solution_explanation(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse targeted-feedback="">
+          <choicegroup type="MultipleChoice">
+            <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+            <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+            <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+            <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <targetedfeedbackset>
+            <targetedfeedback explanation-id="feedback1">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 1st WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback2">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 2nd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback3">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 3rd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedbackC">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on your correct solution...</p>
+            </div>
+            </targetedfeedback>
+        </targetedfeedbackset>
+
+        <solution explanation-id="feedbackC">
+        <div class="detailed-solution">
+            <p>Explanation</p>
+            <p>This is the solution explanation</p>
+            <p>Not much to explain here, sorry!</p>
+        </div>
+        </solution>
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_0'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution")
+        self.assertRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3")
+        # Check that calling it multiple times yields the same thing
+        the_html2 = problem.get_html()
+        self.assertEquals(the_html, the_html2)
+
+    def test_targeted_feedback_no_show_solution_explanation(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse targeted-feedback="alwaysShowCorrectChoiceExplanation">
+          <choicegroup type="MultipleChoice">
+            <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+            <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+            <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+            <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <targetedfeedbackset>
+            <targetedfeedback explanation-id="feedback1">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 1st WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback2">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 2nd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback3">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 3rd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedbackC">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on your correct solution...</p>
+            </div>
+            </targetedfeedback>
+        </targetedfeedbackset>
+
+        <solution>
+        <div class="detailed-solution">
+            <p>Explanation</p>
+            <p>This is the solution explanation</p>
+            <p>Not much to explain here, sorry!</p>
+        </div>
+        </solution>
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_0'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution")
+        self.assertNotRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3|feedbackC")
+
+    def test_targeted_feedback_with_solutionset_explanation(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse targeted-feedback="alwaysShowCorrectChoiceExplanation">
+          <choicegroup type="MultipleChoice">
+            <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+            <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+            <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+            <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+            <choice correct="true" explanation-id="feedbackC2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <targetedfeedbackset>
+            <targetedfeedback explanation-id="feedback1">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 1st WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback2">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 2nd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback3">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 3rd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedbackC">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on your correct solution...</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedbackC2">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on the other solution...</p>
+            </div>
+            </targetedfeedback>
+        </targetedfeedbackset>
+
+        <solutionset>
+            <solution explanation-id="feedbackC">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the solution explanation</p>
+            </div>
+            </solution>
+            <solution explanation-id="feedbackC2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the other solution explanation</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+        </solutionset>
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_0'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution")
+        self.assertRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3")
+
+    def test_targeted_feedback_no_feedback_for_selected_choice1(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse targeted-feedback="">
+          <choicegroup type="MultipleChoice">
+            <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+            <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+            <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+            <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <targetedfeedbackset>
+            <targetedfeedback explanation-id="feedback1">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 1st WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback3">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 3rd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedbackC">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on your correct solution...</p>
+            </div>
+            </targetedfeedback>
+        </targetedfeedbackset>
+
+        <solution explanation-id="feedbackC">
+        <div class="detailed-solution">
+            <p>Explanation</p>
+            <p>This is the solution explanation</p>
+            <p>Not much to explain here, sorry!</p>
+        </div>
+        </solution>
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_1'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback3")
+
+    def test_targeted_feedback_no_feedback_for_selected_choice2(self):
+        xml_str = textwrap.dedent("""
+        <problem>
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse targeted-feedback="alwaysShowCorrectChoiceExplanation">
+          <choicegroup type="MultipleChoice">
+            <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+            <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+            <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+            <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <targetedfeedbackset>
+            <targetedfeedback explanation-id="feedback1">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 1st WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedback3">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 3rd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+            <targetedfeedback explanation-id="feedbackC">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on your correct solution...</p>
+            </div>
+            </targetedfeedback>
+        </targetedfeedbackset>
+
+        <solution>
+        <div class="detailed-solution">
+            <p>Explanation</p>
+            <p>This is the solution explanation</p>
+            <p>Not much to explain here, sorry!</p>
+        </div>
+        </solution>
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_1'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertNotRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback3|feedbackC")
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py
index b78e2a4a5019..f48802a4dc0f 100644
--- a/common/lib/xmodule/xmodule/capa_module.py
+++ b/common/lib/xmodule/xmodule/capa_module.py
@@ -141,6 +141,14 @@ class CapaFields(object):
     student_answers = Dict(help="Dictionary with the current student responses", scope=Scope.user_state)
     done = Boolean(help="Whether the student has answered the problem", scope=Scope.user_state)
     seed = Integer(help="Random seed for this student", scope=Scope.user_state)
+
+    last_submission_time = Date(help="Last submission time", scope=Scope.user_state)
+    submission_wait_seconds = Integer(
+        display_name="Timer Between Attempts",
+        help="Seconds a student must wait between submissions for a problem with multiple attempts.",
+        scope=Scope.settings,
+        default=0)
+
     weight = Float(
         display_name="Problem Weight",
         help=("Defines the number of points each problem is worth. "
@@ -303,6 +311,12 @@ def set_state_from_lcp(self):
         self.student_answers = lcp_state['student_answers']
         self.seed = lcp_state['seed']
 
+    def set_last_submission_time(self):
+        """
+        Set the module's last submission time (when the problem was checked)
+        """
+        self.last_submission_time = datetime.datetime.now(UTC())
+
     def get_score(self):
         """
         Access the problem's score
@@ -894,7 +908,7 @@ def publish_grade(self):
 
         return {'grade': score['score'], 'max_grade': score['total']}
 
-    def check_problem(self, data):
+    def check_problem(self, data, override_time=False):
         """
         Checks whether answers to a problem are correct
 
@@ -909,6 +923,11 @@ def check_problem(self, data):
         answers = self.make_dict_of_responses(data)
         event_info['answers'] = convert_files_to_filenames(answers)
 
+        # The current time can be overridden, e.g. by tests that simulate a later submission
+        current_time = datetime.datetime.now(UTC())
+        if override_time is not False:
+            current_time = override_time
+
         # Too late. Cannot submit
         if self.closed():
             event_info['failure'] = 'closed'
@@ -923,23 +942,31 @@ def check_problem(self, data):
 
         # Problem queued. Students must wait a specified waittime before they are allowed to submit
        if self.lcp.is_queued():
-            current_time = datetime.datetime.now(UTC())
             prev_submit_time = self.lcp.get_recentmost_queuetime()
+
             waittime_between_requests = self.system.xqueue['waittime']
             if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
                 msg = u'You must wait at least {wait} seconds between submissions'.format(
                     wait=waittime_between_requests)
                 return {'success': msg, 'html': ''}  # Prompts a modal dialog in ajax callback
 
+        # Enforce the wait time between submissions of the same problem
+        if self.last_submission_time is not None and self.submission_wait_seconds != 0:
+            if (current_time - self.last_submission_time).total_seconds() < self.submission_wait_seconds:
+                seconds_left = int(self.submission_wait_seconds - (current_time - self.last_submission_time).total_seconds())
+                msg = u'You must wait at least {w} between submissions. <br/>{s} remaining.'.format(
+                    w=self.pretty_print_seconds(self.submission_wait_seconds),
+                    s=self.pretty_print_seconds(seconds_left))
+                return {'success': msg, 'html': ''}  # Prompts a modal dialog in ajax callback
+
         try:
             correct_map = self.lcp.grade_answers(answers)
             self.attempts = self.attempts + 1
             self.lcp.done = True
             self.set_state_from_lcp()
+            self.set_last_submission_time()
 
         except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
-            log.warning("StudentInputError in capa_module:problem_check",
-                        exc_info=True)
+            log.warning("StudentInputError in capa_module:problem_check", exc_info=True)
 
             # Save the user's state before failing
             self.set_state_from_lcp()
@@ -990,9 +1017,31 @@ def check_problem(self, data):
 
         # render problem into HTML
         html = self.get_problem_html(encapsulate=False)
 
-        return {'success': success,
-                'contents': html,
-                }
+        return {'success': success, 'contents': html}
+
+    def pretty_print_seconds(self, num_seconds):
+        """
+        Returns a duration, given in seconds, formatted nicely for display.
+        """
+        if num_seconds < 60:
+            plural = "s" if num_seconds > 1 else ""
+            return "%i second%s" % (num_seconds, plural)
+        elif num_seconds < 60 * 60:
+            min_display = int(num_seconds / 60)
+            sec_display = num_seconds % 60
+            plural = "s" if min_display > 1 else ""
+            if sec_display == 0:
+                return "%i minute%s" % (min_display, plural)
+            else:
+                return "%i min, %i sec" % (min_display, sec_display)
+        else:
+            hr_display = int(num_seconds / 3600)
+            min_display = int((num_seconds % 3600) / 60)
+            sec_display = num_seconds % 60
+            if sec_display == 0:
+                return "%i hr, %i min" % (hr_display, min_display)
+            else:
+                return "%i hr, %i min, %i sec" % (hr_display, min_display, sec_display)
 
     def rescore_problem(self):
         """
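Editorial note: the throttle above reduces to a small pure computation on two timestamps and one setting. The sketch below is a standalone restatement for clarity, not module code, and its names are invented.

    import datetime

    def seconds_remaining(last_submission_time, now, wait_seconds):
        # How many whole seconds the student must still wait; 0 means a
        # submission is allowed. Mirrors the check in CapaModule.check_problem.
        if last_submission_time is None or wait_seconds == 0:
            return 0
        elapsed = (now - last_submission_time).total_seconds()
        return max(0, int(wait_seconds - elapsed))

    last = datetime.datetime(2013, 12, 6, 0, 17, 36)
    now = datetime.datetime(2013, 12, 6, 0, 18, 36)
    print(seconds_remaining(last, now, 180))  # 120: two of the three minutes remain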
diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss
index ddf57e846d37..68dd76b701e1 100644
--- a/common/lib/xmodule/xmodule/css/capa/display.scss
+++ b/common/lib/xmodule/xmodule/css/capa/display.scss
@@ -126,6 +126,23 @@ section.problem {
     }
   }
 
+  .targeted-feedback-span {
+    > span {
+      margin: $baseline 0;
+      display: block;
+      border: 1px solid #000;
+      padding: 9px 15px $baseline;
+      background: #fff;
+      position: relative;
+      box-shadow: inset 0 0 0 1px #eee;
+      border-radius: 3px;
+
+      &:empty {
+        display: none;
+      }
+    }
+  }
+
   div {
     p {
       &.answer {
@@ -628,6 +645,34 @@ section.problem {
     }
   }
 
+  .detailed-targeted-feedback {
+    > p:first-child {
+      color: red;
+      text-transform: uppercase;
+      font-weight: bold;
+      font-style: normal;
+      font-size: 0.9em;
+    }
+
+    p:last-child {
+      margin-bottom: 0;
+    }
+  }
+
+  .detailed-targeted-feedback-correct {
+    > p:first-child {
+      color: green;
+      text-transform: uppercase;
+      font-weight: bold;
+      font-style: normal;
+      font-size: 0.9em;
+    }
+
+    p:last-child {
+      margin-bottom: 0;
+    }
+  }
+
   div.capa_alert {
     margin-top: $baseline;
     padding: 8px 12px;
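Editorial note: as a reference for the message strings asserted in the tests below, here is how the `pretty_print_seconds` rules from capa_module.py above play out on a few durations. This is a standalone restatement for illustration, not the module's code.

    def pretty_seconds(num_seconds):
        # Same formatting rules as CapaModule.pretty_print_seconds.
        if num_seconds < 60:
            return "%i second%s" % (num_seconds, "s" if num_seconds > 1 else "")
        elif num_seconds < 60 * 60:
            minutes, seconds = divmod(num_seconds, 60)
            if seconds == 0:
                return "%i minute%s" % (minutes, "s" if minutes > 1 else "")
            return "%i min, %i sec" % (minutes, seconds)
        hours, rest = divmod(num_seconds, 3600)
        minutes, seconds = divmod(rest, 60)
        if seconds == 0:
            return "%i hr, %i min" % (hours, minutes)
        return "%i hr, %i min, %i sec" % (hours, minutes, seconds)

    assert pretty_seconds(1) == "1 second"
    assert pretty_seconds(60) == "1 minute"
    assert pretty_seconds(121) == "2 min, 1 sec"
    assert pretty_seconds(7200) == "2 hr, 0 min"
    assert pretty_seconds(7263) == "2 hr, 1 min, 3 sec"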
diff --git a/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py b/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py
new file mode 100644
index 000000000000..dadb513ad445
--- /dev/null
+++ b/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py
@@ -0,0 +1,410 @@
+"""
+Tests the logic of problems with a delay between attempt submissions.
+
+Note that this test file is based on test_capa_module.py and, as such,
+uses the same CapaFactory problem setup to test the functionality of the
+check_problem method of a capa module when the "delay between quiz
+submissions" setting is set to different values.
+"""
+
+import unittest
+import textwrap
+import datetime
+
+from mock import Mock
+
+import xmodule
+from xmodule.capa_module import CapaModule
+from xmodule.modulestore import Location
+from xblock.field_data import DictFieldData
+from xblock.fields import ScopeIds
+
+from . import get_test_system
+from pytz import UTC
+
+
+class XModuleQuizAttemptsDelayTest(unittest.TestCase):
+    '''
+    Actual class to test delay between quiz attempts
+    '''
+
+    def test_first_submission(self):
+        # Not attempted yet
+        num_attempts = 0
+
+        # Many attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99, last_submission_time=None)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Expect that we can submit
+        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        result = module.check_problem(get_request_dict)
+
+        # Successfully submitted and answered
+        # Also, the number of attempts should increment by 1
+        self.assertEqual(result['success'], 'correct')
+        self.assertEqual(module.attempts, num_attempts + 1)
+
+    def test_no_wait_time(self):
+        # Already attempted once (just now) and thus has a submission time
+        num_attempts = 1
+        last_submitted_time = datetime.datetime.now(UTC)
+
+        # Many attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99,
+                                    last_submission_time=last_submitted_time, submission_wait_seconds=0)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Expect that we can submit
+        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        result = module.check_problem(get_request_dict)
+
+        # Successfully submitted and answered
+        # Also, the number of attempts should increment by 1
+        self.assertEqual(result['success'], 'correct')
+        self.assertEqual(module.attempts, num_attempts + 1)
+
+    def test_submit_quiz_in_rapid_succession(self):
+        # Already attempted once (just now) and thus has a submission time
+        num_attempts = 1
+        last_submitted_time = datetime.datetime.now(UTC)
+
+        # Many attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99,
+                                    last_submission_time=last_submitted_time, submission_wait_seconds=123)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Check the problem
+        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        result = module.check_problem(get_request_dict)
+
+        # You should get a dialog that tells you to wait
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least.*")
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_too_soon(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+
+        # Specify two times
+        last_submitted_time = datetime.datetime(2013, 12, 6, 0, 17, 36)
+        considered_now = datetime.datetime(2013, 12, 6, 0, 18, 36)
+
+        # Many attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99,
+                                    last_submission_time=last_submitted_time, submission_wait_seconds=180)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Check the problem
+        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        result = module.check_problem(get_request_dict, considered_now)
+
+        # You should get a dialog that tells you to wait 2 more minutes
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least 3 minutes between submissions. <br/>2 minutes remaining\..*")
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_1_second_too_soon(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+
+        # Specify two times
+        last_submitted_time = datetime.datetime(2013, 12, 6, 0, 17, 36)
+        considered_now = datetime.datetime(2013, 12, 6, 0, 20, 35)
+
+        # Many attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99,
+                                    last_submission_time=last_submitted_time, submission_wait_seconds=180)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Check the problem
+        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        result = module.check_problem(get_request_dict, considered_now)
+
+        # You should get a dialog that tells you to wait 1 more second
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least 3 minutes between submissions. <br/>1 second remaining\..*")
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_as_soon_as_allowed(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+
+        # Specify two times
+        last_submitted_time = datetime.datetime(2013, 12, 6, 0, 17, 36)
+        considered_now = datetime.datetime(2013, 12, 6, 0, 20, 36)
+
+        # Many attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99,
+                                    last_submission_time=last_submitted_time, submission_wait_seconds=180)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Expect that we can submit
+        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        result = module.check_problem(get_request_dict, considered_now)
+
+        # Successfully submitted and answered
+        # Also, the number of attempts should increment by 1
+        self.assertEqual(result['success'], 'correct')
+        self.assertEqual(module.attempts, num_attempts + 1)
+
+    def test_submit_quiz_after_delay_expired(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+
+        # Specify two times
+        last_submitted_time = datetime.datetime(2013, 12, 6, 0, 17, 36)
+        considered_now = datetime.datetime(2013, 12, 6, 0, 24, 0)
+
+        # Many attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99,
+                                    last_submission_time=last_submitted_time, submission_wait_seconds=180)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Expect that we can submit
+        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        result = module.check_problem(get_request_dict, considered_now)
+
+        # Successfully submitted and answered
+        # Also, the number of attempts should increment by 1
+        self.assertEqual(result['success'], 'correct')
+        self.assertEqual(module.attempts, num_attempts + 1)
+
+    def test_still_cannot_submit_after_max_attempts(self):
+        # Already attempted the maximum number of times
+        num_attempts = 99
+
+        # Specify two times
+        last_submitted_time = datetime.datetime(2013, 12, 6, 0, 17, 36)
+        considered_now = datetime.datetime(2013, 12, 6, 0, 24, 0)
+
+        # No attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99,
+                                    last_submission_time=last_submitted_time, submission_wait_seconds=180)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Expect that we cannot submit
+        with self.assertRaises(xmodule.exceptions.NotFoundError):
+            get_request_dict = {CapaFactory.input_key(): '3.14'}
+            module.check_problem(get_request_dict, considered_now)
+
+        # Expect that the number of attempts is NOT incremented
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_with_long_delay(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+
+        # Specify two times
+        last_submitted_time = datetime.datetime(2013, 12, 6, 0, 17, 36)
+        considered_now = datetime.datetime(2013, 12, 6, 2, 15, 35)
+
+        # Many attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99,
+                                    last_submission_time=last_submitted_time, submission_wait_seconds=60 * 60 * 2)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Check the problem
+        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        result = module.check_problem(get_request_dict, considered_now)
+
+        # You should get a dialog that tells you to wait 2 min, 1 sec longer
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least 2 hr, 0 min between submissions. <br/>2 min, 1 sec remaining\..*")
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_with_involved_pretty_print(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+
+        # Specify two times
+        last_submitted_time = datetime.datetime(2013, 12, 6, 0, 17, 36)
+        considered_now = datetime.datetime(2013, 12, 6, 1, 15, 40)
+
+        # Many attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99,
+                                    last_submission_time=last_submitted_time, submission_wait_seconds=60 * 60 * 2 + 63)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Check the problem
+        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        result = module.check_problem(get_request_dict, considered_now)
+
+        # You should get a dialog with a fully spelled-out remaining time
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least 2 hr, 1 min, 3 sec between submissions. <br/>1 hr, 2 min, 59 sec remaining\..*")
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_with_nonplural_pretty_print(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+
+        # Specify two times
+        last_submitted_time = datetime.datetime(2013, 12, 6, 0, 17, 36)
+        considered_now = last_submitted_time
+
+        # Many attempts remaining
+        module = CapaFactory.create(attempts=num_attempts, max_attempts=99,
+                                    last_submission_time=last_submitted_time, submission_wait_seconds=60)
+
+        # Simulate that the problem is not completed yet
+        module.done = False
+
+        # Check the problem
+        get_request_dict = {CapaFactory.input_key(): '3.14'}
+        result = module.check_problem(get_request_dict, considered_now)
+
+        # You should get a dialog that uses the singular "1 minute"
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least 1 minute between submissions. <br/>1 minute remaining\..*")
+        self.assertEqual(module.attempts, num_attempts)
+
+
+class CapaFactory(object):
+    """
+    A helper class to create problem modules with various parameters for testing.
+    """
+
+    sample_problem_xml = textwrap.dedent("""\
+        <?xml version="1.0"?>
+        <problem>
+            <text>
+                <p>What is pi, to two decimal places?</p>
+            </text>
+        <numericalresponse answer="3.14">
+          <textline math="1" size="30"/>
+        </numericalresponse>
+        </problem>
+        """)
+
+    num = 0
+
+    @classmethod
+    def next_num(cls):
+        """
+        Return the next cls number
+        """
+        cls.num += 1
+        return cls.num
+
+    @classmethod
+    def input_key(cls, input_num=2):
+        """
+        Return the input key to use when passing GET parameters
+        """
+        return ("input_" + cls.answer_key(input_num))
+
+    @classmethod
+    def answer_key(cls, input_num=2):
+        """
+        Return the key stored in the capa problem answer dict
+        """
+        return (
+            "%s_%d_1" % (
+                "-".join(['i4x', 'edX', 'capa_test', 'problem', 'SampleProblem%d' % cls.num]),
+                input_num,
+            )
+        )
+
+    @classmethod
+    def create(cls,
+               graceperiod=None,
+               due=None,
+               max_attempts=None,
+               showanswer=None,
+               rerandomize=None,
+               force_save_button=None,
+               attempts=None,
+               problem_state=None,
+               correct=False,
+               done=None,
+               text_customization=None,
+               last_submission_time=None,
+               submission_wait_seconds=None
+               ):
+        """
+        All parameters are optional, and are added to the created problem if specified.
+
+        Arguments:
+            graceperiod:
+            due:
+            max_attempts:
+            showanswer:
+            force_save_button:
+            rerandomize: all strings, as specified in the policy for the problem
+
+            problem_state: a dict to be serialized into the instance_state of the
+                module.
+
+            attempts: also added to instance state. Will be converted to an int.
+        """
+        location = Location(["i4x", "edX", "capa_test", "problem",
+                             "SampleProblem{0}".format(cls.next_num())])
+        field_data = {'data': cls.sample_problem_xml}
+
+        if graceperiod is not None:
+            field_data['graceperiod'] = graceperiod
+        if due is not None:
+            field_data['due'] = due
+        if max_attempts is not None:
+            field_data['max_attempts'] = max_attempts
+        if showanswer is not None:
+            field_data['showanswer'] = showanswer
+        if force_save_button is not None:
+            field_data['force_save_button'] = force_save_button
+        if rerandomize is not None:
+            field_data['rerandomize'] = rerandomize
+        if done is not None:
+            field_data['done'] = done
+        if text_customization is not None:
+            field_data['text_customization'] = text_customization
+        if last_submission_time is not None:
+            field_data['last_submission_time'] = last_submission_time
+        if submission_wait_seconds is not None:
+            field_data['submission_wait_seconds'] = submission_wait_seconds
+
+        descriptor = Mock(weight="1")
+        if problem_state is not None:
+            field_data.update(problem_state)
+        if attempts is not None:
+            # converting to int here because I keep putting "0" and "1" in the tests
+            # since everything else is a string.
+            field_data['attempts'] = int(attempts)
+
+        system = get_test_system()
+        system.render_template = Mock(return_value="<div>Test Template HTML</div>")
+        module = CapaModule(
+            descriptor,
+            system,
+            DictFieldData(field_data),
+            ScopeIds(None, None, location, location),
+        )
+
+        if correct:
+            # TODO: probably better to actually set the internal state properly, but...
+            module.get_score = lambda: {'score': 1, 'total': 1}
+        else:
+            module.get_score = lambda: {'score': 0, 'total': 1}
+
+        return module