Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ caniusepython3
diff-cover >= 0.2.1
ddt==0.8.0
tox
hypothesis>=3.33.0,<4.0


# For docs
-r doc/requirements.txt
Expand Down
2 changes: 1 addition & 1 deletion tox.ini
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ deps =
django111: Django>=1.11,<2.0
-rrequirements.txt
commands =
coverage run -m nose
coverage run -m nose {posargs}
py27-django18: make quality
whitelist_externals =
make
Expand Down
2 changes: 1 addition & 1 deletion xblock/VERSION.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
1.0.1
1.1.0
54 changes: 54 additions & 0 deletions xblock/completable.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
"""
This module defines CompletableXBlockMixin and completion mode enumeration.
"""
from __future__ import absolute_import, unicode_literals

class XBlockCompletionMode(object):
    """
    Simple enumeration of the ways an XBlock can participate in completion tracking.
    """
    # The block tracks and reports its own completion state.
    COMPLETABLE = "completable"
    # The block's completion is derived from the completion of its children.
    AGGREGATOR = "aggregator"
    # The block is ignored by completion tracking entirely.
    EXCLUDED = "excluded"


class CompletableXBlockMixin(object):
    """
    This mixin sets attributes and provides helper method to integrate XBlock with Completion API.
    """

    # Signals to the runtime that this block emits its own completion events
    # rather than relying on implicit completion tracking.
    has_custom_completion = True
    completion_method = XBlockCompletionMode.COMPLETABLE

    # To read more on the debate about using the terms percent vs ratio, see:
    # https://openedx.atlassian.net/wiki/spaces/OpenDev/pages/245465398/Naming+with+Percent+or+Ratio
    def emit_completion(self, completion_percent):
        """
        Emits completion event through Completion API.

        Unlike grading API, calling this method allows completion to go down - i.e. emitting a value of 0.0 on
        a previously completed block indicates that it is no longer considered complete.

        Arguments:
            completion_percent (float): Completion in range [0.0; 1.0] (inclusive), where 0.0 means the block
                is not completed, 1.0 means the block is fully completed.

        Returns:
            None

        Raises:
            AttributeError: If a subclass overrode `has_custom_completion` or `completion_method`
                with values that make emitting completion events meaningless.
            ValueError: If `completion_percent` is None, NaN, or outside the [0.0; 1.0] interval.
        """
        # Compare against the shared enumeration constant instead of a duplicated string
        # literal, so this guard stays in sync with the class attribute default above.
        if not self.has_custom_completion or self.completion_method != XBlockCompletionMode.COMPLETABLE:
            raise AttributeError(
                "Using `emit_completion` requires `has_custom_completion == True` (was {}) "
                "and `completion_method == 'completable'` (was {})".format(
                    self.has_custom_completion, self.completion_method
                )
            )

        # The chained comparison is False for NaN, so NaN is rejected here along with
        # out-of-range values; None must be checked explicitly because comparing
        # None against floats raises TypeError on Python 3.
        if completion_percent is None or not 0.0 <= completion_percent <= 1.0:
            raise ValueError("Completion percent must be in [0.0; 1.0] interval, {} given".format(completion_percent))

        self.runtime.publish(
            self,
            'completion',
            {'completion': completion_percent},
        )
121 changes: 121 additions & 0 deletions xblock/test/test_completable.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
"""
Tests of the CompletableXBlockMixin.
"""
from __future__ import absolute_import, unicode_literals

import math
from unittest import TestCase

import mock
from hypothesis import given, example
import hypothesis.strategies as strategies

from xblock.core import XBlock
from xblock.fields import ScopeIds
from xblock.runtime import Runtime
from xblock.completable import CompletableXBlockMixin, XBlockCompletionMode


class CompletableXBlockMixinTest(TestCase):
    """
    Tests for CompletableXBlockMixin.
    """
    class TestBuddyXBlock(XBlock, CompletableXBlockMixin):
        """
        Simple XBlock extending CompletableXBlockMixin.
        """

    class TestIllegalCustomCompletionAttrXBlock(XBlock, CompletableXBlockMixin):
        """
        XBlock extending CompletableXBlockMixin using illegal `has_custom_completion` attribute.
        """
        has_custom_completion = False

    class TestIllegalCompletionMethodAttrXBlock(XBlock, CompletableXBlockMixin):
        """
        XBlock extending CompletableXBlockMixin using illegal `completion_method` attribute.
        """
        completion_method = "something_else"

    def _make_block(self, runtime=None, block_type=None):
        """
        Creates a test block with a mocked runtime.

        Arguments:
            runtime: Runtime mock to attach to the block; a fresh Mock(spec=Runtime) if omitted.
            block_type: XBlock class to instantiate; TestBuddyXBlock if omitted.
        """
        block_type = block_type if block_type else self.TestBuddyXBlock
        runtime = runtime if runtime else mock.Mock(spec=Runtime)
        scope_ids = ScopeIds("user_id", "test_buddy", "def_id", "usage_id")
        return block_type(runtime=runtime, scope_ids=scope_ids)

    def test_has_custom_completion_property(self):
        """
        Test `has_custom_completion` property is set by mixin.
        """
        block = self._make_block()
        self.assertTrue(block.has_custom_completion)
        self.assertTrue(getattr(block, 'has_custom_completion', False))

    def test_completion_method_property(self):
        """
        Test `completion_method` property is set by mixin.
        """
        block = self._make_block()
        self.assertEqual(block.completion_method, XBlockCompletionMode.COMPLETABLE)
        self.assertEqual(getattr(block, 'completion_method', ""), XBlockCompletionMode.COMPLETABLE)

    @given(strategies.floats())
    def test_emit_completion_illegal_custom_completion(self, any_completion):
        """
        Test `emit_completion` raises exception when called on a XBlock with illegal `has_custom_completion` value.
        """
        runtime_mock = mock.Mock(spec=Runtime)
        illegal_custom_completion_block = self._make_block(runtime_mock, self.TestIllegalCustomCompletionAttrXBlock)
        with self.assertRaises(AttributeError):
            illegal_custom_completion_block.emit_completion(any_completion)

    @given(strategies.floats())
    def test_emit_completion_completion_method(self, any_completion):
        """
        Test `emit_completion` raises exception when called on a XBlock with illegal `completion_method` value.
        """
        runtime_mock = mock.Mock(spec=Runtime)
        illegal_completion_method_block = self._make_block(runtime_mock, self.TestIllegalCompletionMethodAttrXBlock)
        with self.assertRaises(AttributeError):
            illegal_completion_method_block.emit_completion(any_completion)

    @given(strategies.floats(min_value=0.0, max_value=1.0))
    @example(1.0)
    @example(0.0)
    def test_emit_completion_emits_event(self, valid_completion_percent):
        """
        Test `emit_completion` emits completion events when passed a valid argument.

        Given a valid completion percent
        When emit_completion is called
        Then runtime.publish is called with expected arguments
        """
        runtime_mock = mock.Mock(spec=Runtime)
        block = self._make_block(runtime_mock)
        block.emit_completion(valid_completion_percent)

        runtime_mock.publish.assert_called_once_with(block, "completion", {"completion": valid_completion_percent})

    @given(strategies.floats().filter(lambda x: math.isnan(x) or x < 0.0 or x > 1.0))
    @example(None)
    @example(float('+inf'))
    @example(float('-inf'))
    def test_emit_completion_raises_value_error_if_invalid(self, invalid_completion_percent):
        """
        Test `emit_completion` raises exception when passed an invalid argument.

        Given an invalid completion percent
            * Less than 0.0
            * Greater than 1.0
            * Positive or negative infinity
            * NaN
        When emit_completion is called
        Then value error is thrown
        """
        runtime_mock = mock.Mock(spec=Runtime)
        block = self._make_block(runtime_mock)
        # The context manager is the assertion; the previous code redundantly passed the
        # call's (never-produced) return value to a bare `self.assertRaises(...)`.
        with self.assertRaises(ValueError):
            block.emit_completion(invalid_completion_percent)
8 changes: 8 additions & 0 deletions xblock/test/test_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -1039,6 +1039,14 @@ class TestBlock(XBlock):
"""An empty XBlock for testing"""
pass

# FIXME: This test is fragile - fails in python27 if a warning it is expecting is emitted before it is executed
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@robrap FYI.

Copy link

@bdero bdero Oct 25, 2017

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The warnings module has different default filter behavior for different warning categories across different versions of python. Most likely this warning filter is getting set to "once" in this case.

I think this can be worked around by calling warnings.resetwarnings() or warnings.simplefilter("always") in the test setup.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks @e-kolpakov. I'm unclear on the scope of the fragility. Will it pass 100% of the time in Jenkins based on the current suite, or due to randomization or other issues, can it sometimes fail?

If it can sometimes fail on Jenkins, we have a new flaky test process: https://openedx.atlassian.net/wiki/spaces/TE/pages/161427235/Updated+Flaky+Test+Process
The options are:

  1. Fix the test, or
  2. Delete the test and create a ticket.

If it is just flaky locally depending on how you are running the tests, then the comment may suffice.

@jmbowman: I feel like you may have been dealing with test flakiness. Do you have any thoughts on this? Thanks.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

TIL. Thanks @bdero

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@jcdyer @bdero No guys, it's not that simple.

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@e-kolpakov Have you tried sliding a warnings.resetwarnings() in there before the catch_warnings line? It's totally possible there's a subtle behavior difference between different versions of python (or a bug in the specific release being used), so hopefully playing around with the warning options will yield some good information on the problem.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@bdero Yes, I've tried (and double checked just now) spamming with resetwarnings and filter("always") with different combinations and at different locations - no help.

Unfortunately, I don't have capacity right now to further investigate this issue, so I'd suggest we just wrap it up as is.

@robrap It's not a flaky test - it is a stable test that's easy to break, since it depends on the order in which tests are executed. In short, it only passes now because test_core is run before any other test that could trigger the deprecation warning. If I understand right, nose runs tests alphabetically, so test_core is run very early - only the asides tests, and now the completion tests, run before it. The asides code has nothing to do with creating XBlocks (where the deprecation warning is raised), and the completion tests also avoid the deprecated feature - though only after I figured out what was wrong with them - while most of the other tests still use it.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@e-kolpakov @robrap I suspect that sometime reasonably soon we'll want to switch this repository to use pytest instead of nose, and make a concerted effort to fix any dependencies between the tests so they can be run in random order. If the tests are stable until the order changes, I think it's probably ok to leave it as is until we tackle that. pytest has some dedicated functionality for testing warnings which may prove useful when we attempt that.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks @e-kolpakov. You may want to add an additional note regarding @jmbowman's response in the comment if you think it would be helpful to someone coming across this. Or, you could leave a link to this discussion regarding how and when it might get fixed.

# warnings module in py27 seems to be intelligent enough to deduplicate warnings raised at the same place. So if
# this test is executed _after_ any test that already raised FieldDataDeprecationWarning in XBlock constructor
# (currently raised in `ScopedStorageMixin`) it will not catch the warning and fail.
# Example: XBlock(runtime, field_data=Whatever(), scopeIds=ScopeIds())
# Note that test_mixins have that pattern in abundance - the only reason it didn't fail earlier is that
# test_mixins are alphabetically preceding
# Related: https://github.com/edx/XBlock/pull/368#discussion_r146740102
def test_field_data_paramater(self):
field_data = Mock(spec=FieldData)
with self.assertWarns(FieldDataDeprecationWarning):
Expand Down