diff --git a/allure-behave/setup.py b/allure-behave/setup.py index 62cb593e..9bb3dc41 100644 --- a/allure-behave/setup.py +++ b/allure-behave/setup.py @@ -12,12 +12,12 @@ 'Topic :: Software Development :: Testing :: BDD', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', ] setup_requires = [ @@ -66,4 +66,3 @@ def main(): if __name__ == '__main__': main() - diff --git a/allure-nose2/setup.py b/allure-nose2/setup.py index 2c64e8ff..6f7a1ec5 100644 --- a/allure-nose2/setup.py +++ b/allure-nose2/setup.py @@ -11,12 +11,12 @@ 'Topic :: Software Development :: Testing', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', ] setup_requires = [ diff --git a/allure-pytest-bdd/setup.py b/allure-pytest-bdd/setup.py index 5f8cf7af..cdf802bc 100644 --- a/allure-pytest-bdd/setup.py +++ b/allure-pytest-bdd/setup.py @@ -10,14 +10,15 @@ 'License :: OSI Approved :: Apache Software License', 'Topic :: Software Development :: Quality Assurance', 'Topic :: Software Development :: Testing', + 'Topic :: Software Development :: Testing :: BDD', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 
'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', ] setup_requires = [ diff --git a/allure-pytest-bdd/src/allure_api_listener.py b/allure-pytest-bdd/src/allure_api_listener.py new file mode 100644 index 00000000..e132e8e2 --- /dev/null +++ b/allure-pytest-bdd/src/allure_api_listener.py @@ -0,0 +1,119 @@ +import pytest + +import allure_commons + +from allure_commons.model2 import Label +from allure_commons.model2 import Link +from allure_commons.model2 import Parameter +from allure_commons.utils import represent + +from .utils import ALLURE_DESCRIPTION_HTML_MARK +from .utils import ALLURE_DESCRIPTION_MARK +from .utils import ALLURE_LABEL_MARK +from .utils import ALLURE_LINK_MARK +from .utils import ALLURE_TITLE_ATTR + +from .utils import apply_link_pattern +from .utils import attach_data +from .utils import attach_file +from .utils import get_link_patterns +from .steps import start_step +from .steps import stop_step + + +class AllurePytestBddApiHooks: + def __init__(self, config, lifecycle): + self.lifecycle = lifecycle + self.__link_patterns = get_link_patterns(config) + + @allure_commons.hookimpl + def decorate_as_title(self, test_title): + + def decorator(fn): + setattr(fn, ALLURE_TITLE_ATTR, test_title) + return fn + + return decorator + + @allure_commons.hookimpl + def add_title(self, test_title): + with self.lifecycle.update_test_case() as test_result: + test_result.name = test_title + + @allure_commons.hookimpl + def decorate_as_description(self, test_description): + allure_description_mark = getattr(pytest.mark, ALLURE_DESCRIPTION_MARK) + return allure_description_mark(test_description) + + @allure_commons.hookimpl + def add_description(self, test_description): + with self.lifecycle.update_test_case() as test_result: + test_result.description = test_description + + @allure_commons.hookimpl + def decorate_as_description_html(self, test_description_html): + allure_description_html_mark = getattr(pytest.mark, 
ALLURE_DESCRIPTION_HTML_MARK) + return allure_description_html_mark(test_description_html) + + @allure_commons.hookimpl + def add_description_html(self, test_description_html): + with self.lifecycle.update_test_case() as test_result: + test_result.descriptionHtml = test_description_html + + @allure_commons.hookimpl + def decorate_as_label(self, label_type, labels): + allure_label_mark = getattr(pytest.mark, ALLURE_LABEL_MARK) + return allure_label_mark(*labels, label_type=label_type) + + @allure_commons.hookimpl + def add_label(self, label_type, labels): + with self.lifecycle.update_test_case() as test_result: + test_result.labels.extend( + Label(name=label_type, value=value) for value in labels or [] + ) + + @allure_commons.hookimpl + def decorate_as_link(self, url, link_type, name): + url = apply_link_pattern(self.__link_patterns, link_type, url) + allure_link_mark = getattr(pytest.mark, ALLURE_LINK_MARK) + return allure_link_mark(url, name=name, link_type=link_type) + + @allure_commons.hookimpl + def add_link(self, url, link_type, name): + url = apply_link_pattern(self.__link_patterns, link_type, url) + with self.lifecycle.update_test_case() as test_result: + test_result.links.append(Link(url=url, name=name, type=link_type)) + + @allure_commons.hookimpl + def add_parameter(self, name, value, excluded, mode): + with self.lifecycle.update_test_case() as test_result: + test_result.parameters.append( + Parameter( + name=name, + value=represent(value), + excluded=excluded, + mode=mode.value if mode else None, + ), + ) + + @allure_commons.hookimpl + def start_step(self, uuid, title, params): + start_step(self.lifecycle, step_uuid=uuid, title=title, params=params) + + @allure_commons.hookimpl + def stop_step(self, uuid, exc_type, exc_val, exc_tb): + stop_step( + self.lifecycle, + uuid, + exception=exc_val, + exception_type=exc_type, + traceback=exc_tb, + ) + + @allure_commons.hookimpl + def attach_data(self, body, name, attachment_type, extension): + 
attach_data(self.lifecycle, body, name, attachment_type, extension) + + @allure_commons.hookimpl + def attach_file(self, source, name, attachment_type, extension): + attach_file(self.lifecycle, source, name, attachment_type, extension) diff --git a/allure-pytest-bdd/src/plugin.py b/allure-pytest-bdd/src/plugin.py index 5d6b8310..521eadc4 100644 --- a/allure-pytest-bdd/src/plugin.py +++ b/allure-pytest-bdd/src/plugin.py @@ -1,8 +1,18 @@ -import allure_commons +import argparse import os + +import allure_commons from allure_commons.logger import AllureFileLogger +from allure_commons.lifecycle import AllureLifecycle + +from .allure_api_listener import AllurePytestBddApiHooks from .pytest_bdd_listener import PytestBDDListener +from .utils import ALLURE_DESCRIPTION_MARK +from .utils import ALLURE_DESCRIPTION_HTML_MARK +from .utils import ALLURE_LABEL_MARK +from .utils import ALLURE_LINK_MARK + def pytest_addoption(parser): parser.getgroup("reporting").addoption('--alluredir', @@ -17,6 +27,27 @@ def pytest_addoption(parser): dest="clean_alluredir", help="Clean alluredir folder if it exists") + def link_pattern(string): + pattern = string.split(':', 1) + if not pattern[0]: + raise argparse.ArgumentTypeError("A link type is mandatory") + + if len(pattern) != 2: + raise argparse.ArgumentTypeError("A link pattern is mandatory") + return pattern + + parser.getgroup("general").addoption( + "--allure-link-pattern", + action="append", + dest="allure_link_pattern", + metavar="LINK_TYPE:LINK_PATTERN", + default=[], + type=link_pattern, + help="""A URL pattern for a link type. Allows short links in tests, + e.g., 'issue-1'. 
`pattern.format(short_url)` will be called to get + the full URL""" + ) + def cleanup_factory(plugin): def clean_up(): @@ -25,18 +56,33 @@ def clean_up(): return clean_up +def register_marks(config): + config.addinivalue_line("markers", f"{ALLURE_DESCRIPTION_MARK}: allure description") + config.addinivalue_line("markers", f"{ALLURE_DESCRIPTION_HTML_MARK}: allure description in HTML") + config.addinivalue_line("markers", f"{ALLURE_LABEL_MARK}: allure label marker") + config.addinivalue_line("markers", f"{ALLURE_LINK_MARK}: allure link marker") + + def pytest_configure(config): + register_marks(config) + report_dir = config.option.allure_report_dir clean = False if config.option.collectonly else config.option.clean_alluredir if report_dir: report_dir = os.path.abspath(report_dir) - pytest_bdd_listener = PytestBDDListener() + lifecycle = AllureLifecycle() + + pytest_bdd_listener = PytestBDDListener(lifecycle) config.pluginmanager.register(pytest_bdd_listener) allure_commons.plugin_manager.register(pytest_bdd_listener) config.add_cleanup(cleanup_factory(pytest_bdd_listener)) + allure_api_impl = AllurePytestBddApiHooks(config, lifecycle) + allure_commons.plugin_manager.register(allure_api_impl) + config.add_cleanup(cleanup_factory(allure_api_impl)) + file_logger = AllureFileLogger(report_dir, clean) allure_commons.plugin_manager.register(file_logger) config.add_cleanup(cleanup_factory(file_logger)) diff --git a/allure-pytest-bdd/src/pytest_bdd_listener.py b/allure-pytest-bdd/src/pytest_bdd_listener.py index d4c73115..d0697380 100644 --- a/allure-pytest-bdd/src/pytest_bdd_listener.py +++ b/allure-pytest-bdd/src/pytest_bdd_listener.py @@ -1,57 +1,79 @@ import pytest -import allure_commons + from allure_commons.utils import now -from allure_commons.utils import uuid4 from allure_commons.model2 import Label from allure_commons.model2 import Status - from allure_commons.types import LabelType, AttachmentType from allure_commons.utils import platform_label from 
allure_commons.utils import host_tag, thread_tag from allure_commons.utils import md5 -from .utils import get_uuid -from .utils import get_step_name -from .utils import get_status_details + +from .steps import get_step_uuid +from .steps import process_gherkin_step_args +from .steps import report_remaining_steps +from .steps import report_undefined_step +from .steps import start_gherkin_step +from .steps import stop_gherkin_step +from .storage import save_excinfo +from .storage import save_test_data +from .utils import attach_data +from .utils import get_allure_description +from .utils import get_allure_description_html +from .utils import get_allure_labels +from .utils import get_allure_links +from .utils import convert_params +from .utils import get_full_name +from .utils import get_outline_params +from .utils import get_pytest_params from .utils import get_pytest_report_status -from allure_commons.model2 import StatusDetails +from .utils import get_scenario_status_details +from .utils import get_test_name +from .utils import get_uuid +from .utils import post_process_test_result + from functools import partial -from allure_commons.lifecycle import AllureLifecycle -from .utils import get_full_name, get_name, get_params class PytestBDDListener: - def __init__(self): - self.lifecycle = AllureLifecycle() + def __init__(self, lifecycle): + self.lifecycle = lifecycle self.host = host_tag() self.thread = thread_tag() - def _scenario_finalizer(self, scenario): - for step in scenario.steps: - step_uuid = get_uuid(str(id(step))) - with self.lifecycle.update_step(uuid=step_uuid) as step_result: - if step_result: - step_result.status = Status.SKIPPED - self.lifecycle.stop_step(uuid=step_uuid) - @pytest.hookimpl def pytest_bdd_before_scenario(self, request, feature, scenario): - uuid = get_uuid(request.node.nodeid) + item = request.node + uuid = get_uuid(item.nodeid) + + outline_params = get_outline_params(item) + pytest_params = get_pytest_params(item) + params = 
{**pytest_params, **outline_params} + + save_test_data( + item=item, + feature=feature, + scenario=scenario, + params=params, + ) + full_name = get_full_name(feature, scenario) - name = get_name(request.node, scenario) with self.lifecycle.schedule_test_case(uuid=uuid) as test_result: test_result.fullName = full_name - test_result.name = name + test_result.name = get_test_name(item, scenario, params) + test_result.description = get_allure_description(item, feature, scenario) + test_result.descriptionHtml = get_allure_description_html(item) test_result.start = now() - test_result.historyId = md5(request.node.nodeid) + test_result.testCaseId = md5(full_name) test_result.labels.append(Label(name=LabelType.HOST, value=self.host)) test_result.labels.append(Label(name=LabelType.THREAD, value=self.thread)) test_result.labels.append(Label(name=LabelType.FRAMEWORK, value="pytest-bdd")) test_result.labels.append(Label(name=LabelType.LANGUAGE, value=platform_label())) - test_result.labels.append(Label(name=LabelType.FEATURE, value=feature.name)) - test_result.parameters = get_params(request.node) + test_result.labels.extend(get_allure_labels(item)) + test_result.links.extend(get_allure_links(item)) + test_result.parameters.extend(convert_params(outline_params, pytest_params)) - finalizer = partial(self._scenario_finalizer, scenario) - request.node.addfinalizer(finalizer) + finalizer = partial(report_remaining_steps, self.lifecycle, item) + item.addfinalizer(finalizer) @pytest.hookimpl def pytest_bdd_after_scenario(self, request, feature, scenario): @@ -61,42 +83,32 @@ def pytest_bdd_after_scenario(self, request, feature, scenario): @pytest.hookimpl def pytest_bdd_before_step(self, request, feature, scenario, step, step_func): - parent_uuid = get_uuid(request.node.nodeid) - uuid = get_uuid(str(id(step))) - with self.lifecycle.start_step(parent_uuid=parent_uuid, uuid=uuid) as step_result: - step_result.name = get_step_name(step) + start_gherkin_step(self.lifecycle, request.node, 
step, step_func) + + @pytest.hookimpl + def pytest_bdd_before_step_call(self, request, feature, scenario, step, step_func, step_func_args): + process_gherkin_step_args(self.lifecycle, request.node, step, step_func, step_func_args) @pytest.hookimpl def pytest_bdd_after_step(self, request, feature, scenario, step, step_func, step_func_args): - uuid = get_uuid(str(id(step))) - with self.lifecycle.update_step(uuid=uuid) as step_result: - step_result.status = Status.PASSED - self.lifecycle.stop_step(uuid=uuid) + stop_gherkin_step(self.lifecycle, request.node, get_step_uuid(step)) @pytest.hookimpl def pytest_bdd_step_error(self, request, feature, scenario, step, step_func, step_func_args, exception): - uuid = get_uuid(str(id(step))) - with self.lifecycle.update_step(uuid=uuid) as step_result: - step_result.status = Status.FAILED - step_result.statusDetails = get_status_details(exception) - self.lifecycle.stop_step(uuid=uuid) + stop_gherkin_step(self.lifecycle, request.node, get_step_uuid(step), exception=exception) @pytest.hookimpl def pytest_bdd_step_func_lookup_error(self, request, feature, scenario, step, exception): - uuid = get_uuid(str(id(step))) - with self.lifecycle.update_step(uuid=uuid) as step_result: - step_result.status = Status.BROKEN - self.lifecycle.stop_step(uuid=uuid) + report_undefined_step(self.lifecycle, request.node, step, exception) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(self, item, call): report = (yield).get_result() - status = get_pytest_report_status(report) + excinfo = call.excinfo - status_details = StatusDetails( - message=call.excinfo.exconly(), - trace=report.longreprtext) if call.excinfo else None + status = get_pytest_report_status(report, excinfo) + status_details = get_scenario_status_details(report, excinfo) uuid = get_uuid(report.nodeid) with self.lifecycle.update_test_case(uuid=uuid) as test_result: @@ -106,28 +118,26 @@ def pytest_runtest_makereport(self, item, call): test_result.statusDetails = 
status_details if report.when == "call" and test_result: - if test_result.status not in [Status.PASSED, Status.FAILED]: + + # Save the exception to access it from the finalizer to report + # the remaining steps + save_excinfo(item, excinfo) + + if test_result.status is None or test_result.status == Status.PASSED: test_result.status = status test_result.statusDetails = status_details if report.when == "teardown" and test_result: - if test_result.status == Status.PASSED and status != Status.PASSED: + if test_result.status == Status.PASSED and status in [Status.FAILED, Status.BROKEN]: test_result.status = status test_result.statusDetails = status_details if report.caplog: - self.attach_data(report.caplog, "log", AttachmentType.TEXT, None) + attach_data(self.lifecycle, report.caplog, "log", AttachmentType.TEXT, None) if report.capstdout: - self.attach_data(report.capstdout, "stdout", AttachmentType.TEXT, None) + attach_data(self.lifecycle, report.capstdout, "stdout", AttachmentType.TEXT, None) if report.capstderr: - self.attach_data(report.capstderr, "stderr", AttachmentType.TEXT, None) + attach_data(self.lifecycle, report.capstderr, "stderr", AttachmentType.TEXT, None) + post_process_test_result(item, test_result) if report.when == 'teardown': self.lifecycle.write_test_case(uuid=uuid) - - @allure_commons.hookimpl - def attach_data(self, body, name, attachment_type, extension): - self.lifecycle.attach_data(uuid4(), body, name=name, attachment_type=attachment_type, extension=extension) - - @allure_commons.hookimpl - def attach_file(self, source, name, attachment_type, extension): - self.lifecycle.attach_file(uuid4(), source, name=name, attachment_type=attachment_type, extension=extension) diff --git a/allure-pytest-bdd/src/steps.py b/allure-pytest-bdd/src/steps.py new file mode 100644 index 00000000..cfc2741b --- /dev/null +++ b/allure-pytest-bdd/src/steps.py @@ -0,0 +1,205 @@ +from allure import attachment_type +from allure_commons.model2 import StatusDetails +from 
allure_commons.model2 import Status +from allure_commons.model2 import Parameter +from allure_commons.utils import format_exception +from allure_commons.utils import represent + +from .storage import get_saved_params +from .storage import get_test_data +from .storage import save_reported_step +from .utils import attach_data +from .utils import format_csv +from .utils import get_allure_title +from .utils import get_uuid +from .utils import get_status +from .utils import get_status_details + + +def get_step_name(item, step, step_func, step_func_args=None): + return get_allure_title_of_step(item, step_func, step_func_args) or \ + f"{step.keyword} {step.name}" + + +def get_allure_title_of_step(item, step_func, step_func_args): + return get_allure_title( + step_func, + { + **(get_saved_params(item) or {}), + **(step_func_args or {}), + }, + ) + + +def get_step_uuid(step): + return get_uuid(str(id(step))) + + +def start_step(lifecycle, step_uuid, title, params=None, parent_uuid=None): + with lifecycle.start_step(uuid=step_uuid, parent_uuid=parent_uuid) as step_result: + step_result.name = title + if params: + step_result.parameters.extend( + Parameter( + name=name, + value=represent(value), + ) for name, value in params.items() + ) + + +def stop_step(lifecycle, uuid, status=None, status_details=None, exception=None, exception_type=None, traceback=None): + with lifecycle.update_step(uuid=uuid) as step_result: + if step_result is None: + return False + step_result.status = status or get_status(exception) + step_result.statusDetails = status_details or get_status_details(exception, exception_type, traceback) + lifecycle.stop_step(uuid=uuid) + return True + + +def start_gherkin_step(lifecycle, item, step, step_func=None, step_uuid=None): + if step_uuid is None: + step_uuid = get_step_uuid(step) + + start_step( + lifecycle, + step_uuid=step_uuid, + title=get_step_name(item, step, step_func), + parent_uuid=get_uuid(item.nodeid), + ) + + +def 
process_gherkin_step_args(lifecycle, item, step, step_func, step_func_args): + allure_step_params = dict(step_func_args) + step_uuid = get_step_uuid(step) + + docstring = step_func_args.get("docstring") + if try_attach_docstring(lifecycle, step_uuid, docstring): + del allure_step_params["docstring"] + + datatable = step_func_args.get("datatable") + if try_attach_datatable(lifecycle, step_uuid, datatable): + del allure_step_params["datatable"] + + add_step_parameters(lifecycle, step_uuid, allure_step_params) + + update_step_name(lifecycle, item, step_uuid, step_func, step_func_args) + + +def try_attach_docstring(lifecycle, step_uuid, docstring): + if isinstance(docstring, str): + attach_data( + lifecycle=lifecycle, + body=docstring, + name="Doc string", + attachment_type=attachment_type.TEXT, + parent_uuid=step_uuid, + ) + return True + return False + + +def try_attach_datatable(lifecycle, step_uuid, datatable): + if is_datatable(datatable): + attach_data( + lifecycle=lifecycle, + body=format_csv(datatable), + name="Data table", + attachment_type=attachment_type.CSV, + parent_uuid=step_uuid, + ) + return True + return False + + +def add_step_parameters(lifecycle, step_uuid, step_params): + if not step_params: + return + + with lifecycle.update_step(uuid=step_uuid) as step_result: + if step_result is None: + return + + step_result.parameters.extend( + Parameter( + name=name, + value=represent(value), + ) for name, value in step_params.items() + ) + + +def update_step_name(lifecycle, item, step_uuid, step_func, step_func_args): + if not step_func_args: + return + + new_name = get_allure_title_of_step(item, step_func, step_func_args) + if new_name is None: + return + + with lifecycle.update_step(uuid=step_uuid) as step_result: + if step_result is not None: + step_result.name = new_name + + +def is_datatable(value): + return isinstance(value, list) and all(isinstance(row, list) for row in value) + + +def stop_gherkin_step(lifecycle, item, step_uuid, **kwargs): + res = 
stop_step(lifecycle, step_uuid, **kwargs) + if res: + save_reported_step(item, step_uuid) + return res + + +def ensure_gherkin_step_reported(lifecycle, item, step, step_uuid=None, **kwargs): + + if not step_uuid: + step_uuid = get_step_uuid(step) + + if stop_gherkin_step(lifecycle, item, step_uuid, **kwargs): + return + + start_gherkin_step(lifecycle, item, step, step_uuid=step_uuid) + stop_gherkin_step(lifecycle, item, step_uuid, **kwargs) + + +def report_undefined_step(lifecycle, item, step, exception): + ensure_gherkin_step_reported( + lifecycle, + item, + step, + status=Status.BROKEN, + status_details=StatusDetails( + message=format_exception(type(exception), exception), + ), + ) + + +def report_remaining_steps(lifecycle, item): + test_data = get_test_data(item) + scenario = test_data.scenario + excinfo = test_data.excinfo + reported_steps = test_data.reported_steps + + for step in scenario.steps: + step_uuid = get_step_uuid(step) + if step_uuid not in reported_steps: + __report_remaining_step(lifecycle, item, step, step_uuid, excinfo) + excinfo = None # Only show the full message and traceback once + + +def __report_remaining_step(lifecycle, item, step, step_uuid, excinfo): + args = [lifecycle, item, step, step_uuid] + kwargs = { + "exception": excinfo.value, + "exception_type": excinfo.type, + "traceback": excinfo.tb, + } if __is_step_running(lifecycle, step_uuid) and excinfo else {"status": Status.SKIPPED} + + ensure_gherkin_step_reported(*args, **kwargs) + + +def __is_step_running(lifecycle, step_uuid): + with lifecycle.update_step(uuid=step_uuid) as step_result: + return step_result is not None diff --git a/allure-pytest-bdd/src/storage.py b/allure-pytest-bdd/src/storage.py new file mode 100644 index 00000000..deb031d6 --- /dev/null +++ b/allure-pytest-bdd/src/storage.py @@ -0,0 +1,42 @@ +import pytest + + +ALLURE_PYTEST_BDD_HASHKEY = pytest.StashKey() + + +class AllurePytestBddTestData: + + def __init__(self, feature, scenario, params): + self.feature = 
feature + self.scenario = scenario + self.params = params + self.excinfo = None + self.reported_steps = set() + + +def save_test_data(item, feature, scenario, params): + item.stash[ALLURE_PYTEST_BDD_HASHKEY] = AllurePytestBddTestData( + feature=feature, + scenario=scenario, + params=params, + ) + + +def get_test_data(item): + return item.stash.get(ALLURE_PYTEST_BDD_HASHKEY, (None, None)) + + +def get_saved_params(item): + return get_test_data(item).params + + +def save_excinfo(item, excinfo): + test_data = get_test_data(item) + if test_data: + test_data.excinfo = excinfo + + +def save_reported_step(item, step_uuid): + test_data = get_test_data(item) + if test_data: + test_data.reported_steps.add(step_uuid) diff --git a/allure-pytest-bdd/src/utils.py b/allure-pytest-bdd/src/utils.py index ac70aac2..43cc74af 100644 --- a/allure-pytest-bdd/src/utils.py +++ b/allure-pytest-bdd/src/utils.py @@ -1,22 +1,167 @@ +import csv +import io import os +from urllib.parse import urlparse from uuid import UUID -from allure_commons.utils import md5 + +import pytest + +from allure_commons.model2 import Label +from allure_commons.model2 import Link from allure_commons.model2 import StatusDetails from allure_commons.model2 import Status from allure_commons.model2 import Parameter +from allure_commons.types import LabelType +from allure_commons.types import LinkType + from allure_commons.utils import format_exception +from allure_commons.utils import format_traceback +from allure_commons.utils import md5 +from allure_commons.utils import represent +from allure_commons.utils import SafeFormatter +from allure_commons.utils import uuid4 +from .storage import get_test_data -def get_step_name(step): - return f"{step.keyword} {step.name}" +ALLURE_TITLE_ATTR = "__allure_display_name__" +ALLURE_DESCRIPTION_MARK = "allure_description" +ALLURE_DESCRIPTION_HTML_MARK = "allure_description_html" +ALLURE_LABEL_MARK = 'allure_label' +ALLURE_LINK_MARK = 'allure_link' +MARK_NAMES_TO_IGNORE = { + 
"usefixtures", + "filterwarnings", + "skip", + "skipif", + "xfail", + "parametrize", +} + + +def get_allure_title_of_test(item, params): + obj = getattr(item, "obj", None) + if obj is not None: + return get_allure_title(obj, params) + + +def get_allure_title(fn, kwargs): + if fn is not None: + title_format = getattr(fn, ALLURE_TITLE_ATTR, None) + if title_format: + return interpolate(title_format, kwargs) + + +def interpolate(format_str, kwargs): + return SafeFormatter().format(format_str, **kwargs) if kwargs else format_str + + +def get_allure_description(item, feature, scenario): + value = get_marker_value(item, ALLURE_DESCRIPTION_MARK) + if value: + return value + + feature_description = resolve_description(feature.description) + scenario_description = resolve_description(scenario.description) + return "\n\n".join(filter(None, [feature_description, scenario_description])) + + +def get_allure_description_html(item): + return get_marker_value(item, ALLURE_DESCRIPTION_HTML_MARK) + + +def iter_all_labels(item): + for mark in item.iter_markers(name=ALLURE_LABEL_MARK): + name = mark.kwargs.get("label_type") + if name: + yield from ((name, value) for value in mark.args or []) + + +def iter_label_values(item, name): + return (pair for pair in iter_all_labels(item) if pair[0] == name) + + +def convert_labels(labels): + return [Label(name, value) for name, value in labels] + + +def get_allure_labels(item): + return convert_labels(iter_all_labels(item)) + + +def iter_all_links(item): + for marker in item.iter_markers(name=ALLURE_LINK_MARK): + url = marker.args[0] if marker and marker.args else None + if url: + yield url, marker.kwargs.get("name"), marker.kwargs.get("link_type") + + +def convert_links(links): + return [Link(url=url, name=name, type=link_type) for url, name, link_type in links] + + +def get_allure_links(item): + return convert_links(iter_all_links(item)) + + +def get_link_patterns(config): + patterns = {} + for link_type, pattern in 
config.option.allure_link_pattern: + patterns[link_type] = pattern + return patterns + + +def is_url(maybeUrl): + try: + result = urlparse(maybeUrl) + except AttributeError: + return False + + return result and ( + getattr(result, "scheme", None) or getattr(result, "netloc", None) + ) + + +def apply_link_pattern(patterns, link_type, url): + if is_url(url): + return url + + pattern = patterns.get(link_type or LinkType.LINK) + return url if pattern is None else pattern.format(url) + + +def get_marker_value(item, keyword): + marker = item.get_closest_marker(keyword) + return marker.args[0] if marker and marker.args else None -def get_name(node, scenario): - if hasattr(node, 'callspec'): - parts = node.nodeid.rsplit("[") - params = parts[-1] - return f"{scenario.name} [{params}" - return scenario.name + +def should_convert_mark_to_tag(mark): + return mark.name not in MARK_NAMES_TO_IGNORE and\ + not mark.args and not mark.kwargs + + +def iter_pytest_tags(item): + for mark in item.iter_markers(): + if should_convert_mark_to_tag(mark): + yield LabelType.TAG, mark.name + + +def resolve_description(description): + if isinstance(description, str): + return description + + if not isinstance(description, list): + return None + + while description and description[0] == "": + description = description[1:] + while description and description[-1] == "": + description = description[:-1] + return "\n".join(description) or None + + +def get_test_name(node, scenario, params): + return get_allure_title_of_test(node, params) or scenario.name def get_full_name(feature, scenario): @@ -28,23 +173,146 @@ def get_uuid(*args): return str(UUID(md5(*args))) -def get_status_details(exception): - message = str(exception) - trace = format_exception(type(exception), exception) +def get_status(exception): + if exception: + if isinstance(exception, (pytest.skip.Exception, pytest.xfail.Exception)): + return Status.SKIPPED + elif isinstance(exception, (AssertionError, pytest.fail.Exception)): + return 
Status.FAILED + return Status.BROKEN + else: + return Status.PASSED + + +def get_status_details(exception, exception_type=None, traceback=None): + if exception_type is None and exception is not None: + exception_type = type(exception) + message = format_exception(exception_type, exception) + trace = format_traceback(traceback or getattr(exception, "__traceback__", None)) return StatusDetails(message=message, trace=trace) if message or trace else None -def get_pytest_report_status(pytest_report): - pytest_statuses = ('failed', 'passed', 'skipped') - statuses = (Status.FAILED, Status.PASSED, Status.SKIPPED) - for pytest_status, status in zip(pytest_statuses, statuses): - if getattr(pytest_report, pytest_status): - return status +def get_pytest_report_status(pytest_report, excinfo): + if pytest_report.failed: + return get_status(excinfo.value) if excinfo else Status.BROKEN + + if pytest_report.passed: + return Status.PASSED + + if pytest_report.skipped: + return Status.SKIPPED + + +def is_runtime_xfail(excinfo): + return isinstance(excinfo.value, pytest.xfail.Exception) + + +def get_scenario_status_details(report, excinfo): + if excinfo: + message = excinfo.exconly() + trace = report.longreprtext + if not is_runtime_xfail(excinfo) and hasattr(report, "wasxfail"): + reason = report.wasxfail + message = (f"XFAIL {reason}" if reason else "XFAIL") + "\n\n" + message + return StatusDetails(message=message, trace=trace) + elif report.passed and hasattr(report, "wasxfail"): + reason = report.wasxfail + return StatusDetails(message=f"XPASS {reason}" if reason else "XPASS") + elif report.failed and "XPASS(strict)" in report.longrepr: + return StatusDetails(message=report.longrepr) + + +def get_outline_params(node): + if hasattr(node, 'callspec'): + return node.callspec.params.get('_pytest_bdd_example', {}) + return {} -def get_params(node): +def get_pytest_params(node): if hasattr(node, 'callspec'): - params = dict(node.callspec.params) - outline_params = 
params.pop('_pytest_bdd_example', {}) - params.update(outline_params) - return [Parameter(name=name, value=value) for name, value in params.items()] + pytest_params = dict(node.callspec.params) + if "_pytest_bdd_example" in pytest_params: + del pytest_params["_pytest_bdd_example"] + return pytest_params + return {} + + +def convert_params(outline_params, pytest_params): + return [ + *(Parameter( + name=name, + value=value, + ) for name, value in outline_params.items()), + *(Parameter( + name=name, + value=represent(value), + ) for name, value in pytest_params.items() if name not in outline_params), + ] + + +def iter_pytest_labels(item, test_result): + test_data = get_test_data(item) + + existing_labels = {label.name for label in test_result.labels} + + if LabelType.FEATURE not in existing_labels: + yield LabelType.FEATURE, test_data.feature.name + + yield from iter_pytest_tags(item) + + +def iter_default_labels(item, test_result): + return ( + Label( + name=name, + value=value, + ) for name, value in iter_pytest_labels(item, test_result) + ) + + +def get_history_id(test_case_id, parameters, pytest_params): + parameters_part = md5(*(pytest_params.get(p.name, p.value) for p in sorted( + filter(lambda p: not p.excluded, parameters), + key=lambda p: p.name, + ))) + return f"{test_case_id}.{parameters_part}" + + +def post_process_test_result(item, test_result): + test_data = get_test_data(item) + + test_result.labels.extend(iter_default_labels(item, test_result)) + test_result.historyId = get_history_id( + test_case_id=test_result.testCaseId, + parameters=test_result.parameters, + pytest_params=test_data.params, + ) + + +def attach_data(lifecycle, body, name, attachment_type, extension=None, parent_uuid=None): + lifecycle.attach_data( + uuid4(), + body, + name=name, + attachment_type=attachment_type, + extension=extension, + parent_uuid=parent_uuid, + ) + + +def attach_file(lifecycle, source, name, attachment_type, extension=None): + lifecycle.attach_file( + uuid4(), + 
source, + name=name, + attachment_type=attachment_type, + extension=extension, + ) + + +def format_csv(rows): + with io.StringIO() as buffer: + writer = csv.writer(buffer) + writer.writerow(rows[0]) + writer.writerows(rows[1:]) + return buffer.getvalue() diff --git a/allure-pytest/setup.py b/allure-pytest/setup.py index f88df22b..6597abf5 100644 --- a/allure-pytest/setup.py +++ b/allure-pytest/setup.py @@ -24,12 +24,12 @@ 'Topic :: Software Development :: Testing', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', ] setup_requires = [ @@ -80,4 +80,3 @@ def main(): if __name__ == '__main__': main() - diff --git a/allure-python-commons-test/setup.py b/allure-python-commons-test/setup.py index 1f8d1aa1..bfcaddca 100644 --- a/allure-python-commons-test/setup.py +++ b/allure-python-commons-test/setup.py @@ -11,12 +11,12 @@ 'Topic :: Software Development :: Testing', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', ] install_requires = [ diff --git a/allure-python-commons-test/src/label.py b/allure-python-commons-test/src/label.py index 12782b06..15d9e3d2 100644 --- a/allure-python-commons-test/src/label.py +++ b/allure-python-commons-test/src/label.py @@ -51,3 +51,11 @@ def has_parent_suite(parent_suite): def has_sub_suite(sub_suite): return has_label('subSuite', sub_suite) + + +def has_allure_id(allure_id): + return 
has_label('as_id', allure_id) + + +def has_manual(allure_id): + return has_label('ALLURE_MANUAL', allure_id) diff --git a/allure-python-commons-test/src/result.py b/allure-python-commons-test/src/result.py index c9c3d18e..84bb6094 100644 --- a/allure-python-commons-test/src/result.py +++ b/allure-python-commons-test/src/result.py @@ -62,10 +62,11 @@ """ -from hamcrest import all_of, anything, not_ -from hamcrest import equal_to, not_none +from hamcrest import all_of, anything, not_, any_of +from hamcrest import equal_to, none, not_none from hamcrest import has_entry, has_item from hamcrest import contains_string +from hamcrest import contains_exactly from allure_commons_test.lookup import maps_to @@ -93,6 +94,13 @@ def has_step(name, *matchers): ) +def with_steps(*matchers): + return has_entry( + "steps", + contains_exactly(*matchers), + ) + + def get_parameter_matcher(name, *matchers): return has_entry( 'parameters', @@ -114,12 +122,21 @@ def has_parameter(name, value, *matchers): def doesnt_have_parameter(name): - return has_entry('parameters', - not_( - has_item( - has_entry('name', equal_to(name)), - ) - )) + return not_( + has_entry( + "parameters", + has_item( + has_entry("name", name), + ), + ), + ) + + +def resolve_link_attr_matcher(key, value): + return has_entry(key, value) if value is not None else any_of( + not_(has_entry(key)), + none(), + ) def has_link(url, link_type=None, name=None): @@ -128,7 +145,7 @@ def has_link(url, link_type=None, name=None): has_item( all_of( *[ - has_entry(key, value) for key, value in zip( + resolve_link_attr_matcher(key, value) for key, value in zip( ('url', 'type', 'name'), (url, link_type, name) ) if value is not None @@ -205,5 +222,5 @@ def with_mode(mode): return has_entry('mode', mode) -def has_history_id(): - return has_entry('historyId', anything()) +def has_history_id(matcher=None): + return has_entry('historyId', matcher or anything()) diff --git a/allure-python-commons/setup.py b/allure-python-commons/setup.py 
index 91a1e1f0..ee645587 100644 --- a/allure-python-commons/setup.py +++ b/allure-python-commons/setup.py @@ -11,12 +11,12 @@ 'Topic :: Software Development :: Testing', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', ] install_requires = [ diff --git a/allure-robotframework/setup.py b/allure-robotframework/setup.py index f333225c..8b194c29 100644 --- a/allure-robotframework/setup.py +++ b/allure-robotframework/setup.py @@ -13,12 +13,12 @@ 'Topic :: Software Development :: Testing', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', ] setup_requires = [ diff --git a/requirements/testing.txt b/requirements/testing.txt index 09919cdf..fcea2af5 100644 --- a/requirements/testing.txt +++ b/requirements/testing.txt @@ -1,6 +1,7 @@ -r ./core.txt docutils mock +packaging poethepoet PyHamcrest Pygments diff --git a/tests/allure_pytest_bdd/acceptance/attachments_test.py b/tests/allure_pytest_bdd/acceptance/attachments_test.py new file mode 100644 index 00000000..907e6604 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/attachments_test.py @@ -0,0 +1,533 @@ +import pytest + +from hamcrest import assert_that +from hamcrest import equal_to +from hamcrest import ends_with +from hamcrest import not_ + +from allure_commons_test.content import csv_equivalent +from allure_commons_test.report import has_test_case +from 
allure_commons_test.result import has_attachment +from allure_commons_test.result import has_attachment_with_content +from allure_commons_test.result import has_step +from allure_commons_test.result import has_parameter +from allure_commons_test.result import doesnt_have_parameter + +from tests.allure_pytest.pytest_runner import AllurePytestRunner +from tests.e2e import version_lt +from tests.e2e import version_gte + + +def test_attach_content_from_scenario_function(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.attach("Lorem Ipsum", name="foo") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_attachment_with_content( + allure_results.attachments, + equal_to("Lorem Ipsum"), + name="foo", + ) + ) + ) + + +def test_attach_file_from_scenario_function(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.attach.file(__file__, name="foo") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_attachment_with_content( + allure_results.attachments, + ends_with("test_attach_file_from_scenario_function.py"), + name="foo", + ) + ) + ) + + +def test_attach_content_from_step(allure_pytest_bdd_runner: 
AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + When data is attached + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, when + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @when("data is attached") + def when_data_is_attached(): + allure.attach("Lorem Ipsum", name="foo") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "When data is attached", + has_attachment_with_content( + allure_results.attachments, + equal_to("Lorem Ipsum"), + name="foo", + ), + ), + ), + ) + + +def test_attach_file_from_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + When a file is attached + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, when + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @when("a file is attached") + def when_file_is_attached(): + allure.attach.file(__file__, name="foo") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "When a file is attached", + has_attachment_with_content( + allure_results.attachments, + ends_with("test_attach_file_from_step.py"), + name="foo", + ), + ), + ), + ) + + +def test_attach_file_from_hook(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + conftest_content = ( + """ + import allure + def pytest_runtest_teardown(item): + 
allure.attach.file(__file__, name="foo") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + conftest_literal=conftest_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_attachment_with_content( + allure_results.attachments, + ends_with("conftest.py"), + name="foo", + ), + ), + ) + + +@pytest.mark.skipif(version_lt("pytest-bdd", 8), reason="Data tables support added in 8.0.0") +def test_attach_datatable(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given a datatable: + | foo | bar | + | baz | qux | + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("a datatable:") + def given_datatable(datatable): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given a datatable:", + has_attachment_with_content( + allure_results.attachments, + csv_equivalent([ + ["foo", "bar"], + ["baz", "qux"], + ]), + name="Data table", + attach_type="text/csv", + ), + doesnt_have_parameter("datatable"), + ), + ), + ) + + +@pytest.mark.skipif(version_gte("pytest-bdd", 8), reason="Pytest-BDD features proper data tables starting from 8.0") +def test_attach_datatable_compat_well_defined(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given a datatable: + | foo | bar | + | baz | qux | + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given, parsers + + def parse_data_table(text): + return [ + [x.strip() for x in line.split("|")] + for line in (x.strip("|") for x in text.splitlines()) + ] + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + 
@given(parsers.parse("a datatable:\\n{datatable:Datatable}", extra_types={"Datatable": parse_data_table})) + def given_datatable(datatable): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given a datatable:\n| foo | bar |\n| baz | qux |", + has_attachment_with_content( + allure_results.attachments, + csv_equivalent([ + ["foo", "bar"], + ["baz", "qux"], + ]), + name="Data table", + attach_type="text/csv", + ), + doesnt_have_parameter("datatable"), + ), + ), + ) + + +@pytest.mark.skipif(version_gte("pytest-bdd", 8), reason="Pytest-BDD features proper data tables starting from 8.0") +def test_attach_datatable_compat_string(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given a datatable: + | foo | bar | + | baz | qux | + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given, parsers + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given(parsers.parse("a datatable:\\n{datatable}")) + def given_datatable(datatable): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given a datatable:\n| foo | bar |\n| baz | qux |", + not_(has_attachment(name="Data table")), + has_parameter("datatable", "'| foo | bar |\n| baz | qux |'"), + ), + ), + ) + + +@pytest.mark.skipif(version_lt("pytest-bdd", 8), reason="Doc strings support added in 8.0.0") +def test_attach_docstring(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + ''' + Feature: Foo + Scenario: Bar + Given a docstring: + """ + Lorem ipsum dolor sit amet, consectetur adipiscing elit + """ + ''' + ) + steps_content = ( + """ + from pytest_bdd import 
scenario, given + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("a docstring:") + def given_docstring(docstring): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given a docstring:", + has_attachment_with_content( + allure_results.attachments, + "Lorem ipsum dolor sit amet, consectetur adipiscing elit", + name="Doc string", + attach_type="text/plain", + ), + doesnt_have_parameter("docstring"), + ), + ), + ) + + +@pytest.mark.skipif(version_gte("pytest-bdd", 8), reason="Pytest-BDD features proper doc strings starting from 8.0") +def test_attach_docstring_compat_well_defined(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + ''' + Feature: Foo + Scenario: Bar + Given a docstring: + """Lorem Ipsum""" + ''' + ) + steps_content = ( + """ + from pytest_bdd import scenario, given, parsers + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given(parsers.parse('a docstring:\\n\"\"\"{docstring}\"\"\"')) + def given_docstring(docstring): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + 'Given a docstring:\n"""Lorem Ipsum"""', + has_attachment_with_content( + allure_results.attachments, + "Lorem Ipsum", + name="Doc string", + attach_type="text/plain", + ), + doesnt_have_parameter("docstring"), + ), + ), + ) + + +@pytest.mark.skipif(version_gte("pytest-bdd", 8), reason="Pytest-BDD features proper doc strings starting from 8.0") +def test_attach_datatable_compat_not_string(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + ''' + Feature: Foo + Scenario: Bar + Given a docstring: + """Lorem Ipsum""" + ''' + ) + steps_content = ( + """ + 
from pytest_bdd import scenario, given, parsers + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given( + parsers.parse( + 'a docstring:\\n\"\"\"{docstring:Converted}\"\"\"', + extra_types={"Converted": lambda _: 0}, + ), + ) + def given_docstring(docstring): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + 'Given a docstring:\n"""Lorem Ipsum"""', + not_(has_attachment(name="Doc string")), + has_parameter("docstring", "0"), + ), + ), + ) diff --git a/tests/allure_pytest_bdd/acceptance/description_test.py b/tests/allure_pytest_bdd/acceptance/description_test.py new file mode 100644 index 00000000..3b6f82e4 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/description_test.py @@ -0,0 +1,385 @@ +from hamcrest import assert_that +from hamcrest import equal_to + +from allure_commons_test.report import has_test_case +from allure_commons_test.result import has_description +from allure_commons_test.result import has_description_html + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_description_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + + This will be overwritten by code + + Scenario: Bar + + This will be overwritten by code + + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.description("Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_description( + equal_to("Lorem Ipsum"), + ) + ) + ) + + +def 
test_description_at_module_level(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenarios, given + import allure + + pytestmark = [allure.description("Lorem Ipsum")] + + scenarios("sample.feature") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_description( + equal_to("Lorem Ipsum"), + ) + ) + ) + + +def test_description_html_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.description_html("Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_description_html( + equal_to("Lorem Ipsum"), + ) + ) + ) + + +def test_description_html_decorator_at_module_level(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenarios, given + import allure + + pytestmark = [allure.description_html("Lorem Ipsum")] + + scenarios("sample.feature") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_description_html( + equal_to("Lorem Ipsum"), + ) + ) + ) + + +def 
test_dynamic_description(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + + This will be overwritten by code + + Scenario: Bar + + This will be overwritten by code + + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.description("This will be overwritten by the runtime API") + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.description("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_description( + equal_to("Lorem Ipsum"), + ) + ) + ) + + +def test_dynamic_description_html(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.description_html("This will be overwritten by the runtime API") + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.description_html("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_description_html( + equal_to("Lorem Ipsum"), + ) + ) + ) + + +def test_scenario_description(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + + Lorem Ipsum + + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", 
feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_description( + equal_to("Lorem Ipsum"), + ) + ) + ) + + +def test_feature_description(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + + Lorem Ipsum + + Scenario: Bar + + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_description( + equal_to("Lorem Ipsum"), + ) + ) + ) + + +def test_feature_and_scenario_description(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + + Lorem Ipsum + + Scenario: Bar + + Dolor Sit Amet + + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_description( + equal_to("Lorem Ipsum\n\nDolor Sit Amet"), + ) + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/__init__.py b/tests/allure_pytest_bdd/acceptance/labels/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/allure_pytest_bdd/acceptance/labels/epics_test.py b/tests/allure_pytest_bdd/acceptance/labels/epics_test.py new file mode 100644 index 00000000..73df3477 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/labels/epics_test.py @@ -0,0 +1,81 @@ +from hamcrest import assert_that + +from allure_commons_test.report import 
has_test_case +from allure_commons_test.label import has_epic + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_epic_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.epic("Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_epic("Lorem Ipsum"), + ) + ) + + +def test_dynamic_epic(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.epic("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_epic("Lorem Ipsum"), + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/features_test.py b/tests/allure_pytest_bdd/acceptance/labels/features_test.py new file mode 100644 index 00000000..554f6fbc --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/labels/features_test.py @@ -0,0 +1,89 @@ +from hamcrest import assert_that +from hamcrest import all_of +from hamcrest import not_ + +from allure_commons_test.report import has_test_case +from allure_commons_test.label import has_feature + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_feature_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + 
feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.feature("Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + all_of( + has_feature("Lorem Ipsum"), + not_(has_feature("Foo")), + ) + ) + ) + + +def test_dynamic_feature(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.feature("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + all_of( + has_feature("Lorem Ipsum"), + not_(has_feature("Foo")), + ) + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/ids_test.py b/tests/allure_pytest_bdd/acceptance/labels/ids_test.py new file mode 100644 index 00000000..685cca27 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/labels/ids_test.py @@ -0,0 +1,81 @@ +from hamcrest import assert_that + +from allure_commons_test.report import has_test_case +from allure_commons_test.label import has_allure_id + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_id_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + 
@allure.id("1009") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_allure_id("1009"), + ) + ) + + +def test_dynamic_id(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.id("1009") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_allure_id("1009"), + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/labels_test.py b/tests/allure_pytest_bdd/acceptance/labels/labels_test.py new file mode 100644 index 00000000..e2890e76 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/labels/labels_test.py @@ -0,0 +1,184 @@ +from hamcrest import assert_that +from hamcrest import equal_to +from hamcrest import all_of +from hamcrest import has_entry +from hamcrest import contains_inanyorder + +from allure_commons_test.report import has_test_case +from allure_commons_test.label import has_label + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_default_labels(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + 
("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_entry( + "labels", + contains_inanyorder( + has_entry("name", "host"), + has_entry("name", "thread"), + all_of( + has_entry("name", "framework"), + has_entry("value", "pytest-bdd"), + ), + has_entry("name", "language"), + all_of( + has_entry("name", "feature"), + has_entry("value", "Foo"), + ), + ), + ), + ) + ) + + +def test_label_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.label("foo", "bar", "baz") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + all_of( + has_label("foo", equal_to("bar")), + has_label("foo", equal_to("baz")), + ), + + ) + ) + + +def test_label_decorator_at_module_level(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenarios, given + import allure + + pytestmark = [allure.label("foo", "bar", "baz")] + + scenarios("sample.feature") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + all_of( + has_label("foo", equal_to("bar")), + has_label("foo", equal_to("baz")), + ), + + ) + ) + + +def test_dynamic_label(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + 
) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.label("foo", "bar", "baz") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + all_of( + has_label("foo", equal_to("bar")), + has_label("foo", equal_to("baz")), + ) + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/manuals_test.py b/tests/allure_pytest_bdd/acceptance/labels/manuals_test.py new file mode 100644 index 00000000..a33b1caa --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/labels/manuals_test.py @@ -0,0 +1,81 @@ +from hamcrest import assert_that + +from allure_commons_test.report import has_test_case +from allure_commons_test.label import has_manual + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_manual_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.manual + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_manual(True), + ) + ) + + +def test_dynamic_manual(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.manual() + + @given("noop") + def given_noop(): + pass 
+ """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_manual(True), + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/parent_suites_test.py b/tests/allure_pytest_bdd/acceptance/labels/parent_suites_test.py new file mode 100644 index 00000000..f0274542 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/labels/parent_suites_test.py @@ -0,0 +1,81 @@ +from hamcrest import assert_that + +from allure_commons_test.report import has_test_case +from allure_commons_test.label import has_parent_suite + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_parent_suite_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.parent_suite("Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_parent_suite("Lorem Ipsum"), + ) + ) + + +def test_dynamic_parent_suite(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.parent_suite("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + 
has_parent_suite("Lorem Ipsum"), + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/severities_test.py b/tests/allure_pytest_bdd/acceptance/labels/severities_test.py new file mode 100644 index 00000000..3624df0b --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/labels/severities_test.py @@ -0,0 +1,86 @@ +import pytest +from hamcrest import assert_that + +import allure + +from allure_commons_test.report import has_test_case +from allure_commons_test.label import has_severity + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +@pytest.mark.parametrize("severity", allure.severity_level) +def test_severity_decorator(allure_pytest_bdd_runner: AllurePytestRunner, severity): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + f""" + from pytest_bdd import scenario, given + import allure + + @allure.severity(allure.severity_level.{severity.name}) + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_severity(severity.value), + ) + ) + + +@pytest.mark.parametrize("severity", allure.severity_level) +def test_dynamic_severity(allure_pytest_bdd_runner: AllurePytestRunner, severity): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + f""" + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.severity(allure.severity_level.{severity.name}) + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + 
has_severity(severity.value), + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/stories_test.py b/tests/allure_pytest_bdd/acceptance/labels/stories_test.py new file mode 100644 index 00000000..3e78d8ec --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/labels/stories_test.py @@ -0,0 +1,81 @@ +from hamcrest import assert_that + +from allure_commons_test.report import has_test_case +from allure_commons_test.label import has_story + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_story_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.story("Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_story("Lorem Ipsum"), + ) + ) + + +def test_dynamic_story(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.story("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_story("Lorem Ipsum"), + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/sub_suites_test.py b/tests/allure_pytest_bdd/acceptance/labels/sub_suites_test.py new file mode 100644 index 00000000..a40c734c --- /dev/null +++ 
b/tests/allure_pytest_bdd/acceptance/labels/sub_suites_test.py @@ -0,0 +1,81 @@ +from hamcrest import assert_that + +from allure_commons_test.report import has_test_case +from allure_commons_test.label import has_sub_suite + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_sub_suite_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.sub_suite("Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_sub_suite("Lorem Ipsum"), + ) + ) + + +def test_dynamic_sub_suite(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.sub_suite("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_sub_suite("Lorem Ipsum"), + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/suites_test.py b/tests/allure_pytest_bdd/acceptance/labels/suites_test.py new file mode 100644 index 00000000..bcc69f6e --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/labels/suites_test.py @@ -0,0 +1,81 @@ +from hamcrest import assert_that + +from allure_commons_test.report import has_test_case +from allure_commons_test.label import has_suite + +from 
tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_suite_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.suite("Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_suite("Lorem Ipsum"), + ) + ) + + +def test_dynamic_suite(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.suite("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_suite("Lorem Ipsum"), + ) + ) diff --git a/tests/allure_pytest_bdd/acceptance/labels/tags_test.py b/tests/allure_pytest_bdd/acceptance/labels/tags_test.py new file mode 100644 index 00000000..f54a9de2 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/labels/tags_test.py @@ -0,0 +1,396 @@ +import pytest + +from hamcrest import assert_that +from hamcrest import not_ +from hamcrest import all_of + +from allure_commons_test.report import has_test_case +from allure_commons_test.label import has_tag + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_tag_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given 
noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.tag("foo", "bar") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_tag("foo"), + has_tag("bar"), + ) + ) + + +def test_dynamic_tag(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.tag("foo", "bar") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_tag("foo"), + has_tag("bar"), + ) + ) + + +def test_pytest_mark_reported(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import pytest + + @pytest.mark.foo + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + conftest_content = ( + """ + def pytest_configure(config): + config.addinivalue_line("markers", f"foo: lorem ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + conftest_literal=conftest_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_tag("foo"), + ) + ) + + +def test_pytest_marks_with_arg_not_reported(allure_pytest_bdd_runner: AllurePytestRunner): + 
feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import pytest + + @pytest.mark.foo("bar") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + conftest_content = ( + """ + def pytest_configure(config): + config.addinivalue_line("markers", f"foo: lorem ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + conftest_literal=conftest_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + not_(has_tag("foo")), + ) + ) + + +def test_pytest_marks_with_kwarg_not_reported(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import pytest + + @pytest.mark.foo(foo="bar") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + conftest_content = ( + """ + def pytest_configure(config): + config.addinivalue_line("markers", f"foo: lorem ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + conftest_literal=conftest_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + not_(has_tag("foo")), + ) + ) + + +# Can't check argless skip/skipif: skipped tests currently not reported +@pytest.mark.parametrize("mark", ["usefixtures", "filterwarnings", "xfail"]) +def test_builtin_pytest_marks_not_reported(allure_pytest_bdd_runner: AllurePytestRunner, mark): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + f""" + from pytest_bdd import scenario, given + import pytest + + @pytest.mark.{mark} + @scenario("sample.feature", "Bar") + 
def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + not_(has_tag(mark)), + ) + ) + + +def test_parametrize_mark_not_reported(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import pytest + + @pytest.mark.parametrize("foo", ["bar"]) + @scenario("sample.feature", "Bar") + def test_scenario(foo): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + not_(has_tag("parametrize")), + ) + ) + + +def test_skipif_mark_not_reported(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import pytest + + @pytest.mark.skipif(False, reason="Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + not_(has_tag("skipif")), + ) + ) + + +def test_gherkin_tags_reported(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + @foo + Feature: Foo + @bar + Scenario: Bar + Given noop + + @baz + Scenario: Baz + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenarios, given + import pytest + + scenarios("sample.feature") + + @given("noop") + def given_noop(): + 
pass + """ + ) + conftest_content = ( + """ + def pytest_configure(config): + config.addinivalue_line("markers", f"foo: foo") + config.addinivalue_line("markers", f"bar: bar") + config.addinivalue_line("markers", f"baz: baz") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + conftest_literal=conftest_content, + ) + + assert_that( + allure_results, + all_of( + has_test_case( + "sample.feature:Bar", + has_tag("foo"), + has_tag("bar"), + ), + has_test_case( + "sample.feature:Baz", + has_tag("foo"), + has_tag("baz"), + ), + ), + ) diff --git a/tests/allure_pytest_bdd/acceptance/links/__init__.py b/tests/allure_pytest_bdd/acceptance/links/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/allure_pytest_bdd/acceptance/links/default_links_test.py b/tests/allure_pytest_bdd/acceptance/links/default_links_test.py new file mode 100644 index 00000000..7d10c707 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/links/default_links_test.py @@ -0,0 +1,268 @@ +from hamcrest import assert_that + +from allure_commons_test.report import has_test_case +from allure_commons_test.result import has_link + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_link_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.link("https://allurereport.org") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_link("https://allurereport.org", link_type="link"), + ), + ) + + +def 
test_link_decorator_at_module_level(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenarios, given + import allure + + pytestmark = [allure.link("https://allurereport.org")] + + scenarios("sample.feature") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_link("https://allurereport.org", link_type="link"), + ), + ) + + +def test_named_link_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.link("https://allurereport.org", name="foo") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_link("https://allurereport.org", link_type="link", name="foo"), + ), + ) + + +def test_custom_type_link_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.link("https://allurereport.org", link_type="foo") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + 
has_link("https://allurereport.org", link_type="foo"), + ), + ) + + +def test_dynamic_link(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.link("https://allurereport.org") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_link("https://allurereport.org", link_type="link"), + ), + ) + + +def test_named_dynamic_link(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.link("https://allurereport.org", name="foo") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_link("https://allurereport.org", link_type="link", name="foo"), + ), + ) + + +def test_custom_type_dynamic_link(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.link("https://allurereport.org", link_type="foo") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + 
assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_link("https://allurereport.org", link_type="foo"), + ), + ) diff --git a/tests/allure_pytest_bdd/acceptance/links/issue_links_test.py b/tests/allure_pytest_bdd/acceptance/links/issue_links_test.py new file mode 100644 index 00000000..2c32440a --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/links/issue_links_test.py @@ -0,0 +1,156 @@ +from hamcrest import assert_that + +from allure_commons_test.report import has_test_case +from allure_commons_test.result import has_issue_link + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_issue_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.issue("https://allurereport.org") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_issue_link("https://allurereport.org"), + ), + ) + + +def test_named_issue_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.issue("https://allurereport.org", name="foo") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_issue_link("https://allurereport.org", name="foo"), + ), + ) + + +def 
test_dynamic_issue(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.issue("https://allurereport.org") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_issue_link("https://allurereport.org"), + ), + ) + + +def test_named_dynamic_issue(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.issue("https://allurereport.org", name="foo") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_issue_link("https://allurereport.org", name="foo"), + ), + ) diff --git a/tests/allure_pytest_bdd/acceptance/links/link_templates_test.py b/tests/allure_pytest_bdd/acceptance/links/link_templates_test.py new file mode 100644 index 00000000..d00d5d67 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/links/link_templates_test.py @@ -0,0 +1,216 @@ +import pytest +from hamcrest import assert_that + +from allure_commons_test.report import has_test_case +from allure_commons_test.result import has_link +from allure_commons_test.result import has_issue_link + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_decorator_link_formatted(allure_pytest_bdd_runner: AllurePytestRunner): + 
feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.issue("726") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + cli_args=["--allure-link-pattern", "issue:https://github.com/allure-framework/allure-python/issues/{}"], + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_issue_link("https://github.com/allure-framework/allure-python/issues/726"), + ), + ) + + +def test_dynamic_link_formatted(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.issue("726") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + cli_args=["--allure-link-pattern", "issue:https://github.com/allure-framework/allure-python/issues/{}"], + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_issue_link("https://github.com/allure-framework/allure-python/issues/726"), + ), + ) + + +def test_type_mismatch_unchanged(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.link("726", link_type="foo") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", 
feature_content), + steps_content, + cli_args=["--allure-link-pattern", "link:https://github.com/allure-framework/allure-python/issues/{}"], + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_link("726", link_type="foo"), + ), + ) + + +def test_multiple_patterns_allowed(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.issue("726", name="issue-726") + @allure.link("pytestbdd", link_type="framework", name="docs") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + cli_args=[ + "--allure-link-pattern", + "framework:https://allurereport.org/docs/{}/", + "--allure-link-pattern", + "issue:https://github.com/allure-framework/allure-python/issues/{}", + ], + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_issue_link("https://github.com/allure-framework/allure-python/issues/726", name="issue-726"), + has_link("https://allurereport.org/docs/pytestbdd/", name="docs", link_type="framework"), + ), + ) + + +@pytest.mark.parametrize("url", [ + "http://foo", + "https://foo", + "ftp://foo", + "file:///foo", + "customapp:custompath?foo=bar&baz=qux", +]) +def test_full_urls_not_formatted(allure_pytest_bdd_runner: AllurePytestRunner, url): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + f""" + from pytest_bdd import scenario, given + import allure + + @allure.link("{url}") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, 
+ cli_args=["--allure-link-pattern", "link:https://allurereport.org/{}/"], + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_link(url), + ), + ) diff --git a/tests/allure_pytest_bdd/acceptance/links/tms_links_test.py b/tests/allure_pytest_bdd/acceptance/links/tms_links_test.py new file mode 100644 index 00000000..137d6208 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/links/tms_links_test.py @@ -0,0 +1,156 @@ +from hamcrest import assert_that + +from allure_commons_test.report import has_test_case +from allure_commons_test.result import has_test_case_link + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_tms_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.testcase("https://allurereport.org") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_test_case_link("https://allurereport.org"), + ), + ) + + +def test_named_tms_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.testcase("https://allurereport.org", name="foo") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + 
has_test_case_link("https://allurereport.org", name="foo"), + ), + ) + + +def test_dynamic_tms(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.testcase("https://allurereport.org") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_test_case_link("https://allurereport.org"), + ), + ) + + +def test_named_dynamic_tms(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.testcase("https://allurereport.org", name="foo") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_test_case_link("https://allurereport.org", name="foo"), + ), + ) diff --git a/tests/allure_pytest_bdd/acceptance/outcomes_test.py b/tests/allure_pytest_bdd/acceptance/outcomes_test.py new file mode 100644 index 00000000..14394817 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/outcomes_test.py @@ -0,0 +1,766 @@ +from hamcrest import assert_that +from hamcrest import not_ +from hamcrest import empty +from hamcrest import all_of +from hamcrest import has_entry +from hamcrest import anything + +from allure_commons_test.report import has_test_case +from allure_commons_test.result import with_status +from 
allure_commons_test.result import has_status_details +from allure_commons_test.result import with_message_contains +from allure_commons_test.result import with_trace_contains + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_passed_scenario(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given pass + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("pass") + def given_pass(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("passed"), + not_(has_status_details()), + ), + ) + + +def test_scenario_fail_in_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given fail + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("fail") + def given_fail(): + assert False + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("failed"), + has_status_details( + with_message_contains("AssertionError: assert False"), + with_trace_contains("def given_fail():"), + ), + ), + ) + + +def test_scenario_fail_in_scenario(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + assert False + + @given("noop") + def given_noop(): + pass + """ + ) + + 
allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("failed"), + has_status_details( + with_message_contains("AssertionError: assert False"), + with_trace_contains("def test_scenario():"), + ), + ), + ) + + +def test_scenario_break_in_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given break + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("break") + def given_break(): + raise ValueError("Lorem Ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("broken"), + has_status_details( + with_message_contains("ValueError: Lorem Ipsum"), + with_trace_contains("def given_break():"), + ), + ), + ) + + +def test_scenario_break_in_scenario(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + raise ValueError("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("broken"), + has_status_details( + with_message_contains("ValueError: Lorem Ipsum"), + with_trace_contains("def test_scenario():"), + ), + ), + ) + + +def test_scenario_skip_in_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + 
Scenario: Bar + Given skip + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("skip") + def given_skip(): + pytest.skip("Lorem Ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("skipped"), + has_status_details( + with_message_contains("Skipped: Lorem Ipsum"), + with_trace_contains("test_scenario_skip_in_step.py"), + ), + ), + ) + + +def test_scenario_skip_in_scenario(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pytest.skip("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("skipped"), + has_status_details( + with_message_contains("Skipped: Lorem Ipsum"), + with_trace_contains("test_scenario_skip_in_scenario.py"), + ), + ), + ) + + +def test_scenario_skip_mark(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @pytest.mark.skip("Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + 
assert_that(allure_results.test_cases, empty()) + + +def test_scenario_xfail_in_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given xfail + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("xfail") + def given_xfail(): + pytest.xfail("Lorem Ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("skipped"), + has_status_details( + all_of( + with_message_contains("XFailed: Lorem Ipsum"), + not_(with_message_contains("XFAIL reason: Lorem Ipsum\n\n")), + ), + with_trace_contains("def given_xfail():"), + ), + ), + ) + + +def test_scenario_xfail_in_scenario(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pytest.xfail("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("skipped"), + has_status_details( + all_of( + with_message_contains("XFailed: Lorem Ipsum"), + not_(with_message_contains("XFAIL reason: Lorem Ipsum\n\n")), + ), + with_trace_contains("def test_scenario():"), + ), + ), + ) + + +def test_scenario_xfail_mark(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import 
allure + + @pytest.mark.xfail(reason="Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + assert False + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("skipped"), + has_status_details( + with_message_contains("XFAIL Lorem Ipsum\n\nAssertionError: assert False"), + with_trace_contains("def test_scenario():"), + ), + ), + ) + + +def test_scenario_xfail_mark_passed(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @pytest.mark.xfail(reason="Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("passed"), + has_status_details( + with_message_contains("XPASS Lorem Ipsum"), + not_(has_entry("trace", anything())), + ), + ), + ) + + +def test_scenario_xfail_mark_strict(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @pytest.mark.xfail(reason="Lorem Ipsum", strict=True) + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + 
with_status("broken"), + has_status_details( + with_message_contains("[XPASS(strict)] Lorem Ipsum"), + not_(has_entry("trace", anything())), + ), + ), + ) + + +def test_passed_setup_teardown(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @pytest.fixture + def setup(): + yield + + @scenario("sample.feature", "Bar") + def test_scenario(setup): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("passed"), + not_(has_status_details()), + ), + ) + + +def test_passed_teardown_not_overwrite_failed_status(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @pytest.fixture + def setup(): + yield + + @scenario("sample.feature", "Bar") + def test_scenario(setup): + assert False + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("failed"), + ), + ) + + +def test_failed_teardown_overwrite_passed_status(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @pytest.fixture + def setup(): + yield + assert False + + @scenario("sample.feature", "Bar") + def test_scenario(setup): + pass + + @given("noop") + def given_noop(): + 
pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("failed"), + ), + ) + + +def test_broken_teardown_overwrite_passed_status(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @pytest.fixture + def setup(): + yield + raise ValueError("Lorem Ipsum") + + @scenario("sample.feature", "Bar") + def test_scenario(setup): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("broken"), + ), + ) + + +def test_skipped_teardown_not_overwrite_passed_status(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @pytest.fixture + def setup(): + yield + pytest.skip("Lorem Ipsum") + + @scenario("sample.feature", "Bar") + def test_scenario(setup): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_status("passed"), + ), + ) diff --git a/tests/allure_pytest_bdd/acceptance/parameters_test.py b/tests/allure_pytest_bdd/acceptance/parameters_test.py new file mode 100644 index 00000000..24f61931 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/parameters_test.py @@ -0,0 +1,415 @@ +from hamcrest import assert_that +from hamcrest import 
all_of +from hamcrest import equal_to +from hamcrest import not_ +from hamcrest import has_length + +from allure_commons_test.report import has_test_case +from allure_commons_test.result import has_parameter +from allure_commons_test.result import with_mode +from allure_commons_test.result import with_excluded +from allure_commons_test.result import has_history_id + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_parameter_added(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.parameter("foo", "bar") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_parameter("foo", "'bar'"), + ), + ) + + +def test_masked_parameter(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.parameter("foo", "bar", mode=allure.parameter_mode.MASKED) + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_parameter("foo", "'bar'", with_mode("masked")), + ), + ) + + +def test_hidden_parameter(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + 
import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.parameter("foo", "bar", mode=allure.parameter_mode.HIDDEN) + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_parameter("foo", "'bar'", with_mode("hidden")), + ), + ) + + +def test_excluded_parameter(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.parameter("foo", "bar", excluded=True) + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_parameter("foo", "'bar'", with_excluded()), + ), + ) + + +def test_parameters_affect_history_id(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + impl_with_no_parameter = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + impl_with_parameter = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.parameter("foo", "bar") + + @given("noop") + def given_noop(): + pass + """ + ) + + results_with_no_parameter = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + impl_with_no_parameter, + ) + + results_with_parameter = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", 
feature_content), + impl_with_parameter, + ) + + assert_that( + results_with_parameter, + has_test_case( + "sample.feature:Bar", + has_history_id( + not_(equal_to(results_with_no_parameter.test_cases[0]["historyId"])), + ), + ), + ) + + +def test_parameters_order_doesnt_matter(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + impl_order1 = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.parameter("baz", "qux") + allure.dynamic.parameter("foo", "bar") + + @given("noop") + def given_noop(): + pass + """ + ) + impl_order2 = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.parameter("foo", "bar") + allure.dynamic.parameter("baz", "qux") + + @given("noop") + def given_noop(): + pass + """ + ) + + results_order1 = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + impl_order1, + ) + + results_order2 = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + impl_order2, + ) + + assert_that( + results_order1, + has_test_case( + "sample.feature:Bar", + has_history_id( + equal_to(results_order2.test_cases[0]["historyId"]), + ), + ), + ) + + +def test_excluded_parameters_doesnt_affect_history_id(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + impl_no_parameter = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + impl_excluded_parameter = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.parameter("foo", "bar", excluded=True) + + @given("noop") + def 
given_noop(): + pass + """ + ) + + results_no_parameter = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + impl_no_parameter, + ) + + results_excluded_parameter = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + impl_excluded_parameter, + ) + + assert_that( + results_no_parameter, + has_test_case( + "sample.feature:Bar", + has_history_id( + equal_to(results_excluded_parameter.test_cases[0]["historyId"]), + ), + ), + ) + + +def test_pytest_parameters_added(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + impl_content = ( + """ + import pytest + from pytest_bdd import scenario, given + + @pytest.mark.parametrize("foo", ["bar", {"baz": "qux"}]) + @scenario("sample.feature", "Bar") + def test_scenario(foo): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + impl_content, + ) + + assert_that( + allure_results, + all_of( + has_test_case( + "sample.feature:Bar", + has_parameter("foo", "'bar'"), + ), + has_test_case( + "sample.feature:Bar", + has_parameter("foo", "{'baz': 'qux'}"), + ), + ), + ) + + +def test_original_pytest_parameter_values_used_to_get_history_id(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + impl_content = ( + """ + import pytest + from pytest_bdd import scenario, given + + @pytest.mark.parametrize("foo", [b"bar", b"baz"]) + @scenario("sample.feature", "Bar") + def test_scenario(foo): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + impl_content, + ) + + history_ids = {tc["historyId"] for tc in allure_results.test_cases} + + assert_that(history_ids, has_length(2)) diff --git 
a/tests/allure_pytest_bdd/acceptance/steps/__init__.py b/tests/allure_pytest_bdd/acceptance/steps/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/allure_pytest_bdd/acceptance/steps/api_steps_test.py b/tests/allure_pytest_bdd/acceptance/steps/api_steps_test.py new file mode 100644 index 00000000..85bd6b8e --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/steps/api_steps_test.py @@ -0,0 +1,380 @@ +from hamcrest import assert_that +from hamcrest import all_of + +from allure_commons_test.report import has_test_case +from allure_commons_test.result import has_title +from allure_commons_test.result import has_step +from allure_commons_test.result import with_steps +from allure_commons_test.result import with_status +from allure_commons_test.result import has_parameter +from allure_commons_test.result import has_status_details +from allure_commons_test.result import with_message_contains +from allure_commons_test.result import with_trace_contains + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_one_context_substep(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given substep + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("substep") + def given_substep(): + with allure.step("foo"): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given substep", + has_step( + "foo", + with_status("passed"), + ), + ), + ), + ) + + +def test_one_function_substep(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given substep + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import 
allure + + @allure.step("foo") + def fn(): + pass + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("substep") + def given_substep(): + fn() + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given substep", + has_step( + "foo", + with_status("passed"), + ), + ), + ), + ) + + +def test_nested_substeps(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given substeps + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.step("foo") + def fn(): + pass + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("substeps") + def given_substeps(): + with allure.step("1"): + with allure.step("1.1"): + pass + with allure.step("1.2"): + pass + with allure.step("2"): + with allure.step("2.1"): + pass + with allure.step("2.2"): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given substeps", + with_steps( + all_of( + has_title("1"), + with_status("passed"), + with_steps( + all_of( + has_title("1.1"), + with_status("passed"), + ), + all_of( + has_title("1.2"), + with_status("passed"), + ), + ), + ), + all_of( + has_title("2"), + with_status("passed"), + with_steps( + all_of( + has_title("2.1"), + with_status("passed"), + ), + all_of( + has_title("2.2"), + with_status("passed"), + ), + ), + ), + ), + ), + ), + ) + + +def test_substep_with_parameters(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + 
@scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("noop") + def given_noop(): + step = allure.step("foo") + step.params = {"foo": "bar", "baz": {"qux": "qut"}} + with step: + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given noop", + has_step( + "foo", + with_status("passed"), + has_parameter("foo", "'bar'"), + has_parameter("baz", "{'qux': 'qut'}"), + ), + ), + ), + ) + + +def test_failed_substep(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given fail + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("fail") + def given_fail(): + with allure.step("foo"): + assert False + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given fail", + has_step( + "foo", + with_status("failed"), + has_status_details( + with_message_contains("AssertionError: assert False"), + with_trace_contains("in given_fail"), + ), + ), + ), + ), + ) + + +def test_broken_substep(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given break + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("break") + def given_break(): + with allure.step("foo"): + raise ValueError("Lorem Ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + 
"sample.feature:Bar", + has_step( + "Given break", + has_step( + "foo", + with_status("broken"), + has_status_details( + with_message_contains("ValueError: Lorem Ipsum"), + with_trace_contains("in given_break"), + ), + ), + ), + ), + ) + + +def test_skipped_substep(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given skip + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("skip") + def given_skip(): + with allure.step("foo"): + pytest.skip("Lorem Ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given skip", + has_step( + "foo", + with_status("skipped"), + has_status_details( + with_message_contains("Skipped: Lorem Ipsum"), + with_trace_contains("in given_skip"), + ), + ), + ), + ), + ) diff --git a/tests/allure_pytest_bdd/acceptance/steps/gherkin_steps_test.py b/tests/allure_pytest_bdd/acceptance/steps/gherkin_steps_test.py new file mode 100644 index 00000000..268e20a3 --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/steps/gherkin_steps_test.py @@ -0,0 +1,529 @@ +from hamcrest import assert_that +from hamcrest import not_ +from hamcrest import all_of + +from allure_commons_test.report import has_test_case +from allure_commons_test.result import has_title +from allure_commons_test.result import has_step +from allure_commons_test.result import has_parameter +from allure_commons_test.result import with_steps +from allure_commons_test.result import with_status +from allure_commons_test.result import has_status_details +from allure_commons_test.result import with_message_contains +from allure_commons_test.result import with_trace_contains + +from tests.allure_pytest.pytest_runner import 
AllurePytestRunner + + +def test_passed_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given pass + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("pass") + def given_pass(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given pass", + with_status("passed"), + not_(has_status_details()), + ), + ), + ) + + +def test_failed_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given fail + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("fail") + def given_fail(): + assert False + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given fail", + with_status("failed"), + has_status_details( + with_message_contains("AssertionError: assert False"), + with_trace_contains("in given_fail"), + ), + ), + ), + ) + + +def test_broken_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given break + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("break") + def given_break(): + raise ValueError("Lorem Ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + 
"sample.feature:Bar", + has_step( + "Given break", + with_status("broken"), + has_status_details( + with_message_contains("ValueError: Lorem Ipsum"), + with_trace_contains("in given_break"), + ), + ), + ), + ) + + +def test_skipped_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given skip + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("skip") + def given_skip(): + pytest.skip("Lorem Ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given skip", + with_status("skipped"), + has_status_details( + with_message_contains("Skipped: Lorem Ipsum"), + with_trace_contains("in given_skip"), + ), + ), + ), + ) + + +def test_xfailed_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given xfail + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("xfail") + def given_xfail(): + pytest.xfail("Lorem Ipsum") + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Given xfail", + with_status("skipped"), + has_status_details( + with_message_contains("XFailed: Lorem Ipsum"), + with_trace_contains("in given_xfail"), + ), + ), + ), + ) + + +def test_remaining_steps_are_reported_after_failed(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given fail + When skip + Then skip + """ + ) + 
steps_content = ( + """ + from pytest_bdd import scenario, given, when, then + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("fail") + def given_fail(): + assert False + + @when("skip") + def when_skip(): + pass + + @then("skip") + def then_skip(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_steps( + has_title("Given fail"), + all_of( + has_title("When skip"), + with_status("skipped"), + not_(has_status_details()), + ), + all_of( + has_title("Then skip"), + with_status("skipped"), + not_(has_status_details()), + ), + ), + ), + ) + + +def test_remaining_steps_are_reported_after_skipped(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given skip + When skip + Then skip + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given, when, then + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("skip") + def given_skip(): + pytest.skip("Lorem Ipsum") + + @when("skip") + def when_skip(): + pass + + @then("skip") + def then_skip(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_steps( + has_title("Given skip"), + all_of( + has_title("When skip"), + with_status("skipped"), + not_(has_status_details()), + ), + all_of( + has_title("Then skip"), + with_status("skipped"), + not_(has_status_details()), + ), + ), + ), + ) + + +def test_remaining_steps_are_reported_after_xfailed(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given xfail + When skip + Then skip + """ + ) + steps_content = ( + """ + 
import pytest + from pytest_bdd import scenario, given, when, then + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("xfail") + def given_xfail(): + pytest.xfail("Lorem Ipsum") + + @when("skip") + def when_skip(): + pass + + @then("skip") + def then_skip(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_steps( + has_title("Given xfail"), + all_of( + has_title("When skip"), + with_status("skipped"), + not_(has_status_details()), + ), + all_of( + has_title("Then skip"), + with_status("skipped"), + not_(has_status_details()), + ), + ), + ), + ) + + +def test_undefined_step(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given unknown + When skip + Then skip + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given, when, then + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @when("skip") + def when_skip(): + pass + + @then("skip") + def then_skip(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_steps( + all_of( + has_title("Given unknown"), + with_status("broken"), + has_status_details( + with_message_contains("Step definition is not found: Given \"unknown\""), + ), + ), + all_of( + has_title("When skip"), + with_status("skipped"), + not_(has_status_details()), + ), + all_of( + has_title("Then skip"), + with_status("skipped"), + not_(has_status_details()), + ), + ), + ), + ) + + +def test_gherkin_step_args(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given a target fixture + Then parameters 
(including 'from step name') are added + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given, then, parsers + import allure + + @pytest.fixture + def foo(): + yield "from fixture" + + @pytest.mark.parametrize("bar", ["from parametrize mark"]) + @scenario("sample.feature", "Bar") + def test_scenario(bar): + pass + + @given("a target fixture", target_fixture="baz") + def given_fixture(): + return "from target fixture" + + @then(parsers.parse("parameters (including '{qux}') are added")) + def then_parameters_added(foo, bar, baz, qux): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step( + "Then parameters (including 'from step name') are added", + has_parameter("foo", "'from fixture'"), + has_parameter("bar", "'from parametrize mark'"), + has_parameter("baz", "'from target fixture'"), + has_parameter("qux", "'from step name'"), + ), + ), + ) diff --git a/tests/allure_pytest_bdd/acceptance/title_test.py b/tests/allure_pytest_bdd/acceptance/title_test.py new file mode 100644 index 00000000..dfb1ce4d --- /dev/null +++ b/tests/allure_pytest_bdd/acceptance/title_test.py @@ -0,0 +1,532 @@ +from hamcrest import assert_that +from hamcrest import anything + +from allure_commons_test.report import has_test_case +from allure_commons_test.result import has_title +from allure_commons_test.result import has_step +from allure_commons_test.result import with_steps + +from tests.allure_pytest.pytest_runner import AllurePytestRunner + + +def test_title_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + from pytest_bdd import scenario, given + import allure + + @allure.title("Lorem Ipsum") + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + 
@given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_title("Lorem Ipsum"), + ), + ) + + +def test_title_interpolations(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario Outline: Bar + Given noop + + Examples: + | bar | + | Ipsum | + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @allure.title("{foo} {bar}") + @pytest.mark.parametrize("foo", ["Lorem"]) + @scenario("sample.feature", "Bar") + def test_scenario(foo): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_title("Lorem Ipsum"), + ), + ) + + +def test_dynamic_title(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @allure.title("This will be overwritten") + @scenario("sample.feature", "Bar") + def test_scenario(): + allure.dynamic.title("Lorem Ipsum") + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_title("Lorem Ipsum"), + ), + ) + + +def test_default_title_or_parametrized_test(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + + 
@pytest.mark.parametrize("foo", ["bar"]) + @scenario("sample.feature", "Bar") + def test_scenario(foo): + pass + + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_title("Bar"), + ), + ) + + +def test_step_title_decorator(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @allure.title("Lorem Ipsum") + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step("Lorem Ipsum"), + ), + ) + + +def test_step_title_interpolation_step_args(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given the 'Lorem' string + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given, parsers + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @allure.title("{foo} Ipsum") + @given(parsers.parse("the '{foo}' string")) + def given_string(foo): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step("Lorem Ipsum"), + ), + ) + + +def test_step_title_interpolation_fixture(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd 
import scenario, given, then, parsers + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @pytest.fixture + def foo(): + yield "Lorem Ipsum" + + @allure.title("{foo}") + @given("noop") + def given_noop(foo): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step("Lorem Ipsum"), + ), + ) + + +def test_step_title_interpolation_target_fixtures(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given a target fixture + Then the value gets interpolated + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given, then, parsers + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @given("a target fixture", target_fixture="foo") + def given_fixture(): + return "Lorem" + + @allure.title("{foo} Ipsum") + @then("the value gets interpolated") + def then_value_interpolated(foo): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_steps( + anything(), + has_title("Lorem Ipsum"), + ), + ), + ) + + +def test_step_title_interpolation_pytest_params_explicit(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @pytest.mark.parametrize("foo", ["Lorem"]) + @scenario("sample.feature", "Bar") + def test_scenario(foo): + pass + + @allure.title("{foo} Ipsum") + @given("noop") + def given_noop(foo): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, 
+ ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step("Lorem Ipsum"), + ), + ) + + +def test_step_title_interpolation_pytest_params_implicit(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario: Bar + Given noop + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @pytest.mark.parametrize("foo", ["Lorem"]) + @scenario("sample.feature", "Bar") + def test_scenario(foo): + pass + + @allure.title("{foo} Ipsum") + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step("Lorem Ipsum"), + ), + ) + + +def test_step_title_interpolation_outline_params(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario Outline: Bar + Given noop + + Examples: + | foo | bar | + | Lorem | Ipsum | + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import scenario, given + import allure + + @scenario("sample.feature", "Bar") + def test_scenario(): + pass + + @allure.title("{foo} {bar}") + @given("noop") + def given_noop(): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + has_step("Lorem Ipsum"), + ), + ) + + +def test_step_title_interpolation_priority(allure_pytest_bdd_runner: AllurePytestRunner): + feature_content = ( + """ + Feature: Foo + Scenario Outline: Bar + Given target fixture + Then value 'Lorem Ipsum' received + Then target fixture received + Then outline param used + Then pytest param used + + Examples: + | foo | bar | + | Outline | Outline | + """ + ) + steps_content = ( + """ + import pytest + from pytest_bdd import 
scenario, given, then, parsers + import allure + + @pytest.mark.parametrize(["foo", "bar"], [("Mark", "Mark")]) + @scenario("sample.feature", "Bar") + def test_scenario(foo, bar): + pass + + @given("target fixture", target_fixture="foo") + def given_target_fixture(): + return "Target Fixture" + + @allure.title("{foo}") + @then(parsers.parse("value '{foo}' received")) + def then_value_received(foo): + pass + + @allure.title("{foo}") + @then("target fixture received") + def then_target_fixture_received(foo): + pass + + @allure.title("{foo}") + @then("outline param used") + def then_outline_param_used(): + pass + + @allure.title("{bar}") + @then("pytest param used") + def then_pytest_param_used(bar): + pass + """ + ) + + allure_results = allure_pytest_bdd_runner.run_pytest( + ("sample.feature", feature_content), + steps_content, + ) + + assert_that( + allure_results, + has_test_case( + "sample.feature:Bar", + with_steps( + anything(), + has_title("Lorem Ipsum"), + has_title("Target Fixture"), + has_title("Outline"), + has_title("Mark"), + ), + ), + ) diff --git a/tests/e2e.py b/tests/e2e.py index 0280361a..5453e6fa 100644 --- a/tests/e2e.py +++ b/tests/e2e.py @@ -13,6 +13,9 @@ import warnings from abc import abstractmethod from contextlib import contextmanager, ExitStack +from functools import lru_cache +from importlib.metadata import version as get_version_metadata +from packaging.version import parse as parse_version from pathlib import Path from pytest import FixtureRequest, Pytester, MonkeyPatch from typing import Tuple, Mapping, TypeVar, Generator, Callable, Union @@ -22,6 +25,37 @@ from allure_commons_test.report import AllureReport + + +@lru_cache(maxsize=None) +def version(package: str): + return parse_version(get_version_metadata(package)) + + +@lru_cache(maxsize=None) +def version_lt(package: str, major: int, minor: int = 0, micro: int = 0): + + """Returns `True` if the version of the package doesn't meet the specified requirements. 
+ + You may call this function in a @pytest.mark.skipif condition. + """ + + package_version = version(package) + req = (major, minor, micro) + if package_version.release == req: + return package_version.is_prerelease + return package_version.release < req + + +@lru_cache(maxsize=None) +def version_gte(package: str, major: int, minor: int = 0, micro: int = 0): + + """Returns `True` if the version of the package meets the specified requirements. + + You may call this function in a @pytest.mark.skipif condition. + """ + + return not version_lt(package, major, minor, micro) + + PathlikeT = Union[str, Path]