diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 41bd025527..b0aeae44cc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -20,8 +20,15 @@ short summary of the most important parts:
 * Always use `self` for the first argument to instance methods.
 * Always use `cls` for the first argument to class methods.
 * Use one leading underscore only for non-public methods and instance variables,
-  such as `_data`.
+  such as `_data`. Do not activate name mangling with `__` unless necessary.
+* If there is a pair of `get_x` and `set_x` methods, they should instead be a
+  proper property, which is easy to do with the built-in `@property` decorator.
 * Constants should be `CAPITALIZED_SNAKE_CASE`.
+* When importing a function, try to avoid renaming it with `import as` because
+  it introduces the cognitive overhead of tracking yet another name.
+* When subclassing another module’s class (such as `unittest.TestCase`), reuse
+  the base class’s name to avoid confusion, as in `LisaTestCase`, instead of
+  introducing a name with a different connotation, like `TestSuite`.
 
 When in doubt, adhere to existing conventions, or check the style guide.
 
@@ -252,6 +259,18 @@ Python world. If you make it through even some of these
 guides, you will be well on your way to being a “Pythonista” (a Python
 developer) writing “Pythonic” (canonically correct Python) code left and right.
 
+### Async IO
+
+Since Python 3.4, the Async IO pattern found in languages such as C# and Go has
+been available through the `asyncio` module, with the `async` and `await`
+keywords arriving in Python 3.5. Please read [Async IO in Python: A Complete
+Walkthrough](https://realpython.com/async-io-python/) to understand at a high
+level how asynchronous programming works. As of Python 3.7, one major “gotcha”
+is that `asyncio.run(...)` should be called [exactly once in
+`main`](https://docs.python.org/3/library/asyncio-task.html) because it starts
+the event loop. Everything else should be a coroutine or task which the event
+loop schedules.
+
 ## Future Sections
 
 Just a collection of reminders for the author to expand on later.
diff --git a/Makefile b/Makefile
index 11f5b1368b..d5dbe13404 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 # This Makefile simply automates all our tasks. Its use is optional.
 
-all: setup run check
+all: setup run test check
 
 # Install Python packages
 setup:
@@ -8,11 +8,11 @@ setup:
 
 # Run LISAv3
 run:
-	@poetry run python lisa/main.py --debug
+	@poetry run python -X dev lisa/main.py --debug
 
 # Run unit tests
 test:
-	@poetry run python -m unittest discover lisa
+	@poetry run python -X dev -m unittest discover -v lisa
 
 # Generate coverage report (slow, reruns LISAv3 and tests)
 coverage:
diff --git a/examples/testsuites/helloworld.py b/examples/testsuites/helloworld.py
index fa03005fce..a3d867da01 100644
--- a/examples/testsuites/helloworld.py
+++ b/examples/testsuites/helloworld.py
@@ -1,9 +1,9 @@
-from lisa import TestCaseMetadata, TestSuite, TestSuiteMetadata
+from lisa import LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata
 from lisa.operating_system import Linux
 from lisa.tools import Echo, Uname
 
 
-@TestSuiteMetadata(
+@LisaTestCaseMetadata(
     area="demo",
     category="simple",
     description="""
@@ -12,8 +12,8 @@
     """,
     tags=["demo"],
 )
-class HelloWorld(TestSuite):
-    @TestCaseMetadata(
+class HelloWorld(LisaTestCase):
+    @LisaTestMetadata(
         description="""
         this test case use default node to 1.
        get system info
@@ -43,7 +43,7 @@ def hello(self) -> None:
         self.assertEqual("", result.stderr)
         self.assertEqual(0, result.exit_code)
 
-    @TestCaseMetadata(
+    @LisaTestMetadata(
         description="""
         demonstrate a simple way to run command in one line.
         """,
diff --git a/examples/testsuites/multinodes.py b/examples/testsuites/multinodes.py
index e42f754e37..becf0f36c5 100644
--- a/examples/testsuites/multinodes.py
+++ b/examples/testsuites/multinodes.py
@@ -1,9 +1,9 @@
-from lisa import TestCaseMetadata, TestSuite, TestSuiteMetadata
+from lisa import LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata
 from lisa.testsuite import simple_requirement
 from lisa.tools import Lscpu, Ntttcp
 
 
-@TestSuiteMetadata(
+@LisaTestCaseMetadata(
     area="demo",
     category="demo",
     description="""
@@ -13,8 +13,8 @@
     tags=["demo", "multinode"],
     requirement=simple_requirement(min_count=2),
 )
-class MutipleNodesDemo(TestSuite):
-    @TestCaseMetadata(
+class MultipleNodesDemo(LisaTestCase):
+    @LisaTestMetadata(
         description="""
         This test case send and receive data by ntttcp
         """,
@@ -28,7 +28,7 @@ def os_info(self) -> None:
             core_count = lscpu.get_core_count()
             self.log.info(f"index: {node.index}, core_count: {core_count}")
 
-    @TestCaseMetadata(
+    @LisaTestMetadata(
         description="""
         this test case send and receive data by ntttcp
         """,
diff --git a/examples/testsuites/withscript.py b/examples/testsuites/withscript.py
index a25b2309ea..76883c984d 100644
--- a/examples/testsuites/withscript.py
+++ b/examples/testsuites/withscript.py
@@ -1,13 +1,13 @@
 from pathlib import Path
 
-from lisa import TestCaseMetadata, TestSuite, TestSuiteMetadata
+from lisa import LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata
 from lisa.executable import CustomScript, CustomScriptBuilder
 from lisa.operating_system import Windows
 from lisa.testsuite import simple_requirement
 from lisa.util.perf_timer import create_timer
 
 
-@TestSuiteMetadata(
+@LisaTestCaseMetadata(
     area="demo",
     category="simple",
     description="""
@@ -15,13 +15,13 @@
     """,
     tags=["demo"],
 )
-class WithScript(TestSuite):
+class WithScript(LisaTestCase):
     def before_suite(self) -> None:
         self._echo_script = CustomScriptBuilder(
             Path(__file__).parent.joinpath("scripts"), ["echo.sh"]
         )
 
-    @TestCaseMetadata(
+    @LisaTestMetadata(
         description="""
         this test case run script on a linux node, and demostrate
        1. how to use customized script on tested node.
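For the `get_x`/`set_x` bullet in the CONTRIBUTING.md section above, a minimal
sketch of the `@property` pattern may help; the `Node` class and `data` field
here are illustrative, not part of LISA:

    class Node:
        def __init__(self) -> None:
            self._data = ""  # Non-public storage, per the underscore convention.

        @property
        def data(self) -> str:
            # Replaces a `get_data` method: read with `node.data`.
            return self._data

        @data.setter
        def data(self, value: str) -> None:
            # Replaces a `set_data` method: write with `node.data = value`.
            self._data = value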
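Likewise, a minimal sketch of the Async IO guidance above — `asyncio.run(...)`
called exactly once in the entry point, with everything else written as
coroutines that the event loop schedules. The coroutine names are illustrative:

    import asyncio


    async def check_node(name: str) -> str:
        # Stands in for real asynchronous work, such as a remote command.
        await asyncio.sleep(0.1)
        return f"{name}: ok"


    async def main() -> None:
        # Both coroutines are scheduled concurrently on the running event loop.
        print(await asyncio.gather(check_node("node-0"), check_node("node-1")))


    asyncio.run(main())  # The one and only call that starts the event loop.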
diff --git a/lisa/__init__.py b/lisa/__init__.py
index adec31b93d..84ef2afe5a 100644
--- a/lisa/__init__.py
+++ b/lisa/__init__.py
@@ -1,12 +1,12 @@
 from __future__ import annotations
 
-from lisa.testsuite import TestCaseMetadata, TestSuite, TestSuiteMetadata
+from lisa.testsuite import LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata
 from lisa.util.logger import init_loggger
 
 __all__ = [
-    "TestSuiteMetadata",
-    "TestCaseMetadata",
-    "TestSuite",
+    "LisaTestCase",
+    "LisaTestCaseMetadata",
+    "LisaTestMetadata",
 ]
diff --git a/lisa/action.py b/lisa/action.py
deleted file mode 100644
index e6a5b68d75..0000000000
--- a/lisa/action.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from __future__ import annotations
-
-from abc import ABCMeta, abstractmethod
-from dataclasses import dataclass
-from enum import Enum
-
-from lisa import notifier
-from lisa.util import LisaException
-from lisa.util.logger import get_logger
-from lisa.util.perf_timer import create_timer
-
-ActionStatus = Enum(
-    "ActionStatus",
-    [
-        "UNINITIALIZED",
-        "INITIALIZING",
-        "INITIALIZED",
-        "WAITING",
-        "RUNNING",
-        "SUCCESS",
-        "FAILED",
-        "STOPPING",
-        "STOPPED",
-        "UNKNOWN",
-    ],
-)
-
-
-@dataclass
-class ActionMessage(notifier.MessageBase):
-    type: str = "Action"
-    sub_type: str = ""
-    status: ActionStatus = ActionStatus.UNKNOWN
-    total_elapsed: float = 0
-
-
-class Action(metaclass=ABCMeta):
-    def __init__(self) -> None:
-        self.name: str = self.__class__.__name__
-        self.log = get_logger("Action")
-
-        self.__status = ActionStatus.UNINITIALIZED
-        self.__is_started = False
-        self.__timer = create_timer()
-        self.__total: float = 0
-
-    @abstractmethod
-    async def start(self) -> None:
-        self.__is_started = True
-        self.set_status(ActionStatus.RUNNING)
-
-    @abstractmethod
-    async def stop(self) -> None:
-        self.validate_started()
-
-    @abstractmethod
-    async def close(self) -> None:
-        self.validate_started()
-
-    def get_status(self) -> ActionStatus:
-        return self.__status
-
-    def set_status(self, status: ActionStatus) -> None:
-        if self.__status != status:
-            self.log.debug(
-                f"{self.name} status changed from {self.__status.name} "
-                f"to {status.name} with {self.__timer}"
-            )
-            self.__total += self.__timer.elapsed()
-            message = ActionMessage(
-                elapsed=self.__timer.elapsed(),
-                sub_type=self.name,
-                status=status,
-                total_elapsed=self.__total,
-            )
-            notifier.notify(message=message)
-            self.__timer = create_timer()
-            self.__status = status
-
-    def validate_started(self) -> None:
-        if not self.__is_started:
-            raise LisaException(f"action[{self.name}] is not started yet.")
diff --git a/lisa/commands.py b/lisa/commands.py
index dd4a2dd4c2..022e324d77 100644
--- a/lisa/commands.py
+++ b/lisa/commands.py
@@ -1,52 +1,50 @@
-import asyncio
 import functools
 from argparse import Namespace
 from typing import Iterable, Optional, cast
 
+import lisa.runner
 from lisa import notifier
-from lisa.lisarunner import LisaRunner
-from lisa.parameter_parser.runbook import load as load_runbook
+from lisa.parameter_parser.runbook import load_runbook
 from lisa.testselector import select_testcases
-from lisa.testsuite import TestCaseRuntimeData
+from lisa.testsuite import LisaTestRuntimeData, TestStatus
 from lisa.util import LisaException, constants
 from lisa.util.logger import get_logger
 
 _get_init_logger = functools.partial(get_logger, "init")
 
 
-def run(args: Namespace) -> int:
-    runbook = load_runbook(args)
+async def run(args: Namespace) -> int:
+    runbook = load_runbook(args.runbook, args.variables)
     if runbook.notifier:
         notifier.initialize(runbooks=runbook.notifier)
+
     try:
-        runner = LisaRunner(runbook)
-        awaitable = runner.start()
-        asyncio.run(awaitable)
+        results = await lisa.runner.run(runbook)
     finally:
         notifier.finalize()
-    return runner.exit_code
+    return sum(1 for x in results if x.status == TestStatus.FAILED)
 
 
 # check runbook
-def check(args: Namespace) -> int:
-    load_runbook(args)
+async def check(args: Namespace) -> int:
+    load_runbook(args.runbook, args.variables)
     return 0
 
 
-def list_start(args: Namespace) -> int:
-    runbook = load_runbook(args)
+async def list_start(args: Namespace) -> int:
+    runbook = load_runbook(args.runbook, args.variables)
     list_all = cast(Optional[bool], args.list_all)
     log = _get_init_logger("list")
     if args.type == constants.LIST_CASE:
         if list_all:
-            cases: Iterable[TestCaseRuntimeData] = select_testcases()
+            cases: Iterable[LisaTestRuntimeData] = select_testcases()
         else:
             cases = select_testcases(runbook.testcase)
         for case_data in cases:
             log.info(
-                f"case: {case_data.name}, suite: {case_data.metadata.suite.name}, "
+                f"test: {case_data.name}, case: {case_data.metadata.case.name}, "
                 f"area: {case_data.suite.area}, "
                 f"category: {case_data.suite.category}, "
                 f"tags: {','.join(case_data.suite.tags)}, "
diff --git a/lisa/lisarunner.py b/lisa/lisarunner.py
deleted file mode 100644
index da4ea2e26d..0000000000
--- a/lisa/lisarunner.py
+++ /dev/null
@@ -1,227 +0,0 @@
-from typing import Dict, List, Optional
-
-from lisa import schema, search_space
-from lisa.action import Action, ActionStatus
-from lisa.environment import Environment, Environments, load_environments
-from lisa.platform_ import WaitMoreResourceError, load_platform
-from lisa.testselector import select_testcases
-from lisa.testsuite import (
-    TestCaseRequirement,
-    TestCaseRuntimeData,
-    TestResult,
-    TestStatus,
-    TestSuite,
-    TestSuiteMetadata,
-)
-from lisa.util.logger import get_logger
-
-
-class LisaRunner(Action):
-    def __init__(self, runbook: schema.Runbook) -> None:
-        super().__init__()
-        self.exit_code: int = 0
-
-        self._runbook = runbook
-        self._log = get_logger("runner")
-
-    async def start(self) -> None:  # noqa: C901
-        # TODO: Reduce this function's complexity and remove the disabled warning.
-        await super().start()
-        self.set_status(ActionStatus.RUNNING)
-
-        # select test cases
-        selected_test_cases = select_testcases(self._runbook.testcase)
-
-        # create test results
-        selected_test_results = self._create_test_results(selected_test_cases)
-
-        # load predefined environments
-        candidate_environments = load_environments(self._runbook.environment)
-
-        platform = load_platform(self._runbook.platform)
-        # get environment requirements
-        self._merge_test_requirements(
-            test_results=selected_test_results,
-            existing_environments=candidate_environments,
-            platform_type=platform.type_name(),
-        )
-
-        # there may not need to handle requirements, if all environment are predefined
-        prepared_environments = platform.prepare_environments(candidate_environments)
-
-        can_run_results = selected_test_results
-        # request environment then run test s
-        for environment in prepared_environments:
-            try:
-                is_needed: bool = False
-                can_run_results = [x for x in can_run_results if x.can_run]
-                can_run_results.sort(key=lambda x: x.runtime_data.metadata.suite.name)
-                new_env_can_run_results = [
-                    x for x in can_run_results if x.runtime_data.use_new_environment
-                ]
-
-                if not can_run_results:
-                    # no left tests, break the loop
-                    self._log.debug(
-                        f"no more test case to run, skip env [{environment.name}]"
-                    )
-                    break
-
-                # check if any test need this environment
-                if any(
-                    case.can_run and case.check_environment(environment, True)
-                    for case in can_run_results
-                ):
-                    is_needed = True
-
-                if not is_needed:
-                    self._log.debug(
-                        f"env[{environment.name}] skipped "
-                        f"as not meet any case requirement"
-                    )
-                    continue
-
-                try:
-                    platform.deploy_environment(environment)
-                except WaitMoreResourceError as identifier:
-                    self._log.warning(
-                        f"[{environment.name}] waiting for more resource: "
-                        f"{identifier}, skip assiging case"
-                    )
-                    continue
-
-                if not environment.is_ready:
-                    self._log.warning(
-                        f"[{environment.name}] is not deployed successfully, "
-                        f"skip assiging case"
-                    )
-                    continue
-
-                # once environment is ready, check updated capability
-                self._log.info(f"start running cases on {environment.name}")
-                # try a case need new environment firstly
-                for new_env_result in new_env_can_run_results:
-                    if new_env_result.check_environment(environment, True):
-                        await self._run_suite(
-                            environment=environment, cases=[new_env_result]
-                        )
-                        break
-
-                # grouped test results by test suite.
-                grouped_cases: List[TestResult] = []
-                current_test_suite: Optional[TestSuiteMetadata] = None
-                for test_result in can_run_results:
-                    if (
-                        test_result.can_run
-                        and test_result.check_environment(environment, True)
-                        and not test_result.runtime_data.use_new_environment
-                    ):
-                        if (
-                            test_result.runtime_data.metadata.suite
-                            != current_test_suite
-                            and grouped_cases
-                        ):
-                            # run last batch cases
-                            await self._run_suite(
-                                environment=environment, cases=grouped_cases
-                            )
-                            grouped_cases = []
-
-                        # append new test cases
-                        current_test_suite = test_result.runtime_data.metadata.suite
-                        grouped_cases.append(test_result)
-
-                if grouped_cases:
-                    await self._run_suite(environment=environment, cases=grouped_cases)
-            finally:
-                if environment and environment.is_ready:
-                    platform.delete_environment(environment)
-
-        # not run as there is no fit environment.
-        for case in can_run_results:
-            if case.can_run:
-                reasons = "no available environment"
-                if case.check_results and case.check_results.reasons:
-                    reasons = f"{reasons}: {case.check_results.reasons}"
-
-                case.set_status(TestStatus.SKIPPED, reasons)
-
-        result_count_dict: Dict[TestStatus, int] = dict()
-        for test_result in selected_test_results:
-            self._log.info(
-                f"{test_result.runtime_data.metadata.full_name:>30}: "
-                f"{test_result.status.name:<8} {test_result.message}"
-            )
-            result_count = result_count_dict.get(test_result.status, 0)
-            result_count += 1
-            result_count_dict[test_result.status] = result_count
-
-        self._log.info("test result summary")
-        self._log.info(f"  TOTAL    : {len(selected_test_results)}")
-        for key in TestStatus:
-            self._log.info(f"  {key.name:<9}: {result_count_dict.get(key, 0)}")
-
-        self.set_status(ActionStatus.SUCCESS)
-
-        # pass failed count to exit code
-        self.exit_code = result_count_dict.get(TestStatus.FAILED, 0)
-
-        # for UT testability
-        self._latest_test_results = selected_test_results
-
-    async def stop(self) -> None:
-        super().stop()
-
-    async def close(self) -> None:
-        super().close()
-
-    async def _run_suite(
-        self, environment: Environment, cases: List[TestResult]
-    ) -> None:
-
-        assert cases
-        suite_metadata = cases[0].runtime_data.metadata.suite
-        test_suite: TestSuite = suite_metadata.test_class(
-            environment,
-            cases,
-            suite_metadata,
-        )
-        for case in cases:
-            case.env = environment.name
-        await test_suite.start()
-
-    def _create_test_results(
-        self, cases: List[TestCaseRuntimeData]
-    ) -> List[TestResult]:
-        test_results: List[TestResult] = []
-        for x in cases:
-            test_results.append(TestResult(runtime_data=x))
-        return test_results
-
-    def _merge_test_requirements(
-        self,
-        test_results: List[TestResult],
-        existing_environments: Environments,
-        platform_type: str,
-    ) -> None:
-        assert platform_type
-        platform_type_set = search_space.SetSpace[str](
-            is_allow_set=True, items=[platform_type]
-        )
-        for test_result in test_results:
-            test_req: TestCaseRequirement = test_result.runtime_data.requirement
-
-            # check if there is playform requirement on test case
-            if test_req.platform_type and len(test_req.platform_type) > 0:
-                check_result = test_req.platform_type.check(platform_type_set)
-                if not check_result.result:
-                    test_result.set_status(TestStatus.SKIPPED, check_result.reasons)
-
-            if test_result.can_run:
-                assert test_req.environment
-                # if case need a new env to run, force to create one.
-                # if not, get or create one.
-                if test_result.runtime_data.use_new_environment:
-                    existing_environments.from_requirement(test_req.environment)
-                else:
-                    existing_environments.get_or_create(test_req.environment)
diff --git a/lisa/main.py b/lisa/main.py
index ef00e0d75f..76b980277b 100644
--- a/lisa/main.py
+++ b/lisa/main.py
@@ -1,3 +1,4 @@
+import asyncio
 import sys
 import traceback
 from datetime import datetime
@@ -23,7 +24,7 @@ def create_run_path(root_path: Path) -> Path:
     return run_path
 
 
-def main() -> int:
+async def main() -> int:
     total_timer = create_timer()
     log = get_logger()
     exit_code: int = 0
@@ -57,7 +58,7 @@ def main() -> int:
         log.info(f"command line args: {sys.argv}")
         log.info(f"run local path: {runtime_root}")
 
-        exit_code = args.func(args)
+        exit_code = await args.func(args)
         assert isinstance(exit_code, int), f"actual: {type(exit_code)}"
     finally:
         log.info(f"completed in {total_timer}")
@@ -68,7 +69,8 @@
 if __name__ == "__main__":
     exit_code = 0
     try:
-        exit_code = main()
+        # TODO: Turn off debugging when we ship this.
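+        # Note: in debug mode the event loop also logs coroutines that are
+        # never awaited and callbacks that block it for too long.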
+        exit_code = asyncio.run(main(), debug=True)
     except Exception as exception:
         exit_code = -1
         log = get_logger()
diff --git a/lisa/parameter_parser/argparser.py b/lisa/parameter_parser/argparser.py
index 5f389e1c65..5478fc23a4 100644
--- a/lisa/parameter_parser/argparser.py
+++ b/lisa/parameter_parser/argparser.py
@@ -1,4 +1,5 @@
 from argparse import ArgumentParser, Namespace
+from pathlib import Path
 
 from lisa import commands
 from lisa.util import constants
@@ -8,10 +9,10 @@ def support_runbook(parser: ArgumentParser, required: bool = True) -> None:
     parser.add_argument(
         "--runbook",
         "-r",
+        type=Path,
         required=required,
-        dest="runbook",
-        help="runbook of this run",
-        default="examples/runbook/hello_world.yml",
+        help="Path to the runbook",
+        default=Path("examples/runbook/hello_world.yml").absolute(),
     )
 
 
@@ -21,7 +22,7 @@ def support_debug(parser: ArgumentParser) -> None:
         "-d",
         dest="debug",
         action="store_true",
-        help="set log level to debug",
+        help="Set log level to debug",
     )
 
 
@@ -31,38 +32,39 @@ def support_variable(parser: ArgumentParser) -> None:
         "-v",
         dest="variables",
         action="append",
-        help="define variable from command line. format is NAME:VALUE",
+        help="Define one or more variables with 'NAME:VALUE'",
     )
 
 
 def parse_args() -> Namespace:
-    # parse args run function.
+    """This wraps Python's 'ArgumentParser' to set up our CLI."""
     parser = ArgumentParser()
     support_debug(parser)
     support_runbook(parser, required=False)
     support_variable(parser)
 
+    # Default to ‘run’ when no subcommand is given.
+    parser.set_defaults(func=commands.run)
+
     subparsers = parser.add_subparsers(dest="cmd", required=False)
+
+    # Entry point for ‘run’.
     run_parser = subparsers.add_parser("run")
     run_parser.set_defaults(func=commands.run)
-    support_runbook(run_parser)
-    support_variable(run_parser)
 
+    # Entry point for ‘list-start’.
     list_parser = subparsers.add_parser(constants.LIST)
     list_parser.set_defaults(func=commands.list_start)
     list_parser.add_argument("--type", "-t", dest="type", choices=["case"])
     list_parser.add_argument("--all", "-a", dest="list_all", action="store_true")
-    support_runbook(list_parser)
-    support_variable(list_parser)
 
+    # Entry point for ‘check’.
     check_parser = subparsers.add_parser("check")
     check_parser.set_defaults(func=commands.check)
-    support_runbook(check_parser)
-    support_variable(check_parser)
-
-    parser.set_defaults(func=commands.run)
 
     for sub_parser in subparsers.choices.values():
+        support_runbook(sub_parser)
+        support_variable(sub_parser)
         support_debug(sub_parser)
 
     return parser.parse_args()
diff --git a/lisa/parameter_parser/runbook.py b/lisa/parameter_parser/runbook.py
index e47feb04f3..cb20a8e270 100644
--- a/lisa/parameter_parser/runbook.py
+++ b/lisa/parameter_parser/runbook.py
@@ -1,7 +1,6 @@
-from argparse import Namespace
 from functools import partial
 from pathlib import Path, PurePath
-from typing import Any, Dict, Optional, cast
+from typing import Any, Dict, List, Optional, cast
 
 import yaml
 from marshmallow import Schema
@@ -58,13 +57,13 @@ def validate_data(data: Any) -> schema.Runbook:
     return runbook
 
 
-def load(args: Namespace) -> schema.Runbook:
+def load_runbook(path: Path, user_variables: Optional[List[str]]) -> schema.Runbook:
+    """Loads a runbook given a user-supplied path and set of variables."""
     # make sure extension in lisa is loaded
     base_module_path = Path(__file__).parent.parent
     import_module(base_module_path, logDetails=False)
 
     # merge all parameters
-    path = Path(args.runbook).absolute()
     data = _load_data(path)
     constants.RUNBOOK_PATH = path.parent
 
@@ -73,14 +72,14 @@
         extends_runbook = schema.Extension.schema().load(  # type:ignore
             data[constants.EXTENSION]
         )
-        _load_extends(path.parent, extends_runbook)
+        _load_extends(constants.RUNBOOK_PATH, extends_runbook)
 
     # load arg variables
     variables: Dict[str, Any] = dict()
+    # TODO: This is all side-effect driven and needs to be fixed.
     load_from_runbook(data, variables)
     load_from_env(variables)
-    if hasattr(args, "variables"):
-        load_from_pairs(args.variables, variables)
+    load_from_pairs(user_variables, variables)
 
     # replace variables:
     data = replace_variables(data, variables)
diff --git a/lisa/runner.py b/lisa/runner.py
new file mode 100644
index 0000000000..da8cb8bc74
--- /dev/null
+++ b/lisa/runner.py
@@ -0,0 +1,191 @@
+from typing import Dict, List, Optional
+
+from lisa import schema, search_space
+from lisa.environment import Environment, Environments, load_environments
+from lisa.platform_ import WaitMoreResourceError, load_platform
+from lisa.testselector import select_testcases
+from lisa.testsuite import (
+    LisaTestCase,
+    LisaTestCaseMetadata,
+    TestCaseRequirement,
+    TestResult,
+    TestStatus,
+)
+from lisa.util.logger import get_logger
+
+log = get_logger("runner")
+
+
+# TODO: This entire function is one long string of side-effects.
+# We need to reduce this function's complexity to remove the
+# disabled warning, and not rely solely on side effects.
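+# High-level flow: select cases, merge their environment requirements into
+# the candidate environments, deploy each environment that is needed, run
+# the tests grouped by test case, then summarize and return the results.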
+async def run(runbook: schema.Runbook) -> List[TestResult]:  # noqa: C901
+    # select test cases
+    selected_test_cases = select_testcases(runbook.testcase)
+
+    selected_test_results = [
+        TestResult(runtime_data=case) for case in selected_test_cases
+    ]
+
+    # load predefined environments
+    candidate_environments = load_environments(runbook.environment)
+
+    platform = load_platform(runbook.platform)
+    # get environment requirements
+    _merge_test_requirements(
+        test_results=selected_test_results,
+        existing_environments=candidate_environments,
+        platform_type=platform.type_name(),
+    )
+
+    # requirements may not need handling if all environments are predefined
+    prepared_environments = platform.prepare_environments(candidate_environments)
+
+    can_run_results = selected_test_results
+    # request environments, then run tests
+    for environment in prepared_environments:
+        try:
+            is_needed: bool = False
+            can_run_results = [x for x in can_run_results if x.can_run]
+            can_run_results.sort(key=lambda x: x.runtime_data.metadata.case.name)
+            new_env_can_run_results = [
+                x for x in can_run_results if x.runtime_data.use_new_environment
+            ]
+
+            if not can_run_results:
+                # no tests left; break the loop
+                log.debug(f"no more test case to run, skip env [{environment.name}]")
+                break
+
+            # check if any test needs this environment
+            if any(
+                case.can_run and case.check_environment(environment, True)
+                for case in can_run_results
+            ):
+                is_needed = True
+
+            if not is_needed:
+                log.debug(
+                    f"env[{environment.name}] skipped "
+                    f"as it does not meet any case requirement"
+                )
+                continue
+
+            try:
+                platform.deploy_environment(environment)
+            except WaitMoreResourceError as identifier:
+                log.warning(
+                    f"[{environment.name}] waiting for more resource: "
+                    f"{identifier}, skip assigning cases"
+                )
+                continue
+
+            if not environment.is_ready:
+                log.warning(
+                    f"[{environment.name}] is not deployed successfully, "
+                    f"skip assigning cases"
+                )
+                continue
+
+            # once the environment is ready, check the updated capability
+            log.info(f"start running cases on {environment.name}")
+            # first, try a case that needs a new environment
+            for new_env_result in new_env_can_run_results:
+                if new_env_result.check_environment(environment, True):
+                    await _run_tests(environment=environment, tests=[new_env_result])
+                    break
+
+            # group test results by test case.
+            grouped_cases: List[TestResult] = []
+            current_case: Optional[LisaTestCaseMetadata] = None
+            for test_result in can_run_results:
+                if (
+                    test_result.can_run
+                    and test_result.check_environment(environment, True)
+                    and not test_result.runtime_data.use_new_environment
+                ):
+                    if (
+                        test_result.runtime_data.metadata.case != current_case
+                        and grouped_cases
+                    ):
+                        # run the last batch of cases
+                        await _run_tests(environment=environment, tests=grouped_cases)
+                        grouped_cases = []
+
+                    # append new test cases
+                    current_case = test_result.runtime_data.metadata.case
+                    grouped_cases.append(test_result)
+
+            if grouped_cases:
+                await _run_tests(environment=environment, tests=grouped_cases)
+        finally:
+            if environment and environment.is_ready:
+                platform.delete_environment(environment)
+
+    # skip tests that cannot run because no environment fits them.
+    for test in can_run_results:
+        if test.can_run:
+            reasons = "no available environment"
+            if test.check_results and test.check_results.reasons:
+                reasons = f"{reasons}: {test.check_results.reasons}"
+
+            test.set_status(TestStatus.SKIPPED, reasons)
+
+    result_count_dict: Dict[TestStatus, int] = dict()
+    for test_result in selected_test_results:
+        log.info(
+            f"{test_result.runtime_data.metadata.full_name:>30}: "
+            f"{test_result.status.name:<8} {test_result.message}"
+        )
+        result_count = result_count_dict.get(test_result.status, 0)
+        result_count += 1
+        result_count_dict[test_result.status] = result_count
+
+    log.info("test result summary")
+    log.info(f"  TOTAL    : {len(selected_test_results)}")
+    for key in TestStatus:
+        log.info(f"  {key.name:<9}: {result_count_dict.get(key, 0)}")
+
+    return selected_test_results
+
+
+async def _run_tests(environment: Environment, tests: List[TestResult]) -> None:
+    assert tests
+    case_metadata = tests[0].runtime_data.metadata.case
+    test_case: LisaTestCase = case_metadata.test_class(
+        environment,
+        tests,
+        case_metadata,
+    )
+    for test in tests:
+        test.env = environment.name
+    await test_case.start()
+
+
+def _merge_test_requirements(
+    test_results: List[TestResult],
+    existing_environments: Environments,
+    platform_type: str,
+) -> None:
+    """TODO: This function modifies `test_results` and `existing_environments`."""
+    assert platform_type
+    platform_type_set = search_space.SetSpace[str](
+        is_allow_set=True, items=[platform_type]
+    )
+    for test_result in test_results:
+        test_req: TestCaseRequirement = test_result.runtime_data.requirement
+
+        # check if there is a platform requirement on the test case
+        if test_req.platform_type and len(test_req.platform_type) > 0:
+            check_result = test_req.platform_type.check(platform_type_set)
+            if not check_result.result:
+                test_result.set_status(TestStatus.SKIPPED, check_result.reasons)
+
+        if test_result.can_run:
+            assert test_req.environment
+            # if the case needs a new env to run, force creation of one;
+            # if not, get or create one.
+            if test_result.runtime_data.use_new_environment:
+                existing_environments.from_requirement(test_req.environment)
+            else:
+                existing_environments.get_or_create(test_req.environment)
diff --git a/lisa/tests/test_lisarunner.py b/lisa/tests/test_lisarunner.py
index 78b3c6e9d8..54a5cbfdbe 100644
--- a/lisa/tests/test_lisarunner.py
+++ b/lisa/tests/test_lisarunner.py
@@ -1,12 +1,11 @@
-import asyncio
-from typing import List, Optional
-from unittest.case import TestCase
+from typing import Any, List
+from unittest import IsolatedAsyncioTestCase
 
+import lisa.runner
 from lisa import schema
 from lisa.environment import load_environments
-from lisa.lisarunner import LisaRunner
 from lisa.tests import test_platform, test_testsuite
-from lisa.tests.test_environment import generate_runbook as generate_env_runbook
+from lisa.tests.test_environment import generate_runbook
 from lisa.tests.test_platform import deleted_envs, deployed_envs, prepared_envs
 from lisa.tests.test_testsuite import (
     cleanup_cases_metadata,
@@ -17,12 +16,13 @@
 from lisa.util import constants
 
 
-def generate_lisarunner(
-    env_runbook: Optional[schema.EnvironmentRoot] = None, case_use_new_env: bool = False
-) -> LisaRunner:
-    runbook = schema.Runbook(
+def generate_test_runbook(
+    case_use_new_env: bool = False, **kwargs: Any
+) -> schema.Runbook:
+    """This wraps `generate_runbook` with a mock platform and test case."""
+    return schema.Runbook(
         platform=[
-            schema.Platform(type=constants.PLATFORM_MOCK, admin_password="not use it")
+            schema.Platform(type=constants.PLATFORM_MOCK, admin_password="do-not-use")
         ],
         testcase=[
            schema.TestCase(
@@ -30,41 +30,32 @@
                 use_new_environment=case_use_new_env,
             )
         ],
+        environment=generate_runbook(**kwargs),
     )
-    if env_runbook:
-        runbook.environment = env_runbook
-    runner = LisaRunner(runbook)
-    return runner
 
 
-class LisaRunnerTestCase(TestCase):
+class RunnerTestCase(IsolatedAsyncioTestCase):
     def tearDown(self) -> None:
-        cleanup_cases_metadata()
+        cleanup_cases_metadata()  # Necessary side effects!
         test_platform.return_prepared = True
         test_platform.deploy_is_ready = True
         test_platform.deploy_success = True
         test_platform.wait_more_resource_error = False
 
     def test_merge_req_create_on_new(self) -> None:
-        # if no predefined envs, can generate from requirement
-        env_runbook = generate_env_runbook(is_single_env=False)
-        envs = load_environments(env_runbook)
-        self.assertListEqual(
-            [],
-            [x for x in envs],
-        )
-        runner = generate_lisarunner(None)
+        """Create environments based on requirements."""
+        envs = load_environments(generate_runbook(is_single_env=False))
+        self.assertFalse(envs)
         test_results = generate_cases_result()
-        runner._merge_test_requirements(
+        lisa.runner._merge_test_requirements(
             test_results=test_results,
             existing_environments=envs,
             platform_type=constants.PLATFORM_MOCK,
         )
-        # 3 cases create 3 envs
+        # 3 cases create 3 environments.
         self.assertListEqual(
             ["req_0", "req_1", "req_2"],
-            [x for x in envs],
+            list(envs),
         )
         self.verify_test_results(
             expected_envs=["", "", ""],
@@ -74,27 +65,23 @@
         )
 
     def test_merge_req_run_not_create_on_equal(self) -> None:
-        # when merging requirement from test cases,
-        # it won't create new, if predefined exact match test case needs
-        env_runbook = generate_env_runbook(remote=True)
-        envs = load_environments(env_runbook)
+        """Don't create environments when already satisfied."""
+        envs = load_environments(generate_runbook(remote=True))
         self.assertListEqual(
             ["runbook_0"],
-            [x for x in envs],
+            list(envs),
         )
-
-        runner = generate_lisarunner(env_runbook)
         test_results = generate_cases_result()
-        runner._merge_test_requirements(
+        lisa.runner._merge_test_requirements(
             test_results=test_results,
             existing_environments=envs,
             platform_type=constants.PLATFORM_MOCK,
         )
-
-        # 3 cases created only two req, as simple req meets on runbook_0
+        # 3 cases created only two required environments, as the
+        # simple requirement was met by runbook_0.
         self.assertListEqual(
             ["runbook_0", "req_1", "req_2"],
-            [x for x in envs],
+            list(envs),
         )
         self.assertListEqual(
             [TestStatus.NOTRUN, TestStatus.NOTRUN, TestStatus.NOTRUN],
@@ -102,28 +89,24 @@
         )
 
     def test_merge_req_create_on_use_new(self) -> None:
-        # same runbook as test_merge_req_run_not_create_on_equal
-        # but all 3 cases asks a new env, so create 3 envs
-        # note, when running cases, predefined env will be treat as a new env.
-        env_runbook = generate_env_runbook(remote=True)
-        envs = load_environments(env_runbook)
+        """Always create environments when asked."""
+        envs = load_environments(generate_runbook(remote=True))
         self.assertListEqual(
             ["runbook_0"],
-            [x for x in envs],
+            list(envs),
         )
-        runner = generate_lisarunner(env_runbook)
         test_results = generate_cases_result()
         for test_result in test_results:
             test_result.runtime_data.use_new_environment = True
-        runner._merge_test_requirements(
+        lisa.runner._merge_test_requirements(
             test_results=test_results,
             existing_environments=envs,
             platform_type=constants.PLATFORM_MOCK,
         )
-        # every case need a new environment, so created 3
+        # All 3 cases needed a new environment, so it created 3.
         self.assertListEqual(
             ["runbook_0", "req_1", "req_2", "req_3"],
-            [x for x in envs],
+            list(envs),
         )
         self.verify_test_results(
             expected_envs=["", "", ""],
@@ -133,27 +116,18 @@
         )
 
     def test_merge_req_not_allow_create(self) -> None:
-        # force to use existing env, not to create new.
-        # this case doesn't provide predefined env, but no case skipped on this stage.
-        env_runbook = generate_env_runbook(is_single_env=False)
-        env_runbook.allow_create = False
-        envs = load_environments(env_runbook)
-        self.assertListEqual(
-            [],
-            [x for x in envs],
-        )
-        runner = generate_lisarunner(None)
+        """Do not create new environments when creation is not allowed."""
+        runbook = generate_runbook(is_single_env=False)
+        runbook.allow_create = False
+        envs = load_environments(runbook)
+        self.assertFalse(envs)
         test_results = generate_cases_result()
-        runner._merge_test_requirements(
+        lisa.runner._merge_test_requirements(
             test_results=test_results,
             existing_environments=envs,
             platform_type=constants.PLATFORM_MOCK,
         )
-        self.assertListEqual(
-            [],
-            [x for x in envs],
-        )
-
+        self.assertFalse(envs)
         self.verify_test_results(
             expected_envs=["", "", ""],
             expected_status=[
@@ -166,29 +140,26 @@
         )
 
     def test_merge_req_platform_type_checked(self) -> None:
-        # check if current platform supported,
-        # for example, some case run on azure only.
-        # platform check happens in runner, so this case is here
-        # a simple check is enough. More covered by search_space.SetSpace
-        env_runbook = generate_env_runbook(is_single_env=False)
-        envs = load_environments(env_runbook)
-        self.assertListEqual(
-            [],
-            [x for x in envs],
-        )
-        runner = generate_lisarunner(None)
+        """Ensure the platform check happens.
+
+        For example, some cases run only on Azure. A simple check is
+        sufficient because more is covered by `search_space.SetSpace`.
+        """
+        envs = load_environments(generate_runbook(is_single_env=False))
+        self.assertFalse(envs)
         test_results = generate_cases_result()
         for test_result in test_results:
             metadata = test_result.runtime_data.metadata
             metadata.requirement = simple_requirement(
-                supported_platform_type=["notexists"]
+                supported_platform_type=["does-not-exist"]
             )
-        runner._merge_test_requirements(
+        lisa.runner._merge_test_requirements(
             test_results=test_results,
             existing_environments=envs,
             platform_type=constants.PLATFORM_MOCK,
         )
-
+        # TODO: This message should be in a localization module of
+        # some sort.
         platform_unsupported = "capability cannot support some of requirement"
         self.verify_test_results(
             expected_envs=["", "", ""],
@@ -205,15 +176,22 @@
             test_results=test_results,
         )
 
-    def test_fit_a_predefined_env(self) -> None:
-        # predefined env can run case in below condition.
-        # 1. with predefined env of 1 simple node, so ut2 don't need a new env
-        # 2. ut3 need 8 cores, and predefined env target to meet all core requirement,
-        #    so it can run any case with core requirements.
+    async def test_fit_a_predefined_env(self) -> None:
+        """Pre-defined environments can run tests under these conditions:
+
+        1. With a pre-defined environment of 1 simple node, unit test 2
+        doesn't need a new environment.
+
+        2. Unit test 3 needs 8 cores, and the pre-defined environment
+        has them, so it can run all the tests.
+
+        """
+        # TODO: This function call is necessary, which means that it
+        # sets some unknown global state. We need to fix those side
+        # effects because this is unintelligible.
         generate_cases_metadata()
-        env_runbook = generate_env_runbook(is_single_env=True, remote=True)
-        runner = generate_lisarunner(env_runbook)
-        asyncio.run(runner.start())
+        runbook = generate_test_runbook(is_single_env=True, remote=True)
+        results = await lisa.runner.run(runbook)
         self.verify_env_results(
             expected_prepared=["runbook_0", "req_1", "req_2"],
             expected_deployed_envs=["runbook_0", "req_1"],
@@ -223,16 +201,19 @@
             expected_envs=["req_1", "runbook_0", "runbook_0"],
             expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
             expected_message=["", "", ""],
-            test_results=runner._latest_test_results,
+            test_results=results,
         )
 
-    def test_fit_a_bigger_env(self) -> None:
-        # similar with test_fit_a_predefined_env, but predefined 2 nodes,
-        # it doesn't equal to any case req, but reusable for all cases.
+    async def test_fit_a_bigger_env(self) -> None:
+        """Similar to `test_fit_a_predefined_env` but with a pre-defined
+        two-node environment.
+
+        While it doesn't exactly match any requirement, it's re-usable
+        for every test.
+
+        """
         generate_cases_metadata()
-        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
-        runner = generate_lisarunner(env_runbook)
-        asyncio.run(runner.start())
+        runbook = generate_test_runbook(is_single_env=True, local=True, remote=True)
+        results = await lisa.runner.run(runbook)
         self.verify_env_results(
             expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
             expected_deployed_envs=["runbook_0"],
@@ -242,16 +223,16 @@
             expected_envs=["runbook_0", "runbook_0", "runbook_0"],
             expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
             expected_message=["", "", ""],
-            test_results=runner._latest_test_results,
+            test_results=results,
         )
 
-    def test_case_new_env_run_only_1_needed(self) -> None:
-        # same predefined env as test_fit_a_bigger_env,
-        # but all case want to run on a new env
+    async def test_case_new_env_run_only_1_needed(self) -> None:
+        """Same as `test_fit_a_bigger_env` but we require a new environment."""
         generate_cases_metadata()
-        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
-        runner = generate_lisarunner(env_runbook, case_use_new_env=True)
-        asyncio.run(runner.start())
+        runbook = generate_test_runbook(
+            case_use_new_env=True, is_single_env=True, local=True, remote=True
+        )
+        results = await lisa.runner.run(runbook)
         self.verify_env_results(
             expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
             expected_deployed_envs=["runbook_0", "req_1", "req_3"],
@@ -261,16 +242,21 @@
             expected_envs=["runbook_0", "req_1", "req_3"],
             expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
             expected_message=["", "", ""],
-            test_results=runner._latest_test_results,
+            test_results=results,
         )
 
-    def test_no_needed_env(self) -> None:
-        # two 1 node env predefined, but only runbook_0 go to deploy
-        # no cases assigned to runbook_1, as fit cases run on runbook_0 already
+    async def test_no_needed_env(self) -> None:
+        """No new environments need to be created.
+
+        Two single-node environments are pre-defined, and only
+        `runbook_0` is deployed. The environment for `runbook_1` is
+        not deployed because its tests were able to run on the
+        environment deployed for `runbook_0`.
+
+        """
         generate_cases_metadata()
-        env_runbook = generate_env_runbook(local=True, remote=True)
-        runner = generate_lisarunner(env_runbook)
-        asyncio.run(runner.start())
+        runbook = generate_test_runbook(local=True, remote=True)
+        results = await lisa.runner.run(runbook)
         self.verify_env_results(
             expected_prepared=["runbook_0", "runbook_1", "req_2", "req_3"],
             expected_deployed_envs=["runbook_0", "req_2"],
@@ -280,19 +266,20 @@
             expected_envs=["req_2", "runbook_0", "runbook_0"],
             expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
             expected_message=["", "", ""],
-            test_results=runner._latest_test_results,
+            test_results=results,
         )
 
-    def test_deploy_no_more_resource(self) -> None:
-        # platform may see no more resource, like no azure quota.
-        # cases skipped due to this.
-        # In future, will add retry on wait more resource.
+    async def test_deploy_no_more_resource(self) -> None:
+        """Skip tests if resource quotas were hit.
+
+        TODO: In the future, we may add retry logic to wait on
+        resources becoming available.
+
+        """
         test_platform.wait_more_resource_error = True
         generate_cases_metadata()
-        env_runbook = generate_env_runbook(is_single_env=True, local=True)
-        runner = generate_lisarunner(env_runbook)
-        asyncio.run(runner.start())
-
+        runbook = generate_test_runbook(is_single_env=True, local=True)
+        results = await lisa.runner.run(runbook)
         self.verify_env_results(
             expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
             expected_deployed_envs=[],
@@ -311,21 +298,22 @@
                 before_suite_failed,
                 before_suite_failed,
             ],
-            test_results=runner._latest_test_results,
+            test_results=results,
        )
 
-    def test_skipped_on_suite_failure(self) -> None:
-        # first two cases skipped due to test suite setup failed
+    async def test_skipped_on_suite_failure(self) -> None:
+        """The first two tests are skipped because the suite setup is made to fail."""
        test_testsuite.fail_on_before_suite = True
         generate_cases_metadata()
-        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
-        runner = generate_lisarunner(env_runbook)
-        asyncio.run(runner.start())
+        runbook = generate_test_runbook(is_single_env=True, local=True, remote=True)
+        results = await lisa.runner.run(runbook)
         self.verify_env_results(
             expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
             expected_deployed_envs=["runbook_0"],
             expected_deleted_envs=["runbook_0"],
         )
+        # TODO: This message should be in a localization module of
+        # some sort.
         before_suite_failed = "before_suite: failed"
         self.verify_test_results(
             expected_envs=["runbook_0", "runbook_0", "runbook_0"],
             expected_status=[
@@ -335,22 +323,23 @@
                 TestStatus.PASSED,
             ],
             expected_message=[before_suite_failed, before_suite_failed, ""],
-            test_results=runner._latest_test_results,
+            test_results=results,
         )
 
-    def test_env_skipped_no_prepared_env(self) -> None:
-        # test env not prepared, so test cases cannot find an env to run
+    async def test_env_skipped_no_prepared_env(self) -> None:
+        """The platform's environment isn't prepared, so the tests cannot run."""
         test_platform.return_prepared = False
         generate_cases_metadata()
-        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
-        runner = generate_lisarunner(env_runbook)
-        asyncio.run(runner.start())
+        runbook = generate_test_runbook(is_single_env=True, local=True, remote=True)
+        results = await lisa.runner.run(runbook)
         self.verify_env_results(
             expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
             expected_deployed_envs=[],
             expected_deleted_envs=[],
         )
-        no_avaiable_env = "no available environment"
+        # TODO: This message should be in a localization module of
+        # some sort.
+        no_available_env = "no available environment"
         self.verify_test_results(
             expected_envs=["", "", ""],
             expected_status=[
@@ -358,24 +347,24 @@
                 TestStatus.SKIPPED,
                 TestStatus.SKIPPED,
             ],
-            expected_message=[no_avaiable_env, no_avaiable_env, no_avaiable_env],
-            test_results=runner._latest_test_results,
+            expected_message=[no_available_env, no_available_env, no_available_env],
+            test_results=results,
         )
 
-    def test_env_skipped_not_ready(self) -> None:
-        # env prepared, but not deployed to ready.
-        # so no cases can run
+    async def test_env_skipped_not_ready(self) -> None:
+        """The prepared environment is not deployed, so tests are skipped."""
         test_platform.deploy_is_ready = False
         generate_cases_metadata()
-        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
-        runner = generate_lisarunner(env_runbook)
-        asyncio.run(runner.start())
+        runbook = generate_test_runbook(is_single_env=True, local=True, remote=True)
+        results = await lisa.runner.run(runbook)
         self.verify_env_results(
             expected_prepared=["runbook_0", "req_1", "req_2", "req_3"],
             expected_deployed_envs=["runbook_0", "req_1", "req_2", "req_3"],
             expected_deleted_envs=[],
         )
-        no_avaiable_env = "no available environment"
+        # TODO: This message should be in a localization module of
+        # some sort.
+        no_available_env = "no available environment"
         self.verify_test_results(
             expected_envs=["", "", ""],
             expected_status=[
@@ -383,17 +372,14 @@
                 TestStatus.SKIPPED,
                 TestStatus.SKIPPED,
             ],
-            expected_message=[no_avaiable_env, no_avaiable_env, no_avaiable_env],
-            test_results=runner._latest_test_results,
+            expected_message=[no_available_env, no_available_env, no_available_env],
+            test_results=results,
         )
 
-    def test_env_skipped_no_case(self) -> None:
-        # no case found, as not call generate_case_metadata
-        # in this case, not deploy any env
-        env_runbook = generate_env_runbook(is_single_env=True, remote=True)
-        runner = generate_lisarunner(env_runbook)
-        asyncio.run(runner.start())
-        # still prepare predefined, but not deploy
+    async def test_env_skipped_no_case(self) -> None:
+        """TODO: Investigate why `generate_cases_metadata` side effects matter."""
+        runbook = generate_test_runbook(is_single_env=True, remote=True)
+        results = await lisa.runner.run(runbook)
         self.verify_env_results(
             expected_prepared=["runbook_0"],
             expected_deployed_envs=[],
@@ -403,7 +389,7 @@
             expected_envs=[],
             expected_status=[],
             expected_message=[],
-            test_results=runner._latest_test_results,
+            test_results=results,
         )
 
     def verify_test_results(
@@ -437,15 +423,7 @@ def verify_env_results(
         expected_deployed_envs: List[str],
         expected_deleted_envs: List[str],
     ) -> None:
-        self.assertListEqual(
-            expected_prepared,
-            [x for x in prepared_envs],
-        )
-        self.assertListEqual(
-            expected_deployed_envs,
-            [x for x in deployed_envs],
-        )
-        self.assertListEqual(
-            expected_deleted_envs,
-            [x for x in deleted_envs],
-        )
+        """TODO: Explain what this function verifies and how."""
+        self.assertListEqual(expected_prepared, list(prepared_envs))
+        self.assertListEqual(expected_deployed_envs, list(deployed_envs))
+        self.assertListEqual(expected_deleted_envs, list(deleted_envs))
diff --git a/lisa/tests/test_platform.py b/lisa/tests/test_platform.py
index c41eae349b..40ef3236b7 100644
--- a/lisa/tests/test_platform.py
+++ b/lisa/tests/test_platform.py
@@ -14,6 +14,8 @@
 deploy_success = True
 deploy_is_ready = True
 wait_more_resource_error = False
+# TODO: This breaks things via an implicit alphabetical ordering dependency:
+# if this module runs before `RunnerTestCase`, those tests fail.
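+# These module-level lists record each environment the mock platform
+# prepares, deploys, and deletes, so tests can assert on that behavior.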
 prepared_envs: List[str] = []
 deployed_envs: List[str] = []
 deleted_envs: List[str] = []
diff --git a/lisa/tests/test_testsuite.py b/lisa/tests/test_testsuite.py
index f5e79e5760..2b7776b83d 100644
--- a/lisa/tests/test_testsuite.py
+++ b/lisa/tests/test_testsuite.py
@@ -1,7 +1,7 @@
-import asyncio
 from typing import Any, List
-from unittest import TestCase
+from unittest import IsolatedAsyncioTestCase, TestCase
 
+import lisa.testsuite
 from lisa import schema
 from lisa.environment import load_environments
 from lisa.operating_system import Linux, Windows
@@ -9,14 +9,12 @@
 from lisa.tests.test_environment import generate_runbook
 from lisa.testselector import select_testcases
 from lisa.testsuite import (
-    TestCaseMetadata,
-    TestCaseRuntimeData,
+    LisaTestCase,
+    LisaTestCaseMetadata,
+    LisaTestMetadata,
+    LisaTestRuntimeData,
     TestResult,
     TestStatus,
-    TestSuite,
-    TestSuiteMetadata,
-    get_cases_metadata,
-    get_suites_metadata,
     simple_requirement,
 )
 from lisa.util import LisaException, constants
@@ -29,7 +27,7 @@
 fail_case_count = 0
 
 
-class MockTestSuite(TestSuite):
+class MockTestSuite(LisaTestCase):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)
         self.fail_on_before_suite = fail_on_before_suite
@@ -77,32 +75,32 @@ def mock_ut2(self) -> None:
         pass
 
 
-class MockTestSuite2(TestSuite):
+class MockTestSuite2(LisaTestCase):
     def mock_ut3(self) -> None:
         pass
 
 
 def cleanup_cases_metadata() -> None:
-    get_cases_metadata().clear()
-    get_suites_metadata().clear()
+    lisa.testsuite.lisa_tests_metadata.clear()
+    lisa.testsuite.lisa_test_cases_metadata.clear()
 
 
-def generate_cases_metadata() -> List[TestCaseMetadata]:
+def generate_cases_metadata() -> List[LisaTestMetadata]:
     ut_cases = [
-        TestCaseMetadata(
+        LisaTestMetadata(
             "ut1",
             0,
             requirement=simple_requirement(min_count=2),
         ),
-        TestCaseMetadata("ut2", 1),
-        TestCaseMetadata("ut3", 2),
+        LisaTestMetadata("ut2", 1),
+        LisaTestMetadata("ut3", 2),
     ]
-    suite_metadata1 = TestSuiteMetadata("a1", "c1", "des1", ["t1", "t2"])
+    suite_metadata1 = LisaTestCaseMetadata("a1", "c1", "des1", ["t1", "t2"])
     suite_metadata1(MockTestSuite)
     ut_cases[0](MockTestSuite.mock_ut1)
     ut_cases[1](MockTestSuite.mock_ut2)
 
-    suite_metadata2 = TestSuiteMetadata(
+    suite_metadata2 = LisaTestCaseMetadata(
         "a2",
         "c2",
         "des2",
@@ -118,14 +116,14 @@
 
 def generate_cases_result() -> List[TestResult]:
     case_metadata = generate_cases_metadata()
-    case_results = [TestResult(TestCaseRuntimeData(x)) for x in case_metadata]
+    case_results = [TestResult(LisaTestRuntimeData(x)) for x in case_metadata]
 
     return case_results
 
 
 def select_and_check(
     ut: TestCase, case_runbook: List[Any], expected_descriptions: List[str]
-) -> List[TestCaseRuntimeData]:
+) -> List[LisaTestRuntimeData]:
     runbook = validate_data({constants.TESTCASE: case_runbook})
     case_metadatas = generate_cases_metadata()
     selected = select_testcases(runbook.testcase, case_metadatas)
@@ -134,11 +132,11 @@
     return selected
 
 
-class TestSuiteTestCase(TestCase):
+class TestSuiteTestCase(IsolatedAsyncioTestCase):
     def generate_suite_instance(self) -> MockTestSuite:
         case_results = generate_cases_result()
         case_results = case_results[:2]
-        suite_metadata = case_results[0].runtime_data.metadata.suite
+        suite_metadata = case_results[0].runtime_data.metadata.case
         runbook = generate_runbook(is_single_env=True, local=True, remote=True)
         envs = load_environments(runbook)
         self.default_env = list(envs.values())[0]
@@ -169,14 +167,14 @@ def test_case_override_suite(self) -> None:
         case2_found = False
         for case in cases:
             assert case.requirement.environment
-            assert case.suite.requirement.environment
+            assert case.case.requirement.environment
             if case.name == "mock_ut1":
                 self.assertEqual(2, len(case.requirement.environment.nodes))
-                self.assertEqual(1, len(case.suite.requirement.environment.nodes))
+                self.assertEqual(1, len(case.case.requirement.environment.nodes))
                 case1_found = True
             if case.name == "mock_ut2":
                 self.assertEqual(1, len(case.requirement.environment.nodes))
-                self.assertEqual(1, len(case.suite.requirement.environment.nodes))
+                self.assertEqual(1, len(case.case.requirement.environment.nodes))
                 case2_found = True
         self.assertEqual(True, case1_found)
         self.assertEqual(True, case2_found)
@@ -196,42 +194,42 @@ def test_test_result_canrun(self) -> None:
         else:
             self.assertEqual(False, result.can_run)
 
-    def test_skip_before_suite_failed(self) -> None:
+    async def test_skip_before_suite_failed(self) -> None:
         test_suite = self.generate_suite_instance()
         test_suite.set_fail_phase(fail_on_before_suite=True)
-        asyncio.run(test_suite.start())
+        await test_suite.start()
         for result in test_suite.case_results:
             self.assertEqual(TestStatus.SKIPPED, result.status)
             self.assertEqual("before_suite: failed", result.message)
 
-    def test_pass_after_suite_failed(self) -> None:
+    async def test_pass_after_suite_failed(self) -> None:
         test_suite = self.generate_suite_instance()
         test_suite.set_fail_phase(fail_on_after_suite=True)
-        asyncio.run(test_suite.start())
+        await test_suite.start()
         for result in test_suite.case_results:
             self.assertEqual(TestStatus.PASSED, result.status)
             self.assertEqual("", result.message)
 
-    def test_skip_before_case_failed(self) -> None:
+    async def test_skip_before_case_failed(self) -> None:
         test_suite = self.generate_suite_instance()
         test_suite.set_fail_phase(fail_on_before_case=True)
-        asyncio.run(test_suite.start())
+        await test_suite.start()
         for result in test_suite.case_results:
             self.assertEqual(TestStatus.SKIPPED, result.status)
             self.assertEqual("before_case: failed", result.message)
 
-    def test_pass_after_case_failed(self) -> None:
+    async def test_pass_after_case_failed(self) -> None:
         test_suite = self.generate_suite_instance()
         test_suite.set_fail_phase(fail_on_after_case=True)
-        asyncio.run(test_suite.start())
+        await test_suite.start()
         for result in test_suite.case_results:
             self.assertEqual(TestStatus.PASSED, result.status)
             self.assertEqual("", result.message)
 
-    def test_skip_case_failed(self) -> None:
+    async def test_skip_case_failed(self) -> None:
         test_suite = self.generate_suite_instance()
         test_suite.set_fail_phase(fail_case_count=1)
-        asyncio.run(test_suite.start())
+        await test_suite.start()
         result = test_suite.case_results[0]
         self.assertEqual(TestStatus.FAILED, result.status)
         self.assertEqual("failed: mock_ut1 failed", result.message)
@@ -239,36 +237,36 @@ def test_skip_case_failed(self) -> None:
         self.assertEqual(TestStatus.PASSED, result.status)
         self.assertEqual("", result.message)
 
-    def test_retry_passed(self) -> None:
+    async def test_retry_passed(self) -> None:
         test_suite = self.generate_suite_instance()
         test_suite.set_fail_phase(fail_case_count=1)
         result = test_suite.case_results[0]
         result.runtime_data.retry = 1
-        asyncio.run(test_suite.start())
+        await test_suite.start()
         self.assertEqual(TestStatus.PASSED, result.status)
         self.assertEqual("", result.message)
         result = test_suite.case_results[1]
         self.assertEqual(TestStatus.PASSED, result.status)
         self.assertEqual("", result.message)
 
-    def test_retry_notenough_failed(self) -> None:
+    async def test_retry_notenough_failed(self) -> None:
         test_suite = self.generate_suite_instance()
         test_suite.set_fail_phase(fail_case_count=2)
         result = test_suite.case_results[0]
         result.runtime_data.retry = 1
-        asyncio.run(test_suite.start())
+        await test_suite.start()
         self.assertEqual(TestStatus.FAILED, result.status)
         self.assertEqual("failed: mock_ut1 failed", result.message)
         result = test_suite.case_results[1]
         self.assertEqual(TestStatus.PASSED, result.status)
         self.assertEqual("", result.message)
 
-    def test_attempt_ignore_failure(self) -> None:
+    async def test_attempt_ignore_failure(self) -> None:
         test_suite = self.generate_suite_instance()
         test_suite.set_fail_phase(fail_case_count=2)
         result = test_suite.case_results[0]
         result.runtime_data.ignore_failure = True
-        asyncio.run(test_suite.start())
+        await test_suite.start()
         self.assertEqual(TestStatus.ATTEMPTED, result.status)
         self.assertEqual("mock_ut1 failed", result.message)
         result = test_suite.case_results[1]
diff --git a/lisa/testselector.py b/lisa/testselector.py
index 1fdfe625c6..383fa3032a 100644
--- a/lisa/testselector.py
+++ b/lisa/testselector.py
@@ -2,8 +2,9 @@
 from functools import partial
 from typing import Callable, Dict, List, Mapping, Optional, Pattern, Set, Union, cast
 
+import lisa.testsuite
 from lisa import schema
-from lisa.testsuite import TestCaseMetadata, TestCaseRuntimeData, get_cases_metadata
+from lisa.testsuite import LisaTestMetadata, LisaTestRuntimeData
 from lisa.util import LisaException, constants, set_filtered_fields
 from lisa.util.logger import get_logger
 
@@ -12,20 +13,20 @@
 
 def select_testcases(
     filters: Optional[List[schema.TestCase]] = None,
-    init_cases: Optional[List[TestCaseMetadata]] = None,
-) -> List[TestCaseRuntimeData]:
+    init_cases: Optional[List[LisaTestMetadata]] = None,
+) -> List[LisaTestRuntimeData]:
     """
     based on filters to select test cases. If filters are None, return all cases.
     """
     log = _get_logger()
     if init_cases:
-        full_list: Dict[str, TestCaseMetadata] = dict()
+        full_list: Dict[str, LisaTestMetadata] = dict()
         for item in init_cases:
             full_list[item.full_name] = item
     else:
-        full_list = get_cases_metadata()
+        full_list = lisa.testsuite.lisa_tests_metadata
     if filters:
-        selected: Dict[str, TestCaseRuntimeData] = dict()
+        selected: Dict[str, LisaTestRuntimeData] = dict()
         force_included: Set[str] = set()
         force_excluded: Set[str] = set()
         for filter in filters:
@@ -35,7 +36,7 @@
             )
         else:
             log.debug(f"skip disabled rule: {filter}")
-        results: List[TestCaseRuntimeData] = []
+        results: List[LisaTestRuntimeData] = []
         for case in selected.values():
             times = case.times
             for index in range(times):
@@ -46,14 +47,14 @@
     else:
         results = []
         for metadata in full_list.values():
-            results.append(TestCaseRuntimeData(metadata))
+            results.append(LisaTestRuntimeData(metadata))
 
     log.info(f"selected cases count: {len(results)}")
     return results
 
 
 def _match_string(
-    case: Union[TestCaseRuntimeData, TestCaseMetadata],
+    case: Union[LisaTestRuntimeData, LisaTestMetadata],
     pattern: Pattern[str],
     attr_name: str,
 ) -> bool:
@@ -63,7 +64,7 @@
 
 
 def _match_priority(
-    case: Union[TestCaseRuntimeData, TestCaseMetadata], pattern: Union[int, List[int]]
+    case: Union[LisaTestRuntimeData, LisaTestMetadata], pattern: Union[int, List[int]]
 ) -> bool:
     priority = case.priority
     is_matched: bool = False
@@ -75,7 +76,7 @@
 
 
 def _match_tags(
-    case: Union[TestCaseRuntimeData, TestCaseMetadata],
+    case: Union[LisaTestRuntimeData, LisaTestMetadata],
     criteria_tags: Union[str, List[str]],
 ) -> bool:
     case_tags = case.tags
@@ -88,23 +89,23 @@
 
 
 def _match_cases(
-    candidates: Mapping[str, Union[TestCaseRuntimeData, TestCaseMetadata]],
-    patterns: List[Callable[[Union[TestCaseRuntimeData, TestCaseMetadata]], bool]],
-) -> Dict[str, TestCaseRuntimeData]:
-    changed_cases: Dict[str, TestCaseRuntimeData] = dict()
+    candidates: Mapping[str, Union[LisaTestRuntimeData, LisaTestMetadata]],
+    patterns: List[Callable[[Union[LisaTestRuntimeData, LisaTestMetadata]], bool]],
+) -> Dict[str, LisaTestRuntimeData]:
+    changed_cases: Dict[str, LisaTestRuntimeData] = dict()
 
     for candidate_name in candidates:
         candidate = candidates[candidate_name]
         is_matched = all(pattern(candidate) for pattern in patterns)
         if is_matched:
-            if isinstance(candidate, TestCaseMetadata):
-                candidate = TestCaseRuntimeData(candidate)
+            if isinstance(candidate, LisaTestMetadata):
+                candidate = LisaTestRuntimeData(candidate)
             changed_cases[candidate_name] = candidate
 
     return changed_cases
 
 
 def _apply_settings(
-    applied_case_data: TestCaseRuntimeData, case_runbook: schema.TestCase, action: str
+    applied_case_data: LisaTestRuntimeData, case_runbook: schema.TestCase, action: str
 ) -> None:
     fields = [
         constants.TESTCASE_TIMES,
@@ -141,16 +142,16 @@ def _force_check(
 
 def _apply_filter(  # noqa: C901
     case_runbook: schema.TestCase,
-    current_selected: Dict[str, TestCaseRuntimeData],
+    current_selected: Dict[str, LisaTestRuntimeData],
     force_included: Set[str],
     force_excluded: Set[str],
-    full_list: Dict[str, TestCaseMetadata],
-) -> Dict[str, TestCaseRuntimeData]:
+    full_list: Dict[str, LisaTestMetadata],
+) -> Dict[str, LisaTestRuntimeData]:
 
     # TODO: Reduce this function's complexity and remove the disabled warning.
log = _get_logger() # initialize criterias - patterns: List[Callable[[Union[TestCaseRuntimeData, TestCaseMetadata]], bool]] = [] + patterns: List[Callable[[Union[LisaTestRuntimeData, LisaTestMetadata]], bool]] = [] criterias_runbook = case_runbook.criteria assert criterias_runbook, "test case criteria cannot be None" criterias_runbook_dict = criterias_runbook.__dict__ @@ -181,7 +182,7 @@ def _apply_filter( # noqa: C901 raise LisaException(f"unknown criteria key: {runbook_key}") # match by select Action: - changed_cases: Dict[str, TestCaseRuntimeData] = dict() + changed_cases: Dict[str, LisaTestRuntimeData] = dict() is_force = case_runbook.select_action in [ constants.TESTCASE_SELECT_ACTION_FORCE_INCLUDE, constants.TESTCASE_SELECT_ACTION_FORCE_EXCLUDE, diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 6f550b793b..8097214736 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -1,8 +1,7 @@ from __future__ import annotations import unittest -from abc import ABCMeta -from dataclasses import dataclass +from dataclasses import dataclass, field from enum import Enum from functools import wraps from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union @@ -10,7 +9,6 @@ from retry.api import retry_call # type: ignore from lisa import notifier, schema, search_space -from lisa.action import Action, ActionStatus from lisa.environment import EnvironmentSpace from lisa.feature import Feature from lisa.operating_system import OperatingSystem @@ -26,8 +24,8 @@ "TestStatus", ["NOTRUN", "RUNNING", "FAILED", "PASSED", "SKIPPED", "ATTEMPTED"] ) -_all_suites: Dict[str, TestSuiteMetadata] = dict() -_all_cases: Dict[str, TestCaseMetadata] = dict() +lisa_test_cases_metadata: Dict[str, LisaTestCaseMetadata] = dict() +lisa_tests_metadata: Dict[str, LisaTestMetadata] = dict() class SkipTestCaseException(LisaException): @@ -42,9 +40,11 @@ class TestResultMessage(notifier.MessageBase): env: str = "" +# TODO: We’re shadowing `unittest.TestResult` and may just want to +# reuse it, especially if we’ve been inspired by it so much. @dataclass class TestResult: - runtime_data: TestCaseRuntimeData + runtime_data: LisaTestRuntimeData status: TestStatus = TestStatus.NOTRUN elapsed: float = 0 message: str = "" @@ -152,47 +152,41 @@ def simple_requirement( DEFAULT_REQUIREMENT = simple_requirement() -class TestSuiteMetadata: - def __init__( - self, - area: str, - category: str, - description: str, - tags: List[str], - name: str = "", - requirement: TestCaseRequirement = DEFAULT_REQUIREMENT, - ) -> None: - self.name = name - self.cases: List[TestCaseMetadata] = [] - - self.area = area - self.category = category - if tags: - self.tags = tags - else: - self.tags = [] - self.description = description - self.requirement = requirement - - def __call__(self, test_class: Type[TestSuite]) -> Callable[..., object]: +@dataclass +class LisaTestCaseMetadata: + """This decorator supplies metadata for each case of tests.""" + + area: str + category: str + # TODO: Each description should be from the docstring instead. + description: str + tags: List[str] = field(default_factory=list) + # TODO: The name should be the test case’s class name. 
+    name: str = ""
+    requirement: TestCaseRequirement = DEFAULT_REQUIREMENT
+    cases: List[LisaTestMetadata] = field(default_factory=list)
+
+    def __call__(self, test_class: Type[LisaTestCase]) -> Callable[..., object]:
         self.test_class = test_class
         if not self.name:
             self.name = test_class.__name__
-        _add_suite_metadata(self)
+        _add_lisa_test_case_metadata(self)

         @wraps(self.test_class)
         def wrapper(
-            test_class: Type[TestSuite],
+            test_class: Type[LisaTestCase],
             environment: Environment,
             cases: List[TestResult],
-            metadata: TestSuiteMetadata,
-        ) -> TestSuite:
+            metadata: LisaTestCaseMetadata,
+        ) -> LisaTestCase:
             return test_class(environment, cases, metadata)

         return wrapper


-class TestCaseMetadata:
+class LisaTestMetadata:
+    """This decorator supplies metadata for each test."""
+
     def __init__(
         self,
         description: str,
@@ -200,21 +194,29 @@ def __init__(
         requirement: Optional[TestCaseRequirement] = None,
     ) -> None:
         self.priority = priority
+        # TODO: Each test description should be from its docstring,
+        # not here.
         self.description = description
+        # TODO: Because this class is pseudo-inherited through
+        # attribute abuse, this optionally defined attribute causes a
+        # lot of typing headaches.
         if requirement:
             self.requirement = requirement

+    # TODO: This implies that we should actually subclass (inherit)
+    # `LisaTestCaseMetadata`, with some way of instantiating this class
+    # from an instance of that class.
     def __getattr__(self, key: str) -> Any:
         # inherit all attributes of test suite
-        assert self.suite, "suite is not set before use metadata"
-        return getattr(self.suite, key)
+        assert self.case, "case is not set before using metadata"
+        return getattr(self.case, key)

     def __call__(self, func: Callable[..., None]) -> Callable[..., None]:
         self.name = func.__name__
         self.full_name = func.__qualname__
         self._func = func
-        _add_case_metadata(self)
+        _add_lisa_test_metadata(self)

         @wraps(self._func)
         def wrapper(*args: object) -> None:
@@ -222,29 +224,34 @@ def wrapper(*args: object) -> None:

         return wrapper

-    def set_suite(self, suite: TestSuiteMetadata) -> None:
-        self.suite: TestSuiteMetadata = suite
-
-
-class TestCaseRuntimeData:
-    def __init__(self, metadata: TestCaseMetadata):
-        self.metadata = metadata
+    # TODO: Currently set by `_add_test_to_case`, want to set
+    # automatically here.
+    def set_case(self, case: LisaTestCaseMetadata) -> None:
+        self.case: LisaTestCaseMetadata = case

-        # all runtime setting fields
-        self.select_action: str = ""
-        self.times: int = 1
-        self.retry: int = 0
-        self.use_new_environment: bool = False
-        self.ignore_failure: bool = False
-        self.environment_name: str = ""

+@dataclass
+class LisaTestRuntimeData:
+    """This adds runtime data to tests."""
+
+    metadata: LisaTestMetadata
+    select_action: str = ""
+    times: int = 1
+    retry: int = 0
+    use_new_environment: bool = False
+    ignore_failure: bool = False
+    environment_name: str = ""
+
+    # TODO: This implies that we should actually subclass (inherit)
+    # `LisaTestMetadata`, with some way of instantiating this class
+    # from an instance of that class.
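The TODO above suggests replacing the `__getattr__` pseudo-inheritance with real subclassing. A minimal sketch of what that could look like, using hypothetical `Metadata`/`RuntimeData` stand-ins rather than the actual LISA classes:

```python
# Sketch of the refactor the TODOs suggest: RuntimeData subclasses
# Metadata and is built from an existing instance, so attribute access
# is ordinary inheritance instead of `__getattr__` delegation.
# All names here are hypothetical.
from __future__ import annotations

from dataclasses import dataclass, fields


@dataclass
class Metadata:
    description: str = ""
    priority: int = 2


@dataclass
class RuntimeData(Metadata):
    times: int = 1
    retry: int = 0

    @classmethod
    def from_metadata(cls, metadata: Metadata) -> RuntimeData:
        # Copy only the fields declared on Metadata; the runtime-only
        # fields keep their defaults.
        base = {f.name: getattr(metadata, f.name) for f in fields(Metadata)}
        return cls(**base)


runtime = RuntimeData.from_metadata(Metadata(description="demo", priority=1))
assert runtime.priority == 1 and runtime.retry == 0
```

With this shape, type checkers see every inherited attribute, which removes the "typing headaches" the TODO complains about.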
def __getattr__(self, key: str) -> Any: # inherit all attributes of metadata assert self.metadata return getattr(self.metadata, key) - def clone(self) -> TestCaseRuntimeData: - cloned = TestCaseRuntimeData(self.metadata) + def clone(self) -> LisaTestRuntimeData: + cloned = LisaTestRuntimeData(self.metadata) fields = [ constants.TESTCASE_SELECT_ACTION, constants.TESTCASE_TIMES, @@ -257,12 +264,19 @@ def clone(self) -> TestCaseRuntimeData: return cloned -class TestSuite(unittest.TestCase, Action, metaclass=ABCMeta): +class LisaTestCase(unittest.TestCase): + """This class wraps the unittest module's 'TestCase' class. + + It should be used in the same way, where non-abstract methods + represent unit tests, usually prefixed with `test_`. + + """ + def __init__( self, environment: Environment, case_results: List[TestResult], - metadata: TestSuiteMetadata, + metadata: LisaTestCaseMetadata, ) -> None: super().__init__() self.environment = environment @@ -284,8 +298,11 @@ def before_case(self) -> None: def after_case(self) -> None: pass + # TODO: This entire function is one long string of side-effects. + # We need to reduce this function's complexity to remove the + # disabled warning, and not rely solely on side effects. Perhaps + # we actually just want to reuse `unittest.TestCase.run()`? async def start(self) -> None: # noqa: C901 - # TODO: Reduce this function's complexity and remove the disabled warning. suite_error_message = "" is_suite_continue = True @@ -370,7 +387,6 @@ async def start(self) -> None: # noqa: C901 if self._should_stop: self.log.info("received stop message, stop run") - self.set_status(ActionStatus.STOPPED) break self.log = suite_log @@ -382,40 +398,29 @@ async def start(self) -> None: # noqa: C901 self.log.error("after_suite failed", exc_info=identifier) self.log.debug(f"after_suite end with {timer}") - async def stop(self) -> None: - self.set_status(ActionStatus.STOPPING) - self._should_stop = True - - async def close(self) -> None: - pass - - -def get_suites_metadata() -> Dict[str, TestSuiteMetadata]: - return _all_suites - -def get_cases_metadata() -> Dict[str, TestCaseMetadata]: - return _all_cases +def _add_lisa_test_case_metadata(metadata: LisaTestCaseMetadata) -> None: + """Add the metadata to the test case and matching tests. - -def _add_suite_metadata(metadata: TestSuiteMetadata) -> None: + Errors if there is a collision. + """ + # TODO: We should only use the class name as the key. if metadata.name: key = metadata.name else: key = metadata.test_class.__name__ - exist_metadata = _all_suites.get(key) - if exist_metadata is None: - _all_suites[key] = metadata + if key not in lisa_test_cases_metadata: + lisa_test_cases_metadata[key] = metadata else: raise LisaException( f"duplicate test class name: {key}, " - f"new: [{metadata}], exists: [{exist_metadata}]" + f"'{metadata}' would replace '{lisa_test_cases_metadata[key]}'" ) class_prefix = f"{key}." - for test_case in _all_cases.values(): - if test_case.full_name.startswith(class_prefix): - _add_case_to_suite(metadata, test_case) + for test in lisa_tests_metadata.values(): + if test.full_name.startswith(class_prefix): + _add_test_to_case(metadata, test) log = get_logger("init", "test") log.info( f"registered test suite '{key}' " @@ -423,28 +428,33 @@ def _add_suite_metadata(metadata: TestSuiteMetadata) -> None: ) -def _add_case_metadata(metadata: TestCaseMetadata) -> None: +def _add_lisa_test_metadata(metadata: LisaTestMetadata) -> None: + """Add the metadata to the test itself. + + Errors if there is a collision. 
Also adds the test to the test + case. - full_name = metadata.full_name - if _all_cases.get(full_name) is None: - _all_cases[full_name] = metadata + """ + + if metadata.full_name not in lisa_tests_metadata: + lisa_tests_metadata[metadata.full_name] = metadata else: - raise LisaException(f"duplicate test class name: {full_name}") + raise LisaException(f"duplicate test class name: {metadata.full_name}") # this should be None in current observation. # the methods are loadded prior to test class # in case logic is changed, so keep this logic # to make two collection consistent. - class_name = full_name.split(".")[0] - test_suite = _all_suites.get(class_name) - if test_suite: + prefix = metadata.full_name.split(".")[0] + if prefix in lisa_test_cases_metadata: log = get_logger("init", "test") - log.debug(f"add case '{metadata.name}' to suite '{test_suite.name}'") - _add_case_to_suite(test_suite, metadata) + log.debug( + f"add case '{metadata.name}' to " + f"suite '{lisa_test_cases_metadata[prefix].name}'" + ) + _add_test_to_case(lisa_test_cases_metadata[prefix], metadata) -def _add_case_to_suite( - test_suite: TestSuiteMetadata, test_case: TestCaseMetadata -) -> None: - test_case.suite = test_suite - test_suite.cases.append(test_case) +def _add_test_to_case(case: LisaTestCaseMetadata, test: LisaTestMetadata) -> None: + test.case = case + case.cases.append(test) diff --git a/lisa/util/__init__.py b/lisa/util/__init__.py index b67d1c7212..861b477043 100644 --- a/lisa/util/__init__.py +++ b/lisa/util/__init__.py @@ -67,6 +67,7 @@ def get_public_key_data(private_key_file_path: str) -> str: return public_key_data +# TODO: Why do we need this? def fields_to_dict(src: Any, fields: Iterable[str]) -> Dict[str, Any]: """ copy field values form src to dest, if it's not None @@ -82,6 +83,7 @@ def fields_to_dict(src: Any, fields: Iterable[str]) -> Dict[str, Any]: return result +# TODO: Why do we need this? def set_filtered_fields(src: Any, dest: Any, fields: List[str]) -> None: """ copy field values form src to dest, if it's not None diff --git a/lisa/util/process.py b/lisa/util/process.py index 862b370d2d..ca535bb886 100644 --- a/lisa/util/process.py +++ b/lisa/util/process.py @@ -1,10 +1,12 @@ import logging import pathlib import shlex +import subprocess import time from dataclasses import dataclass from typing import Dict, Optional +import spur # type: ignore from spur.errors import NoSuchCommandError # type: ignore from lisa.util.logger import Logger, LogWriter, get_logger @@ -23,6 +25,7 @@ def __str__(self) -> str: return self.stdout +# TODO: So much cleanup here. It was using duck typing. 
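The duck-typing cleanup flagged in the TODO above (and carried out in the `Process` hunks below) replaces one attribute that held either a live process or a finished result with two `Optional` fields. A standalone sketch of that state-splitting idea; all names are hypothetical, with `FakeProcess` standing in for `spur`'s process type:

```python
# Sketch: keep the live process and the cached result in separate
# Optional fields so readers and type checkers can tell the two
# states apart. Hypothetical names, not LISA's real classes.
from dataclasses import dataclass
from typing import Optional


@dataclass
class Result:
    stdout: str
    exit_code: int


class FakeProcess:
    def wait_for_result(self) -> Result:
        return Result("ok", 0)


class Runner:
    def __init__(self) -> None:
        self._process: Optional[FakeProcess] = None
        self._result: Optional[Result] = None

    def start(self) -> None:
        self._process = FakeProcess()

    def wait_result(self) -> Result:
        if self._result is None:
            assert self._process, "start() must be called first"
            # Cache the result so repeated queries don't re-wait,
            # then drop the process reference.
            self._result = self._process.wait_for_result()
            self._process = None
        return self._result


runner = Runner()
runner.start()
assert runner.wait_result().exit_code == 0
```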
class Process: def __init__( self, @@ -36,6 +39,8 @@ def __init__( self._is_linux = shell.is_linux self._running: bool = False self._log = get_logger("cmd", id_, parent=parent_logger) + self._process: Optional[spur.local.LocalProcess] = None + self._result: Optional[ExecutableResult] = None def start( self, @@ -107,7 +112,7 @@ def start( except (FileNotFoundError, NoSuchCommandError) as identifier: # FileNotFoundError: not found command on Windows # NoSuchCommandError: not found command on remote Linux - self._process = ExecutableResult( + self._result = ExecutableResult( "", identifier.strerror, 1, self._timer.elapsed() ) self._log.debug(f"not found command: {identifier}") @@ -123,30 +128,40 @@ def wait_result(self, timeout: float = 600) -> ExecutableResult: self._log.warning(f"timeout in {timeout} sec, and killed") self.kill() - if not isinstance(self._process, ExecutableResult): + if self._result is None: + # if not isinstance(self._process, ExecutableResult): assert self._process proces_result = self._process.wait_for_result() self.stdout_writer.close() self.stderr_writer.close() - result: ExecutableResult = ExecutableResult( + # cache for future queries, in case it's queried twice. + self._result = ExecutableResult( proces_result.output.strip(), proces_result.stderr_output.strip(), proces_result.return_code, self._timer.elapsed(), ) - # cache for future queries, in case it's queried twice. - self._process = result - else: - result = self._process + # TODO: The spur library is not very good and leaves open + # resources (probably due to it starting the process with + # `bufsize=0`). We need to replace it, but for now, we + # manually close the leaks. + popen: subprocess.Popen[str] = self._process._subprocess + if popen.stdin: + popen.stdin.close() + if popen.stdout: + popen.stdout.close() + if popen.stderr: + popen.stderr.close() + self._process = None self._log.debug(f"waited with {self._timer}") - return result + return self._result def kill(self) -> None: - if self._process and not isinstance(self._process, ExecutableResult): + if self._process: self._process.send_signal(9) def is_running(self) -> bool: - if self._running: + if self._running and self._process: self._running = self._process.is_running() return self._running diff --git a/lisa/util/shell.py b/lisa/util/shell.py index b51f7ab6a7..f695e8da6d 100644 --- a/lisa/util/shell.py +++ b/lisa/util/shell.py @@ -177,7 +177,7 @@ def spawn( encoding: str = "utf-8", use_pty: bool = False, allow_error: bool = True, - ) -> Any: + ) -> spur.ssh.SshProcess: self.initialize() assert self._inner_shell return self._inner_shell.spawn( @@ -307,7 +307,7 @@ def spawn( encoding: str = "utf-8", use_pty: bool = False, allow_error: bool = False, - ) -> Any: + ) -> spur.local.LocalProcess: return self._inner_shell.spawn( command=command, update_env=update_env, diff --git a/testsuites/basic/provisioning.py b/testsuites/basic/provisioning.py index 7e18ddef3b..5ac99864a5 100644 --- a/testsuites/basic/provisioning.py +++ b/testsuites/basic/provisioning.py @@ -1,11 +1,11 @@ -from lisa import TestCaseMetadata, TestSuite, TestSuiteMetadata +from lisa import LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata from lisa.features import StartStop from lisa.testsuite import simple_requirement from lisa.tools import Dmesg from lisa.util.perf_timer import create_timer -@TestSuiteMetadata( +@LisaTestCaseMetadata( area="provisioning", category="functional", description=""" @@ -13,8 +13,8 @@ """, tags=[], ) -class Provisioning(TestSuite): - @TestCaseMetadata( 
+class Provisioning(LisaTestCase):
+    @LisaTestMetadata(
         description="""
         this test restarts a node and compares dmesg output.
         the case fails on any kernel panic
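One note on the unit-test hunks earlier in this diff: the `test_*` methods became `async def` and now `await test_suite.start()` directly, but a plain `unittest.TestCase` runner never awaits coroutine test methods. The standard-library option since Python 3.8 is `unittest.IsolatedAsyncioTestCase`. The sketch below is illustrative only, with a hypothetical `MockSuite` rather than LISA's actual runner wiring:

```python
# Sketch: running `async def` tests under unittest. The async-aware
# base class drives its own event loop per test, so the test body can
# `await` directly. `MockSuite` is a hypothetical stand-in.
import asyncio
import unittest


class MockSuite:
    async def start(self) -> str:
        await asyncio.sleep(0)  # stand-in for real async work
        return "passed"


class SuiteTests(unittest.IsolatedAsyncioTestCase):
    async def test_start(self) -> None:
        self.assertEqual("passed", await MockSuite().start())


if __name__ == "__main__":
    unittest.main()
```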