diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 41bd025527..353e2c6af3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -19,9 +19,14 @@ short summary of the most important parts: such as `id_`. * Always use `self` for the first argument to instance methods. * Always use `cls` for the first argument to class methods. -* Use one leading underscore only for non-public methods and instance variables, - such as `_data`. +* One leading underscore like `_data` is for non-public methods and instance + variables, and it can be used by sub-classes. If it won't be used in + sub-classes, use a name like `__data`. +* If there is a pair of `get_x` and `set_x` methods, they should instead be a + proper property, which is easy to do with the built-in `@property` decorator. * Constants should be `CAPITALIZED_SNAKE_CASE`. +* When importing a function, try to avoid renaming it with `import as` because + it introduces cognitive overhead to track yet another name. When in doubt, adhere to existing conventions, or check the style guide. @@ -252,6 +257,18 @@ Python world. If you make it through even some of these guides, you will be well on your way to being a “Pythonista” (a Python developer) writing “Pythonic” (canonically correct Python) code left and right. +### Async IO + +As of Python 3.5, the Async IO pattern found in languages such as C# and Go is +available through the keywords `async` and `await`, along with the Python module +`asyncio`. Please read [Async IO in Python: A Complete +Walkthrough](https://realpython.com/async-io-python/) to understand at a high +level how asynchronous programming works. As of Python 3.7, one major “gotcha” +is that `asyncio.run(...)` should be used [exactly once in +`main`](https://docs.python.org/3/library/asyncio-task.html), as it starts the +event loop. Everything else should be a coroutine or task which the event loop +schedules. + ## Future Sections Just a collection of reminders for the author to expand on later. 
diff --git a/Makefile b/Makefile index 11f5b1368b..d5dbe13404 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # This Makefile simply automates all our tasks. Its use is optional. -all: setup run check +all: setup run test check # Install Python packages setup: @@ -8,11 +8,11 @@ setup: # Run LISAv3 run: - @poetry run python lisa/main.py --debug + @poetry run python -X dev lisa/main.py --debug # Run unit tests test: - @poetry run python -m unittest discover lisa + @poetry run python -X dev -m unittest discover -v lisa # Generate coverage report (slow, reruns LISAv3 and tests) coverage: diff --git a/lisa/action.py b/lisa/action.py index e6a5b68d75..105b9f8901 100644 --- a/lisa/action.py +++ b/lisa/action.py @@ -47,7 +47,7 @@ def __init__(self) -> None: @abstractmethod async def start(self) -> None: self.__is_started = True - self.set_status(ActionStatus.RUNNING) + self.status = ActionStatus.RUNNING @abstractmethod async def stop(self) -> None: @@ -57,25 +57,28 @@ async def stop(self) -> None: async def close(self) -> None: self.validate_started() - def get_status(self) -> ActionStatus: + @property + def status(self) -> ActionStatus: + """The Action's current state, for example, 'UNINITIALIZED'.""" return self.__status - def set_status(self, status: ActionStatus) -> None: - if self.__status != status: + @status.setter + def status(self, value: ActionStatus) -> None: + if self.__status != value: self.log.debug( f"{self.name} status changed from {self.__status.name} " - f"to {status.name} with {self.__timer}" + f"to {value.name} with {self.__timer}" ) self.__total += self.__timer.elapsed() message = ActionMessage( elapsed=self.__timer.elapsed(), sub_type=self.name, - status=status, + status=value, total_elapsed=self.__total, ) notifier.notify(message=message) self.__timer = create_timer() - self.__status = status + self.__status = value def validate_started(self) -> None: if not self.__is_started: diff --git a/lisa/commands.py b/lisa/commands.py index 
dd4a2dd4c2..acb09452e4 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -1,11 +1,10 @@ -import asyncio import functools from argparse import Namespace from typing import Iterable, Optional, cast from lisa import notifier -from lisa.lisarunner import LisaRunner -from lisa.parameter_parser.runbook import load as load_runbook +from lisa.parameter_parser.runbook import load_runbook +from lisa.runner import Runner from lisa.testselector import select_testcases from lisa.testsuite import TestCaseRuntimeData from lisa.util import LisaException, constants @@ -14,15 +13,14 @@ _get_init_logger = functools.partial(get_logger, "init") -def run(args: Namespace) -> int: - runbook = load_runbook(args) +async def run(args: Namespace) -> int: + runbook = load_runbook(args.runbook, args.variables) if runbook.notifier: notifier.initialize(runbooks=runbook.notifier) try: - runner = LisaRunner(runbook) - awaitable = runner.start() - asyncio.run(awaitable) + runner = Runner(runbook) + await runner.start() finally: notifier.finalize() @@ -30,13 +28,13 @@ def run(args: Namespace) -> int: # check runbook -def check(args: Namespace) -> int: - load_runbook(args) +async def check(args: Namespace) -> int: + load_runbook(args.runbook, args.variables) return 0 -def list_start(args: Namespace) -> int: - runbook = load_runbook(args) +async def list_start(args: Namespace) -> int: + runbook = load_runbook(args.runbook, args.variables) list_all = cast(Optional[bool], args.list_all) log = _get_init_logger("list") if args.type == constants.LIST_CASE: diff --git a/lisa/main.py b/lisa/main.py index 3088615688..5402da681e 100644 --- a/lisa/main.py +++ b/lisa/main.py @@ -1,3 +1,4 @@ +import asyncio import sys import traceback from datetime import datetime @@ -23,7 +24,7 @@ def create_run_path(root_path: Path) -> Path: return run_path -def main() -> int: +async def main() -> int: total_timer = create_timer() log = get_logger() exit_code: int = 0 @@ -57,7 +58,7 @@ def main() -> int: 
log.debug(f"command line args: {sys.argv}") log.info(f"run local path: {runtime_root}") - exit_code = args.func(args) + exit_code = await args.func(args) assert isinstance(exit_code, int), f"actual: {type(exit_code)}" finally: log.info(f"completed in {total_timer}") @@ -68,7 +69,7 @@ def main() -> int: if __name__ == "__main__": exit_code = 0 try: - exit_code = main() + exit_code = asyncio.run(main()) except Exception as exception: exit_code = -1 log = get_logger() diff --git a/lisa/parameter_parser/argparser.py b/lisa/parameter_parser/argparser.py index 5f389e1c65..5478fc23a4 100644 --- a/lisa/parameter_parser/argparser.py +++ b/lisa/parameter_parser/argparser.py @@ -1,4 +1,5 @@ from argparse import ArgumentParser, Namespace +from pathlib import Path from lisa import commands from lisa.util import constants @@ -8,10 +9,10 @@ def support_runbook(parser: ArgumentParser, required: bool = True) -> None: parser.add_argument( "--runbook", "-r", + type=Path, required=required, - dest="runbook", - help="runbook of this run", - default="examples/runbook/hello_world.yml", + help="Path to the runbook", + default=Path("examples/runbook/hello_world.yml").absolute(), ) @@ -21,7 +22,7 @@ def support_debug(parser: ArgumentParser) -> None: "-d", dest="debug", action="store_true", - help="set log level to debug", + help="Set log level to debug", ) @@ -31,38 +32,39 @@ def support_variable(parser: ArgumentParser) -> None: "-v", dest="variables", action="append", - help="define variable from command line. format is NAME:VALUE", + help="Define one or more variables with 'NAME:VALUE'", ) def parse_args() -> Namespace: - # parse args run function. + """This wraps Python's 'ArgumentParser' to setup our CLI.""" parser = ArgumentParser() support_debug(parser) support_runbook(parser, required=False) support_variable(parser) + # Default to ‘run’ when no subcommand is given. 
+ parser.set_defaults(func=commands.run) + subparsers = parser.add_subparsers(dest="cmd", required=False) + + # Entry point for ‘run’. run_parser = subparsers.add_parser("run") run_parser.set_defaults(func=commands.run) - support_runbook(run_parser) - support_variable(run_parser) + # Entry point for ‘list-start’. list_parser = subparsers.add_parser(constants.LIST) list_parser.set_defaults(func=commands.list_start) list_parser.add_argument("--type", "-t", dest="type", choices=["case"]) list_parser.add_argument("--all", "-a", dest="list_all", action="store_true") - support_runbook(list_parser) - support_variable(list_parser) + # Entry point for ‘check’. check_parser = subparsers.add_parser("check") check_parser.set_defaults(func=commands.check) - support_runbook(check_parser) - support_variable(check_parser) - - parser.set_defaults(func=commands.run) for sub_parser in subparsers.choices.values(): + support_runbook(sub_parser) + support_variable(sub_parser) support_debug(sub_parser) return parser.parse_args() diff --git a/lisa/parameter_parser/runbook.py b/lisa/parameter_parser/runbook.py index fc9f2cc016..8c357f0b73 100644 --- a/lisa/parameter_parser/runbook.py +++ b/lisa/parameter_parser/runbook.py @@ -1,7 +1,6 @@ -from argparse import Namespace from functools import partial from pathlib import Path, PurePath -from typing import Any, Dict, Optional, cast +from typing import Any, Dict, List, Optional, cast import yaml from marshmallow import Schema @@ -58,13 +57,13 @@ def validate_data(data: Any) -> schema.Runbook: return runbook -def load(args: Namespace) -> schema.Runbook: +def load_runbook(path: Path, user_variables: Optional[List[str]]) -> schema.Runbook: + """Loads a runbook given a user-supplied path and set of variables.""" # make sure extension in lisa is loaded base_module_path = Path(__file__).parent.parent import_module(base_module_path, logDetails=False) # merge all parameters - path = Path(args.runbook).absolute() data = _load_data(path) 
constants.RUNBOOK_PATH = path.parent @@ -73,14 +72,14 @@ def load(args: Namespace) -> schema.Runbook: extends_runbook = schema.Extension.schema().load( # type:ignore data[constants.EXTENSION] ) - _load_extends(path.parent, extends_runbook) + _load_extends(constants.RUNBOOK_PATH, extends_runbook) # load arg variables variables: Dict[str, Any] = dict() + # TODO: This is all side-effect driven and needs to be fixed. load_from_runbook(data, variables) load_from_env(variables) - if hasattr(args, "variables"): - load_from_pairs(args.variables, variables) + load_from_pairs(user_variables, variables) # replace variables: data = replace_variables(data, variables) diff --git a/lisa/lisarunner.py b/lisa/runner.py similarity index 93% rename from lisa/lisarunner.py rename to lisa/runner.py index 378983853b..a2c06a7ff5 100644 --- a/lisa/lisarunner.py +++ b/lisa/runner.py @@ -7,7 +7,6 @@ from lisa.testselector import select_testcases from lisa.testsuite import ( TestCaseRequirement, - TestCaseRuntimeData, TestResult, TestStatus, TestSuite, @@ -16,7 +15,7 @@ from lisa.util.logger import get_logger -class LisaRunner(Action): +class Runner(Action): def __init__(self, runbook: schema.Runbook) -> None: super().__init__() self.exit_code: int = 0 @@ -24,16 +23,20 @@ def __init__(self, runbook: schema.Runbook) -> None: self._runbook = runbook self._log = get_logger("runner") + # TODO: This entire function is one long string of side-effects. + # We need to reduce this function's complexity to remove the + # disabled warning, and not rely solely on side effects. async def start(self) -> None: # noqa: C901 - # TODO: Reduce this function's complexity and remove the disabled warning. 
await super().start() - self.set_status(ActionStatus.RUNNING) + self.status = ActionStatus.RUNNING # select test cases selected_test_cases = select_testcases(self._runbook.testcase) # create test results - selected_test_results = self._create_test_results(selected_test_cases) + selected_test_results = [ + TestResult(runtime_data=case) for case in selected_test_cases + ] # load predefined environments candidate_environments = load_environments(self._runbook.environment) @@ -169,12 +172,13 @@ async def start(self) -> None: # noqa: C901 continue self._log.info(f" {key.name:<9}: {count}") - self.set_status(ActionStatus.SUCCESS) + self.status = ActionStatus.SUCCESS # pass failed count to exit code self.exit_code = result_count_dict.get(TestStatus.FAILED, 0) # for UT testability + self._latest_platform = platform self._latest_test_results = selected_test_results async def stop(self) -> None: @@ -198,14 +202,6 @@ async def _run_suite( result.environment = environment await test_suite.start() - def _create_test_results( - self, cases: List[TestCaseRuntimeData] - ) -> List[TestResult]: - test_results: List[TestResult] = [] - for x in cases: - test_results.append(TestResult(runtime_data=x)) - return test_results - def _merge_test_requirements( self, test_results: List[TestResult], diff --git a/lisa/tests/test_platform.py b/lisa/tests/test_platform.py index f931e04f62..18116e9db8 100644 --- a/lisa/tests/test_platform.py +++ b/lisa/tests/test_platform.py @@ -1,6 +1,9 @@ -from typing import List, Type +from dataclasses import dataclass, field +from typing import Any, List, Type from unittest.case import TestCase +from dataclasses_json import LetterCase, dataclass_json # type: ignore + from lisa import schema from lisa.environment import Environment, Environments, load_environments from lisa.feature import Feature @@ -9,28 +12,28 @@ from lisa.util import LisaException, constants from lisa.util.logger import Logger -# for other UT to set value -return_prepared = True 
-deploy_success = True -deploy_is_ready = True -wait_more_resource_error = False -prepared_envs: List[str] = [] -deployed_envs: List[str] = [] -deleted_envs: List[str] = [] + +@dataclass +class MockPlatformTestData: + prepared_envs: List[str] = field(default_factory=list) + deployed_envs: List[str] = field(default_factory=list) + deleted_envs: List[str] = field(default_factory=list) + + +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass +class MockPlatformSchema: + # for other UT to set value + return_prepared: bool = True + deploy_success: bool = True + deploy_is_ready: bool = True + wait_more_resource_error: bool = False class MockPlatform(Platform): def __init__(self, runbook: schema.Platform) -> None: super().__init__(runbook=runbook) - prepared_envs.clear() - deployed_envs.clear() - deleted_envs.clear() - self.set_test_config( - return_prepared=return_prepared, - deploy_success=deploy_success, - deploy_is_ready=deploy_is_ready, - wait_more_resource_error=wait_more_resource_error, - ) + self.test_data = MockPlatformTestData() @classmethod def type_name(cls) -> str: @@ -47,39 +50,45 @@ def set_test_config( deploy_is_ready: bool = True, wait_more_resource_error: bool = False, ) -> None: - self.return_prepared = return_prepared - self.deploy_success = deploy_success - self.deploy_is_ready = deploy_is_ready - self.wait_more_resource_error = wait_more_resource_error + self.initialize() + self._mock_runbook.return_prepared = return_prepared + self._mock_runbook.deploy_success = deploy_success + self._mock_runbook.deploy_is_ready = deploy_is_ready + self._mock_runbook.wait_more_resource_error = wait_more_resource_error + + def _initialize(self, *args: Any, **kwargs: Any) -> None: + self._mock_runbook: MockPlatformSchema = self._runbook.get_extended_runbook( + MockPlatformSchema, constants.PLATFORM_MOCK + ) def _prepare_environment(self, environment: Environment, log: Logger) -> bool: - prepared_envs.append(environment.name) + 
self.test_data.prepared_envs.append(environment.name) requirements = environment.runbook.nodes_requirement - if self.return_prepared and requirements: + if self._mock_runbook.return_prepared and requirements: min_capabilities: List[schema.NodeSpace] = [] for node_space in requirements: min_capabilities.append(node_space.generate_min_capability(node_space)) environment.runbook.nodes_requirement = min_capabilities - return self.return_prepared + return self._mock_runbook.return_prepared def _deploy_environment(self, environment: Environment, log: Logger) -> None: - if self.wait_more_resource_error: + if self._mock_runbook.wait_more_resource_error: raise WaitMoreResourceError("wait more resource") - if not self.deploy_success: + if not self._mock_runbook.deploy_success: raise LisaException("mock deploy failed") - if self.return_prepared and environment.runbook.nodes_requirement: + if self._mock_runbook.return_prepared and environment.runbook.nodes_requirement: requirements = environment.runbook.nodes_requirement for node_space in requirements: environment.nodes.from_requirement(node_requirement=node_space) for node in environment.nodes.list(): # prevent real calls node._node_information_hooks.clear() - deployed_envs.append(environment.name) + self.test_data.deployed_envs.append(environment.name) environment._is_initialized = True - environment.is_ready = self.deploy_is_ready + environment.is_ready = self._mock_runbook.deploy_is_ready def _delete_environment(self, environment: Environment, log: Logger) -> None: - deleted_envs.append(environment.name) + self.test_data.deleted_envs.append(environment.name) self.delete_called = True diff --git a/lisa/tests/test_lisarunner.py b/lisa/tests/test_runner.py similarity index 79% rename from lisa/tests/test_lisarunner.py rename to lisa/tests/test_runner.py index 8182138454..6d43e1a995 100644 --- a/lisa/tests/test_lisarunner.py +++ b/lisa/tests/test_runner.py @@ -1,13 +1,11 @@ -import asyncio -from typing import List, Optional 
-from unittest.case import TestCase +from typing import List, Optional, cast +from unittest import IsolatedAsyncioTestCase from lisa import schema from lisa.environment import load_environments -from lisa.lisarunner import LisaRunner +from lisa.runner import Runner from lisa.tests import test_platform, test_testsuite from lisa.tests.test_environment import generate_runbook as generate_env_runbook -from lisa.tests.test_platform import deleted_envs, deployed_envs, prepared_envs from lisa.tests.test_testsuite import ( cleanup_cases_metadata, generate_cases_metadata, @@ -17,13 +15,20 @@ from lisa.util import constants -def generate_lisarunner( - env_runbook: Optional[schema.EnvironmentRoot] = None, case_use_new_env: bool = False -) -> LisaRunner: +def generate_runner( + env_runbook: Optional[schema.EnvironmentRoot] = None, + case_use_new_env: bool = False, + platform_schema: Optional[test_platform.MockPlatformSchema] = None, +) -> Runner: + platform_runbook = schema.Platform( + type=constants.PLATFORM_MOCK, admin_password="do-not-use" + ) + if platform_schema: + platform_runbook.extended_schemas = { + constants.PLATFORM_MOCK: platform_schema.to_dict() # type:ignore + } runbook = schema.Runbook( - platform=[ - schema.Platform(type=constants.PLATFORM_MOCK, admin_password="not use it") - ], + platform=[platform_runbook], testcase=[ schema.TestCase( criteria=schema.Criteria(priority=[0, 1, 2]), @@ -33,18 +38,14 @@ def generate_lisarunner( ) if env_runbook: runbook.environment = env_runbook - runner = LisaRunner(runbook) + runner = Runner(runbook) return runner -class LisaRunnerTestCase(TestCase): +class RunnerTestCase(IsolatedAsyncioTestCase): def tearDown(self) -> None: - cleanup_cases_metadata() - test_platform.return_prepared = True - test_platform.deploy_is_ready = True - test_platform.deploy_success = True - test_platform.wait_more_resource_error = False + cleanup_cases_metadata() # Necessary side effects! 
def test_merge_req_create_on_new(self) -> None: # if no predefined envs, can generate from requirement @@ -54,17 +55,17 @@ def test_merge_req_create_on_new(self) -> None: [], [x for x in envs], ) - runner = generate_lisarunner(None) + runner = generate_runner(None) test_results = generate_cases_result() runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - # 3 cases create 3 envs + # 3 cases create 3 environments. self.assertListEqual( ["generated_0", "generated_1", "generated_2"], - [x for x in envs], + list(envs), ) self.verify_test_results( expected_envs=["", "", ""], @@ -80,21 +81,21 @@ def test_merge_req_run_not_create_on_equal(self) -> None: envs = load_environments(env_runbook) self.assertListEqual( ["customized_0"], - [x for x in envs], + list(envs), ) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) test_results = generate_cases_result() runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - - # 3 cases created only two req, as simple req meets on customized_0 + # 3 cases created only two required environments, as the + simple requirement was met by customized_0. 
self.assertListEqual( ["customized_0", "generated_1", "generated_2"], - [x for x in envs], + list(envs), ) self.assertListEqual( [TestStatus.NOTRUN, TestStatus.NOTRUN, TestStatus.NOTRUN], @@ -109,9 +110,9 @@ def test_merge_req_create_on_use_new(self) -> None: envs = load_environments(env_runbook) self.assertListEqual( ["customized_0"], - [x for x in envs], + list(envs), ) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) test_results = generate_cases_result() for test_result in test_results: test_result.runtime_data.use_new_environment = True @@ -120,10 +121,10 @@ def test_merge_req_create_on_use_new(self) -> None: existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - # every case need a new environment, so created 3 + # All 3 cases needed a new environment, so it created 3. self.assertListEqual( ["customized_0", "generated_1", "generated_2", "generated_3"], - [x for x in envs], + list(envs), ) self.verify_test_results( expected_envs=["", "", ""], @@ -140,9 +141,9 @@ def test_merge_req_not_allow_create(self) -> None: envs = load_environments(env_runbook) self.assertListEqual( [], - [x for x in envs], + list(envs), ) - runner = generate_lisarunner(None) + runner = generate_runner(None) test_results = generate_cases_result() runner._merge_test_requirements( test_results=test_results, @@ -151,7 +152,7 @@ def test_merge_req_not_allow_create(self) -> None: ) self.assertListEqual( [], - [x for x in envs], + list(envs), ) self.verify_test_results( @@ -174,21 +175,20 @@ def test_merge_req_platform_type_checked(self) -> None: envs = load_environments(env_runbook) self.assertListEqual( [], - [x for x in envs], + list(envs), ) - runner = generate_lisarunner(None) + runner = generate_runner(None) test_results = generate_cases_result() for test_result in test_results: metadata = test_result.runtime_data.metadata metadata.requirement = simple_requirement( - supported_platform_type=["notexists"] + 
supported_platform_type=["does-not-exist"] ) runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - platform_unsupported = "capability cannot support some of requirement" self.verify_test_results( expected_envs=["", "", ""], @@ -205,19 +205,20 @@ def test_merge_req_platform_type_checked(self) -> None: test_results=test_results, ) - def test_fit_a_predefined_env(self) -> None: + async def test_fit_a_predefined_env(self) -> None: # predefined env can run case in below condition. # 1. with predefined env of 1 simple node, so ut2 don't need a new env # 2. ut3 need 8 cores, and predefined env target to meet all core requirement, # so it can run any case with core requirements. generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, remote=True) - runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + runner = generate_runner(env_runbook) + await runner.start() self.verify_env_results( expected_prepared=["customized_0", "generated_1", "generated_2"], expected_deployed_envs=["customized_0", "generated_1"], expected_deleted_envs=["customized_0", "generated_1"], + runner=runner, ) self.verify_test_results( expected_envs=["generated_1", "customized_0", "customized_0"], @@ -226,13 +227,14 @@ def test_fit_a_predefined_env(self) -> None: test_results=runner._latest_test_results, ) - def test_fit_a_bigger_env(self) -> None: + async def test_fit_a_bigger_env(self) -> None: # similar with test_fit_a_predefined_env, but predefined 2 nodes, # it doesn't equal to any case req, but reusable for all cases. 
+ generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + runner = generate_runner(env_runbook) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -242,6 +244,7 @@ def test_fit_a_bigger_env(self) -> None: ], expected_deployed_envs=["customized_0"], expected_deleted_envs=["customized_0"], + runner=runner, ) self.verify_test_results( expected_envs=["customized_0", "customized_0", "customized_0"], @@ -250,13 +253,13 @@ def test_fit_a_bigger_env(self) -> None: test_results=runner._latest_test_results, ) - def test_case_new_env_run_only_1_needed(self) -> None: + async def test_case_new_env_run_only_1_needed(self) -> None: # same predefined env as test_fit_a_bigger_env, # but all case want to run on a new env generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook, case_use_new_env=True) - asyncio.run(runner.start()) + runner = generate_runner(env_runbook, case_use_new_env=True) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -266,6 +269,7 @@ def test_case_new_env_run_only_1_needed(self) -> None: ], expected_deployed_envs=["customized_0", "generated_1", "generated_3"], expected_deleted_envs=["customized_0", "generated_1", "generated_3"], + runner=runner, ) self.verify_test_results( expected_envs=["customized_0", "generated_1", "generated_3"], @@ -274,13 +278,14 @@ def test_case_new_env_run_only_1_needed(self) -> None: test_results=runner._latest_test_results, ) - def test_no_needed_env(self) -> None: + async def test_no_needed_env(self) -> None: # two 1 node env predefined, but only customized_0 go to deploy # no cases assigned to customized_1, as fit cases run on customized_0 already + generate_cases_metadata() env_runbook = generate_env_runbook(local=True, remote=True) - runner = 
generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + runner = generate_runner(env_runbook) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -290,6 +295,7 @@ def test_no_needed_env(self) -> None: ], expected_deployed_envs=["customized_0", "generated_2"], expected_deleted_envs=["customized_0", "generated_2"], + runner=runner, ) self.verify_test_results( expected_envs=["generated_2", "customized_0", "customized_0"], @@ -298,15 +304,16 @@ def test_no_needed_env(self) -> None: test_results=runner._latest_test_results, ) - def test_deploy_no_more_resource(self) -> None: + async def test_deploy_no_more_resource(self) -> None: # platform may see no more resource, like no azure quota. # cases skipped due to this. # In future, will add retry on wait more resource. - test_platform.wait_more_resource_error = True + platform_schema = test_platform.MockPlatformSchema() + platform_schema.wait_more_resource_error = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True) - runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + runner = generate_runner(env_runbook, platform_schema=platform_schema) + await runner.start() self.verify_env_results( expected_prepared=[ @@ -317,6 +324,7 @@ def test_deploy_no_more_resource(self) -> None: ], expected_deployed_envs=[], expected_deleted_envs=[], + runner=runner, ) before_suite_failed = "no available environment" self.verify_test_results( @@ -334,13 +342,13 @@ def test_deploy_no_more_resource(self) -> None: test_results=runner._latest_test_results, ) - def test_skipped_on_suite_failure(self) -> None: - # first two cases skipped due to test suite setup failed + async def test_skipped_on_suite_failure(self) -> None: + # First two tests were skipped because the setup is made to fail. 
test_testsuite.fail_on_before_suite = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + runner = generate_runner(env_runbook) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -350,7 +358,9 @@ def test_skipped_on_suite_failure(self) -> None: ], expected_deployed_envs=["customized_0"], expected_deleted_envs=["customized_0"], + runner=runner, ) + before_suite_failed = "before_suite: failed" self.verify_test_results( expected_envs=["customized_0", "customized_0", "customized_0"], @@ -363,13 +373,14 @@ def test_skipped_on_suite_failure(self) -> None: test_results=runner._latest_test_results, ) - def test_env_skipped_no_prepared_env(self) -> None: + async def test_env_skipped_no_prepared_env(self) -> None: # test env not prepared, so test cases cannot find an env to run - test_platform.return_prepared = False + platform_schema = test_platform.MockPlatformSchema() + platform_schema.return_prepared = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + runner = generate_runner(env_runbook, platform_schema=platform_schema) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -379,8 +390,10 @@ def test_env_skipped_no_prepared_env(self) -> None: ], expected_deployed_envs=[], expected_deleted_envs=[], + runner=runner, ) - no_avaiable_env = "no available environment" + + no_available_env = "no available environment" self.verify_test_results( expected_envs=["", "", ""], expected_status=[ @@ -388,18 +401,19 @@ def test_env_skipped_no_prepared_env(self) -> None: TestStatus.SKIPPED, TestStatus.SKIPPED, ], - expected_message=[no_avaiable_env, no_avaiable_env, no_avaiable_env], + expected_message=[no_available_env, no_available_env, 
no_available_env], test_results=runner._latest_test_results, ) - def test_env_skipped_not_ready(self) -> None: + async def test_env_skipped_not_ready(self) -> None: # env prepared, but not deployed to ready. # so no cases can run - test_platform.deploy_is_ready = False + platform_schema = test_platform.MockPlatformSchema() + platform_schema.deploy_is_ready = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + runner = generate_runner(env_runbook, platform_schema=platform_schema) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -414,8 +428,9 @@ def test_env_skipped_not_ready(self) -> None: "generated_3", ], expected_deleted_envs=[], + runner=runner, ) - no_avaiable_env = "no available environment" + no_available_env = "no available environment" self.verify_test_results( expected_envs=["", "", ""], expected_status=[ @@ -423,21 +438,22 @@ def test_env_skipped_not_ready(self) -> None: TestStatus.SKIPPED, TestStatus.SKIPPED, ], - expected_message=[no_avaiable_env, no_avaiable_env, no_avaiable_env], + expected_message=[no_available_env, no_available_env, no_available_env], test_results=runner._latest_test_results, ) - def test_env_skipped_no_case(self) -> None: + async def test_env_skipped_no_case(self) -> None: # no case found, as not call generate_case_metadata # in this case, not deploy any env env_runbook = generate_env_runbook(is_single_env=True, remote=True) - runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + runner = generate_runner(env_runbook) + await runner.start() # still prepare predefined, but not deploy self.verify_env_results( expected_prepared=["customized_0"], expected_deployed_envs=[], expected_deleted_envs=[], + runner=runner, ) self.verify_test_results( expected_envs=[], @@ -453,6 +469,7 @@ def verify_test_results( expected_message: List[str], test_results: 
List[TestResult], ) -> None: + self.assertListEqual( expected_envs, [ @@ -479,16 +496,20 @@ def verify_env_results( expected_prepared: List[str], expected_deployed_envs: List[str], expected_deleted_envs: List[str], + runner: Runner, ) -> None: + platform = cast(test_platform.MockPlatform, runner._latest_platform) + platform_test_data = platform.test_data + self.assertListEqual( expected_prepared, - [x for x in prepared_envs], + list(platform_test_data.prepared_envs), ) self.assertListEqual( expected_deployed_envs, - [x for x in deployed_envs], + list(platform_test_data.deployed_envs), ) self.assertListEqual( expected_deleted_envs, - [x for x in deleted_envs], + list(platform_test_data.deleted_envs), ) diff --git a/lisa/tests/test_testsuite.py b/lisa/tests/test_testsuite.py index f5e79e5760..183d33816f 100644 --- a/lisa/tests/test_testsuite.py +++ b/lisa/tests/test_testsuite.py @@ -1,6 +1,5 @@ -import asyncio from typing import Any, List -from unittest import TestCase +from unittest import IsolatedAsyncioTestCase, TestCase from lisa import schema from lisa.environment import load_environments @@ -134,7 +133,7 @@ def select_and_check( return selected -class TestSuiteTestCase(TestCase): +class TestSuiteTestCase(IsolatedAsyncioTestCase): def generate_suite_instance(self) -> MockTestSuite: case_results = generate_cases_result() case_results = case_results[:2] @@ -196,42 +195,42 @@ def test_test_result_canrun(self) -> None: else: self.assertEqual(False, result.can_run) - def test_skip_before_suite_failed(self) -> None: + async def test_skip_before_suite_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_before_suite=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.SKIPPED, result.status) self.assertEqual("before_suite: failed", result.message) - def test_pass_after_suite_failed(self) -> None: + async def test_pass_after_suite_failed(self) 
-> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_after_suite=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_skip_before_case_failed(self) -> None: + async def test_skip_before_case_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_before_case=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.SKIPPED, result.status) self.assertEqual("before_case: failed", result.message) - def test_pass_after_case_failed(self) -> None: + async def test_pass_after_case_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_after_case=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_skip_case_failed(self) -> None: + async def test_skip_case_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=1) - asyncio.run(test_suite.start()) + await test_suite.start() result = test_suite.case_results[0] self.assertEqual(TestStatus.FAILED, result.status) self.assertEqual("failed: mock_ut1 failed", result.message) @@ -239,36 +238,36 @@ def test_skip_case_failed(self) -> None: self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_retry_passed(self) -> None: + async def test_retry_passed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=1) result = test_suite.case_results[0] result.runtime_data.retry = 1 - asyncio.run(test_suite.start()) + await test_suite.start() self.assertEqual(TestStatus.PASSED, result.status) 
self.assertEqual("", result.message) result = test_suite.case_results[1] self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_retry_notenough_failed(self) -> None: + async def test_retry_notenough_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=2) result = test_suite.case_results[0] result.runtime_data.retry = 1 - asyncio.run(test_suite.start()) + await test_suite.start() self.assertEqual(TestStatus.FAILED, result.status) self.assertEqual("failed: mock_ut1 failed", result.message) result = test_suite.case_results[1] self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_attempt_ignore_failure(self) -> None: + async def test_attempt_ignore_failure(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=2) result = test_suite.case_results[0] result.runtime_data.ignore_failure = True - asyncio.run(test_suite.start()) + await test_suite.start() self.assertEqual(TestStatus.ATTEMPTED, result.status) self.assertEqual("mock_ut1 failed", result.message) result = test_suite.case_results[1] diff --git a/lisa/testsuite.py b/lisa/testsuite.py index c06e708138..bbed2fcae4 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -1,6 +1,5 @@ from __future__ import annotations -from abc import ABCMeta from dataclasses import dataclass, field from enum import Enum from functools import wraps @@ -214,13 +213,15 @@ def __init__( self, priority: int = 2, requirement: Optional[TestCaseRequirement] = None, ) -> None: + self.suite: TestSuiteMetadata + self.priority = priority self.description = description if requirement: self.requirement = requirement def __getattr__(self, key: str) -> Any: - # inherit all attributes of test suite + # return attributes of test suite, unless it's redefined at the case level assert self.suite, "suite is not set before use metadata" return getattr(self.suite, key) @@ -237,9
+238,6 @@ def wrapper(*args: object) -> None: return wrapper - def set_suite(self, suite: TestSuiteMetadata) -> None: - self.suite: TestSuiteMetadata = suite - class TestCaseRuntimeData: def __init__(self, metadata: TestCaseMetadata): @@ -254,7 +252,7 @@ def __init__(self, metadata: TestCaseMetadata): self.environment_name: str = "" def __getattr__(self, key: str) -> Any: - # inherit all attributes of metadata + # return attributes of metadata for convenience assert self.metadata return getattr(self.metadata, key) @@ -272,7 +270,7 @@ def clone(self) -> TestCaseRuntimeData: return cloned -class TestSuite(Action, metaclass=ABCMeta): +class TestSuite(Action): def __init__( self, environment: Environment, @@ -299,8 +297,10 @@ def before_case(self) -> None: def after_case(self) -> None: pass + # TODO: This entire function is one long string of side-effects. + # We need to reduce this function's complexity to remove the + # disabled warning, and not rely solely on side effects. async def start(self) -> None: # noqa: C901 - # TODO: Reduce this function's complexity and remove the disabled warning.
suite_error_message = "" is_suite_continue = True @@ -387,7 +387,7 @@ async def start(self) -> None: # noqa: C901 if self._should_stop: self.log.info("received stop message, stop run") - self.set_status(ActionStatus.STOPPED) + self.status = ActionStatus.STOPPED break self.log = suite_log @@ -400,7 +400,7 @@ async def start(self) -> None: # noqa: C901 self.log.debug(f"after_suite end with {timer}") async def stop(self) -> None: - self.set_status(ActionStatus.STOPPING) + self.status = ActionStatus.STOPPING self._should_stop = True async def close(self) -> None: diff --git a/lisa/util/process.py b/lisa/util/process.py index 862b370d2d..798c1c7913 100644 --- a/lisa/util/process.py +++ b/lisa/util/process.py @@ -1,10 +1,12 @@ import logging import pathlib import shlex +import subprocess import time from dataclasses import dataclass from typing import Dict, Optional +import spur # type: ignore from spur.errors import NoSuchCommandError # type: ignore from lisa.util.logger import Logger, LogWriter, get_logger @@ -23,6 +25,7 @@ def __str__(self) -> str: return self.stdout +# TODO: So much cleanup here. It was using duck typing. 
class Process: def __init__( self, @@ -36,6 +39,8 @@ def __init__( self._is_linux = shell.is_linux self._running: bool = False self._log = get_logger("cmd", id_, parent=parent_logger) + self._process: Optional[spur.local.LocalProcess] = None + self._result: Optional[ExecutableResult] = None def start( self, @@ -107,7 +112,7 @@ def start( except (FileNotFoundError, NoSuchCommandError) as identifier: # FileNotFoundError: not found command on Windows # NoSuchCommandError: not found command on remote Linux - self._process = ExecutableResult( + self._result = ExecutableResult( "", identifier.strerror, 1, self._timer.elapsed() ) self._log.debug(f"not found command: {identifier}") @@ -123,30 +128,48 @@ def wait_result(self, timeout: float = 600) -> ExecutableResult: self._log.warning(f"timeout in {timeout} sec, and killed") self.kill() - if not isinstance(self._process, ExecutableResult): + if self._result is None: + # if not isinstance(self._process, ExecutableResult): assert self._process proces_result = self._process.wait_for_result() self.stdout_writer.close() self.stderr_writer.close() - result: ExecutableResult = ExecutableResult( + # cache for future queries, in case it's queried twice. + self._result = ExecutableResult( proces_result.output.strip(), proces_result.stderr_output.strip(), proces_result.return_code, self._timer.elapsed(), ) - # cache for future queries, in case it's queried twice. - self._process = result - else: - result = self._process + # TODO: The spur library is not very good and leaves open + # resources (probably due to it starting the process with + # `bufsize=0`). We need to replace it, but for now, we + # manually close the leaks. 
+ if isinstance(self._process, spur.local.LocalProcess): + popen: subprocess.Popen[str] = self._process._subprocess + if popen.stdin: + popen.stdin.close() + if popen.stdout: + popen.stdout.close() + if popen.stderr: + popen.stderr.close() + elif isinstance(self._process, spur.ssh.SshProcess): + if self._process._stdin: + self._process._stdin.close() + if self._process._stdout: + self._process._stdout.close() + if self._process._stderr: + self._process._stderr.close() + self._process = None self._log.debug(f"waited with {self._timer}") - return result + return self._result def kill(self) -> None: - if self._process and not isinstance(self._process, ExecutableResult): + if self._process: self._process.send_signal(9) def is_running(self) -> bool: - if self._running: + if self._running and self._process: self._running = self._process.is_running() return self._running diff --git a/lisa/util/shell.py b/lisa/util/shell.py index b51f7ab6a7..f695e8da6d 100644 --- a/lisa/util/shell.py +++ b/lisa/util/shell.py @@ -177,7 +177,7 @@ def spawn( encoding: str = "utf-8", use_pty: bool = False, allow_error: bool = True, - ) -> Any: + ) -> spur.ssh.SshProcess: self.initialize() assert self._inner_shell return self._inner_shell.spawn( @@ -307,7 +307,7 @@ def spawn( encoding: str = "utf-8", use_pty: bool = False, allow_error: bool = False, - ) -> Any: + ) -> spur.local.LocalProcess: return self._inner_shell.spawn( command=command, update_env=update_env,