From 305f4e539e9181ca67cf8f0eccdfa010ce10f1d5 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 30 Sep 2020 18:21:25 -0700 Subject: [PATCH 01/27] Run Python in developer mode The `-X dev` option reveals useful information such as leaked system resources. --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 11f5b1368b..f6a4a69924 100644 --- a/Makefile +++ b/Makefile @@ -8,11 +8,11 @@ setup: # Run LISAv3 run: - @poetry run python lisa/main.py --debug + @poetry run python -X dev lisa/main.py --debug # Run unit tests test: - @poetry run python -m unittest discover lisa + @poetry run python -X dev -m unittest discover lisa # Generate coverage report (slow, reruns LISAv3 and tests) coverage: From 79c875b35fff5b0d6ec60ff6f9b2cc17525c9afc Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 15:30:01 -0700 Subject: [PATCH 02/27] Add `test` to `make all` Forgot this. --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index f6a4a69924..d5dbe13404 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # This Makefile simply automates all our tasks. Its use is optional. -all: setup run check +all: setup run test check # Install Python packages setup: @@ -12,7 +12,7 @@ run: # Run unit tests test: - @poetry run python -X dev -m unittest discover lisa + @poetry run python -X dev -m unittest discover -v lisa # Generate coverage report (slow, reruns LISAv3 and tests) coverage: From 10ad3b3eef6c4991658c8d9a4cd2491cd28bc216 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 30 Sep 2020 18:22:45 -0700 Subject: [PATCH 03/27] Use asyncio correctly At minimum this converts the usage to the documented approach where this is exactly one asyncio event loop (started in main) and everything that currently is written async actually becomes a coroutine. 
The unit tests which use coroutines now use `IsolatedAsyncioTestCase` instead of just `TestCase`, which ensures they continue to work. --- lisa/commands.py | 10 ++++----- lisa/main.py | 8 ++++--- lisa/tests/test_lisarunner.py | 41 +++++++++++++++++------------------ lisa/tests/test_testsuite.py | 37 +++++++++++++++---------------- 4 files changed, 47 insertions(+), 49 deletions(-) diff --git a/lisa/commands.py b/lisa/commands.py index dd4a2dd4c2..ad33301162 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -1,4 +1,3 @@ -import asyncio import functools from argparse import Namespace from typing import Iterable, Optional, cast @@ -14,15 +13,14 @@ _get_init_logger = functools.partial(get_logger, "init") -def run(args: Namespace) -> int: +async def run(args: Namespace) -> int: runbook = load_runbook(args) if runbook.notifier: notifier.initialize(runbooks=runbook.notifier) try: runner = LisaRunner(runbook) - awaitable = runner.start() - asyncio.run(awaitable) + await runner.start() finally: notifier.finalize() @@ -30,12 +28,12 @@ def run(args: Namespace) -> int: # check runbook -def check(args: Namespace) -> int: +async def check(args: Namespace) -> int: load_runbook(args) return 0 -def list_start(args: Namespace) -> int: +async def list_start(args: Namespace) -> int: runbook = load_runbook(args) list_all = cast(Optional[bool], args.list_all) log = _get_init_logger("list") diff --git a/lisa/main.py b/lisa/main.py index ef00e0d75f..76b980277b 100644 --- a/lisa/main.py +++ b/lisa/main.py @@ -1,3 +1,4 @@ +import asyncio import sys import traceback from datetime import datetime @@ -23,7 +24,7 @@ def create_run_path(root_path: Path) -> Path: return run_path -def main() -> int: +async def main() -> int: total_timer = create_timer() log = get_logger() exit_code: int = 0 @@ -57,7 +58,7 @@ def main() -> int: log.info(f"command line args: {sys.argv}") log.info(f"run local path: {runtime_root}") - exit_code = args.func(args) + exit_code = await args.func(args) assert 
isinstance(exit_code, int), f"actual: {type(exit_code)}" finally: log.info(f"completed in {total_timer}") @@ -68,7 +69,8 @@ def main() -> int: if __name__ == "__main__": exit_code = 0 try: - exit_code = main() + # TODO: Turn off debugging when we ship this. + exit_code = asyncio.run(main(), debug=True) except Exception as exception: exit_code = -1 log = get_logger() diff --git a/lisa/tests/test_lisarunner.py b/lisa/tests/test_lisarunner.py index 78b3c6e9d8..3e40e870ff 100644 --- a/lisa/tests/test_lisarunner.py +++ b/lisa/tests/test_lisarunner.py @@ -1,6 +1,5 @@ -import asyncio from typing import List, Optional -from unittest.case import TestCase +from unittest import IsolatedAsyncioTestCase from lisa import schema from lisa.environment import load_environments @@ -38,7 +37,7 @@ def generate_lisarunner( return runner -class LisaRunnerTestCase(TestCase): +class LisaRunnerTestCase(IsolatedAsyncioTestCase): def tearDown(self) -> None: cleanup_cases_metadata() test_platform.return_prepared = True @@ -205,7 +204,7 @@ def test_merge_req_platform_type_checked(self) -> None: test_results=test_results, ) - def test_fit_a_predefined_env(self) -> None: + async def test_fit_a_predefined_env(self) -> None: # predefined env can run case in below condition. # 1. with predefined env of 1 simple node, so ut2 don't need a new env # 2. 
ut3 need 8 cores, and predefined env target to meet all core requirement, @@ -213,7 +212,7 @@ def test_fit_a_predefined_env(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2"], expected_deployed_envs=["runbook_0", "req_1"], @@ -226,13 +225,13 @@ def test_fit_a_predefined_env(self) -> None: test_results=runner._latest_test_results, ) - def test_fit_a_bigger_env(self) -> None: + async def test_fit_a_bigger_env(self) -> None: # similar with test_fit_a_predefined_env, but predefined 2 nodes, # it doesn't equal to any case req, but reusable for all cases. generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0"], @@ -245,13 +244,13 @@ def test_fit_a_bigger_env(self) -> None: test_results=runner._latest_test_results, ) - def test_case_new_env_run_only_1_needed(self) -> None: + async def test_case_new_env_run_only_1_needed(self) -> None: # same predefined env as test_fit_a_bigger_env, # but all case want to run on a new env generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_lisarunner(env_runbook, case_use_new_env=True) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0", "req_1", "req_3"], @@ -264,13 +263,13 @@ def test_case_new_env_run_only_1_needed(self) -> None: test_results=runner._latest_test_results, ) - def test_no_needed_env(self) -> None: + async def test_no_needed_env(self) -> None: # 
two 1 node env predefined, but only runbook_0 go to deploy # no cases assigned to runbook_1, as fit cases run on runbook_0 already generate_cases_metadata() env_runbook = generate_env_runbook(local=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "runbook_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0", "req_2"], @@ -283,7 +282,7 @@ def test_no_needed_env(self) -> None: test_results=runner._latest_test_results, ) - def test_deploy_no_more_resource(self) -> None: + async def test_deploy_no_more_resource(self) -> None: # platform may see no more resource, like no azure quota. # cases skipped due to this. # In future, will add retry on wait more resource. @@ -291,7 +290,7 @@ def test_deploy_no_more_resource(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -314,13 +313,13 @@ def test_deploy_no_more_resource(self) -> None: test_results=runner._latest_test_results, ) - def test_skipped_on_suite_failure(self) -> None: + async def test_skipped_on_suite_failure(self) -> None: # first two cases skipped due to test suite setup failed test_testsuite.fail_on_before_suite = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0"], @@ -338,13 +337,13 @@ def test_skipped_on_suite_failure(self) -> None: test_results=runner._latest_test_results, ) - def test_env_skipped_no_prepared_env(self) -> None: + async def 
test_env_skipped_no_prepared_env(self) -> None: # test env not prepared, so test cases cannot find an env to run test_platform.return_prepared = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=[], @@ -362,14 +361,14 @@ def test_env_skipped_no_prepared_env(self) -> None: test_results=runner._latest_test_results, ) - def test_env_skipped_not_ready(self) -> None: + async def test_env_skipped_not_ready(self) -> None: # env prepared, but not deployed to ready. # so no cases can run test_platform.deploy_is_ready = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0", "req_1", "req_2", "req_3"], @@ -387,12 +386,12 @@ def test_env_skipped_not_ready(self) -> None: test_results=runner._latest_test_results, ) - def test_env_skipped_no_case(self) -> None: + async def test_env_skipped_no_case(self) -> None: # no case found, as not call generate_case_metadata # in this case, not deploy any env env_runbook = generate_env_runbook(is_single_env=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() # still prepare predefined, but not deploy self.verify_env_results( expected_prepared=["runbook_0"], diff --git a/lisa/tests/test_testsuite.py b/lisa/tests/test_testsuite.py index f5e79e5760..183d33816f 100644 --- a/lisa/tests/test_testsuite.py +++ b/lisa/tests/test_testsuite.py @@ -1,6 +1,5 @@ -import asyncio from typing import Any, List -from unittest import TestCase +from unittest import 
IsolatedAsyncioTestCase, TestCase from lisa import schema from lisa.environment import load_environments @@ -134,7 +133,7 @@ def select_and_check( return selected -class TestSuiteTestCase(TestCase): +class TestSuiteTestCase(IsolatedAsyncioTestCase): def generate_suite_instance(self) -> MockTestSuite: case_results = generate_cases_result() case_results = case_results[:2] @@ -196,42 +195,42 @@ def test_test_result_canrun(self) -> None: else: self.assertEqual(False, result.can_run) - def test_skip_before_suite_failed(self) -> None: + async def test_skip_before_suite_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_before_suite=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.SKIPPED, result.status) self.assertEqual("before_suite: failed", result.message) - def test_pass_after_suite_failed(self) -> None: + async def test_pass_after_suite_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_after_suite=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_skip_before_case_failed(self) -> None: + async def test_skip_before_case_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_before_case=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.SKIPPED, result.status) self.assertEqual("before_case: failed", result.message) - def test_pass_after_case_failed(self) -> None: + async def test_pass_after_case_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_after_case=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: 
self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_skip_case_failed(self) -> None: + async def test_skip_case_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=1) - asyncio.run(test_suite.start()) + await test_suite.start() result = test_suite.case_results[0] self.assertEqual(TestStatus.FAILED, result.status) self.assertEqual("failed: mock_ut1 failed", result.message) @@ -239,36 +238,36 @@ def test_skip_case_failed(self) -> None: self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_retry_passed(self) -> None: + async def test_retry_passed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=1) result = test_suite.case_results[0] result.runtime_data.retry = 1 - asyncio.run(test_suite.start()) + await test_suite.start() self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) result = test_suite.case_results[1] self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_retry_notenough_failed(self) -> None: + async def test_retry_notenough_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=2) result = test_suite.case_results[0] result.runtime_data.retry = 1 - asyncio.run(test_suite.start()) + await test_suite.start() self.assertEqual(TestStatus.FAILED, result.status) self.assertEqual("failed: mock_ut1 failed", result.message) result = test_suite.case_results[1] self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_attempt_ignore_failure(self) -> None: + async def test_attempt_ignore_failure(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=2) result = test_suite.case_results[0] result.runtime_data.ignore_failure = True - asyncio.run(test_suite.start()) 
+ await test_suite.start() self.assertEqual(TestStatus.ATTEMPTED, result.status) self.assertEqual("mock_ut1 failed", result.message) result = test_suite.case_results[1] From 1e4c141a8bc232fb79dd7f9e5cf28119cb5cda27 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 15:09:27 -0700 Subject: [PATCH 04/27] Add info on Async IO to contributing guidelines --- CONTRIBUTING.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 41bd025527..55e28147c7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -252,6 +252,18 @@ Python world. If you make it through even some of these guides, you will be well on your way to being a “Pythonista” (a Python developer) writing “Pythonic” (canonically correct Python) code left and right. +### Async IO + +With Python 3.4, the Async IO pattern found in languages such as C# and Go is +available through the keywords `async` and `await`, along with the Python module +`asyncio`. Please read [Async IO in Python: A Complete +Walkthrough](https://realpython.com/async-io-python/) to understand at a high +level how asynchronous programming works. As of Python 3.7, One major “gotcha” +is that `asyncio.run(...)` should be used [exactly once in +`main`](https://docs.python.org/3/library/asyncio-task.html), it starts the +event loop. Everything else should be a coroutine or task which the event loop +schedules. + ## Future Sections Just a collection of reminders for the author to expand on later. 
From 111d17dc9379f2fd5abc1f3a446b368e5e91eb18 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 15:40:05 -0700 Subject: [PATCH 05/27] Clean up `parse_args` --- lisa/parameter_parser/argparser.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/lisa/parameter_parser/argparser.py b/lisa/parameter_parser/argparser.py index 5f389e1c65..7a79cd4358 100644 --- a/lisa/parameter_parser/argparser.py +++ b/lisa/parameter_parser/argparser.py @@ -36,33 +36,34 @@ def support_variable(parser: ArgumentParser) -> None: def parse_args() -> Namespace: - # parse args run function. + """This wraps Python's 'ArgumentParser' to setup our CLI.""" parser = ArgumentParser() support_debug(parser) support_runbook(parser, required=False) support_variable(parser) + # Default to ‘run’ when no subcommand is given. + parser.set_defaults(func=commands.run) + subparsers = parser.add_subparsers(dest="cmd", required=False) + + # Entry point for ‘run’. run_parser = subparsers.add_parser("run") run_parser.set_defaults(func=commands.run) - support_runbook(run_parser) - support_variable(run_parser) + # Entry point for ‘list-start’. list_parser = subparsers.add_parser(constants.LIST) list_parser.set_defaults(func=commands.list_start) list_parser.add_argument("--type", "-t", dest="type", choices=["case"]) list_parser.add_argument("--all", "-a", dest="list_all", action="store_true") - support_runbook(list_parser) - support_variable(list_parser) + # Entry point for ‘check’. 
check_parser = subparsers.add_parser("check") check_parser.set_defaults(func=commands.check) - support_runbook(check_parser) - support_variable(check_parser) - - parser.set_defaults(func=commands.run) for sub_parser in subparsers.choices.values(): + support_runbook(sub_parser) + support_variable(sub_parser) support_debug(sub_parser) return parser.parse_args() From de55e9a99b4eba79af21a534844581d81da70c45 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 15:48:37 -0700 Subject: [PATCH 06/27] Clean up `load_runbook` Instead of aliasing this function, we should just rename it entirely. Instead of delegating the parsing of an `argparse.Namespace` object, we just setup the `runbook` argument to be a `Path` instead of a string, and have `load_runbook` accept what it needs instead of digging it out of the `Namespace`. --- CONTRIBUTING.md | 2 ++ lisa/commands.py | 8 ++++---- lisa/parameter_parser/argparser.py | 11 ++++++----- lisa/parameter_parser/runbook.py | 13 ++++++------- 4 files changed, 18 insertions(+), 16 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 55e28147c7..07e54cc8a9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,6 +22,8 @@ short summary of the most important parts: * Use one leading underscore only for non-public methods and instance variables, such as `_data`. * Constants should be `CAPITALIZED_SNAKE_CASE`. +* When importing a function, try to avoid renaming it with `import as` because + it introduces cognitive overhead to track yet another name. When in doubt, adhere to existing conventions, or check the style guide. 
diff --git a/lisa/commands.py b/lisa/commands.py index ad33301162..c718716623 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -4,7 +4,7 @@ from lisa import notifier from lisa.lisarunner import LisaRunner -from lisa.parameter_parser.runbook import load as load_runbook +from lisa.parameter_parser.runbook import load_runbook from lisa.testselector import select_testcases from lisa.testsuite import TestCaseRuntimeData from lisa.util import LisaException, constants @@ -14,7 +14,7 @@ async def run(args: Namespace) -> int: - runbook = load_runbook(args) + runbook = load_runbook(args.runbook, args.variables) if runbook.notifier: notifier.initialize(runbooks=runbook.notifier) @@ -29,12 +29,12 @@ async def run(args: Namespace) -> int: # check runbook async def check(args: Namespace) -> int: - load_runbook(args) + load_runbook(args.runbook, args.variables) return 0 async def list_start(args: Namespace) -> int: - runbook = load_runbook(args) + runbook = load_runbook(args.runbook, args.variables) list_all = cast(Optional[bool], args.list_all) log = _get_init_logger("list") if args.type == constants.LIST_CASE: diff --git a/lisa/parameter_parser/argparser.py b/lisa/parameter_parser/argparser.py index 7a79cd4358..5478fc23a4 100644 --- a/lisa/parameter_parser/argparser.py +++ b/lisa/parameter_parser/argparser.py @@ -1,4 +1,5 @@ from argparse import ArgumentParser, Namespace +from pathlib import Path from lisa import commands from lisa.util import constants @@ -8,10 +9,10 @@ def support_runbook(parser: ArgumentParser, required: bool = True) -> None: parser.add_argument( "--runbook", "-r", + type=Path, required=required, - dest="runbook", - help="runbook of this run", - default="examples/runbook/hello_world.yml", + help="Path to the runbook", + default=Path("examples/runbook/hello_world.yml").absolute(), ) @@ -21,7 +22,7 @@ def support_debug(parser: ArgumentParser) -> None: "-d", dest="debug", action="store_true", - help="set log level to debug", + help="Set log level to 
debug", ) @@ -31,7 +32,7 @@ def support_variable(parser: ArgumentParser) -> None: "-v", dest="variables", action="append", - help="define variable from command line. format is NAME:VALUE", + help="Define one or more variables with 'NAME:VALUE'", ) diff --git a/lisa/parameter_parser/runbook.py b/lisa/parameter_parser/runbook.py index e47feb04f3..cb20a8e270 100644 --- a/lisa/parameter_parser/runbook.py +++ b/lisa/parameter_parser/runbook.py @@ -1,7 +1,6 @@ -from argparse import Namespace from functools import partial from pathlib import Path, PurePath -from typing import Any, Dict, Optional, cast +from typing import Any, Dict, List, Optional, cast import yaml from marshmallow import Schema @@ -58,13 +57,13 @@ def validate_data(data: Any) -> schema.Runbook: return runbook -def load(args: Namespace) -> schema.Runbook: +def load_runbook(path: Path, user_variables: Optional[List[str]]) -> schema.Runbook: + """Loads a runbook given a user-supplied path and set of variables.""" # make sure extension in lisa is loaded base_module_path = Path(__file__).parent.parent import_module(base_module_path, logDetails=False) # merge all parameters - path = Path(args.runbook).absolute() data = _load_data(path) constants.RUNBOOK_PATH = path.parent @@ -73,14 +72,14 @@ def load(args: Namespace) -> schema.Runbook: extends_runbook = schema.Extension.schema().load( # type:ignore data[constants.EXTENSION] ) - _load_extends(path.parent, extends_runbook) + _load_extends(constants.RUNBOOK_PATH, extends_runbook) # load arg variables variables: Dict[str, Any] = dict() + # TODO: This is all side-effect driven and needs to be fixed. 
load_from_runbook(data, variables) load_from_env(variables) - if hasattr(args, "variables"): - load_from_pairs(args.variables, variables) + load_from_pairs(user_variables, variables) # replace variables: data = replace_variables(data, variables) From 3474153c0b1fae56966c57b556874b2e1f40a207 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 24 Sep 2020 15:55:44 -0700 Subject: [PATCH 07/27] Turn `status` into a property of `Action` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And use single underscore to avoid invoking name mangling. If the name is reused in a subclass, we likely do not intend to have a duplicate variable, but are intending to use it. For variables we expect to be used, we should use Python’s built-in `property` class/decorator. --- CONTRIBUTING.md | 4 +++- lisa/action.py | 41 ++++++++++++++++++++++------------------- lisa/lisarunner.py | 4 ++-- lisa/testsuite.py | 4 ++-- 4 files changed, 29 insertions(+), 24 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 07e54cc8a9..79562b4b2c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,7 +20,9 @@ short summary of the most important parts: * Always use `self` for the first argument to instance methods. * Always use `cls` for the first argument to class methods. * Use one leading underscore only for non-public methods and instance variables, - such as `_data`. + such as `_data`. Do not activate name mangling with `__` unless necessary. +* If there is a pair of `get_x` and `set_x` methods, they should instead be a + proper property, which is easy to do with the built-in `@property` decorator. * Constants should be `CAPITALIZED_SNAKE_CASE`. * When importing a function, try to avoid renaming it with `import as` because it introduces cognitive overhead to track yet another name. 
diff --git a/lisa/action.py b/lisa/action.py index e6a5b68d75..285e4eb424 100644 --- a/lisa/action.py +++ b/lisa/action.py @@ -39,15 +39,15 @@ def __init__(self) -> None: self.name: str = self.__class__.__name__ self.log = get_logger("Action") - self.__status = ActionStatus.UNINITIALIZED - self.__is_started = False - self.__timer = create_timer() - self.__total: float = 0 + self._status = ActionStatus.UNINITIALIZED + self._is_started = False + self._timer = create_timer() + self._total: float = 0 @abstractmethod async def start(self) -> None: - self.__is_started = True - self.set_status(ActionStatus.RUNNING) + self._is_started = True + self.status = ActionStatus.RUNNING @abstractmethod async def stop(self) -> None: @@ -57,26 +57,29 @@ async def stop(self) -> None: async def close(self) -> None: self.validate_started() - def get_status(self) -> ActionStatus: - return self.__status + @property + def status(self) -> ActionStatus: + """The Action's current state, for example, 'UNINITIALIZED'.""" + return self._status - def set_status(self, status: ActionStatus) -> None: - if self.__status != status: + @status.setter + def status(self, value: ActionStatus) -> None: + if self._status != value: self.log.debug( - f"{self.name} status changed from {self.__status.name} " - f"to {status.name} with {self.__timer}" + f"{self.name} status changed from {self._status.name} " + f"to {value.name} with {self._timer}" ) - self.__total += self.__timer.elapsed() + self._total += self._timer.elapsed() message = ActionMessage( - elapsed=self.__timer.elapsed(), + elapsed=self._timer.elapsed(), sub_type=self.name, - status=status, - total_elapsed=self.__total, + status=value, + total_elapsed=self._total, ) notifier.notify(message=message) - self.__timer = create_timer() - self.__status = status + self._timer = create_timer() + self._status = value def validate_started(self) -> None: - if not self.__is_started: + if not self._is_started: raise LisaException(f"action[{self.name}] is not 
started yet.") diff --git a/lisa/lisarunner.py b/lisa/lisarunner.py index da4ea2e26d..9b1d89eadd 100644 --- a/lisa/lisarunner.py +++ b/lisa/lisarunner.py @@ -27,7 +27,7 @@ def __init__(self, runbook: schema.Runbook) -> None: async def start(self) -> None: # noqa: C901 # TODO: Reduce this function's complexity and remove the disabled warning. await super().start() - self.set_status(ActionStatus.RUNNING) + self.status = ActionStatus.RUNNING # select test cases selected_test_cases = select_testcases(self._runbook.testcase) @@ -161,7 +161,7 @@ async def start(self) -> None: # noqa: C901 for key in TestStatus: self._log.info(f" {key.name:<9}: {result_count_dict.get(key, 0)}") - self.set_status(ActionStatus.SUCCESS) + self.status = ActionStatus.SUCCESS # pass failed count to exit code self.exit_code = result_count_dict.get(TestStatus.FAILED, 0) diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 6f550b793b..bfc67e0019 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -370,7 +370,7 @@ async def start(self) -> None: # noqa: C901 if self._should_stop: self.log.info("received stop message, stop run") - self.set_status(ActionStatus.STOPPED) + self.status = ActionStatus.STOPPED break self.log = suite_log @@ -383,7 +383,7 @@ async def start(self) -> None: # noqa: C901 self.log.debug(f"after_suite end with {timer}") async def stop(self) -> None: - self.set_status(ActionStatus.STOPPING) + self.status = ActionStatus.STOPPING self._should_stop = True async def close(self) -> None: From 3fd10c885eba32fdbaeea20e06352943c5fa9d61 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 16:56:48 -0700 Subject: [PATCH 08/27] Make `LisaRunner` not subclass `Action` It was not being used for anything useful. 
--- lisa/lisarunner.py | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/lisa/lisarunner.py b/lisa/lisarunner.py index 9b1d89eadd..42e62c0ae5 100644 --- a/lisa/lisarunner.py +++ b/lisa/lisarunner.py @@ -1,7 +1,6 @@ from typing import Dict, List, Optional from lisa import schema, search_space -from lisa.action import Action, ActionStatus from lisa.environment import Environment, Environments, load_environments from lisa.platform_ import WaitMoreResourceError, load_platform from lisa.testselector import select_testcases @@ -16,18 +15,14 @@ from lisa.util.logger import get_logger -class LisaRunner(Action): +class LisaRunner: def __init__(self, runbook: schema.Runbook) -> None: - super().__init__() self.exit_code: int = 0 - self._runbook = runbook self._log = get_logger("runner") async def start(self) -> None: # noqa: C901 # TODO: Reduce this function's complexity and remove the disabled warning. - await super().start() - self.status = ActionStatus.RUNNING # select test cases selected_test_cases = select_testcases(self._runbook.testcase) @@ -161,20 +156,12 @@ async def start(self) -> None: # noqa: C901 for key in TestStatus: self._log.info(f" {key.name:<9}: {result_count_dict.get(key, 0)}") - self.status = ActionStatus.SUCCESS - # pass failed count to exit code self.exit_code = result_count_dict.get(TestStatus.FAILED, 0) # for UT testability self._latest_test_results = selected_test_results - async def stop(self) -> None: - super().stop() - - async def close(self) -> None: - super().close() - async def _run_suite( self, environment: Environment, cases: List[TestResult] ) -> None: From 53bb89ff850a896166c5bd2831469c8d8d02facf Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 17:01:54 -0700 Subject: [PATCH 09/27] Delete unnecessary `Action` class MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This was an abstract class that provided no value, just extra abstraction. 
Originally it was going to be used by multiple runner implementations, and the notifier, and for artifacts. However, we only have one runner which no longer uses it, the notifier never used it, and artifacts aren’t implemented so it’s too early to say if it’s useful there. Hence we should delete this to reduce complexity. --- lisa/action.py | 85 ----------------------------------------------- lisa/testsuite.py | 11 +----- 2 files changed, 1 insertion(+), 95 deletions(-) delete mode 100644 lisa/action.py diff --git a/lisa/action.py b/lisa/action.py deleted file mode 100644 index 285e4eb424..0000000000 --- a/lisa/action.py +++ /dev/null @@ -1,85 +0,0 @@ -from __future__ import annotations - -from abc import ABCMeta, abstractmethod -from dataclasses import dataclass -from enum import Enum - -from lisa import notifier -from lisa.util import LisaException -from lisa.util.logger import get_logger -from lisa.util.perf_timer import create_timer - -ActionStatus = Enum( - "ActionStatus", - [ - "UNINITIALIZED", - "INITIALIZING", - "INITIALIZED", - "WAITING", - "RUNNING", - "SUCCESS", - "FAILED", - "STOPPING", - "STOPPED", - "UNKNOWN", - ], -) - - -@dataclass -class ActionMessage(notifier.MessageBase): - type: str = "Action" - sub_type: str = "" - status: ActionStatus = ActionStatus.UNKNOWN - total_elapsed: float = 0 - - -class Action(metaclass=ABCMeta): - def __init__(self) -> None: - self.name: str = self.__class__.__name__ - self.log = get_logger("Action") - - self._status = ActionStatus.UNINITIALIZED - self._is_started = False - self._timer = create_timer() - self._total: float = 0 - - @abstractmethod - async def start(self) -> None: - self._is_started = True - self.status = ActionStatus.RUNNING - - @abstractmethod - async def stop(self) -> None: - self.validate_started() - - @abstractmethod - async def close(self) -> None: - self.validate_started() - - @property - def status(self) -> ActionStatus: - """The Action's current state, for example, 'UNINITIALIZED'.""" - return 
self._status - - @status.setter - def status(self, value: ActionStatus) -> None: - if self._status != value: - self.log.debug( - f"{self.name} status changed from {self._status.name} " - f"to {value.name} with {self._timer}" - ) - self._total += self._timer.elapsed() - message = ActionMessage( - elapsed=self._timer.elapsed(), - sub_type=self.name, - status=value, - total_elapsed=self._total, - ) - notifier.notify(message=message) - self._timer = create_timer() - self._status = value - - def validate_started(self) -> None: - if not self._is_started: - raise LisaException(f"action[{self.name}] is not started yet.") diff --git a/lisa/testsuite.py b/lisa/testsuite.py index bfc67e0019..7db8c1b93e 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -10,7 +10,6 @@ from retry.api import retry_call # type: ignore from lisa import notifier, schema, search_space -from lisa.action import Action, ActionStatus from lisa.environment import EnvironmentSpace from lisa.feature import Feature from lisa.operating_system import OperatingSystem @@ -257,7 +256,7 @@ def clone(self) -> TestCaseRuntimeData: return cloned -class TestSuite(unittest.TestCase, Action, metaclass=ABCMeta): +class TestSuite(unittest.TestCase, metaclass=ABCMeta): def __init__( self, environment: Environment, @@ -370,7 +369,6 @@ async def start(self) -> None: # noqa: C901 if self._should_stop: self.log.info("received stop message, stop run") - self.status = ActionStatus.STOPPED break self.log = suite_log @@ -382,13 +380,6 @@ async def start(self) -> None: # noqa: C901 self.log.error("after_suite failed", exc_info=identifier) self.log.debug(f"after_suite end with {timer}") - async def stop(self) -> None: - self.status = ActionStatus.STOPPING - self._should_stop = True - - async def close(self) -> None: - pass - def get_suites_metadata() -> Dict[str, TestSuiteMetadata]: return _all_suites From af9addfd55a8a5df41f37ac1d176a5bf97cc53e9 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 
17:06:35 -0700 Subject: [PATCH 10/27] Rename `LisaRunner` to `Runner` since we only have one --- lisa/commands.py | 4 ++-- lisa/lisarunner.py | 2 +- lisa/tests/test_lisarunner.py | 38 +++++++++++++++++------------------ 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/lisa/commands.py b/lisa/commands.py index c718716623..bf929ae492 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -3,7 +3,7 @@ from typing import Iterable, Optional, cast from lisa import notifier -from lisa.lisarunner import LisaRunner +from lisa.lisarunner import Runner from lisa.parameter_parser.runbook import load_runbook from lisa.testselector import select_testcases from lisa.testsuite import TestCaseRuntimeData @@ -19,7 +19,7 @@ async def run(args: Namespace) -> int: if runbook.notifier: notifier.initialize(runbooks=runbook.notifier) try: - runner = LisaRunner(runbook) + runner = Runner(runbook) await runner.start() finally: notifier.finalize() diff --git a/lisa/lisarunner.py b/lisa/lisarunner.py index 42e62c0ae5..97e23a8b28 100644 --- a/lisa/lisarunner.py +++ b/lisa/lisarunner.py @@ -15,7 +15,7 @@ from lisa.util.logger import get_logger -class LisaRunner: +class Runner: def __init__(self, runbook: schema.Runbook) -> None: self.exit_code: int = 0 self._runbook = runbook diff --git a/lisa/tests/test_lisarunner.py b/lisa/tests/test_lisarunner.py index 3e40e870ff..6c318fbd73 100644 --- a/lisa/tests/test_lisarunner.py +++ b/lisa/tests/test_lisarunner.py @@ -3,7 +3,7 @@ from lisa import schema from lisa.environment import load_environments -from lisa.lisarunner import LisaRunner +from lisa.lisarunner import Runner from lisa.tests import test_platform, test_testsuite from lisa.tests.test_environment import generate_runbook as generate_env_runbook from lisa.tests.test_platform import deleted_envs, deployed_envs, prepared_envs @@ -16,9 +16,9 @@ from lisa.util import constants -def generate_lisarunner( +def generate_runner( env_runbook: Optional[schema.EnvironmentRoot] = None, 
case_use_new_env: bool = False -) -> LisaRunner: +) -> Runner: runbook = schema.Runbook( platform=[ schema.Platform(type=constants.PLATFORM_MOCK, admin_password="not use it") @@ -32,12 +32,12 @@ def generate_lisarunner( ) if env_runbook: runbook.environment = env_runbook - runner = LisaRunner(runbook) + runner = Runner(runbook) return runner -class LisaRunnerTestCase(IsolatedAsyncioTestCase): +class RunnerTestCase(IsolatedAsyncioTestCase): def tearDown(self) -> None: cleanup_cases_metadata() test_platform.return_prepared = True @@ -53,7 +53,7 @@ def test_merge_req_create_on_new(self) -> None: [], [x for x in envs], ) - runner = generate_lisarunner(None) + runner = generate_runner(None) test_results = generate_cases_result() runner._merge_test_requirements( test_results=test_results, @@ -82,7 +82,7 @@ def test_merge_req_run_not_create_on_equal(self) -> None: [x for x in envs], ) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) test_results = generate_cases_result() runner._merge_test_requirements( test_results=test_results, @@ -110,7 +110,7 @@ def test_merge_req_create_on_use_new(self) -> None: ["runbook_0"], [x for x in envs], ) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) test_results = generate_cases_result() for test_result in test_results: test_result.runtime_data.use_new_environment = True @@ -141,7 +141,7 @@ def test_merge_req_not_allow_create(self) -> None: [], [x for x in envs], ) - runner = generate_lisarunner(None) + runner = generate_runner(None) test_results = generate_cases_result() runner._merge_test_requirements( test_results=test_results, @@ -175,7 +175,7 @@ def test_merge_req_platform_type_checked(self) -> None: [], [x for x in envs], ) - runner = generate_lisarunner(None) + runner = generate_runner(None) test_results = generate_cases_result() for test_result in test_results: metadata = test_result.runtime_data.metadata @@ -211,7 +211,7 @@ async def 
test_fit_a_predefined_env(self) -> None: # so it can run any case with core requirements. generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2"], @@ -230,7 +230,7 @@ async def test_fit_a_bigger_env(self) -> None: # it doesn't equal to any case req, but reusable for all cases. generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -249,7 +249,7 @@ async def test_case_new_env_run_only_1_needed(self) -> None: # but all case want to run on a new env generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook, case_use_new_env=True) + runner = generate_runner(env_runbook, case_use_new_env=True) await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -268,7 +268,7 @@ async def test_no_needed_env(self) -> None: # no cases assigned to runbook_1, as fit cases run on runbook_0 already generate_cases_metadata() env_runbook = generate_env_runbook(local=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "runbook_1", "req_2", "req_3"], @@ -289,7 +289,7 @@ async def test_deploy_no_more_resource(self) -> None: test_platform.wait_more_resource_error = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() 
self.verify_env_results( @@ -318,7 +318,7 @@ async def test_skipped_on_suite_failure(self) -> None: test_testsuite.fail_on_before_suite = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -342,7 +342,7 @@ async def test_env_skipped_no_prepared_env(self) -> None: test_platform.return_prepared = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -367,7 +367,7 @@ async def test_env_skipped_not_ready(self) -> None: test_platform.deploy_is_ready = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -390,7 +390,7 @@ async def test_env_skipped_no_case(self) -> None: # no case found, as not call generate_case_metadata # in this case, not deploy any env env_runbook = generate_env_runbook(is_single_env=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() # still prepare predefined, but not deploy self.verify_env_results( From 4fa6d9930268fb2c4dbf43a207ee57ca22174c8f Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 17:08:46 -0700 Subject: [PATCH 11/27] Also rename the files --- lisa/commands.py | 2 +- lisa/{lisarunner.py => runner.py} | 0 lisa/tests/test_platform.py | 2 ++ lisa/tests/{test_lisarunner.py => test_runner.py} | 2 +- 4 
files changed, 4 insertions(+), 2 deletions(-) rename lisa/{lisarunner.py => runner.py} (100%) rename lisa/tests/{test_lisarunner.py => test_runner.py} (99%) diff --git a/lisa/commands.py b/lisa/commands.py index bf929ae492..acb09452e4 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -3,8 +3,8 @@ from typing import Iterable, Optional, cast from lisa import notifier -from lisa.lisarunner import Runner from lisa.parameter_parser.runbook import load_runbook +from lisa.runner import Runner from lisa.testselector import select_testcases from lisa.testsuite import TestCaseRuntimeData from lisa.util import LisaException, constants diff --git a/lisa/lisarunner.py b/lisa/runner.py similarity index 100% rename from lisa/lisarunner.py rename to lisa/runner.py diff --git a/lisa/tests/test_platform.py b/lisa/tests/test_platform.py index c41eae349b..40ef3236b7 100644 --- a/lisa/tests/test_platform.py +++ b/lisa/tests/test_platform.py @@ -14,6 +14,8 @@ deploy_success = True deploy_is_ready = True wait_more_resource_error = False +# TODO: Um... this breaks things because of an implicit alphabetical +# dependency where if this runs before `RunnerTestCase` they fail. 
prepared_envs: List[str] = [] deployed_envs: List[str] = [] deleted_envs: List[str] = [] diff --git a/lisa/tests/test_lisarunner.py b/lisa/tests/test_runner.py similarity index 99% rename from lisa/tests/test_lisarunner.py rename to lisa/tests/test_runner.py index 6c318fbd73..16e30134e3 100644 --- a/lisa/tests/test_lisarunner.py +++ b/lisa/tests/test_runner.py @@ -3,7 +3,7 @@ from lisa import schema from lisa.environment import load_environments -from lisa.lisarunner import Runner +from lisa.runner import Runner from lisa.tests import test_platform, test_testsuite from lisa.tests.test_environment import generate_runbook as generate_env_runbook from lisa.tests.test_platform import deleted_envs, deployed_envs, prepared_envs From ac46da08b3a30e4ee28f1e92eca583ae01d64b86 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 17:13:08 -0700 Subject: [PATCH 12/27] Rename `start` to `run` for `Runner` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since there is no longer a corresponding `stop` (and when there was a `stop` it didn’t actually do anything). 
--- lisa/commands.py | 2 +- lisa/runner.py | 2 +- lisa/tests/test_runner.py | 18 +++++++++--------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/lisa/commands.py b/lisa/commands.py index acb09452e4..91e527b696 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -20,7 +20,7 @@ async def run(args: Namespace) -> int: notifier.initialize(runbooks=runbook.notifier) try: runner = Runner(runbook) - await runner.start() + await runner.run() finally: notifier.finalize() diff --git a/lisa/runner.py b/lisa/runner.py index 97e23a8b28..766aaf7c78 100644 --- a/lisa/runner.py +++ b/lisa/runner.py @@ -21,7 +21,7 @@ def __init__(self, runbook: schema.Runbook) -> None: self._runbook = runbook self._log = get_logger("runner") - async def start(self) -> None: # noqa: C901 + async def run(self) -> None: # noqa: C901 # TODO: Reduce this function's complexity and remove the disabled warning. # select test cases diff --git a/lisa/tests/test_runner.py b/lisa/tests/test_runner.py index 16e30134e3..1ee56daeb5 100644 --- a/lisa/tests/test_runner.py +++ b/lisa/tests/test_runner.py @@ -212,7 +212,7 @@ async def test_fit_a_predefined_env(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, remote=True) runner = generate_runner(env_runbook) - await runner.start() + await runner.run() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2"], expected_deployed_envs=["runbook_0", "req_1"], @@ -231,7 +231,7 @@ async def test_fit_a_bigger_env(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_runner(env_runbook) - await runner.start() + await runner.run() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0"], @@ -250,7 +250,7 @@ async def test_case_new_env_run_only_1_needed(self) -> None: generate_cases_metadata() env_runbook = 
generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_runner(env_runbook, case_use_new_env=True) - await runner.start() + await runner.run() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0", "req_1", "req_3"], @@ -269,7 +269,7 @@ async def test_no_needed_env(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(local=True, remote=True) runner = generate_runner(env_runbook) - await runner.start() + await runner.run() self.verify_env_results( expected_prepared=["runbook_0", "runbook_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0", "req_2"], @@ -290,7 +290,7 @@ async def test_deploy_no_more_resource(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True) runner = generate_runner(env_runbook) - await runner.start() + await runner.run() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -319,7 +319,7 @@ async def test_skipped_on_suite_failure(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_runner(env_runbook) - await runner.start() + await runner.run() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0"], @@ -343,7 +343,7 @@ async def test_env_skipped_no_prepared_env(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_runner(env_runbook) - await runner.start() + await runner.run() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=[], @@ -368,7 +368,7 @@ async def test_env_skipped_not_ready(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_runner(env_runbook) - 
await runner.start() + await runner.run() self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0", "req_1", "req_2", "req_3"], @@ -391,7 +391,7 @@ async def test_env_skipped_no_case(self) -> None: # in this case, not deploy any env env_runbook = generate_env_runbook(is_single_env=True, remote=True) runner = generate_runner(env_runbook) - await runner.start() + await runner.run() # still prepare predefined, but not deploy self.verify_env_results( expected_prepared=["runbook_0"], From bfaecf9215525d9563a8e290dbcd3d263b646c37 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 18:09:05 -0700 Subject: [PATCH 13/27] Delete fluffy `_create_test_results` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since it’s really just a list comprehension. Also add a bunch of notes identifying things to investigate further. --- lisa/runner.py | 19 ++++++------------- lisa/testsuite.py | 16 +++++++++++++++- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/lisa/runner.py b/lisa/runner.py index 766aaf7c78..87924cac15 100644 --- a/lisa/runner.py +++ b/lisa/runner.py @@ -6,7 +6,6 @@ from lisa.testselector import select_testcases from lisa.testsuite import ( TestCaseRequirement, - TestCaseRuntimeData, TestResult, TestStatus, TestSuite, @@ -21,14 +20,16 @@ def __init__(self, runbook: schema.Runbook) -> None: self._runbook = runbook self._log = get_logger("runner") + # TODO: This entire function is one long string of side-effects. + # We need to reduce this function's complexity to remove the + # disabled warning, and not rely solely on side effects. async def run(self) -> None: # noqa: C901 - # TODO: Reduce this function's complexity and remove the disabled warning. 
- # select test cases selected_test_cases = select_testcases(self._runbook.testcase) - # create test results - selected_test_results = self._create_test_results(selected_test_cases) + selected_test_results = [ + TestResult(runtime_data=case) for case in selected_test_cases + ] # load predefined environments candidate_environments = load_environments(self._runbook.environment) @@ -177,14 +178,6 @@ async def _run_suite( case.env = environment.name await test_suite.start() - def _create_test_results( - self, cases: List[TestCaseRuntimeData] - ) -> List[TestResult]: - test_results: List[TestResult] = [] - for x in cases: - test_results.append(TestResult(runtime_data=x)) - return test_results - def _merge_test_requirements( self, test_results: List[TestResult], diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 7db8c1b93e..526cd337e2 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -41,6 +41,8 @@ class TestResultMessage(notifier.MessageBase): env: str = "" +# TODO: We’re shadowing `unittest.TestResult` and may just want to +# reuse it, especially if we’ve been inspired by it so much. @dataclass class TestResult: runtime_data: TestCaseRuntimeData @@ -200,9 +202,15 @@ def __init__( ) -> None: self.priority = priority self.description = description + # TODO: Because this class is pseudo-inherited through + # attribute abuse, this optionally defined attribute causes a + # lot of typing headaches. if requirement: self.requirement = requirement + # TODO: This implies that we should actually subclass (inherit) + # `TestSuiteMetadata`, with some way of instantiating this class + # from an instance of that class. 
def __getattr__(self, key: str) -> Any: # inherit all attributes of test suite assert self.suite, "suite is not set before use metadata" @@ -237,6 +245,9 @@ def __init__(self, metadata: TestCaseMetadata): self.ignore_failure: bool = False self.environment_name: str = "" + # TODO: This implies that we should actually subclass (inherit) + # `TestCaseMetaData`, with some way of instantiating this class + # from an instance of that class. def __getattr__(self, key: str) -> Any: # inherit all attributes of metadata assert self.metadata @@ -283,8 +294,11 @@ def before_case(self) -> None: def after_case(self) -> None: pass + # TODO: This entire function is one long string of side-effects. + # We need to reduce this function's complexity to remove the + # disabled warning, and not rely solely on side effects. Perhaps + # we actually just want to reuse `unittest.TestCase.run()`? async def start(self) -> None: # noqa: C901 - # TODO: Reduce this function's complexity and remove the disabled warning. suite_error_message = "" is_suite_continue = True From 823457c5caf449f1970ae0341c67bf79b2ab06e5 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 5 Oct 2020 15:47:51 -0700 Subject: [PATCH 14/27] Remove useless `Runner` class MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These functions did not need a class, they can run by themselves. There's some work to fix the functions which still rely entirely on side-effects, but this reduces a lot of complexity (as evidenced by the changes in the tests, half of which no longer generate a `Runner` instance at all).
--- lisa/commands.py | 10 +- lisa/runner.py | 353 ++++++++++++++++++-------------------- lisa/tests/test_runner.py | 82 ++++----- 3 files changed, 210 insertions(+), 235 deletions(-) diff --git a/lisa/commands.py b/lisa/commands.py index 91e527b696..38da8fef6f 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -2,11 +2,11 @@ from argparse import Namespace from typing import Iterable, Optional, cast +import lisa.runner from lisa import notifier from lisa.parameter_parser.runbook import load_runbook -from lisa.runner import Runner from lisa.testselector import select_testcases -from lisa.testsuite import TestCaseRuntimeData +from lisa.testsuite import TestCaseRuntimeData, TestStatus from lisa.util import LisaException, constants from lisa.util.logger import get_logger @@ -18,13 +18,13 @@ async def run(args: Namespace) -> int: if runbook.notifier: notifier.initialize(runbooks=runbook.notifier) + try: - runner = Runner(runbook) - await runner.run() + results = await lisa.runner.run(runbook) finally: notifier.finalize() - return runner.exit_code + return sum(1 for x in results if x.status == TestStatus.FAILED) # check runbook diff --git a/lisa/runner.py b/lisa/runner.py index 87924cac15..3d952f6e40 100644 --- a/lisa/runner.py +++ b/lisa/runner.py @@ -13,195 +13,180 @@ ) from lisa.util.logger import get_logger +log = get_logger("runner") + + +# TODO: This entire function is one long string of side-effects. +# We need to reduce this function's complexity to remove the +# disabled warning, and not rely solely on side effects. 
+async def run(runbook: schema.Runbook) -> List[TestResult]: # noqa: C901 + # select test cases + selected_test_cases = select_testcases(runbook.testcase) + + selected_test_results = [ + TestResult(runtime_data=case) for case in selected_test_cases + ] + + # load predefined environments + candidate_environments = load_environments(runbook.environment) + + platform = load_platform(runbook.platform) + # get environment requirements + _merge_test_requirements( + test_results=selected_test_results, + existing_environments=candidate_environments, + platform_type=platform.type_name(), + ) + + # there may not need to handle requirements, if all environment are predefined + prepared_environments = platform.prepare_environments(candidate_environments) + + can_run_results = selected_test_results + # request environment then run test s + for environment in prepared_environments: + try: + is_needed: bool = False + can_run_results = [x for x in can_run_results if x.can_run] + can_run_results.sort(key=lambda x: x.runtime_data.metadata.suite.name) + new_env_can_run_results = [ + x for x in can_run_results if x.runtime_data.use_new_environment + ] + + if not can_run_results: + # no left tests, break the loop + log.debug(f"no more test case to run, skip env [{environment.name}]") + break + + # check if any test need this environment + if any( + case.can_run and case.check_environment(environment, True) + for case in can_run_results + ): + is_needed = True + + if not is_needed: + log.debug( + f"env[{environment.name}] skipped " + f"as not meet any case requirement" + ) + continue -class Runner: - def __init__(self, runbook: schema.Runbook) -> None: - self.exit_code: int = 0 - self._runbook = runbook - self._log = get_logger("runner") - - # TODO: This entire function is one long string of side-effects. - # We need to reduce this function's complexity to remove the - # disabled warning, and not rely solely on side effects. 
- async def run(self) -> None: # noqa: C901 - # select test cases - selected_test_cases = select_testcases(self._runbook.testcase) - - selected_test_results = [ - TestResult(runtime_data=case) for case in selected_test_cases - ] - - # load predefined environments - candidate_environments = load_environments(self._runbook.environment) - - platform = load_platform(self._runbook.platform) - # get environment requirements - self._merge_test_requirements( - test_results=selected_test_results, - existing_environments=candidate_environments, - platform_type=platform.type_name(), - ) - - # there may not need to handle requirements, if all environment are predefined - prepared_environments = platform.prepare_environments(candidate_environments) - - can_run_results = selected_test_results - # request environment then run test s - for environment in prepared_environments: try: - is_needed: bool = False - can_run_results = [x for x in can_run_results if x.can_run] - can_run_results.sort(key=lambda x: x.runtime_data.metadata.suite.name) - new_env_can_run_results = [ - x for x in can_run_results if x.runtime_data.use_new_environment - ] - - if not can_run_results: - # no left tests, break the loop - self._log.debug( - f"no more test case to run, skip env [{environment.name}]" - ) + platform.deploy_environment(environment) + except WaitMoreResourceError as identifier: + log.warning( + f"[{environment.name}] waiting for more resource: " + f"{identifier}, skip assiging case" + ) + continue + + if not environment.is_ready: + log.warning( + f"[{environment.name}] is not deployed successfully, " + f"skip assiging case" + ) + continue + + # once environment is ready, check updated capability + log.info(f"start running cases on {environment.name}") + # try a case need new environment firstly + for new_env_result in new_env_can_run_results: + if new_env_result.check_environment(environment, True): + await _run_suite(environment=environment, cases=[new_env_result]) break - # check if any 
test need this environment - if any( - case.can_run and case.check_environment(environment, True) - for case in can_run_results + # grouped test results by test suite. + grouped_cases: List[TestResult] = [] + current_test_suite: Optional[TestSuiteMetadata] = None + for test_result in can_run_results: + if ( + test_result.can_run + and test_result.check_environment(environment, True) + and not test_result.runtime_data.use_new_environment ): - is_needed = True - - if not is_needed: - self._log.debug( - f"env[{environment.name}] skipped " - f"as not meet any case requirement" - ) - continue - - try: - platform.deploy_environment(environment) - except WaitMoreResourceError as identifier: - self._log.warning( - f"[{environment.name}] waiting for more resource: " - f"{identifier}, skip assiging case" - ) - continue - - if not environment.is_ready: - self._log.warning( - f"[{environment.name}] is not deployed successfully, " - f"skip assiging case" - ) - continue - - # once environment is ready, check updated capability - self._log.info(f"start running cases on {environment.name}") - # try a case need new environment firstly - for new_env_result in new_env_can_run_results: - if new_env_result.check_environment(environment, True): - await self._run_suite( - environment=environment, cases=[new_env_result] - ) - break - - # grouped test results by test suite. 
- grouped_cases: List[TestResult] = [] - current_test_suite: Optional[TestSuiteMetadata] = None - for test_result in can_run_results: if ( - test_result.can_run - and test_result.check_environment(environment, True) - and not test_result.runtime_data.use_new_environment + test_result.runtime_data.metadata.suite != current_test_suite + and grouped_cases ): - if ( - test_result.runtime_data.metadata.suite - != current_test_suite - and grouped_cases - ): - # run last batch cases - await self._run_suite( - environment=environment, cases=grouped_cases - ) - grouped_cases = [] - - # append new test cases - current_test_suite = test_result.runtime_data.metadata.suite - grouped_cases.append(test_result) - - if grouped_cases: - await self._run_suite(environment=environment, cases=grouped_cases) - finally: - if environment and environment.is_ready: - platform.delete_environment(environment) - - # not run as there is no fit environment. - for case in can_run_results: - if case.can_run: - reasons = "no available environment" - if case.check_results and case.check_results.reasons: - reasons = f"{reasons}: {case.check_results.reasons}" - - case.set_status(TestStatus.SKIPPED, reasons) - - result_count_dict: Dict[TestStatus, int] = dict() - for test_result in selected_test_results: - self._log.info( - f"{test_result.runtime_data.metadata.full_name:>30}: " - f"{test_result.status.name:<8} {test_result.message}" - ) - result_count = result_count_dict.get(test_result.status, 0) - result_count += 1 - result_count_dict[test_result.status] = result_count - - self._log.info("test result summary") - self._log.info(f" TOTAL : {len(selected_test_results)}") - for key in TestStatus: - self._log.info(f" {key.name:<9}: {result_count_dict.get(key, 0)}") - - # pass failed count to exit code - self.exit_code = result_count_dict.get(TestStatus.FAILED, 0) - - # for UT testability - self._latest_test_results = selected_test_results - - async def _run_suite( - self, environment: Environment, cases: 
List[TestResult] - ) -> None: - - assert cases - suite_metadata = cases[0].runtime_data.metadata.suite - test_suite: TestSuite = suite_metadata.test_class( - environment, - cases, - suite_metadata, - ) - for case in cases: - case.env = environment.name - await test_suite.start() - - def _merge_test_requirements( - self, - test_results: List[TestResult], - existing_environments: Environments, - platform_type: str, - ) -> None: - assert platform_type - platform_type_set = search_space.SetSpace[str]( - is_allow_set=True, items=[platform_type] + # run last batch cases + await _run_suite(environment=environment, cases=grouped_cases) + grouped_cases = [] + + # append new test cases + current_test_suite = test_result.runtime_data.metadata.suite + grouped_cases.append(test_result) + + if grouped_cases: + await _run_suite(environment=environment, cases=grouped_cases) + finally: + if environment and environment.is_ready: + platform.delete_environment(environment) + + # not run as there is no fit environment. + for case in can_run_results: + if case.can_run: + reasons = "no available environment" + if case.check_results and case.check_results.reasons: + reasons = f"{reasons}: {case.check_results.reasons}" + + case.set_status(TestStatus.SKIPPED, reasons) + + result_count_dict: Dict[TestStatus, int] = dict() + for test_result in selected_test_results: + log.info( + f"{test_result.runtime_data.metadata.full_name:>30}: " + f"{test_result.status.name:<8} {test_result.message}" ) - for test_result in test_results: - test_req: TestCaseRequirement = test_result.runtime_data.requirement - - # check if there is playform requirement on test case - if test_req.platform_type and len(test_req.platform_type) > 0: - check_result = test_req.platform_type.check(platform_type_set) - if not check_result.result: - test_result.set_status(TestStatus.SKIPPED, check_result.reasons) - - if test_result.can_run: - assert test_req.environment - # if case need a new env to run, force to create one. 
- # if not, get or create one. - if test_result.runtime_data.use_new_environment: - existing_environments.from_requirement(test_req.environment) - else: - existing_environments.get_or_create(test_req.environment) + result_count = result_count_dict.get(test_result.status, 0) + result_count += 1 + result_count_dict[test_result.status] = result_count + + log.info("test result summary") + log.info(f" TOTAL : {len(selected_test_results)}") + for key in TestStatus: + log.info(f" {key.name:<9}: {result_count_dict.get(key, 0)}") + + return selected_test_results + + +async def _run_suite(environment: Environment, cases: List[TestResult]) -> None: + + assert cases + suite_metadata = cases[0].runtime_data.metadata.suite + test_suite: TestSuite = suite_metadata.test_class( + environment, + cases, + suite_metadata, + ) + for case in cases: + case.env = environment.name + await test_suite.start() + + +def _merge_test_requirements( + test_results: List[TestResult], + existing_environments: Environments, + platform_type: str, +) -> None: + """TODO: This function modifies `test_results` and `existing_environments`.""" + assert platform_type + platform_type_set = search_space.SetSpace[str]( + is_allow_set=True, items=[platform_type] + ) + for test_result in test_results: + test_req: TestCaseRequirement = test_result.runtime_data.requirement + + # check if there is playform requirement on test case + if test_req.platform_type and len(test_req.platform_type) > 0: + check_result = test_req.platform_type.check(platform_type_set) + if not check_result.result: + test_result.set_status(TestStatus.SKIPPED, check_result.reasons) + + if test_result.can_run: + assert test_req.environment + # if case need a new env to run, force to create one. + # if not, get or create one. 
+ if test_result.runtime_data.use_new_environment: + existing_environments.from_requirement(test_req.environment) + else: + existing_environments.get_or_create(test_req.environment) diff --git a/lisa/tests/test_runner.py b/lisa/tests/test_runner.py index 1ee56daeb5..21e33a7d78 100644 --- a/lisa/tests/test_runner.py +++ b/lisa/tests/test_runner.py @@ -1,9 +1,9 @@ from typing import List, Optional from unittest import IsolatedAsyncioTestCase +import lisa.runner from lisa import schema from lisa.environment import load_environments -from lisa.runner import Runner from lisa.tests import test_platform, test_testsuite from lisa.tests.test_environment import generate_runbook as generate_env_runbook from lisa.tests.test_platform import deleted_envs, deployed_envs, prepared_envs @@ -16,9 +16,9 @@ from lisa.util import constants -def generate_runner( +def generate_test_runbook( env_runbook: Optional[schema.EnvironmentRoot] = None, case_use_new_env: bool = False -) -> Runner: +) -> schema.Runbook: runbook = schema.Runbook( platform=[ schema.Platform(type=constants.PLATFORM_MOCK, admin_password="not use it") @@ -32,9 +32,7 @@ def generate_runner( ) if env_runbook: runbook.environment = env_runbook - runner = Runner(runbook) - - return runner + return runbook class RunnerTestCase(IsolatedAsyncioTestCase): @@ -53,9 +51,8 @@ def test_merge_req_create_on_new(self) -> None: [], [x for x in envs], ) - runner = generate_runner(None) test_results = generate_cases_result() - runner._merge_test_requirements( + lisa.runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, @@ -81,10 +78,8 @@ def test_merge_req_run_not_create_on_equal(self) -> None: ["runbook_0"], [x for x in envs], ) - - runner = generate_runner(env_runbook) test_results = generate_cases_result() - runner._merge_test_requirements( + lisa.runner._merge_test_requirements( test_results=test_results, existing_environments=envs, 
platform_type=constants.PLATFORM_MOCK, @@ -110,11 +105,10 @@ def test_merge_req_create_on_use_new(self) -> None: ["runbook_0"], [x for x in envs], ) - runner = generate_runner(env_runbook) test_results = generate_cases_result() for test_result in test_results: test_result.runtime_data.use_new_environment = True - runner._merge_test_requirements( + lisa.runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, @@ -141,9 +135,8 @@ def test_merge_req_not_allow_create(self) -> None: [], [x for x in envs], ) - runner = generate_runner(None) test_results = generate_cases_result() - runner._merge_test_requirements( + lisa.runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, @@ -152,7 +145,6 @@ def test_merge_req_not_allow_create(self) -> None: [], [x for x in envs], ) - self.verify_test_results( expected_envs=["", "", ""], expected_status=[ @@ -175,19 +167,17 @@ def test_merge_req_platform_type_checked(self) -> None: [], [x for x in envs], ) - runner = generate_runner(None) test_results = generate_cases_result() for test_result in test_results: metadata = test_result.runtime_data.metadata metadata.requirement = simple_requirement( supported_platform_type=["notexists"] ) - runner._merge_test_requirements( + lisa.runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - platform_unsupported = "capability cannot support some of requirement" self.verify_test_results( expected_envs=["", "", ""], @@ -211,8 +201,8 @@ async def test_fit_a_predefined_env(self) -> None: # so it can run any case with core requirements. 
generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, remote=True) - runner = generate_runner(env_runbook) - await runner.run() + runbook = generate_test_runbook(env_runbook) + results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2"], expected_deployed_envs=["runbook_0", "req_1"], @@ -222,7 +212,7 @@ async def test_fit_a_predefined_env(self) -> None: expected_envs=["req_1", "runbook_0", "runbook_0"], expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED], expected_message=["", "", ""], - test_results=runner._latest_test_results, + test_results=results, ) async def test_fit_a_bigger_env(self) -> None: @@ -230,8 +220,8 @@ async def test_fit_a_bigger_env(self) -> None: # it doesn't equal to any case req, but reusable for all cases. generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_runner(env_runbook) - await runner.run() + runbook = generate_test_runbook(env_runbook) + results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0"], @@ -241,7 +231,7 @@ async def test_fit_a_bigger_env(self) -> None: expected_envs=["runbook_0", "runbook_0", "runbook_0"], expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED], expected_message=["", "", ""], - test_results=runner._latest_test_results, + test_results=results, ) async def test_case_new_env_run_only_1_needed(self) -> None: @@ -249,8 +239,8 @@ async def test_case_new_env_run_only_1_needed(self) -> None: # but all case want to run on a new env generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_runner(env_runbook, case_use_new_env=True) - await runner.run() + runbook = generate_test_runbook(env_runbook, case_use_new_env=True) + results = await 
lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0", "req_1", "req_3"], @@ -260,7 +250,7 @@ async def test_case_new_env_run_only_1_needed(self) -> None: expected_envs=["runbook_0", "req_1", "req_3"], expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED], expected_message=["", "", ""], - test_results=runner._latest_test_results, + test_results=results, ) async def test_no_needed_env(self) -> None: @@ -268,8 +258,8 @@ async def test_no_needed_env(self) -> None: # no cases assigned to runbook_1, as fit cases run on runbook_0 already generate_cases_metadata() env_runbook = generate_env_runbook(local=True, remote=True) - runner = generate_runner(env_runbook) - await runner.run() + runbook = generate_test_runbook(env_runbook) + results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "runbook_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0", "req_2"], @@ -279,7 +269,7 @@ async def test_no_needed_env(self) -> None: expected_envs=["req_2", "runbook_0", "runbook_0"], expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED], expected_message=["", "", ""], - test_results=runner._latest_test_results, + test_results=results, ) async def test_deploy_no_more_resource(self) -> None: @@ -289,8 +279,8 @@ async def test_deploy_no_more_resource(self) -> None: test_platform.wait_more_resource_error = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True) - runner = generate_runner(env_runbook) - await runner.run() + runbook = generate_test_runbook(env_runbook) + results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -310,7 +300,7 @@ async def test_deploy_no_more_resource(self) -> None: before_suite_failed, before_suite_failed, ], - test_results=runner._latest_test_results, + 
test_results=results, ) async def test_skipped_on_suite_failure(self) -> None: @@ -318,8 +308,8 @@ async def test_skipped_on_suite_failure(self) -> None: test_testsuite.fail_on_before_suite = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_runner(env_runbook) - await runner.run() + runbook = generate_test_runbook(env_runbook) + results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0"], @@ -334,7 +324,7 @@ async def test_skipped_on_suite_failure(self) -> None: TestStatus.PASSED, ], expected_message=[before_suite_failed, before_suite_failed, ""], - test_results=runner._latest_test_results, + test_results=results, ) async def test_env_skipped_no_prepared_env(self) -> None: @@ -342,8 +332,8 @@ async def test_env_skipped_no_prepared_env(self) -> None: test_platform.return_prepared = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_runner(env_runbook) - await runner.run() + runbook = generate_test_runbook(env_runbook) + results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=[], @@ -358,7 +348,7 @@ async def test_env_skipped_no_prepared_env(self) -> None: TestStatus.SKIPPED, ], expected_message=[no_avaiable_env, no_avaiable_env, no_avaiable_env], - test_results=runner._latest_test_results, + test_results=results, ) async def test_env_skipped_not_ready(self) -> None: @@ -367,8 +357,8 @@ async def test_env_skipped_not_ready(self) -> None: test_platform.deploy_is_ready = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_runner(env_runbook) - await runner.run() + runbook = generate_test_runbook(env_runbook) + results = await 
lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=["runbook_0", "req_1", "req_2", "req_3"], @@ -383,15 +373,15 @@ async def test_env_skipped_not_ready(self) -> None: TestStatus.SKIPPED, ], expected_message=[no_avaiable_env, no_avaiable_env, no_avaiable_env], - test_results=runner._latest_test_results, + test_results=results, ) async def test_env_skipped_no_case(self) -> None: # no case found, as not call generate_case_metadata # in this case, not deploy any env env_runbook = generate_env_runbook(is_single_env=True, remote=True) - runner = generate_runner(env_runbook) - await runner.run() + runbook = generate_test_runbook(env_runbook) + results = await lisa.runner.run(runbook) # still prepare predefined, but not deploy self.verify_env_results( expected_prepared=["runbook_0"], @@ -402,7 +392,7 @@ async def test_env_skipped_no_case(self) -> None: expected_envs=[], expected_status=[], expected_message=[], - test_results=runner._latest_test_results, + test_results=results, ) def verify_test_results( From 170cf1ce7b8b7be98f96c2725554d4ccab5744c6 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 5 Oct 2020 17:32:09 -0700 Subject: [PATCH 15/27] Simplify `generate_test_runbook` Since it now always expects an `env_runbook` we can just construct it on-the-fly. 
--- lisa/tests/test_runner.py | 60 ++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 36 deletions(-) diff --git a/lisa/tests/test_runner.py b/lisa/tests/test_runner.py index 21e33a7d78..6997dc096f 100644 --- a/lisa/tests/test_runner.py +++ b/lisa/tests/test_runner.py @@ -1,11 +1,11 @@ -from typing import List, Optional +from typing import Any, List from unittest import IsolatedAsyncioTestCase import lisa.runner from lisa import schema from lisa.environment import load_environments from lisa.tests import test_platform, test_testsuite -from lisa.tests.test_environment import generate_runbook as generate_env_runbook +from lisa.tests.test_environment import generate_runbook from lisa.tests.test_platform import deleted_envs, deployed_envs, prepared_envs from lisa.tests.test_testsuite import ( cleanup_cases_metadata, @@ -17,9 +17,10 @@ def generate_test_runbook( - env_runbook: Optional[schema.EnvironmentRoot] = None, case_use_new_env: bool = False + case_use_new_env: bool = False, **kwargs: Any ) -> schema.Runbook: - runbook = schema.Runbook( + """This wraps `generate_runbook` with a mock platform and test case.""" + return schema.Runbook( platform=[ schema.Platform(type=constants.PLATFORM_MOCK, admin_password="not use it") ], @@ -29,10 +30,8 @@ def generate_test_runbook( use_new_environment=case_use_new_env, ) ], + environment=generate_runbook(**kwargs), ) - if env_runbook: - runbook.environment = env_runbook - return runbook class RunnerTestCase(IsolatedAsyncioTestCase): @@ -45,8 +44,7 @@ def tearDown(self) -> None: def test_merge_req_create_on_new(self) -> None: # if no predefined envs, can generate from requirement - env_runbook = generate_env_runbook(is_single_env=False) - envs = load_environments(env_runbook) + envs = load_environments(generate_runbook(is_single_env=False)) self.assertListEqual( [], [x for x in envs], @@ -72,8 +70,7 @@ def test_merge_req_create_on_new(self) -> None: def test_merge_req_run_not_create_on_equal(self) -> None: # 
when merging requirement from test cases, # it won't create new, if predefined exact match test case needs - env_runbook = generate_env_runbook(remote=True) - envs = load_environments(env_runbook) + envs = load_environments(generate_runbook(remote=True)) self.assertListEqual( ["runbook_0"], [x for x in envs], @@ -99,8 +96,7 @@ def test_merge_req_create_on_use_new(self) -> None: # same runbook as test_merge_req_run_not_create_on_equal # but all 3 cases asks a new env, so create 3 envs # note, when running cases, predefined env will be treat as a new env. - env_runbook = generate_env_runbook(remote=True) - envs = load_environments(env_runbook) + envs = load_environments(generate_runbook(remote=True)) self.assertListEqual( ["runbook_0"], [x for x in envs], @@ -128,9 +124,9 @@ def test_merge_req_create_on_use_new(self) -> None: def test_merge_req_not_allow_create(self) -> None: # force to use existing env, not to create new. # this case doesn't provide predefined env, but no case skipped on this stage. - env_runbook = generate_env_runbook(is_single_env=False) - env_runbook.allow_create = False - envs = load_environments(env_runbook) + runbook = generate_runbook(is_single_env=False) + runbook.allow_create = False + envs = load_environments(runbook) self.assertListEqual( [], [x for x in envs], @@ -161,8 +157,7 @@ def test_merge_req_platform_type_checked(self) -> None: # for example, some case run on azure only. # platform check happens in runner, so this case is here # a simple check is enough. More covered by search_space.SetSpace - env_runbook = generate_env_runbook(is_single_env=False) - envs = load_environments(env_runbook) + envs = load_environments(generate_runbook(is_single_env=False)) self.assertListEqual( [], [x for x in envs], @@ -200,8 +195,7 @@ async def test_fit_a_predefined_env(self) -> None: # 2. ut3 need 8 cores, and predefined env target to meet all core requirement, # so it can run any case with core requirements. 
generate_cases_metadata() - env_runbook = generate_env_runbook(is_single_env=True, remote=True) - runbook = generate_test_runbook(env_runbook) + runbook = generate_test_runbook(is_single_env=True, remote=True) results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2"], @@ -219,8 +213,7 @@ async def test_fit_a_bigger_env(self) -> None: # similar with test_fit_a_predefined_env, but predefined 2 nodes, # it doesn't equal to any case req, but reusable for all cases. generate_cases_metadata() - env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runbook = generate_test_runbook(env_runbook) + runbook = generate_test_runbook(is_single_env=True, local=True, remote=True) results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -238,8 +231,9 @@ async def test_case_new_env_run_only_1_needed(self) -> None: # same predefined env as test_fit_a_bigger_env, # but all case want to run on a new env generate_cases_metadata() - env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runbook = generate_test_runbook(env_runbook, case_use_new_env=True) + runbook = generate_test_runbook( + case_use_new_env=True, is_single_env=True, local=True, remote=True + ) results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -257,8 +251,7 @@ async def test_no_needed_env(self) -> None: # two 1 node env predefined, but only runbook_0 go to deploy # no cases assigned to runbook_1, as fit cases run on runbook_0 already generate_cases_metadata() - env_runbook = generate_env_runbook(local=True, remote=True) - runbook = generate_test_runbook(env_runbook) + runbook = generate_test_runbook(local=True, remote=True) results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "runbook_1", "req_2", "req_3"], @@ -278,8 
+271,7 @@ async def test_deploy_no_more_resource(self) -> None: # In future, will add retry on wait more resource. test_platform.wait_more_resource_error = True generate_cases_metadata() - env_runbook = generate_env_runbook(is_single_env=True, local=True) - runbook = generate_test_runbook(env_runbook) + runbook = generate_test_runbook(is_single_env=True, local=True) results = await lisa.runner.run(runbook) self.verify_env_results( @@ -307,8 +299,7 @@ async def test_skipped_on_suite_failure(self) -> None: # first two cases skipped due to test suite setup failed test_testsuite.fail_on_before_suite = True generate_cases_metadata() - env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runbook = generate_test_runbook(env_runbook) + runbook = generate_test_runbook(is_single_env=True, local=True, remote=True) results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -331,8 +322,7 @@ async def test_env_skipped_no_prepared_env(self) -> None: # test env not prepared, so test cases cannot find an env to run test_platform.return_prepared = False generate_cases_metadata() - env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runbook = generate_test_runbook(env_runbook) + runbook = generate_test_runbook(is_single_env=True, local=True, remote=True) results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], @@ -356,8 +346,7 @@ async def test_env_skipped_not_ready(self) -> None: # so no cases can run test_platform.deploy_is_ready = False generate_cases_metadata() - env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runbook = generate_test_runbook(env_runbook) + runbook = generate_test_runbook(is_single_env=True, local=True, remote=True) results = await lisa.runner.run(runbook) self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", 
"req_3"], @@ -379,8 +368,7 @@ async def test_env_skipped_not_ready(self) -> None: async def test_env_skipped_no_case(self) -> None: # no case found, as not call generate_case_metadata # in this case, not deploy any env - env_runbook = generate_env_runbook(is_single_env=True, remote=True) - runbook = generate_test_runbook(env_runbook) + runbook = generate_test_runbook(is_single_env=True, remote=True) results = await lisa.runner.run(runbook) # still prepare predefined, but not deploy self.verify_env_results( From e4463205aa18ee27bc2c9ac1132cdd8a515a024f Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 5 Oct 2020 18:59:44 -0700 Subject: [PATCH 16/27] Clean up tests --- lisa/tests/test_runner.py | 155 +++++++++++++++++++------------------- 1 file changed, 78 insertions(+), 77 deletions(-) diff --git a/lisa/tests/test_runner.py b/lisa/tests/test_runner.py index 6997dc096f..54a5cbfdbe 100644 --- a/lisa/tests/test_runner.py +++ b/lisa/tests/test_runner.py @@ -22,7 +22,7 @@ def generate_test_runbook( """This wraps `generate_runbook` with a mock platform and test case.""" return schema.Runbook( platform=[ - schema.Platform(type=constants.PLATFORM_MOCK, admin_password="not use it") + schema.Platform(type=constants.PLATFORM_MOCK, admin_password="do-not-use") ], testcase=[ schema.TestCase( @@ -36,29 +36,26 @@ def generate_test_runbook( class RunnerTestCase(IsolatedAsyncioTestCase): def tearDown(self) -> None: - cleanup_cases_metadata() + cleanup_cases_metadata() # Necessary side effects! 
test_platform.return_prepared = True test_platform.deploy_is_ready = True test_platform.deploy_success = True test_platform.wait_more_resource_error = False def test_merge_req_create_on_new(self) -> None: - # if no predefined envs, can generate from requirement + """Create environments based on requirements.""" envs = load_environments(generate_runbook(is_single_env=False)) - self.assertListEqual( - [], - [x for x in envs], - ) + self.assertFalse(envs) test_results = generate_cases_result() lisa.runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - # 3 cases create 3 envs + # 3 cases create 3 environments. self.assertListEqual( ["req_0", "req_1", "req_2"], - [x for x in envs], + list(envs), ) self.verify_test_results( expected_envs=["", "", ""], @@ -68,12 +65,11 @@ def test_merge_req_create_on_new(self) -> None: ) def test_merge_req_run_not_create_on_equal(self) -> None: - # when merging requirement from test cases, - # it won't create new, if predefined exact match test case needs + """Don't create environments when already satisfied.""" envs = load_environments(generate_runbook(remote=True)) self.assertListEqual( ["runbook_0"], - [x for x in envs], + list(envs), ) test_results = generate_cases_result() lisa.runner._merge_test_requirements( @@ -81,11 +77,11 @@ def test_merge_req_run_not_create_on_equal(self) -> None: existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - - # 3 cases created only two req, as simple req meets on runbook_0 + # 3 cases created only two required environments, as the + # simple requirement was met by runbook_0. 
self.assertListEqual( ["runbook_0", "req_1", "req_2"], - [x for x in envs], + list(envs), ) self.assertListEqual( [TestStatus.NOTRUN, TestStatus.NOTRUN, TestStatus.NOTRUN], @@ -93,13 +89,11 @@ def test_merge_req_run_not_create_on_equal(self) -> None: ) def test_merge_req_create_on_use_new(self) -> None: - # same runbook as test_merge_req_run_not_create_on_equal - # but all 3 cases asks a new env, so create 3 envs - # note, when running cases, predefined env will be treat as a new env. + """Always create environments when asked.""" envs = load_environments(generate_runbook(remote=True)) self.assertListEqual( ["runbook_0"], - [x for x in envs], + list(envs), ) test_results = generate_cases_result() for test_result in test_results: @@ -109,10 +103,10 @@ def test_merge_req_create_on_use_new(self) -> None: existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - # every case need a new environment, so created 3 + # All 3 cases needed a new environment, so it created 3. self.assertListEqual( ["runbook_0", "req_1", "req_2", "req_3"], - [x for x in envs], + list(envs), ) self.verify_test_results( expected_envs=["", "", ""], @@ -122,25 +116,18 @@ def test_merge_req_create_on_use_new(self) -> None: ) def test_merge_req_not_allow_create(self) -> None: - # force to use existing env, not to create new. - # this case doesn't provide predefined env, but no case skipped on this stage. 
+ """Do not create an existing environment when not allowed.""" runbook = generate_runbook(is_single_env=False) runbook.allow_create = False envs = load_environments(runbook) - self.assertListEqual( - [], - [x for x in envs], - ) + self.assertFalse(envs) test_results = generate_cases_result() lisa.runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - self.assertListEqual( - [], - [x for x in envs], - ) + self.assertFalse(envs) self.verify_test_results( expected_envs=["", "", ""], expected_status=[ @@ -153,26 +140,26 @@ def test_merge_req_not_allow_create(self) -> None: ) def test_merge_req_platform_type_checked(self) -> None: - # check if current platform supported, - # for example, some case run on azure only. - # platform check happens in runner, so this case is here - # a simple check is enough. More covered by search_space.SetSpace + """Ensure the platform check happens. + + For example, some cases run only on Azure. A simple check is + sufficient because more is covered by `search_space.SetSpace`. + """ envs = load_environments(generate_runbook(is_single_env=False)) - self.assertListEqual( - [], - [x for x in envs], - ) + self.assertFalse(envs) test_results = generate_cases_result() for test_result in test_results: metadata = test_result.runtime_data.metadata metadata.requirement = simple_requirement( - supported_platform_type=["notexists"] + supported_platform_type=["does-not-exist"] ) lisa.runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) + # TODO: This message should be in a localization module of + # some sort. 
platform_unsupported = "capability cannot support some of requirement" self.verify_test_results( expected_envs=["", "", ""], @@ -190,10 +177,18 @@ def test_merge_req_platform_type_checked(self) -> None: ) async def test_fit_a_predefined_env(self) -> None: - # predefined env can run case in below condition. - # 1. with predefined env of 1 simple node, so ut2 don't need a new env - # 2. ut3 need 8 cores, and predefined env target to meet all core requirement, - # so it can run any case with core requirements. + """Pre-defined environments can run with the conditions: + + 1. With pre-defined environment of 1 simple node, unit test 2 + doesn't need a new environment. + + 2. Unit test 3 needs 8 cores, but the pre-defined environment + has these and so can run all the tests. + + """ + # TODO: This function call is necessary, which means that it + # sets some unknown global state. We need to fix those side + # effects because this is unintelligible. generate_cases_metadata() runbook = generate_test_runbook(is_single_env=True, remote=True) results = await lisa.runner.run(runbook) @@ -210,8 +205,12 @@ async def test_fit_a_predefined_env(self) -> None: ) async def test_fit_a_bigger_env(self) -> None: - # similar with test_fit_a_predefined_env, but predefined 2 nodes, - # it doesn't equal to any case req, but reusable for all cases. + """Similar to `test_fit_a_predefined_env` but with pre-defined 2 nodes. + + While it doesn't exactly match any requirement, it's re-usable + for every test. 
+ + """ generate_cases_metadata() runbook = generate_test_runbook(is_single_env=True, local=True, remote=True) results = await lisa.runner.run(runbook) @@ -228,8 +227,7 @@ async def test_fit_a_bigger_env(self) -> None: ) async def test_case_new_env_run_only_1_needed(self) -> None: - # same predefined env as test_fit_a_bigger_env, - # but all case want to run on a new env + """Same as `test_fit_a_bigger_env` but we require a new environment.""" generate_cases_metadata() runbook = generate_test_runbook( case_use_new_env=True, is_single_env=True, local=True, remote=True @@ -248,8 +246,14 @@ async def test_case_new_env_run_only_1_needed(self) -> None: ) async def test_no_needed_env(self) -> None: - # two 1 node env predefined, but only runbook_0 go to deploy - # no cases assigned to runbook_1, as fit cases run on runbook_0 already + """No new environments need to be created. + + Two single-node environments are pre-defined, and only + `runbook_0` is deployed. The environment for `runbook_1` is + not deployed because its tests were able to run on the + environment deployed for `runbook_0`. + + """ generate_cases_metadata() runbook = generate_test_runbook(local=True, remote=True) results = await lisa.runner.run(runbook) @@ -266,14 +270,16 @@ async def test_no_needed_env(self) -> None: ) async def test_deploy_no_more_resource(self) -> None: - # platform may see no more resource, like no azure quota. - # cases skipped due to this. - # In future, will add retry on wait more resource. + """Skip tests if resources quotas were hit. + + TODO: In the future, we may add retry logic to wait on + resources becoming available. 
+ + """ test_platform.wait_more_resource_error = True generate_cases_metadata() runbook = generate_test_runbook(is_single_env=True, local=True) results = await lisa.runner.run(runbook) - self.verify_env_results( expected_prepared=["runbook_0", "req_1", "req_2", "req_3"], expected_deployed_envs=[], @@ -296,7 +302,7 @@ async def test_deploy_no_more_resource(self) -> None: ) async def test_skipped_on_suite_failure(self) -> None: - # first two cases skipped due to test suite setup failed + """First two tests were skipped because the setup is made to fail.""" test_testsuite.fail_on_before_suite = True generate_cases_metadata() runbook = generate_test_runbook(is_single_env=True, local=True, remote=True) @@ -306,6 +312,8 @@ async def test_skipped_on_suite_failure(self) -> None: expected_deployed_envs=["runbook_0"], expected_deleted_envs=["runbook_0"], ) + # TODO: This message should be in a localization module of + # some sort. before_suite_failed = "before_suite: failed" self.verify_test_results( expected_envs=["runbook_0", "runbook_0", "runbook_0"], @@ -319,7 +327,7 @@ async def test_skipped_on_suite_failure(self) -> None: ) async def test_env_skipped_no_prepared_env(self) -> None: - # test env not prepared, so test cases cannot find an env to run + """The platform's environment isn't prepared so the tests cannot run.""" test_platform.return_prepared = False generate_cases_metadata() runbook = generate_test_runbook(is_single_env=True, local=True, remote=True) @@ -329,7 +337,9 @@ async def test_env_skipped_no_prepared_env(self) -> None: expected_deployed_envs=[], expected_deleted_envs=[], ) - no_avaiable_env = "no available environment" + # TODO: This message should be in a localization module of + # some sort. 
+ no_available_env = "no available environment" self.verify_test_results( expected_envs=["", "", ""], expected_status=[ @@ -337,13 +347,12 @@ async def test_env_skipped_no_prepared_env(self) -> None: TestStatus.SKIPPED, TestStatus.SKIPPED, ], - expected_message=[no_avaiable_env, no_avaiable_env, no_avaiable_env], + expected_message=[no_available_env, no_available_env, no_available_env], test_results=results, ) async def test_env_skipped_not_ready(self) -> None: - # env prepared, but not deployed to ready. - # so no cases can run + """The prepared environment is not deployed, so tests are skipped.""" test_platform.deploy_is_ready = False generate_cases_metadata() runbook = generate_test_runbook(is_single_env=True, local=True, remote=True) @@ -353,7 +362,9 @@ async def test_env_skipped_not_ready(self) -> None: expected_deployed_envs=["runbook_0", "req_1", "req_2", "req_3"], expected_deleted_envs=[], ) - no_avaiable_env = "no available environment" + # TODO: This message should be in a localization module of + # some sort. 
+ no_available_env = "no available environment" self.verify_test_results( expected_envs=["", "", ""], expected_status=[ @@ -361,16 +372,14 @@ async def test_env_skipped_not_ready(self) -> None: TestStatus.SKIPPED, TestStatus.SKIPPED, ], - expected_message=[no_avaiable_env, no_avaiable_env, no_avaiable_env], + expected_message=[no_available_env, no_available_env, no_available_env], test_results=results, ) async def test_env_skipped_no_case(self) -> None: - # no case found, as not call generate_case_metadata - # in this case, not deploy any env + """TODO: Investigate why `generate_case_metadata` side effects matter.""" runbook = generate_test_runbook(is_single_env=True, remote=True) results = await lisa.runner.run(runbook) - # still prepare predefined, but not deploy self.verify_env_results( expected_prepared=["runbook_0"], expected_deployed_envs=[], @@ -414,15 +423,7 @@ def verify_env_results( expected_deployed_envs: List[str], expected_deleted_envs: List[str], ) -> None: - self.assertListEqual( - expected_prepared, - [x for x in prepared_envs], - ) - self.assertListEqual( - expected_deployed_envs, - [x for x in deployed_envs], - ) - self.assertListEqual( - expected_deleted_envs, - [x for x in deleted_envs], - ) + """TODO: Explain why this function works and what it does.""" + self.assertListEqual(expected_prepared, list(prepared_envs)) + self.assertListEqual(expected_deployed_envs, list(deployed_envs)) + self.assertListEqual(expected_deleted_envs, list(deleted_envs)) From b3334422c969b2ede9d6b2301751acd2d424801a Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 5 Oct 2020 20:18:48 -0700 Subject: [PATCH 17/27] Temporarily rename `test_runner` back to `test_lisarunner` Because an implicit alphabetical dependency in the test module names breaks the tests. 
--- lisa/tests/{test_runner.py => test_lisarunner.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename lisa/tests/{test_runner.py => test_lisarunner.py} (100%) diff --git a/lisa/tests/test_runner.py b/lisa/tests/test_lisarunner.py similarity index 100% rename from lisa/tests/test_runner.py rename to lisa/tests/test_lisarunner.py From 1d784124a5eb60ae3dcd4664137a8977fc4d27b5 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 6 Oct 2020 18:29:28 -0700 Subject: [PATCH 18/27] Fix some types in process and shell MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There’s more work to do because this was using duck typing. It’s a little better now, but it’s just a massive wrapper for a library that’s just another wrapper for Python’s actual APIs (which are just wrappers for the system’s APIs). --- lisa/util/process.py | 23 +++++++++++++---------- lisa/util/shell.py | 4 ++-- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/lisa/util/process.py b/lisa/util/process.py index 862b370d2d..8cc4bf085a 100644 --- a/lisa/util/process.py +++ b/lisa/util/process.py @@ -5,6 +5,7 @@ from dataclasses import dataclass from typing import Dict, Optional +import spur # type: ignore from spur.errors import NoSuchCommandError # type: ignore from lisa.util.logger import Logger, LogWriter, get_logger @@ -23,6 +24,7 @@ def __str__(self) -> str: return self.stdout +# TODO: So much cleanup here. It was using duck typing. 
class Process: def __init__( self, @@ -36,6 +38,8 @@ def __init__( self._is_linux = shell.is_linux self._running: bool = False self._log = get_logger("cmd", id_, parent=parent_logger) + self._process: Optional[spur.local.LocalProcess] = None + self._result: Optional[ExecutableResult] = None def start( self, @@ -107,7 +111,7 @@ def start( except (FileNotFoundError, NoSuchCommandError) as identifier: # FileNotFoundError: not found command on Windows # NoSuchCommandError: not found command on remote Linux - self._process = ExecutableResult( + self._result = ExecutableResult( "", identifier.strerror, 1, self._timer.elapsed() ) self._log.debug(f"not found command: {identifier}") @@ -123,30 +127,29 @@ def wait_result(self, timeout: float = 600) -> ExecutableResult: self._log.warning(f"timeout in {timeout} sec, and killed") self.kill() - if not isinstance(self._process, ExecutableResult): + if self._result is None: + # if not isinstance(self._process, ExecutableResult): assert self._process proces_result = self._process.wait_for_result() self.stdout_writer.close() self.stderr_writer.close() - result: ExecutableResult = ExecutableResult( + # cache for future queries, in case it's queried twice. + self._result = ExecutableResult( proces_result.output.strip(), proces_result.stderr_output.strip(), proces_result.return_code, self._timer.elapsed(), ) - # cache for future queries, in case it's queried twice. 
- self._process = result - else: - result = self._process + self._process = None self._log.debug(f"waited with {self._timer}") - return result + return self._result def kill(self) -> None: - if self._process and not isinstance(self._process, ExecutableResult): + if self._process: self._process.send_signal(9) def is_running(self) -> bool: - if self._running: + if self._running and self._process: self._running = self._process.is_running() return self._running diff --git a/lisa/util/shell.py b/lisa/util/shell.py index b51f7ab6a7..f695e8da6d 100644 --- a/lisa/util/shell.py +++ b/lisa/util/shell.py @@ -177,7 +177,7 @@ def spawn( encoding: str = "utf-8", use_pty: bool = False, allow_error: bool = True, - ) -> Any: + ) -> spur.ssh.SshProcess: self.initialize() assert self._inner_shell return self._inner_shell.spawn( @@ -307,7 +307,7 @@ def spawn( encoding: str = "utf-8", use_pty: bool = False, allow_error: bool = False, - ) -> Any: + ) -> spur.local.LocalProcess: return self._inner_shell.spawn( command=command, update_env=update_env, From 7cbe0e642eb7f5f9cd4d603aa4ddf7e4294e0cac Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 6 Oct 2020 18:30:25 -0700 Subject: [PATCH 19/27] Manually close the fds which spur leaks --- lisa/util/process.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lisa/util/process.py b/lisa/util/process.py index 8cc4bf085a..ca535bb886 100644 --- a/lisa/util/process.py +++ b/lisa/util/process.py @@ -1,6 +1,7 @@ import logging import pathlib import shlex +import subprocess import time from dataclasses import dataclass from typing import Dict, Optional @@ -140,6 +141,17 @@ def wait_result(self, timeout: float = 600) -> ExecutableResult: proces_result.return_code, self._timer.elapsed(), ) + # TODO: The spur library is not very good and leaves open + # resources (probably due to it starting the process with + # `bufsize=0`). We need to replace it, but for now, we + # manually close the leaks. 
+ popen: subprocess.Popen[str] = self._process._subprocess + if popen.stdin: + popen.stdin.close() + if popen.stdout: + popen.stdout.close() + if popen.stderr: + popen.stderr.close() self._process = None self._log.debug(f"waited with {self._timer}") From 806f662b15814fc8167824ecd7a34f37be69d4bd Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 7 Oct 2020 14:03:28 -0700 Subject: [PATCH 20/27] Rename `TestSuite` to `LisaTestCase` to avoid confusion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we’re reusing the unittest module’s `TestCase` class, we should not introduce confusing new nomenclature like `TestSuite`, since a “suite” is different from a “case” in usage. --- CONTRIBUTING.md | 3 +++ examples/testsuites/helloworld.py | 4 ++-- examples/testsuites/multinodes.py | 4 ++-- examples/testsuites/withscript.py | 4 ++-- lisa/__init__.py | 4 ++-- lisa/runner.py | 4 ++-- lisa/tests/test_testsuite.py | 6 +++--- lisa/testsuite.py | 15 +++++++++++---- testsuites/basic/provisioning.py | 4 ++-- 9 files changed, 29 insertions(+), 19 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 79562b4b2c..b0aeae44cc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -26,6 +26,9 @@ short summary of the most important parts: * Constants should be `CAPITALIZED_SNAKE_CASE`. * When importing a function, try to avoid renaming it with `import as` because it introduces cognitive overhead to track yet another name. +* When deriving another module’s class (such as `unittest.TestCase`), reuse the + class name to avoid confusion, such as `LisaTestCase`, instead of introducing + a different connotation like `TestSuite`. When in doubt, adhere to existing conventions, or check the style guide. 
diff --git a/examples/testsuites/helloworld.py b/examples/testsuites/helloworld.py index fa03005fce..96cae2b319 100644 --- a/examples/testsuites/helloworld.py +++ b/examples/testsuites/helloworld.py @@ -1,4 +1,4 @@ -from lisa import TestCaseMetadata, TestSuite, TestSuiteMetadata +from lisa import LisaTestCase, TestCaseMetadata, TestSuiteMetadata from lisa.operating_system import Linux from lisa.tools import Echo, Uname @@ -12,7 +12,7 @@ """, tags=["demo"], ) -class HelloWorld(TestSuite): +class HelloWorld(LisaTestCase): @TestCaseMetadata( description=""" this test case use default node to diff --git a/examples/testsuites/multinodes.py b/examples/testsuites/multinodes.py index e42f754e37..196d345eaa 100644 --- a/examples/testsuites/multinodes.py +++ b/examples/testsuites/multinodes.py @@ -1,4 +1,4 @@ -from lisa import TestCaseMetadata, TestSuite, TestSuiteMetadata +from lisa import LisaTestCase, TestCaseMetadata, TestSuiteMetadata from lisa.testsuite import simple_requirement from lisa.tools import Lscpu, Ntttcp @@ -13,7 +13,7 @@ tags=["demo", "multinode"], requirement=simple_requirement(min_count=2), ) -class MutipleNodesDemo(TestSuite): +class MutipleNodesDemo(LisaTestCase): @TestCaseMetadata( description=""" This test case send and receive data by ntttcp diff --git a/examples/testsuites/withscript.py b/examples/testsuites/withscript.py index a25b2309ea..9098ce917c 100644 --- a/examples/testsuites/withscript.py +++ b/examples/testsuites/withscript.py @@ -1,6 +1,6 @@ from pathlib import Path -from lisa import TestCaseMetadata, TestSuite, TestSuiteMetadata +from lisa import LisaTestCase, TestCaseMetadata, TestSuiteMetadata from lisa.executable import CustomScript, CustomScriptBuilder from lisa.operating_system import Windows from lisa.testsuite import simple_requirement @@ -15,7 +15,7 @@ """, tags=["demo"], ) -class WithScript(TestSuite): +class WithScript(LisaTestCase): def before_suite(self) -> None: self._echo_script = CustomScriptBuilder( 
Path(__file__).parent.joinpath("scripts"), ["echo.sh"] diff --git a/lisa/__init__.py b/lisa/__init__.py index adec31b93d..f728bdab12 100644 --- a/lisa/__init__.py +++ b/lisa/__init__.py @@ -1,12 +1,12 @@ from __future__ import annotations -from lisa.testsuite import TestCaseMetadata, TestSuite, TestSuiteMetadata +from lisa.testsuite import LisaTestCase, TestCaseMetadata, TestSuiteMetadata from lisa.util.logger import init_loggger __all__ = [ "TestSuiteMetadata", "TestCaseMetadata", - "TestSuite", + "LisaTestCase", ] diff --git a/lisa/runner.py b/lisa/runner.py index 3d952f6e40..6b42a5f81d 100644 --- a/lisa/runner.py +++ b/lisa/runner.py @@ -5,10 +5,10 @@ from lisa.platform_ import WaitMoreResourceError, load_platform from lisa.testselector import select_testcases from lisa.testsuite import ( + LisaTestCase, TestCaseRequirement, TestResult, TestStatus, - TestSuite, TestSuiteMetadata, ) from lisa.util.logger import get_logger @@ -153,7 +153,7 @@ async def _run_suite(environment: Environment, cases: List[TestResult]) -> None: assert cases suite_metadata = cases[0].runtime_data.metadata.suite - test_suite: TestSuite = suite_metadata.test_class( + test_suite: LisaTestCase = suite_metadata.test_class( environment, cases, suite_metadata, diff --git a/lisa/tests/test_testsuite.py b/lisa/tests/test_testsuite.py index 183d33816f..df526d57bd 100644 --- a/lisa/tests/test_testsuite.py +++ b/lisa/tests/test_testsuite.py @@ -8,11 +8,11 @@ from lisa.tests.test_environment import generate_runbook from lisa.testselector import select_testcases from lisa.testsuite import ( + LisaTestCase, TestCaseMetadata, TestCaseRuntimeData, TestResult, TestStatus, - TestSuite, TestSuiteMetadata, get_cases_metadata, get_suites_metadata, @@ -28,7 +28,7 @@ fail_case_count = 0 -class MockTestSuite(TestSuite): +class MockTestSuite(LisaTestCase): def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.fail_on_before_suite = fail_on_before_suite @@ -76,7 +76,7 @@ def 
mock_ut2(self) -> None: pass -class MockTestSuite2(TestSuite): +class MockTestSuite2(LisaTestCase): def mock_ut3(self) -> None: pass diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 526cd337e2..bc30f87647 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -175,7 +175,7 @@ def __init__( self.description = description self.requirement = requirement - def __call__(self, test_class: Type[TestSuite]) -> Callable[..., object]: + def __call__(self, test_class: Type[LisaTestCase]) -> Callable[..., object]: self.test_class = test_class if not self.name: self.name = test_class.__name__ @@ -183,11 +183,11 @@ def __call__(self, test_class: Type[TestSuite]) -> Callable[..., object]: @wraps(self.test_class) def wrapper( - test_class: Type[TestSuite], + test_class: Type[LisaTestCase], environment: Environment, cases: List[TestResult], metadata: TestSuiteMetadata, - ) -> TestSuite: + ) -> LisaTestCase: return test_class(environment, cases, metadata) return wrapper @@ -267,7 +267,14 @@ def clone(self) -> TestCaseRuntimeData: return cloned -class TestSuite(unittest.TestCase, metaclass=ABCMeta): +class LisaTestCase(unittest.TestCase, metaclass=ABCMeta): + """This class wraps the unittest module's 'TestCase' class. + + It should be used in the same way, where non-abstract methods + represent unit tests, usually prefixed with `test_`. 
+ + """ + def __init__( self, environment: Environment, diff --git a/testsuites/basic/provisioning.py b/testsuites/basic/provisioning.py index 7e18ddef3b..057b75bf28 100644 --- a/testsuites/basic/provisioning.py +++ b/testsuites/basic/provisioning.py @@ -1,4 +1,4 @@ -from lisa import TestCaseMetadata, TestSuite, TestSuiteMetadata +from lisa import LisaTestCase, TestCaseMetadata, TestSuiteMetadata from lisa.features import StartStop from lisa.testsuite import simple_requirement from lisa.tools import Dmesg @@ -13,7 +13,7 @@ """, tags=[], ) -class Provisioning(TestSuite): +class Provisioning(LisaTestCase): @TestCaseMetadata( description=""" this test uses to restart a node, and compare dmesg output. From c5bac19327ff78814ff4ceb7d634a086574fd153 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 7 Oct 2020 15:36:00 -0700 Subject: [PATCH 21/27] Rename `TestCaseMetadata` to `LisaTestMetadata` for consistency --- examples/testsuites/helloworld.py | 6 +++--- examples/testsuites/multinodes.py | 6 +++--- examples/testsuites/withscript.py | 4 ++-- lisa/__init__.py | 4 ++-- lisa/tests/test_testsuite.py | 10 +++++----- lisa/testselector.py | 22 +++++++++++----------- lisa/testsuite.py | 18 +++++++++++------- testsuites/basic/provisioning.py | 4 ++-- 8 files changed, 39 insertions(+), 35 deletions(-) diff --git a/examples/testsuites/helloworld.py b/examples/testsuites/helloworld.py index 96cae2b319..a66b6547d9 100644 --- a/examples/testsuites/helloworld.py +++ b/examples/testsuites/helloworld.py @@ -1,4 +1,4 @@ -from lisa import LisaTestCase, TestCaseMetadata, TestSuiteMetadata +from lisa import LisaTestCase, LisaTestMetadata, TestSuiteMetadata from lisa.operating_system import Linux from lisa.tools import Echo, Uname @@ -13,7 +13,7 @@ tags=["demo"], ) class HelloWorld(LisaTestCase): - @TestCaseMetadata( + @LisaTestMetadata( description=""" this test case use default node to 1. 
get system info @@ -43,7 +43,7 @@ def hello(self) -> None: self.assertEqual("", result.stderr) self.assertEqual(0, result.exit_code) - @TestCaseMetadata( + @LisaTestMetadata( description=""" demonstrate a simple way to run command in one line. """, diff --git a/examples/testsuites/multinodes.py b/examples/testsuites/multinodes.py index 196d345eaa..32162a6115 100644 --- a/examples/testsuites/multinodes.py +++ b/examples/testsuites/multinodes.py @@ -1,4 +1,4 @@ -from lisa import LisaTestCase, TestCaseMetadata, TestSuiteMetadata +from lisa import LisaTestCase, LisaTestMetadata, TestSuiteMetadata from lisa.testsuite import simple_requirement from lisa.tools import Lscpu, Ntttcp @@ -14,7 +14,7 @@ requirement=simple_requirement(min_count=2), ) class MutipleNodesDemo(LisaTestCase): - @TestCaseMetadata( + @LisaTestMetadata( description=""" This test case send and receive data by ntttcp """, @@ -28,7 +28,7 @@ def os_info(self) -> None: core_count = lscpu.get_core_count() self.log.info(f"index: {node.index}, core_count: {core_count}") - @TestCaseMetadata( + @LisaTestMetadata( description=""" this test case send and receive data by ntttcp """, diff --git a/examples/testsuites/withscript.py b/examples/testsuites/withscript.py index 9098ce917c..a7e8829c67 100644 --- a/examples/testsuites/withscript.py +++ b/examples/testsuites/withscript.py @@ -1,6 +1,6 @@ from pathlib import Path -from lisa import LisaTestCase, TestCaseMetadata, TestSuiteMetadata +from lisa import LisaTestCase, LisaTestMetadata, TestSuiteMetadata from lisa.executable import CustomScript, CustomScriptBuilder from lisa.operating_system import Windows from lisa.testsuite import simple_requirement @@ -21,7 +21,7 @@ def before_suite(self) -> None: Path(__file__).parent.joinpath("scripts"), ["echo.sh"] ) - @TestCaseMetadata( + @LisaTestMetadata( description=""" this test case run script on a linux node, and demostrate 1. how to use customized script on tested node. 
diff --git a/lisa/__init__.py b/lisa/__init__.py index f728bdab12..a6cf501fbb 100644 --- a/lisa/__init__.py +++ b/lisa/__init__.py @@ -1,11 +1,11 @@ from __future__ import annotations -from lisa.testsuite import LisaTestCase, TestCaseMetadata, TestSuiteMetadata +from lisa.testsuite import LisaTestCase, LisaTestMetadata, TestSuiteMetadata from lisa.util.logger import init_loggger __all__ = [ "TestSuiteMetadata", - "TestCaseMetadata", + "LisaTestMetadata", "LisaTestCase", ] diff --git a/lisa/tests/test_testsuite.py b/lisa/tests/test_testsuite.py index df526d57bd..5be794f90a 100644 --- a/lisa/tests/test_testsuite.py +++ b/lisa/tests/test_testsuite.py @@ -9,7 +9,7 @@ from lisa.testselector import select_testcases from lisa.testsuite import ( LisaTestCase, - TestCaseMetadata, + LisaTestMetadata, TestCaseRuntimeData, TestResult, TestStatus, @@ -86,15 +86,15 @@ def cleanup_cases_metadata() -> None: get_suites_metadata().clear() -def generate_cases_metadata() -> List[TestCaseMetadata]: +def generate_cases_metadata() -> List[LisaTestMetadata]: ut_cases = [ - TestCaseMetadata( + LisaTestMetadata( "ut1", 0, requirement=simple_requirement(min_count=2), ), - TestCaseMetadata("ut2", 1), - TestCaseMetadata("ut3", 2), + LisaTestMetadata("ut2", 1), + LisaTestMetadata("ut3", 2), ] suite_metadata1 = TestSuiteMetadata("a1", "c1", "des1", ["t1", "t2"]) suite_metadata1(MockTestSuite) diff --git a/lisa/testselector.py b/lisa/testselector.py index 1fdfe625c6..b7b7db8fd7 100644 --- a/lisa/testselector.py +++ b/lisa/testselector.py @@ -3,7 +3,7 @@ from typing import Callable, Dict, List, Mapping, Optional, Pattern, Set, Union, cast from lisa import schema -from lisa.testsuite import TestCaseMetadata, TestCaseRuntimeData, get_cases_metadata +from lisa.testsuite import LisaTestMetadata, TestCaseRuntimeData, get_cases_metadata from lisa.util import LisaException, constants, set_filtered_fields from lisa.util.logger import get_logger @@ -12,14 +12,14 @@ def select_testcases( filters: 
Optional[List[schema.TestCase]] = None, - init_cases: Optional[List[TestCaseMetadata]] = None, + init_cases: Optional[List[LisaTestMetadata]] = None, ) -> List[TestCaseRuntimeData]: """ based on filters to select test cases. If filters are None, return all cases. """ log = _get_logger() if init_cases: - full_list: Dict[str, TestCaseMetadata] = dict() + full_list: Dict[str, LisaTestMetadata] = dict() for item in init_cases: full_list[item.full_name] = item else: @@ -53,7 +53,7 @@ def select_testcases( def _match_string( - case: Union[TestCaseRuntimeData, TestCaseMetadata], + case: Union[TestCaseRuntimeData, LisaTestMetadata], pattern: Pattern[str], attr_name: str, ) -> bool: @@ -63,7 +63,7 @@ def _match_string( def _match_priority( - case: Union[TestCaseRuntimeData, TestCaseMetadata], pattern: Union[int, List[int]] + case: Union[TestCaseRuntimeData, LisaTestMetadata], pattern: Union[int, List[int]] ) -> bool: priority = case.priority is_matched: bool = False @@ -75,7 +75,7 @@ def _match_priority( def _match_tags( - case: Union[TestCaseRuntimeData, TestCaseMetadata], + case: Union[TestCaseRuntimeData, LisaTestMetadata], criteria_tags: Union[str, List[str]], ) -> bool: case_tags = case.tags @@ -88,8 +88,8 @@ def _match_tags( def _match_cases( - candidates: Mapping[str, Union[TestCaseRuntimeData, TestCaseMetadata]], - patterns: List[Callable[[Union[TestCaseRuntimeData, TestCaseMetadata]], bool]], + candidates: Mapping[str, Union[TestCaseRuntimeData, LisaTestMetadata]], + patterns: List[Callable[[Union[TestCaseRuntimeData, LisaTestMetadata]], bool]], ) -> Dict[str, TestCaseRuntimeData]: changed_cases: Dict[str, TestCaseRuntimeData] = dict() @@ -97,7 +97,7 @@ def _match_cases( candidate = candidates[candidate_name] is_matched = all(pattern(candidate) for pattern in patterns) if is_matched: - if isinstance(candidate, TestCaseMetadata): + if isinstance(candidate, LisaTestMetadata): candidate = TestCaseRuntimeData(candidate) changed_cases[candidate_name] = candidate return 
changed_cases @@ -144,13 +144,13 @@ def _apply_filter( # noqa: C901 current_selected: Dict[str, TestCaseRuntimeData], force_included: Set[str], force_excluded: Set[str], - full_list: Dict[str, TestCaseMetadata], + full_list: Dict[str, LisaTestMetadata], ) -> Dict[str, TestCaseRuntimeData]: # TODO: Reduce this function's complexity and remove the disabled warning. log = _get_logger() # initialize criterias - patterns: List[Callable[[Union[TestCaseRuntimeData, TestCaseMetadata]], bool]] = [] + patterns: List[Callable[[Union[TestCaseRuntimeData, LisaTestMetadata]], bool]] = [] criterias_runbook = case_runbook.criteria assert criterias_runbook, "test case criteria cannot be None" criterias_runbook_dict = criterias_runbook.__dict__ diff --git a/lisa/testsuite.py b/lisa/testsuite.py index bc30f87647..797175bd29 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -26,7 +26,7 @@ ) _all_suites: Dict[str, TestSuiteMetadata] = dict() -_all_cases: Dict[str, TestCaseMetadata] = dict() +_all_cases: Dict[str, LisaTestMetadata] = dict() class SkipTestCaseException(LisaException): @@ -164,7 +164,7 @@ def __init__( requirement: TestCaseRequirement = DEFAULT_REQUIREMENT, ) -> None: self.name = name - self.cases: List[TestCaseMetadata] = [] + self.cases: List[LisaTestMetadata] = [] self.area = area self.category = category @@ -193,7 +193,9 @@ def wrapper( return wrapper -class TestCaseMetadata: +class LisaTestMetadata: + """This decorator supplies metadata for each test.""" + def __init__( self, description: str, @@ -201,6 +203,8 @@ def __init__( requirement: Optional[TestCaseRequirement] = None, ) -> None: self.priority = priority + # TODO: Each test description should be from its docstring, + # not here. 
self.description = description # TODO: Because this class is pseudo-inherited through # attribute abuse, this optionally defined attribute causes a @@ -234,7 +238,7 @@ def set_suite(self, suite: TestSuiteMetadata) -> None: class TestCaseRuntimeData: - def __init__(self, metadata: TestCaseMetadata): + def __init__(self, metadata: LisaTestMetadata): self.metadata = metadata # all runtime setting fields @@ -406,7 +410,7 @@ def get_suites_metadata() -> Dict[str, TestSuiteMetadata]: return _all_suites -def get_cases_metadata() -> Dict[str, TestCaseMetadata]: +def get_cases_metadata() -> Dict[str, LisaTestMetadata]: return _all_cases @@ -435,7 +439,7 @@ def _add_suite_metadata(metadata: TestSuiteMetadata) -> None: ) -def _add_case_metadata(metadata: TestCaseMetadata) -> None: +def _add_case_metadata(metadata: LisaTestMetadata) -> None: full_name = metadata.full_name if _all_cases.get(full_name) is None: @@ -456,7 +460,7 @@ def _add_case_metadata(metadata: TestCaseMetadata) -> None: def _add_case_to_suite( - test_suite: TestSuiteMetadata, test_case: TestCaseMetadata + test_suite: TestSuiteMetadata, test_case: LisaTestMetadata ) -> None: test_case.suite = test_suite test_suite.cases.append(test_case) diff --git a/testsuites/basic/provisioning.py b/testsuites/basic/provisioning.py index 057b75bf28..c36165c42c 100644 --- a/testsuites/basic/provisioning.py +++ b/testsuites/basic/provisioning.py @@ -1,4 +1,4 @@ -from lisa import LisaTestCase, TestCaseMetadata, TestSuiteMetadata +from lisa import LisaTestCase, LisaTestMetadata, TestSuiteMetadata from lisa.features import StartStop from lisa.testsuite import simple_requirement from lisa.tools import Dmesg @@ -14,7 +14,7 @@ tags=[], ) class Provisioning(LisaTestCase): - @TestCaseMetadata( + @LisaTestMetadata( description=""" this test uses to restart a node, and compare dmesg output. 
the case fails on any panic in kernel From 86d84e20b68a624e96953095538cb72e037d0ee6 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 7 Oct 2020 15:40:12 -0700 Subject: [PATCH 22/27] Rename `TestSuiteMetadata` to `LisaTestCaseMetadata` for consistency --- examples/testsuites/helloworld.py | 4 ++-- examples/testsuites/multinodes.py | 4 ++-- examples/testsuites/withscript.py | 4 ++-- lisa/__init__.py | 6 +++--- lisa/runner.py | 4 ++-- lisa/tests/test_testsuite.py | 6 +++--- lisa/testsuite.py | 23 ++++++++++++++--------- testsuites/basic/provisioning.py | 4 ++-- 8 files changed, 30 insertions(+), 25 deletions(-) diff --git a/examples/testsuites/helloworld.py b/examples/testsuites/helloworld.py index a66b6547d9..a3d867da01 100644 --- a/examples/testsuites/helloworld.py +++ b/examples/testsuites/helloworld.py @@ -1,9 +1,9 @@ -from lisa import LisaTestCase, LisaTestMetadata, TestSuiteMetadata +from lisa import LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata from lisa.operating_system import Linux from lisa.tools import Echo, Uname -@TestSuiteMetadata( +@LisaTestCaseMetadata( area="demo", category="simple", description=""" diff --git a/examples/testsuites/multinodes.py b/examples/testsuites/multinodes.py index 32162a6115..becf0f36c5 100644 --- a/examples/testsuites/multinodes.py +++ b/examples/testsuites/multinodes.py @@ -1,9 +1,9 @@ -from lisa import LisaTestCase, LisaTestMetadata, TestSuiteMetadata +from lisa import LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata from lisa.testsuite import simple_requirement from lisa.tools import Lscpu, Ntttcp -@TestSuiteMetadata( +@LisaTestCaseMetadata( area="demo", category="demo", description=""" diff --git a/examples/testsuites/withscript.py b/examples/testsuites/withscript.py index a7e8829c67..76883c984d 100644 --- a/examples/testsuites/withscript.py +++ b/examples/testsuites/withscript.py @@ -1,13 +1,13 @@ from pathlib import Path -from lisa import LisaTestCase, LisaTestMetadata, TestSuiteMetadata +from 
lisa import LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata from lisa.executable import CustomScript, CustomScriptBuilder from lisa.operating_system import Windows from lisa.testsuite import simple_requirement from lisa.util.perf_timer import create_timer -@TestSuiteMetadata( +@LisaTestCaseMetadata( area="demo", category="simple", description=""" diff --git a/lisa/__init__.py b/lisa/__init__.py index a6cf501fbb..84ef2afe5a 100644 --- a/lisa/__init__.py +++ b/lisa/__init__.py @@ -1,12 +1,12 @@ from __future__ import annotations -from lisa.testsuite import LisaTestCase, LisaTestMetadata, TestSuiteMetadata +from lisa.testsuite import LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata from lisa.util.logger import init_loggger __all__ = [ - "TestSuiteMetadata", - "LisaTestMetadata", "LisaTestCase", + "LisaTestCaseMetadata", + "LisaTestMetadata", ] diff --git a/lisa/runner.py b/lisa/runner.py index 6b42a5f81d..b2f02be07e 100644 --- a/lisa/runner.py +++ b/lisa/runner.py @@ -6,10 +6,10 @@ from lisa.testselector import select_testcases from lisa.testsuite import ( LisaTestCase, + LisaTestCaseMetadata, TestCaseRequirement, TestResult, TestStatus, - TestSuiteMetadata, ) from lisa.util.logger import get_logger @@ -97,7 +97,7 @@ async def run(runbook: schema.Runbook) -> List[TestResult]: # noqa: C901 # grouped test results by test suite. 
grouped_cases: List[TestResult] = [] - current_test_suite: Optional[TestSuiteMetadata] = None + current_test_suite: Optional[LisaTestCaseMetadata] = None for test_result in can_run_results: if ( test_result.can_run diff --git a/lisa/tests/test_testsuite.py b/lisa/tests/test_testsuite.py index 5be794f90a..1f7de54f80 100644 --- a/lisa/tests/test_testsuite.py +++ b/lisa/tests/test_testsuite.py @@ -9,11 +9,11 @@ from lisa.testselector import select_testcases from lisa.testsuite import ( LisaTestCase, + LisaTestCaseMetadata, LisaTestMetadata, TestCaseRuntimeData, TestResult, TestStatus, - TestSuiteMetadata, get_cases_metadata, get_suites_metadata, simple_requirement, @@ -96,12 +96,12 @@ def generate_cases_metadata() -> List[LisaTestMetadata]: LisaTestMetadata("ut2", 1), LisaTestMetadata("ut3", 2), ] - suite_metadata1 = TestSuiteMetadata("a1", "c1", "des1", ["t1", "t2"]) + suite_metadata1 = LisaTestCaseMetadata("a1", "c1", "des1", ["t1", "t2"]) suite_metadata1(MockTestSuite) ut_cases[0](MockTestSuite.mock_ut1) ut_cases[1](MockTestSuite.mock_ut2) - suite_metadata2 = TestSuiteMetadata( + suite_metadata2 = LisaTestCaseMetadata( "a2", "c2", "des2", diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 797175bd29..1c2d4b7225 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -25,7 +25,7 @@ "TestStatus", ["NOTRUN", "RUNNING", "FAILED", "PASSED", "SKIPPED", "ATTEMPTED"] ) -_all_suites: Dict[str, TestSuiteMetadata] = dict() +_all_suites: Dict[str, LisaTestCaseMetadata] = dict() _all_cases: Dict[str, LisaTestMetadata] = dict() @@ -153,7 +153,9 @@ def simple_requirement( DEFAULT_REQUIREMENT = simple_requirement() -class TestSuiteMetadata: +class LisaTestCaseMetadata: + """This decorator supplies metadata for each case of tests.""" + def __init__( self, area: str, @@ -163,6 +165,7 @@ def __init__( name: str = "", requirement: TestCaseRequirement = DEFAULT_REQUIREMENT, ) -> None: + # TODO: The name should be the test case’s class name. 
self.name = name self.cases: List[LisaTestMetadata] = [] @@ -172,6 +175,8 @@ def __init__( self.tags = tags else: self.tags = [] + # TODO: Each test case description should be from its + # docstring, not here. self.description = description self.requirement = requirement @@ -186,7 +191,7 @@ def wrapper( test_class: Type[LisaTestCase], environment: Environment, cases: List[TestResult], - metadata: TestSuiteMetadata, + metadata: LisaTestCaseMetadata, ) -> LisaTestCase: return test_class(environment, cases, metadata) @@ -233,8 +238,8 @@ def wrapper(*args: object) -> None: return wrapper - def set_suite(self, suite: TestSuiteMetadata) -> None: - self.suite: TestSuiteMetadata = suite + def set_suite(self, suite: LisaTestCaseMetadata) -> None: + self.suite: LisaTestCaseMetadata = suite class TestCaseRuntimeData: @@ -283,7 +288,7 @@ def __init__( self, environment: Environment, case_results: List[TestResult], - metadata: TestSuiteMetadata, + metadata: LisaTestCaseMetadata, ) -> None: super().__init__() self.environment = environment @@ -406,7 +411,7 @@ async def start(self) -> None: # noqa: C901 self.log.debug(f"after_suite end with {timer}") -def get_suites_metadata() -> Dict[str, TestSuiteMetadata]: +def get_suites_metadata() -> Dict[str, LisaTestCaseMetadata]: return _all_suites @@ -414,7 +419,7 @@ def get_cases_metadata() -> Dict[str, LisaTestMetadata]: return _all_cases -def _add_suite_metadata(metadata: TestSuiteMetadata) -> None: +def _add_suite_metadata(metadata: LisaTestCaseMetadata) -> None: if metadata.name: key = metadata.name else: @@ -460,7 +465,7 @@ def _add_case_metadata(metadata: LisaTestMetadata) -> None: def _add_case_to_suite( - test_suite: TestSuiteMetadata, test_case: LisaTestMetadata + test_suite: LisaTestCaseMetadata, test_case: LisaTestMetadata ) -> None: test_case.suite = test_suite test_suite.cases.append(test_case) diff --git a/testsuites/basic/provisioning.py b/testsuites/basic/provisioning.py index c36165c42c..5ac99864a5 100644 --- 
a/testsuites/basic/provisioning.py +++ b/testsuites/basic/provisioning.py @@ -1,11 +1,11 @@ -from lisa import LisaTestCase, LisaTestMetadata, TestSuiteMetadata +from lisa import LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata from lisa.features import StartStop from lisa.testsuite import simple_requirement from lisa.tools import Dmesg from lisa.util.perf_timer import create_timer -@TestSuiteMetadata( +@LisaTestCaseMetadata( area="provisioning", category="functional", description=""" From 5a914a043c558e89a24904509345a3b5dacd2768 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 7 Oct 2020 16:24:39 -0700 Subject: [PATCH 23/27] Rename `_all_suites` and `_all_cases` for consistency Also rename (and delete where appropriate) the respective functions (and start cleaning them up, though they need more work). --- lisa/tests/test_testsuite.py | 7 ++-- lisa/testselector.py | 5 +-- lisa/testsuite.py | 69 ++++++++++++++++++------------------ 3 files changed, 41 insertions(+), 40 deletions(-) diff --git a/lisa/tests/test_testsuite.py b/lisa/tests/test_testsuite.py index 1f7de54f80..0c437a3782 100644 --- a/lisa/tests/test_testsuite.py +++ b/lisa/tests/test_testsuite.py @@ -1,6 +1,7 @@ from typing import Any, List from unittest import IsolatedAsyncioTestCase, TestCase +import lisa.testsuite from lisa import schema from lisa.environment import load_environments from lisa.operating_system import Linux, Windows @@ -14,8 +15,6 @@ TestCaseRuntimeData, TestResult, TestStatus, - get_cases_metadata, - get_suites_metadata, simple_requirement, ) from lisa.util import LisaException, constants @@ -82,8 +81,8 @@ def mock_ut3(self) -> None: def cleanup_cases_metadata() -> None: - get_cases_metadata().clear() - get_suites_metadata().clear() + lisa.testsuite.lisa_tests_metadata.clear() + lisa.testsuite.lisa_test_cases_metadata.clear() def generate_cases_metadata() -> List[LisaTestMetadata]: diff --git a/lisa/testselector.py b/lisa/testselector.py index b7b7db8fd7..8041719bef 
100644 --- a/lisa/testselector.py +++ b/lisa/testselector.py @@ -2,8 +2,9 @@ from functools import partial from typing import Callable, Dict, List, Mapping, Optional, Pattern, Set, Union, cast +import lisa.testsuite from lisa import schema -from lisa.testsuite import LisaTestMetadata, TestCaseRuntimeData, get_cases_metadata +from lisa.testsuite import LisaTestMetadata, TestCaseRuntimeData from lisa.util import LisaException, constants, set_filtered_fields from lisa.util.logger import get_logger @@ -23,7 +24,7 @@ def select_testcases( for item in init_cases: full_list[item.full_name] = item else: - full_list = get_cases_metadata() + full_list = lisa.testsuite.lisa_tests_metadata if filters: selected: Dict[str, TestCaseRuntimeData] = dict() force_included: Set[str] = set() diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 1c2d4b7225..189fd7347a 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -25,8 +25,8 @@ "TestStatus", ["NOTRUN", "RUNNING", "FAILED", "PASSED", "SKIPPED", "ATTEMPTED"] ) -_all_suites: Dict[str, LisaTestCaseMetadata] = dict() -_all_cases: Dict[str, LisaTestMetadata] = dict() +lisa_test_cases_metadata: Dict[str, LisaTestCaseMetadata] = dict() +lisa_tests_metadata: Dict[str, LisaTestMetadata] = dict() class SkipTestCaseException(LisaException): @@ -184,7 +184,7 @@ def __call__(self, test_class: Type[LisaTestCase]) -> Callable[..., object]: self.test_class = test_class if not self.name: self.name = test_class.__name__ - _add_suite_metadata(self) + _add_lisa_test_case_metadata(self) @wraps(self.test_class) def wrapper( @@ -230,7 +230,7 @@ def __call__(self, func: Callable[..., None]) -> Callable[..., None]: self.full_name = func.__qualname__ self._func = func - _add_case_metadata(self) + _add_lisa_test_metadata(self) @wraps(self._func) def wrapper(*args: object) -> None: @@ -411,32 +411,28 @@ async def start(self) -> None: # noqa: C901 self.log.debug(f"after_suite end with {timer}") -def get_suites_metadata() -> Dict[str, 
LisaTestCaseMetadata]: - return _all_suites +def _add_lisa_test_case_metadata(metadata: LisaTestCaseMetadata) -> None: + """Add the metadata to the test case and matching tests. - -def get_cases_metadata() -> Dict[str, LisaTestMetadata]: - return _all_cases - - -def _add_suite_metadata(metadata: LisaTestCaseMetadata) -> None: + Errors if there is a collision. + """ + # TODO: We should only use the class name as the key. if metadata.name: key = metadata.name else: key = metadata.test_class.__name__ - exist_metadata = _all_suites.get(key) - if exist_metadata is None: - _all_suites[key] = metadata + if key not in lisa_test_cases_metadata: + lisa_test_cases_metadata[key] = metadata else: raise LisaException( f"duplicate test class name: {key}, " - f"new: [{metadata}], exists: [{exist_metadata}]" + f"'{metadata}' would replace '{lisa_test_cases_metadata[key]}'" ) class_prefix = f"{key}." - for test_case in _all_cases.values(): - if test_case.full_name.startswith(class_prefix): - _add_case_to_suite(metadata, test_case) + for test in lisa_tests_metadata.values(): + if test.full_name.startswith(class_prefix): + _add_test_to_case(metadata, test) log = get_logger("init", "test") log.info( f"registered test suite '{key}' " @@ -444,28 +440,33 @@ def _add_suite_metadata(metadata: LisaTestCaseMetadata) -> None: ) -def _add_case_metadata(metadata: LisaTestMetadata) -> None: +def _add_lisa_test_metadata(metadata: LisaTestMetadata) -> None: + """Add the metadata to the test itself. - full_name = metadata.full_name - if _all_cases.get(full_name) is None: - _all_cases[full_name] = metadata + Errors if there is a collision. Also adds the test to the test + case. + + """ + + if metadata.full_name not in lisa_tests_metadata: + lisa_tests_metadata[metadata.full_name] = metadata else: - raise LisaException(f"duplicate test class name: {full_name}") + raise LisaException(f"duplicate test class name: {metadata.full_name}") # this should be None in current observation. 
# the methods are loadded prior to test class # in case logic is changed, so keep this logic # to make two collection consistent. - class_name = full_name.split(".")[0] - test_suite = _all_suites.get(class_name) - if test_suite: + prefix = metadata.full_name.split(".")[0] + if prefix in lisa_test_cases_metadata: log = get_logger("init", "test") - log.debug(f"add case '{metadata.name}' to suite '{test_suite.name}'") - _add_case_to_suite(test_suite, metadata) + log.debug( + f"add case '{metadata.name}' to " + f"suite '{lisa_test_cases_metadata[prefix].name}'" + ) + _add_test_to_case(lisa_test_cases_metadata[prefix], metadata) -def _add_case_to_suite( - test_suite: LisaTestCaseMetadata, test_case: LisaTestMetadata -) -> None: - test_case.suite = test_suite - test_suite.cases.append(test_case) +def _add_test_to_case(case: LisaTestCaseMetadata, test: LisaTestMetadata) -> None: + test.suite = case + case.cases.append(test) From b220dca232ade0673d76d5460f5c267fabe8fd6c Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 7 Oct 2020 16:38:38 -0700 Subject: [PATCH 24/27] Rename `self.suite` to `self.case` for consistency And start updating other functions. 
--- lisa/commands.py | 2 +- lisa/runner.py | 45 ++++++++++++++++++------------------ lisa/tests/test_testsuite.py | 8 +++---- lisa/testsuite.py | 10 ++++---- 4 files changed, 32 insertions(+), 33 deletions(-) diff --git a/lisa/commands.py b/lisa/commands.py index 38da8fef6f..d07f43f25e 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -44,7 +44,7 @@ async def list_start(args: Namespace) -> int: cases = select_testcases(runbook.testcase) for case_data in cases: log.info( - f"case: {case_data.name}, suite: {case_data.metadata.suite.name}, " + f"test: {case_data.name}, case: {case_data.metadata.case.name}, " f"area: {case_data.suite.area}, " f"category: {case_data.suite.category}, " f"tags: {','.join(case_data.suite.tags)}, " diff --git a/lisa/runner.py b/lisa/runner.py index b2f02be07e..da8cb8bc74 100644 --- a/lisa/runner.py +++ b/lisa/runner.py @@ -47,7 +47,7 @@ async def run(runbook: schema.Runbook) -> List[TestResult]: # noqa: C901 try: is_needed: bool = False can_run_results = [x for x in can_run_results if x.can_run] - can_run_results.sort(key=lambda x: x.runtime_data.metadata.suite.name) + can_run_results.sort(key=lambda x: x.runtime_data.metadata.case.name) new_env_can_run_results = [ x for x in can_run_results if x.runtime_data.use_new_environment ] @@ -92,12 +92,12 @@ async def run(runbook: schema.Runbook) -> List[TestResult]: # noqa: C901 # try a case need new environment firstly for new_env_result in new_env_can_run_results: if new_env_result.check_environment(environment, True): - await _run_suite(environment=environment, cases=[new_env_result]) + await _run_tests(environment=environment, tests=[new_env_result]) break - # grouped test results by test suite. + # grouped test results by test case. 
grouped_cases: List[TestResult] = [] - current_test_suite: Optional[LisaTestCaseMetadata] = None + current_case: Optional[LisaTestCaseMetadata] = None for test_result in can_run_results: if ( test_result.can_run @@ -105,31 +105,31 @@ async def run(runbook: schema.Runbook) -> List[TestResult]: # noqa: C901 and not test_result.runtime_data.use_new_environment ): if ( - test_result.runtime_data.metadata.suite != current_test_suite + test_result.runtime_data.metadata.case != current_case and grouped_cases ): # run last batch cases - await _run_suite(environment=environment, cases=grouped_cases) + await _run_tests(environment=environment, tests=grouped_cases) grouped_cases = [] # append new test cases - current_test_suite = test_result.runtime_data.metadata.suite + current_case = test_result.runtime_data.metadata.case grouped_cases.append(test_result) if grouped_cases: - await _run_suite(environment=environment, cases=grouped_cases) + await _run_tests(environment=environment, tests=grouped_cases) finally: if environment and environment.is_ready: platform.delete_environment(environment) # not run as there is no fit environment. 
- for case in can_run_results: - if case.can_run: + for test in can_run_results: + if test.can_run: reasons = "no available environment" - if case.check_results and case.check_results.reasons: - reasons = f"{reasons}: {case.check_results.reasons}" + if test.check_results and test.check_results.reasons: + reasons = f"{reasons}: {test.check_results.reasons}" - case.set_status(TestStatus.SKIPPED, reasons) + test.set_status(TestStatus.SKIPPED, reasons) result_count_dict: Dict[TestStatus, int] = dict() for test_result in selected_test_results: @@ -149,18 +149,17 @@ async def run(runbook: schema.Runbook) -> List[TestResult]: # noqa: C901 return selected_test_results -async def _run_suite(environment: Environment, cases: List[TestResult]) -> None: - - assert cases - suite_metadata = cases[0].runtime_data.metadata.suite - test_suite: LisaTestCase = suite_metadata.test_class( +async def _run_tests(environment: Environment, tests: List[TestResult]) -> None: + assert tests + case_metadata = tests[0].runtime_data.metadata.case + test_case: LisaTestCase = case_metadata.test_class( environment, - cases, - suite_metadata, + tests, + case_metadata, ) - for case in cases: - case.env = environment.name - await test_suite.start() + for test in tests: + test.env = environment.name + await test_case.start() def _merge_test_requirements( diff --git a/lisa/tests/test_testsuite.py b/lisa/tests/test_testsuite.py index 0c437a3782..6c4e62248d 100644 --- a/lisa/tests/test_testsuite.py +++ b/lisa/tests/test_testsuite.py @@ -136,7 +136,7 @@ class TestSuiteTestCase(IsolatedAsyncioTestCase): def generate_suite_instance(self) -> MockTestSuite: case_results = generate_cases_result() case_results = case_results[:2] - suite_metadata = case_results[0].runtime_data.metadata.suite + suite_metadata = case_results[0].runtime_data.metadata.case runbook = generate_runbook(is_single_env=True, local=True, remote=True) envs = load_environments(runbook) self.default_env = list(envs.values())[0] @@ -167,14 
+167,14 @@ def test_case_override_suite(self) -> None: case2_found = False for case in cases: assert case.requirement.environment - assert case.suite.requirement.environment + assert case.case.requirement.environment if case.name == "mock_ut1": self.assertEqual(2, len(case.requirement.environment.nodes)) - self.assertEqual(1, len(case.suite.requirement.environment.nodes)) + self.assertEqual(1, len(case.case.requirement.environment.nodes)) case1_found = True if case.name == "mock_ut2": self.assertEqual(1, len(case.requirement.environment.nodes)) - self.assertEqual(1, len(case.suite.requirement.environment.nodes)) + self.assertEqual(1, len(case.case.requirement.environment.nodes)) case2_found = True self.assertEqual(True, case1_found) self.assertEqual(True, case2_found) diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 189fd7347a..b52ba94285 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -222,8 +222,8 @@ def __init__( # from an instance of that class. def __getattr__(self, key: str) -> Any: # inherit all attributes of test suite - assert self.suite, "suite is not set before use metadata" - return getattr(self.suite, key) + assert self.case, "suite is not set before use metadata" + return getattr(self.case, key) def __call__(self, func: Callable[..., None]) -> Callable[..., None]: self.name = func.__name__ @@ -238,8 +238,8 @@ def wrapper(*args: object) -> None: return wrapper - def set_suite(self, suite: LisaTestCaseMetadata) -> None: - self.suite: LisaTestCaseMetadata = suite + def set_case(self, case: LisaTestCaseMetadata) -> None: + self.case: LisaTestCaseMetadata = case class TestCaseRuntimeData: @@ -468,5 +468,5 @@ def _add_lisa_test_metadata(metadata: LisaTestMetadata) -> None: def _add_test_to_case(case: LisaTestCaseMetadata, test: LisaTestMetadata) -> None: - test.suite = case + test.case = case case.cases.append(test) From e4ee0b378a6ff29cd0b747785c282ad9369c41be Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 7 Oct 2020 
16:47:29 -0700 Subject: [PATCH 25/27] =?UTF-8?q?Don=E2=80=99t=20subclass=20ABCMeta=20when?= =?UTF-8?q?=20unneeded?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `LisaTestCase` is _not_ an abstract base class, it’s just a base class (with a lot of implementation) and defines no abstract methods. So we remove this. --- lisa/testsuite.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lisa/testsuite.py b/lisa/testsuite.py index b52ba94285..9a0ce5f772 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -1,7 +1,6 @@ from __future__ import annotations import unittest -from abc import ABCMeta from dataclasses import dataclass from enum import Enum from functools import wraps @@ -276,7 +275,7 @@ def clone(self) -> TestCaseRuntimeData: return cloned -class LisaTestCase(unittest.TestCase, metaclass=ABCMeta): +class LisaTestCase(unittest.TestCase): """This class wraps the unittest module's 'TestCase' class. It should be used in the same way, where non-abstract methods From 1967e7d857a3618f1ce26372962e8240d13a0644 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 7 Oct 2020 16:50:23 -0700 Subject: [PATCH 26/27] Rename `TestCaseRuntimeData` to `LisaTestRuntimeData` Again for consistency. 
--- lisa/commands.py | 4 ++-- lisa/tests/test_testsuite.py | 6 +++--- lisa/testselector.py | 36 ++++++++++++++++++------------------ lisa/testsuite.py | 8 ++++---- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/lisa/commands.py b/lisa/commands.py index d07f43f25e..022e324d77 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -6,7 +6,7 @@ from lisa import notifier from lisa.parameter_parser.runbook import load_runbook from lisa.testselector import select_testcases -from lisa.testsuite import TestCaseRuntimeData, TestStatus +from lisa.testsuite import LisaTestRuntimeData, TestStatus from lisa.util import LisaException, constants from lisa.util.logger import get_logger @@ -39,7 +39,7 @@ async def list_start(args: Namespace) -> int: log = _get_init_logger("list") if args.type == constants.LIST_CASE: if list_all: - cases: Iterable[TestCaseRuntimeData] = select_testcases() + cases: Iterable[LisaTestRuntimeData] = select_testcases() else: cases = select_testcases(runbook.testcase) for case_data in cases: diff --git a/lisa/tests/test_testsuite.py b/lisa/tests/test_testsuite.py index 6c4e62248d..2b7776b83d 100644 --- a/lisa/tests/test_testsuite.py +++ b/lisa/tests/test_testsuite.py @@ -12,7 +12,7 @@ LisaTestCase, LisaTestCaseMetadata, LisaTestMetadata, - TestCaseRuntimeData, + LisaTestRuntimeData, TestResult, TestStatus, simple_requirement, @@ -116,14 +116,14 @@ def generate_cases_metadata() -> List[LisaTestMetadata]: def generate_cases_result() -> List[TestResult]: case_metadata = generate_cases_metadata() - case_results = [TestResult(TestCaseRuntimeData(x)) for x in case_metadata] + case_results = [TestResult(LisaTestRuntimeData(x)) for x in case_metadata] return case_results def select_and_check( ut: TestCase, case_runbook: List[Any], expected_descriptions: List[str] -) -> List[TestCaseRuntimeData]: +) -> List[LisaTestRuntimeData]: runbook = validate_data({constants.TESTCASE: case_runbook}) case_metadatas = generate_cases_metadata() selected = 
select_testcases(runbook.testcase, case_metadatas) diff --git a/lisa/testselector.py b/lisa/testselector.py index 8041719bef..383fa3032a 100644 --- a/lisa/testselector.py +++ b/lisa/testselector.py @@ -4,7 +4,7 @@ import lisa.testsuite from lisa import schema -from lisa.testsuite import LisaTestMetadata, TestCaseRuntimeData +from lisa.testsuite import LisaTestMetadata, LisaTestRuntimeData from lisa.util import LisaException, constants, set_filtered_fields from lisa.util.logger import get_logger @@ -14,7 +14,7 @@ def select_testcases( filters: Optional[List[schema.TestCase]] = None, init_cases: Optional[List[LisaTestMetadata]] = None, -) -> List[TestCaseRuntimeData]: +) -> List[LisaTestRuntimeData]: """ based on filters to select test cases. If filters are None, return all cases. """ @@ -26,7 +26,7 @@ def select_testcases( else: full_list = lisa.testsuite.lisa_tests_metadata if filters: - selected: Dict[str, TestCaseRuntimeData] = dict() + selected: Dict[str, LisaTestRuntimeData] = dict() force_included: Set[str] = set() force_excluded: Set[str] = set() for filter in filters: @@ -36,7 +36,7 @@ def select_testcases( ) else: log.debug(f"skip disabled rule: {filter}") - results: List[TestCaseRuntimeData] = [] + results: List[LisaTestRuntimeData] = [] for case in selected.values(): times = case.times for index in range(times): @@ -47,14 +47,14 @@ def select_testcases( else: results = [] for metadata in full_list.values(): - results.append(TestCaseRuntimeData(metadata)) + results.append(LisaTestRuntimeData(metadata)) log.info(f"selected cases count: {len(results)}") return results def _match_string( - case: Union[TestCaseRuntimeData, LisaTestMetadata], + case: Union[LisaTestRuntimeData, LisaTestMetadata], pattern: Pattern[str], attr_name: str, ) -> bool: @@ -64,7 +64,7 @@ def _match_string( def _match_priority( - case: Union[TestCaseRuntimeData, LisaTestMetadata], pattern: Union[int, List[int]] + case: Union[LisaTestRuntimeData, LisaTestMetadata], pattern: Union[int, 
List[int]] ) -> bool: priority = case.priority is_matched: bool = False @@ -76,7 +76,7 @@ def _match_priority( def _match_tags( - case: Union[TestCaseRuntimeData, LisaTestMetadata], + case: Union[LisaTestRuntimeData, LisaTestMetadata], criteria_tags: Union[str, List[str]], ) -> bool: case_tags = case.tags @@ -89,23 +89,23 @@ def _match_tags( def _match_cases( - candidates: Mapping[str, Union[TestCaseRuntimeData, LisaTestMetadata]], - patterns: List[Callable[[Union[TestCaseRuntimeData, LisaTestMetadata]], bool]], -) -> Dict[str, TestCaseRuntimeData]: - changed_cases: Dict[str, TestCaseRuntimeData] = dict() + candidates: Mapping[str, Union[LisaTestRuntimeData, LisaTestMetadata]], + patterns: List[Callable[[Union[LisaTestRuntimeData, LisaTestMetadata]], bool]], +) -> Dict[str, LisaTestRuntimeData]: + changed_cases: Dict[str, LisaTestRuntimeData] = dict() for candidate_name in candidates: candidate = candidates[candidate_name] is_matched = all(pattern(candidate) for pattern in patterns) if is_matched: if isinstance(candidate, LisaTestMetadata): - candidate = TestCaseRuntimeData(candidate) + candidate = LisaTestRuntimeData(candidate) changed_cases[candidate_name] = candidate return changed_cases def _apply_settings( - applied_case_data: TestCaseRuntimeData, case_runbook: schema.TestCase, action: str + applied_case_data: LisaTestRuntimeData, case_runbook: schema.TestCase, action: str ) -> None: fields = [ constants.TESTCASE_TIMES, @@ -142,16 +142,16 @@ def _force_check( def _apply_filter( # noqa: C901 case_runbook: schema.TestCase, - current_selected: Dict[str, TestCaseRuntimeData], + current_selected: Dict[str, LisaTestRuntimeData], force_included: Set[str], force_excluded: Set[str], full_list: Dict[str, LisaTestMetadata], -) -> Dict[str, TestCaseRuntimeData]: +) -> Dict[str, LisaTestRuntimeData]: # TODO: Reduce this function's complexity and remove the disabled warning. 
log = _get_logger() # initialize criterias - patterns: List[Callable[[Union[TestCaseRuntimeData, LisaTestMetadata]], bool]] = [] + patterns: List[Callable[[Union[LisaTestRuntimeData, LisaTestMetadata]], bool]] = [] criterias_runbook = case_runbook.criteria assert criterias_runbook, "test case criteria cannot be None" criterias_runbook_dict = criterias_runbook.__dict__ @@ -182,7 +182,7 @@ def _apply_filter( # noqa: C901 raise LisaException(f"unknown criteria key: {runbook_key}") # match by select Action: - changed_cases: Dict[str, TestCaseRuntimeData] = dict() + changed_cases: Dict[str, LisaTestRuntimeData] = dict() is_force = case_runbook.select_action in [ constants.TESTCASE_SELECT_ACTION_FORCE_INCLUDE, constants.TESTCASE_SELECT_ACTION_FORCE_EXCLUDE, diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 9a0ce5f772..c07b2368a5 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -44,7 +44,7 @@ class TestResultMessage(notifier.MessageBase): # reuse it, especially if we’ve been inspired by it so much. 
@dataclass class TestResult: - runtime_data: TestCaseRuntimeData + runtime_data: LisaTestRuntimeData status: TestStatus = TestStatus.NOTRUN elapsed: float = 0 message: str = "" @@ -241,7 +241,7 @@ def set_case(self, case: LisaTestCaseMetadata) -> None: self.case: LisaTestCaseMetadata = case -class TestCaseRuntimeData: +class LisaTestRuntimeData: def __init__(self, metadata: LisaTestMetadata): self.metadata = metadata @@ -261,8 +261,8 @@ def __getattr__(self, key: str) -> Any: assert self.metadata return getattr(self.metadata, key) - def clone(self) -> TestCaseRuntimeData: - cloned = TestCaseRuntimeData(self.metadata) + def clone(self) -> LisaTestRuntimeData: + cloned = LisaTestRuntimeData(self.metadata) fields = [ constants.TESTCASE_SELECT_ACTION, constants.TESTCASE_TIMES, From cdb99b2efb6faa6ab81dc314a6fcb9fec0879550 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 7 Oct 2020 17:07:45 -0700 Subject: [PATCH 27/27] Simplify `LisaTestCaseMetadata` and `LisaTestRuntimeData` By using @dataclass. Also, this is actually a case for multiple inheritance instead of abuse of `__getattr__`.
--- lisa/testsuite.py | 57 +++++++++++++++++-------------------------- lisa/util/__init__.py | 2 ++ 2 files changed, 25 insertions(+), 34 deletions(-) diff --git a/lisa/testsuite.py b/lisa/testsuite.py index c07b2368a5..8097214736 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -1,7 +1,7 @@ from __future__ import annotations import unittest -from dataclasses import dataclass +from dataclasses import dataclass, field from enum import Enum from functools import wraps from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union @@ -152,32 +152,19 @@ def simple_requirement( DEFAULT_REQUIREMENT = simple_requirement() +@dataclass class LisaTestCaseMetadata: """This decorator supplies metadata for each case of tests.""" - def __init__( - self, - area: str, - category: str, - description: str, - tags: List[str], - name: str = "", - requirement: TestCaseRequirement = DEFAULT_REQUIREMENT, - ) -> None: - # TODO: The name should be the test case’s class name. - self.name = name - self.cases: List[LisaTestMetadata] = [] - - self.area = area - self.category = category - if tags: - self.tags = tags - else: - self.tags = [] - # TODO: Each test case description should be from its - # docstring, not here. - self.description = description - self.requirement = requirement + area: str + category: str + # TODO: Each description should be from the docstring instead. + description: str + tags: List[str] = field(default_factory=list) + # TODO: The name should be the test case’s class name. + name: str = "" + requirement: TestCaseRequirement = DEFAULT_REQUIREMENT + cases: List[LisaTestMetadata] = field(default_factory=list) def __call__(self, test_class: Type[LisaTestCase]) -> Callable[..., object]: self.test_class = test_class @@ -237,21 +224,23 @@ def wrapper(*args: object) -> None: return wrapper + # TODO: Currently set by `_add_test_to_case`, want to set + # automatically here. 
def set_case(self, case: LisaTestCaseMetadata) -> None: self.case: LisaTestCaseMetadata = case +@dataclass class LisaTestRuntimeData: - def __init__(self, metadata: LisaTestMetadata): - self.metadata = metadata - - # all runtime setting fields - self.select_action: str = "" - self.times: int = 1 - self.retry: int = 0 - self.use_new_environment: bool = False - self.ignore_failure: bool = False - self.environment_name: str = "" + """This adds runtime data to tests.""" + + metadata: LisaTestMetadata + select_action: str = "" + times: int = 1 + retry: int = 0 + use_new_environment: bool = False + ignore_failure: bool = False + environment_name: str = "" # TODO: This implies that we should actually subclass (inherit) # `TestCaseMetaData`, with some way of instantiating this class diff --git a/lisa/util/__init__.py b/lisa/util/__init__.py index b67d1c7212..861b477043 100644 --- a/lisa/util/__init__.py +++ b/lisa/util/__init__.py @@ -67,6 +67,7 @@ def get_public_key_data(private_key_file_path: str) -> str: return public_key_data +# TODO: Why do we need this? def fields_to_dict(src: Any, fields: Iterable[str]) -> Dict[str, Any]: """ copy field values form src to dest, if it's not None @@ -82,6 +83,7 @@ def fields_to_dict(src: Any, fields: Iterable[str]) -> Dict[str, Any]: return result +# TODO: Why do we need this? def set_filtered_fields(src: Any, dest: Any, fields: List[str]) -> None: """ copy field values form src to dest, if it's not None