From 7dd71a7825bf1e48b045543d3cc08d61c9e7363c Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 30 Sep 2020 18:21:25 -0700 Subject: [PATCH 01/16] Run Python in developer mode The `-X dev` option reveals useful information such as leaked system resources. --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 11f5b1368b..f6a4a69924 100644 --- a/Makefile +++ b/Makefile @@ -8,11 +8,11 @@ setup: # Run LISAv3 run: - @poetry run python lisa/main.py --debug + @poetry run python -X dev lisa/main.py --debug # Run unit tests test: - @poetry run python -m unittest discover lisa + @poetry run python -X dev -m unittest discover lisa # Generate coverage report (slow, reruns LISAv3 and tests) coverage: From 97a6546167383c1293aeb642c678d95254e04dc8 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 15:30:01 -0700 Subject: [PATCH 02/16] Add `test` to `make all` Forgot this. --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index f6a4a69924..d5dbe13404 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # This Makefile simply automates all our tasks. Its use is optional. -all: setup run check +all: setup run test check # Install Python packages setup: From 9750c6b06f45c79d93dc17995984ad55e7bb7474 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 30 Sep 2020 18:22:45 -0700 Subject: [PATCH 03/16] Use asyncio correctly At minimum this converts the usage to the documented approach where there is exactly one asyncio event loop (started in main) and everything that currently is written async actually becomes a coroutine. 
The unit tests which use coroutines now use `IsolatedAsyncioTestCase` instead of just `TestCase`, which ensures they continue to work. --- lisa/commands.py | 10 ++++----- lisa/main.py | 8 ++++--- lisa/tests/test_lisarunner.py | 41 +++++++++++++++++------------------ lisa/tests/test_testsuite.py | 37 +++++++++++++++---------------- 4 files changed, 47 insertions(+), 49 deletions(-) diff --git a/lisa/commands.py b/lisa/commands.py index dd4a2dd4c2..ad33301162 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -1,4 +1,3 @@ -import asyncio import functools from argparse import Namespace from typing import Iterable, Optional, cast @@ -14,15 +13,14 @@ _get_init_logger = functools.partial(get_logger, "init") -def run(args: Namespace) -> int: +async def run(args: Namespace) -> int: runbook = load_runbook(args) if runbook.notifier: notifier.initialize(runbooks=runbook.notifier) try: runner = LisaRunner(runbook) - awaitable = runner.start() - asyncio.run(awaitable) + await runner.start() finally: notifier.finalize() @@ -30,12 +28,12 @@ def run(args: Namespace) -> int: # check runbook -def check(args: Namespace) -> int: +async def check(args: Namespace) -> int: load_runbook(args) return 0 -def list_start(args: Namespace) -> int: +async def list_start(args: Namespace) -> int: runbook = load_runbook(args) list_all = cast(Optional[bool], args.list_all) log = _get_init_logger("list") diff --git a/lisa/main.py b/lisa/main.py index 3088615688..e8b63fd646 100644 --- a/lisa/main.py +++ b/lisa/main.py @@ -1,3 +1,4 @@ +import asyncio import sys import traceback from datetime import datetime @@ -23,7 +24,7 @@ def create_run_path(root_path: Path) -> Path: return run_path -def main() -> int: +async def main() -> int: total_timer = create_timer() log = get_logger() exit_code: int = 0 @@ -57,7 +58,7 @@ def main() -> int: log.debug(f"command line args: {sys.argv}") log.info(f"run local path: {runtime_root}") - exit_code = args.func(args) + exit_code = await args.func(args) assert 
isinstance(exit_code, int), f"actual: {type(exit_code)}" finally: log.info(f"completed in {total_timer}") @@ -68,7 +69,8 @@ def main() -> int: if __name__ == "__main__": exit_code = 0 try: - exit_code = main() + # TODO: Turn off debugging when we ship this. + exit_code = asyncio.run(main(), debug=True) except Exception as exception: exit_code = -1 log = get_logger() diff --git a/lisa/tests/test_lisarunner.py b/lisa/tests/test_lisarunner.py index 8182138454..13a881a0e7 100644 --- a/lisa/tests/test_lisarunner.py +++ b/lisa/tests/test_lisarunner.py @@ -1,6 +1,5 @@ -import asyncio from typing import List, Optional -from unittest.case import TestCase +from unittest import IsolatedAsyncioTestCase from lisa import schema from lisa.environment import load_environments @@ -38,7 +37,7 @@ def generate_lisarunner( return runner -class LisaRunnerTestCase(TestCase): +class LisaRunnerTestCase(IsolatedAsyncioTestCase): def tearDown(self) -> None: cleanup_cases_metadata() test_platform.return_prepared = True @@ -205,7 +204,7 @@ def test_merge_req_platform_type_checked(self) -> None: test_results=test_results, ) - def test_fit_a_predefined_env(self) -> None: + async def test_fit_a_predefined_env(self) -> None: # predefined env can run case in below condition. # 1. with predefined env of 1 simple node, so ut2 don't need a new env # 2. 
ut3 need 8 cores, and predefined env target to meet all core requirement, @@ -213,7 +212,7 @@ def test_fit_a_predefined_env(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=["customized_0", "generated_1", "generated_2"], expected_deployed_envs=["customized_0", "generated_1"], @@ -226,13 +225,13 @@ def test_fit_a_predefined_env(self) -> None: test_results=runner._latest_test_results, ) - def test_fit_a_bigger_env(self) -> None: + async def test_fit_a_bigger_env(self) -> None: # similar with test_fit_a_predefined_env, but predefined 2 nodes, # it doesn't equal to any case req, but reusable for all cases. generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -250,13 +249,13 @@ def test_fit_a_bigger_env(self) -> None: test_results=runner._latest_test_results, ) - def test_case_new_env_run_only_1_needed(self) -> None: + async def test_case_new_env_run_only_1_needed(self) -> None: # same predefined env as test_fit_a_bigger_env, # but all case want to run on a new env generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_lisarunner(env_runbook, case_use_new_env=True) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -274,13 +273,13 @@ def test_case_new_env_run_only_1_needed(self) -> None: test_results=runner._latest_test_results, ) - def test_no_needed_env(self) -> None: + async def test_no_needed_env(self) -> None: # two 1 node env predefined, but only customized_0 go to deploy # no cases assigned to customized_1, as fit cases run on 
customized_0 already generate_cases_metadata() env_runbook = generate_env_runbook(local=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -298,7 +297,7 @@ def test_no_needed_env(self) -> None: test_results=runner._latest_test_results, ) - def test_deploy_no_more_resource(self) -> None: + async def test_deploy_no_more_resource(self) -> None: # platform may see no more resource, like no azure quota. # cases skipped due to this. # In future, will add retry on wait more resource. @@ -306,7 +305,7 @@ def test_deploy_no_more_resource(self) -> None: generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=[ @@ -334,13 +333,13 @@ def test_deploy_no_more_resource(self) -> None: test_results=runner._latest_test_results, ) - def test_skipped_on_suite_failure(self) -> None: + async def test_skipped_on_suite_failure(self) -> None: # first two cases skipped due to test suite setup failed test_testsuite.fail_on_before_suite = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -363,13 +362,13 @@ def test_skipped_on_suite_failure(self) -> None: test_results=runner._latest_test_results, ) - def test_env_skipped_no_prepared_env(self) -> None: + async def test_env_skipped_no_prepared_env(self) -> None: # test env not prepared, so test cases cannot find an env to run test_platform.return_prepared = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_lisarunner(env_runbook) - 
asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -392,14 +391,14 @@ def test_env_skipped_no_prepared_env(self) -> None: test_results=runner._latest_test_results, ) - def test_env_skipped_not_ready(self) -> None: + async def test_env_skipped_not_ready(self) -> None: # env prepared, but not deployed to ready. # so no cases can run test_platform.deploy_is_ready = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() self.verify_env_results( expected_prepared=[ "customized_0", @@ -427,12 +426,12 @@ def test_env_skipped_not_ready(self) -> None: test_results=runner._latest_test_results, ) - def test_env_skipped_no_case(self) -> None: + async def test_env_skipped_no_case(self) -> None: # no case found, as not call generate_case_metadata # in this case, not deploy any env env_runbook = generate_env_runbook(is_single_env=True, remote=True) runner = generate_lisarunner(env_runbook) - asyncio.run(runner.start()) + await runner.start() # still prepare predefined, but not deploy self.verify_env_results( expected_prepared=["customized_0"], diff --git a/lisa/tests/test_testsuite.py b/lisa/tests/test_testsuite.py index f5e79e5760..183d33816f 100644 --- a/lisa/tests/test_testsuite.py +++ b/lisa/tests/test_testsuite.py @@ -1,6 +1,5 @@ -import asyncio from typing import Any, List -from unittest import TestCase +from unittest import IsolatedAsyncioTestCase, TestCase from lisa import schema from lisa.environment import load_environments @@ -134,7 +133,7 @@ def select_and_check( return selected -class TestSuiteTestCase(TestCase): +class TestSuiteTestCase(IsolatedAsyncioTestCase): def generate_suite_instance(self) -> MockTestSuite: case_results = generate_cases_result() case_results = case_results[:2] @@ -196,42 +195,42 @@ def test_test_result_canrun(self) -> None: else: 
self.assertEqual(False, result.can_run) - def test_skip_before_suite_failed(self) -> None: + async def test_skip_before_suite_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_before_suite=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.SKIPPED, result.status) self.assertEqual("before_suite: failed", result.message) - def test_pass_after_suite_failed(self) -> None: + async def test_pass_after_suite_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_after_suite=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_skip_before_case_failed(self) -> None: + async def test_skip_before_case_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_before_case=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.SKIPPED, result.status) self.assertEqual("before_case: failed", result.message) - def test_pass_after_case_failed(self) -> None: + async def test_pass_after_case_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_on_after_case=True) - asyncio.run(test_suite.start()) + await test_suite.start() for result in test_suite.case_results: self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_skip_case_failed(self) -> None: + async def test_skip_case_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=1) - asyncio.run(test_suite.start()) + await test_suite.start() result = test_suite.case_results[0] self.assertEqual(TestStatus.FAILED, result.status) self.assertEqual("failed: 
mock_ut1 failed", result.message) @@ -239,36 +238,36 @@ def test_skip_case_failed(self) -> None: self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_retry_passed(self) -> None: + async def test_retry_passed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=1) result = test_suite.case_results[0] result.runtime_data.retry = 1 - asyncio.run(test_suite.start()) + await test_suite.start() self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) result = test_suite.case_results[1] self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_retry_notenough_failed(self) -> None: + async def test_retry_notenough_failed(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=2) result = test_suite.case_results[0] result.runtime_data.retry = 1 - asyncio.run(test_suite.start()) + await test_suite.start() self.assertEqual(TestStatus.FAILED, result.status) self.assertEqual("failed: mock_ut1 failed", result.message) result = test_suite.case_results[1] self.assertEqual(TestStatus.PASSED, result.status) self.assertEqual("", result.message) - def test_attempt_ignore_failure(self) -> None: + async def test_attempt_ignore_failure(self) -> None: test_suite = self.generate_suite_instance() test_suite.set_fail_phase(fail_case_count=2) result = test_suite.case_results[0] result.runtime_data.ignore_failure = True - asyncio.run(test_suite.start()) + await test_suite.start() self.assertEqual(TestStatus.ATTEMPTED, result.status) self.assertEqual("mock_ut1 failed", result.message) result = test_suite.case_results[1] From 67f74728105cedbedd34d94ca9b645db0a7c969b Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 15:09:27 -0700 Subject: [PATCH 04/16] Add info on Async IO to contributing guidelines --- CONTRIBUTING.md | 12 ++++++++++++ 1 file changed, 12 
insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 41bd025527..55e28147c7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -252,6 +252,18 @@ Python world. If you make it through even some of these guides, you will be well on your way to being a “Pythonista” (a Python developer) writing “Pythonic” (canonically correct Python) code left and right. +### Async IO + +With Python 3.4, the Async IO pattern found in languages such as C# and Go is +available through the keywords `async` and `await`, along with the Python module +`asyncio`. Please read [Async IO in Python: A Complete +Walkthrough](https://realpython.com/async-io-python/) to understand at a high +level how asynchronous programming works. As of Python 3.7, one major “gotcha” +is that `asyncio.run(...)` should be used [exactly once in +`main`](https://docs.python.org/3/library/asyncio-task.html), as it starts the +event loop. Everything else should be a coroutine or task which the event loop +schedules. + ## Future Sections Just a collection of reminders for the author to expand on later. From 8d3251358544f0e85a260b91e405705622e8564b Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 15:40:05 -0700 Subject: [PATCH 05/16] Clean up `parse_args` --- lisa/parameter_parser/argparser.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/lisa/parameter_parser/argparser.py b/lisa/parameter_parser/argparser.py index 5f389e1c65..7a79cd4358 100644 --- a/lisa/parameter_parser/argparser.py +++ b/lisa/parameter_parser/argparser.py @@ -36,33 +36,34 @@ def parse_args() -> Namespace: - # parse args run function. + """This wraps Python's 'ArgumentParser' to setup our CLI.""" parser = ArgumentParser() support_debug(parser) support_runbook(parser, required=False) support_variable(parser) + # Default to ‘run’ when no subcommand is given. 
+ parser.set_defaults(func=commands.run) + subparsers = parser.add_subparsers(dest="cmd", required=False) + + # Entry point for ‘run’. run_parser = subparsers.add_parser("run") run_parser.set_defaults(func=commands.run) - support_runbook(run_parser) - support_variable(run_parser) + # Entry point for ‘list-start’. list_parser = subparsers.add_parser(constants.LIST) list_parser.set_defaults(func=commands.list_start) list_parser.add_argument("--type", "-t", dest="type", choices=["case"]) list_parser.add_argument("--all", "-a", dest="list_all", action="store_true") - support_runbook(list_parser) - support_variable(list_parser) + # Entry point for ‘check’. check_parser = subparsers.add_parser("check") check_parser.set_defaults(func=commands.check) - support_runbook(check_parser) - support_variable(check_parser) - - parser.set_defaults(func=commands.run) for sub_parser in subparsers.choices.values(): + support_runbook(sub_parser) + support_variable(sub_parser) support_debug(sub_parser) return parser.parse_args() From 996bc804d2463ab47ab4e0ed9f67c91cc1cce110 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 15:48:37 -0700 Subject: [PATCH 06/16] Clean up `load_runbook` Instead of aliasing this function, we should just rename it entirely. Instead of delegating the parsing of an `argparse.Namespace` object, we just set up the `runbook` argument to be a `Path` instead of a string, and have `load_runbook` accept what it needs instead of digging it out of the `Namespace`. 
--- CONTRIBUTING.md | 2 ++ lisa/commands.py | 8 ++++---- lisa/parameter_parser/argparser.py | 11 ++++++----- lisa/parameter_parser/runbook.py | 13 ++++++------- 4 files changed, 18 insertions(+), 16 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 55e28147c7..07e54cc8a9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,6 +22,8 @@ short summary of the most important parts: * Use one leading underscore only for non-public methods and instance variables, such as `_data`. * Constants should be `CAPITALIZED_SNAKE_CASE`. +* When importing a function, try to avoid renaming it with `import as` because + it introduces cognitive overhead to track yet another name. When in doubt, adhere to existing conventions, or check the style guide. diff --git a/lisa/commands.py b/lisa/commands.py index ad33301162..c718716623 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -4,7 +4,7 @@ from lisa import notifier from lisa.lisarunner import LisaRunner -from lisa.parameter_parser.runbook import load as load_runbook +from lisa.parameter_parser.runbook import load_runbook from lisa.testselector import select_testcases from lisa.testsuite import TestCaseRuntimeData from lisa.util import LisaException, constants @@ -14,7 +14,7 @@ async def run(args: Namespace) -> int: - runbook = load_runbook(args) + runbook = load_runbook(args.runbook, args.variables) if runbook.notifier: notifier.initialize(runbooks=runbook.notifier) @@ -29,12 +29,12 @@ async def run(args: Namespace) -> int: # check runbook async def check(args: Namespace) -> int: - load_runbook(args) + load_runbook(args.runbook, args.variables) return 0 async def list_start(args: Namespace) -> int: - runbook = load_runbook(args) + runbook = load_runbook(args.runbook, args.variables) list_all = cast(Optional[bool], args.list_all) log = _get_init_logger("list") if args.type == constants.LIST_CASE: diff --git a/lisa/parameter_parser/argparser.py b/lisa/parameter_parser/argparser.py index 7a79cd4358..5478fc23a4 
100644 --- a/lisa/parameter_parser/argparser.py +++ b/lisa/parameter_parser/argparser.py @@ -1,4 +1,5 @@ from argparse import ArgumentParser, Namespace +from pathlib import Path from lisa import commands from lisa.util import constants @@ -8,10 +9,10 @@ def support_runbook(parser: ArgumentParser, required: bool = True) -> None: parser.add_argument( "--runbook", "-r", + type=Path, required=required, - dest="runbook", - help="runbook of this run", - default="examples/runbook/hello_world.yml", + help="Path to the runbook", + default=Path("examples/runbook/hello_world.yml").absolute(), ) @@ -21,7 +22,7 @@ def support_debug(parser: ArgumentParser) -> None: "-d", dest="debug", action="store_true", - help="set log level to debug", + help="Set log level to debug", ) @@ -31,7 +32,7 @@ def support_variable(parser: ArgumentParser) -> None: "-v", dest="variables", action="append", - help="define variable from command line. format is NAME:VALUE", + help="Define one or more variables with 'NAME:VALUE'", ) diff --git a/lisa/parameter_parser/runbook.py b/lisa/parameter_parser/runbook.py index fc9f2cc016..8c357f0b73 100644 --- a/lisa/parameter_parser/runbook.py +++ b/lisa/parameter_parser/runbook.py @@ -1,7 +1,6 @@ -from argparse import Namespace from functools import partial from pathlib import Path, PurePath -from typing import Any, Dict, Optional, cast +from typing import Any, Dict, List, Optional, cast import yaml from marshmallow import Schema @@ -58,13 +57,13 @@ def validate_data(data: Any) -> schema.Runbook: return runbook -def load(args: Namespace) -> schema.Runbook: +def load_runbook(path: Path, user_variables: Optional[List[str]]) -> schema.Runbook: + """Loads a runbook given a user-supplied path and set of variables.""" # make sure extension in lisa is loaded base_module_path = Path(__file__).parent.parent import_module(base_module_path, logDetails=False) # merge all parameters - path = Path(args.runbook).absolute() data = _load_data(path) constants.RUNBOOK_PATH = 
path.parent @@ -73,14 +72,14 @@ def load(args: Namespace) -> schema.Runbook: extends_runbook = schema.Extension.schema().load( # type:ignore data[constants.EXTENSION] ) - _load_extends(path.parent, extends_runbook) + _load_extends(constants.RUNBOOK_PATH, extends_runbook) # load arg variables variables: Dict[str, Any] = dict() + # TODO: This is all side-effect driven and needs to be fixed. load_from_runbook(data, variables) load_from_env(variables) - if hasattr(args, "variables"): - load_from_pairs(args.variables, variables) + load_from_pairs(user_variables, variables) # replace variables: data = replace_variables(data, variables) From 1067920e157cc39913e18b42820405c1063564d8 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 24 Sep 2020 15:55:44 -0700 Subject: [PATCH 07/16] Turn `status` into a property of `Action` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And use single underscore to avoid invoking name mangling. If the name is reused in a subclass, we likely do not intend to have a duplicate variable, but are intending to use it. For variables we expect to be used, we should use Python’s built-in `property` class/decorator. Co-Authored-By: Andy Schwartzmeyer --- CONTRIBUTING.md | 7 +++++-- lisa/action.py | 17 ++++++++++------- lisa/lisarunner.py | 4 ++-- lisa/testsuite.py | 4 ++-- 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 07e54cc8a9..353e2c6af3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -19,8 +19,11 @@ short summary of the most important parts: such as `id_`. * Always use `self` for the first argument to instance methods. * Always use `cls` for the first argument to class methods. -* Use one leading underscore only for non-public methods and instance variables, - such as `_data`. +* One leading underscore like `_data` is for non-public methods and instance + variables. And it can be used by sub-classes. 
If it won't be used in + sub-classes, use like `__data`. +* If there is a pair of `get_x` and `set_x` methods, they should instead be a + proper property, which is easy to do with the built-in `@property` decorator. * Constants should be `CAPITALIZED_SNAKE_CASE`. * When importing a function, try to avoid renaming it with `import as` because it introduces cognitive overhead to track yet another name. diff --git a/lisa/action.py b/lisa/action.py index e6a5b68d75..105b9f8901 100644 --- a/lisa/action.py +++ b/lisa/action.py @@ -47,7 +47,7 @@ def __init__(self) -> None: @abstractmethod async def start(self) -> None: self.__is_started = True - self.set_status(ActionStatus.RUNNING) + self.status = ActionStatus.RUNNING @abstractmethod async def stop(self) -> None: @@ -57,25 +57,28 @@ async def stop(self) -> None: async def close(self) -> None: self.validate_started() - def get_status(self) -> ActionStatus: + @property + def status(self) -> ActionStatus: + """The Action's current state, for example, 'UNINITIALIZED'.""" return self.__status - def set_status(self, status: ActionStatus) -> None: - if self.__status != status: + @status.setter + def status(self, value: ActionStatus) -> None: + if self.__status != value: self.log.debug( f"{self.name} status changed from {self.__status.name} " - f"to {status.name} with {self.__timer}" + f"to {value.name} with {self.__timer}" ) self.__total += self.__timer.elapsed() message = ActionMessage( elapsed=self.__timer.elapsed(), sub_type=self.name, - status=status, + status=value, total_elapsed=self.__total, ) notifier.notify(message=message) self.__timer = create_timer() - self.__status = status + self.__status = value def validate_started(self) -> None: if not self.__is_started: diff --git a/lisa/lisarunner.py b/lisa/lisarunner.py index 378983853b..6be2c61148 100644 --- a/lisa/lisarunner.py +++ b/lisa/lisarunner.py @@ -27,7 +27,7 @@ def __init__(self, runbook: schema.Runbook) -> None: async def start(self) -> None: # noqa: C901 # TODO: 
Reduce this function's complexity and remove the disabled warning. await super().start() - self.set_status(ActionStatus.RUNNING) + self.status = ActionStatus.RUNNING # select test cases selected_test_cases = select_testcases(self._runbook.testcase) @@ -169,7 +169,7 @@ async def start(self) -> None: # noqa: C901 continue self._log.info(f" {key.name:<9}: {count}") - self.set_status(ActionStatus.SUCCESS) + self.status = ActionStatus.SUCCESS # pass failed count to exit code self.exit_code = result_count_dict.get(TestStatus.FAILED, 0) diff --git a/lisa/testsuite.py b/lisa/testsuite.py index c06e708138..8a31e3964d 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -387,7 +387,7 @@ async def start(self) -> None: # noqa: C901 if self._should_stop: self.log.info("received stop message, stop run") - self.set_status(ActionStatus.STOPPED) + self.status = ActionStatus.STOPPED break self.log = suite_log @@ -400,7 +400,7 @@ async def start(self) -> None: # noqa: C901 self.log.debug(f"after_suite end with {timer}") async def stop(self) -> None: - self.set_status(ActionStatus.STOPPING) + self.status = ActionStatus.STOPPING self._should_stop = True async def close(self) -> None: From 67afb5524d4eab2ba672cc5aa8f8b200e6834491 Mon Sep 17 00:00:00 2001 From: Chi Song <27178119+squirrelsc@users.noreply.github.com> Date: Mon, 26 Oct 2020 07:52:06 +0800 Subject: [PATCH 08/16] remove module variables in test_platform Module variables are not recommended, and they bring problems in unit tests. Refine the code to avoid using them. 
--- lisa/lisarunner.py | 1 + lisa/tests/test_lisarunner.py | 57 ++++++++++++++++++---------- lisa/tests/test_platform.py | 71 ++++++++++++++++++++--------------- 3 files changed, 79 insertions(+), 50 deletions(-) diff --git a/lisa/lisarunner.py b/lisa/lisarunner.py index 6be2c61148..bd39e54570 100644 --- a/lisa/lisarunner.py +++ b/lisa/lisarunner.py @@ -175,6 +175,7 @@ async def start(self) -> None: # noqa: C901 self.exit_code = result_count_dict.get(TestStatus.FAILED, 0) # for UT testability + self._latest_platform = platform self._latest_test_results = selected_test_results async def stop(self) -> None: diff --git a/lisa/tests/test_lisarunner.py b/lisa/tests/test_lisarunner.py index 13a881a0e7..ac06406883 100644 --- a/lisa/tests/test_lisarunner.py +++ b/lisa/tests/test_lisarunner.py @@ -1,4 +1,4 @@ -from typing import List, Optional +from typing import List, Optional, cast from unittest import IsolatedAsyncioTestCase from lisa import schema @@ -6,7 +6,6 @@ from lisa.lisarunner import LisaRunner from lisa.tests import test_platform, test_testsuite from lisa.tests.test_environment import generate_runbook as generate_env_runbook -from lisa.tests.test_platform import deleted_envs, deployed_envs, prepared_envs from lisa.tests.test_testsuite import ( cleanup_cases_metadata, generate_cases_metadata, @@ -17,12 +16,19 @@ def generate_lisarunner( - env_runbook: Optional[schema.EnvironmentRoot] = None, case_use_new_env: bool = False + env_runbook: Optional[schema.EnvironmentRoot] = None, + case_use_new_env: bool = False, + platform_schema: Optional[test_platform.MockPlatformSchema] = None, ) -> LisaRunner: + platform_runbook = schema.Platform( + type=constants.PLATFORM_MOCK, admin_password="not use it" + ) + if platform_schema: + platform_runbook.extended_schemas = { + constants.PLATFORM_MOCK: platform_schema.to_dict() # type:ignore + } runbook = schema.Runbook( - platform=[ - schema.Platform(type=constants.PLATFORM_MOCK, admin_password="not use it") - ], + 
platform=[platform_runbook], testcase=[ schema.TestCase( criteria=schema.Criteria(priority=[0, 1, 2]), @@ -40,10 +46,6 @@ def generate_lisarunner( class LisaRunnerTestCase(IsolatedAsyncioTestCase): def tearDown(self) -> None: cleanup_cases_metadata() - test_platform.return_prepared = True - test_platform.deploy_is_ready = True - test_platform.deploy_success = True - test_platform.wait_more_resource_error = False def test_merge_req_create_on_new(self) -> None: # if no predefined envs, can generate from requirement @@ -217,6 +219,7 @@ async def test_fit_a_predefined_env(self) -> None: expected_prepared=["customized_0", "generated_1", "generated_2"], expected_deployed_envs=["customized_0", "generated_1"], expected_deleted_envs=["customized_0", "generated_1"], + runner=runner, ) self.verify_test_results( expected_envs=["generated_1", "customized_0", "customized_0"], @@ -241,6 +244,7 @@ async def test_fit_a_bigger_env(self) -> None: ], expected_deployed_envs=["customized_0"], expected_deleted_envs=["customized_0"], + runner=runner, ) self.verify_test_results( expected_envs=["customized_0", "customized_0", "customized_0"], @@ -265,6 +269,7 @@ async def test_case_new_env_run_only_1_needed(self) -> None: ], expected_deployed_envs=["customized_0", "generated_1", "generated_3"], expected_deleted_envs=["customized_0", "generated_1", "generated_3"], + runner=runner, ) self.verify_test_results( expected_envs=["customized_0", "generated_1", "generated_3"], @@ -289,6 +294,7 @@ async def test_no_needed_env(self) -> None: ], expected_deployed_envs=["customized_0", "generated_2"], expected_deleted_envs=["customized_0", "generated_2"], + runner=runner, ) self.verify_test_results( expected_envs=["generated_2", "customized_0", "customized_0"], @@ -301,10 +307,11 @@ async def test_deploy_no_more_resource(self) -> None: # platform may see no more resource, like no azure quota. # cases skipped due to this. # In future, will add retry on wait more resource. 
- test_platform.wait_more_resource_error = True + platform_schema = test_platform.MockPlatformSchema() + platform_schema.wait_more_resource_error = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True) - runner = generate_lisarunner(env_runbook) + runner = generate_lisarunner(env_runbook, platform_schema=platform_schema) await runner.start() self.verify_env_results( @@ -316,6 +323,7 @@ async def test_deploy_no_more_resource(self) -> None: ], expected_deployed_envs=[], expected_deleted_envs=[], + runner=runner, ) before_suite_failed = "no available environment" self.verify_test_results( @@ -349,6 +357,7 @@ async def test_skipped_on_suite_failure(self) -> None: ], expected_deployed_envs=["customized_0"], expected_deleted_envs=["customized_0"], + runner=runner, ) before_suite_failed = "before_suite: failed" self.verify_test_results( @@ -364,10 +373,11 @@ async def test_skipped_on_suite_failure(self) -> None: async def test_env_skipped_no_prepared_env(self) -> None: # test env not prepared, so test cases cannot find an env to run - test_platform.return_prepared = False + platform_schema = test_platform.MockPlatformSchema() + platform_schema.return_prepared = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_lisarunner(env_runbook, platform_schema=platform_schema) await runner.start() self.verify_env_results( expected_prepared=[ @@ -378,6 +388,7 @@ async def test_env_skipped_no_prepared_env(self) -> None: ], expected_deployed_envs=[], expected_deleted_envs=[], + runner=runner, ) no_avaiable_env = "no available environment" self.verify_test_results( @@ -394,10 +405,11 @@ async def test_env_skipped_no_prepared_env(self) -> None: async def test_env_skipped_not_ready(self) -> None: # env prepared, but not deployed to ready. 
# so no cases can run - test_platform.deploy_is_ready = False + platform_schema = test_platform.MockPlatformSchema() + platform_schema.deploy_is_ready = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_lisarunner(env_runbook, platform_schema=platform_schema) await runner.start() self.verify_env_results( expected_prepared=[ @@ -413,6 +425,7 @@ async def test_env_skipped_not_ready(self) -> None: "generated_3", ], expected_deleted_envs=[], + runner=runner, ) no_avaiable_env = "no available environment" self.verify_test_results( @@ -437,6 +450,7 @@ async def test_env_skipped_no_case(self) -> None: expected_prepared=["customized_0"], expected_deployed_envs=[], expected_deleted_envs=[], + runner=runner, ) self.verify_test_results( expected_envs=[], @@ -452,6 +466,7 @@ def verify_test_results( expected_message: List[str], test_results: List[TestResult], ) -> None: + self.assertListEqual( expected_envs, [ @@ -478,16 +493,20 @@ def verify_env_results( expected_prepared: List[str], expected_deployed_envs: List[str], expected_deleted_envs: List[str], + runner: LisaRunner, ) -> None: + platform = cast(test_platform.MockPlatform, runner._latest_platform) + platform_test_data = platform.test_data + self.assertListEqual( expected_prepared, - [x for x in prepared_envs], + [x for x in platform_test_data.prepared_envs], ) self.assertListEqual( expected_deployed_envs, - [x for x in deployed_envs], + [x for x in platform_test_data.deployed_envs], ) self.assertListEqual( expected_deleted_envs, - [x for x in deleted_envs], + [x for x in platform_test_data.deleted_envs], ) diff --git a/lisa/tests/test_platform.py b/lisa/tests/test_platform.py index f931e04f62..18116e9db8 100644 --- a/lisa/tests/test_platform.py +++ b/lisa/tests/test_platform.py @@ -1,6 +1,9 @@ -from typing import List, Type +from dataclasses import dataclass, field +from typing import Any, List, 
Type from unittest.case import TestCase +from dataclasses_json import LetterCase, dataclass_json # type: ignore + from lisa import schema from lisa.environment import Environment, Environments, load_environments from lisa.feature import Feature @@ -9,28 +12,28 @@ from lisa.util import LisaException, constants from lisa.util.logger import Logger -# for other UT to set value -return_prepared = True -deploy_success = True -deploy_is_ready = True -wait_more_resource_error = False -prepared_envs: List[str] = [] -deployed_envs: List[str] = [] -deleted_envs: List[str] = [] + +@dataclass +class MockPlatformTestData: + prepared_envs: List[str] = field(default_factory=list) + deployed_envs: List[str] = field(default_factory=list) + deleted_envs: List[str] = field(default_factory=list) + + +@dataclass_json(letter_case=LetterCase.CAMEL) +@dataclass +class MockPlatformSchema: + # for other UT to set value + return_prepared: bool = True + deploy_success: bool = True + deploy_is_ready: bool = True + wait_more_resource_error: bool = False class MockPlatform(Platform): def __init__(self, runbook: schema.Platform) -> None: super().__init__(runbook=runbook) - prepared_envs.clear() - deployed_envs.clear() - deleted_envs.clear() - self.set_test_config( - return_prepared=return_prepared, - deploy_success=deploy_success, - deploy_is_ready=deploy_is_ready, - wait_more_resource_error=wait_more_resource_error, - ) + self.test_data = MockPlatformTestData() @classmethod def type_name(cls) -> str: @@ -47,39 +50,45 @@ def set_test_config( deploy_is_ready: bool = True, wait_more_resource_error: bool = False, ) -> None: - self.return_prepared = return_prepared - self.deploy_success = deploy_success - self.deploy_is_ready = deploy_is_ready - self.wait_more_resource_error = wait_more_resource_error + self.initialize() + self._mock_runbook.return_prepared = return_prepared + self._mock_runbook.deploy_success = deploy_success + self._mock_runbook.deploy_is_ready = deploy_is_ready + 
self._mock_runbook.wait_more_resource_error = wait_more_resource_error + + def _initialize(self, *args: Any, **kwargs: Any) -> None: + self._mock_runbook: MockPlatformSchema = self._runbook.get_extended_runbook( + MockPlatformSchema, constants.PLATFORM_MOCK + ) def _prepare_environment(self, environment: Environment, log: Logger) -> bool: - prepared_envs.append(environment.name) + self.test_data.prepared_envs.append(environment.name) requirements = environment.runbook.nodes_requirement - if self.return_prepared and requirements: + if self._mock_runbook.return_prepared and requirements: min_capabilities: List[schema.NodeSpace] = [] for node_space in requirements: min_capabilities.append(node_space.generate_min_capability(node_space)) environment.runbook.nodes_requirement = min_capabilities - return self.return_prepared + return self._mock_runbook.return_prepared def _deploy_environment(self, environment: Environment, log: Logger) -> None: - if self.wait_more_resource_error: + if self._mock_runbook.wait_more_resource_error: raise WaitMoreResourceError("wait more resource") - if not self.deploy_success: + if not self._mock_runbook.deploy_success: raise LisaException("mock deploy failed") - if self.return_prepared and environment.runbook.nodes_requirement: + if self._mock_runbook.return_prepared and environment.runbook.nodes_requirement: requirements = environment.runbook.nodes_requirement for node_space in requirements: environment.nodes.from_requirement(node_requirement=node_space) for node in environment.nodes.list(): # prevent real calls node._node_information_hooks.clear() - deployed_envs.append(environment.name) + self.test_data.deployed_envs.append(environment.name) environment._is_initialized = True - environment.is_ready = self.deploy_is_ready + environment.is_ready = self._mock_runbook.deploy_is_ready def _delete_environment(self, environment: Environment, log: Logger) -> None: - deleted_envs.append(environment.name) + 
self.test_data.deleted_envs.append(environment.name) self.delete_called = True From 8c8a7c9ec067039b95f101ed43d9baee0ede974b Mon Sep 17 00:00:00 2001 From: Chi Song <27178119+squirrelsc@users.noreply.github.com> Date: Mon, 26 Oct 2020 08:57:54 +0800 Subject: [PATCH 09/16] Rename `LisaRunner` to `Runner` since we only have one Co-Authored-By: Andy Schwartzmeyer --- lisa/commands.py | 4 +- lisa/{lisarunner.py => runner.py} | 2 +- .../{test_lisarunner.py => test_runner.py} | 40 +++++++++---------- 3 files changed, 23 insertions(+), 23 deletions(-) rename lisa/{lisarunner.py => runner.py} (99%) rename lisa/tests/{test_lisarunner.py => test_runner.py} (94%) diff --git a/lisa/commands.py b/lisa/commands.py index c718716623..acb09452e4 100644 --- a/lisa/commands.py +++ b/lisa/commands.py @@ -3,8 +3,8 @@ from typing import Iterable, Optional, cast from lisa import notifier -from lisa.lisarunner import LisaRunner from lisa.parameter_parser.runbook import load_runbook +from lisa.runner import Runner from lisa.testselector import select_testcases from lisa.testsuite import TestCaseRuntimeData from lisa.util import LisaException, constants @@ -19,7 +19,7 @@ async def run(args: Namespace) -> int: if runbook.notifier: notifier.initialize(runbooks=runbook.notifier) try: - runner = LisaRunner(runbook) + runner = Runner(runbook) await runner.start() finally: notifier.finalize() diff --git a/lisa/lisarunner.py b/lisa/runner.py similarity index 99% rename from lisa/lisarunner.py rename to lisa/runner.py index bd39e54570..56e2762e71 100644 --- a/lisa/lisarunner.py +++ b/lisa/runner.py @@ -16,7 +16,7 @@ from lisa.util.logger import get_logger -class LisaRunner(Action): +class Runner(Action): def __init__(self, runbook: schema.Runbook) -> None: super().__init__() self.exit_code: int = 0 diff --git a/lisa/tests/test_lisarunner.py b/lisa/tests/test_runner.py similarity index 94% rename from lisa/tests/test_lisarunner.py rename to lisa/tests/test_runner.py index ac06406883..59178ee97e 
100644 --- a/lisa/tests/test_lisarunner.py +++ b/lisa/tests/test_runner.py @@ -3,7 +3,7 @@ from lisa import schema from lisa.environment import load_environments -from lisa.lisarunner import LisaRunner +from lisa.runner import Runner from lisa.tests import test_platform, test_testsuite from lisa.tests.test_environment import generate_runbook as generate_env_runbook from lisa.tests.test_testsuite import ( @@ -15,11 +15,11 @@ from lisa.util import constants -def generate_lisarunner( +def generate_runner( env_runbook: Optional[schema.EnvironmentRoot] = None, case_use_new_env: bool = False, platform_schema: Optional[test_platform.MockPlatformSchema] = None, -) -> LisaRunner: +) -> Runner: platform_runbook = schema.Platform( type=constants.PLATFORM_MOCK, admin_password="not use it" ) @@ -38,12 +38,12 @@ def generate_lisarunner( ) if env_runbook: runbook.environment = env_runbook - runner = LisaRunner(runbook) + runner = Runner(runbook) return runner -class LisaRunnerTestCase(IsolatedAsyncioTestCase): +class RunnerTestCase(IsolatedAsyncioTestCase): def tearDown(self) -> None: cleanup_cases_metadata() @@ -55,7 +55,7 @@ def test_merge_req_create_on_new(self) -> None: [], [x for x in envs], ) - runner = generate_lisarunner(None) + runner = generate_runner(None) test_results = generate_cases_result() runner._merge_test_requirements( test_results=test_results, @@ -84,7 +84,7 @@ def test_merge_req_run_not_create_on_equal(self) -> None: [x for x in envs], ) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) test_results = generate_cases_result() runner._merge_test_requirements( test_results=test_results, @@ -112,7 +112,7 @@ def test_merge_req_create_on_use_new(self) -> None: ["customized_0"], [x for x in envs], ) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) test_results = generate_cases_result() for test_result in test_results: test_result.runtime_data.use_new_environment = True @@ -143,7 +143,7 @@ def 
test_merge_req_not_allow_create(self) -> None: [], [x for x in envs], ) - runner = generate_lisarunner(None) + runner = generate_runner(None) test_results = generate_cases_result() runner._merge_test_requirements( test_results=test_results, @@ -177,7 +177,7 @@ def test_merge_req_platform_type_checked(self) -> None: [], [x for x in envs], ) - runner = generate_lisarunner(None) + runner = generate_runner(None) test_results = generate_cases_result() for test_result in test_results: metadata = test_result.runtime_data.metadata @@ -213,7 +213,7 @@ async def test_fit_a_predefined_env(self) -> None: # so it can run any case with core requirements. generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() self.verify_env_results( expected_prepared=["customized_0", "generated_1", "generated_2"], @@ -233,7 +233,7 @@ async def test_fit_a_bigger_env(self) -> None: # it doesn't equal to any case req, but reusable for all cases. 
generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() self.verify_env_results( expected_prepared=[ @@ -258,7 +258,7 @@ async def test_case_new_env_run_only_1_needed(self) -> None: # but all case want to run on a new env generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook, case_use_new_env=True) + runner = generate_runner(env_runbook, case_use_new_env=True) await runner.start() self.verify_env_results( expected_prepared=[ @@ -283,7 +283,7 @@ async def test_no_needed_env(self) -> None: # no cases assigned to customized_1, as fit cases run on customized_0 already generate_cases_metadata() env_runbook = generate_env_runbook(local=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() self.verify_env_results( expected_prepared=[ @@ -311,7 +311,7 @@ async def test_deploy_no_more_resource(self) -> None: platform_schema.wait_more_resource_error = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True) - runner = generate_lisarunner(env_runbook, platform_schema=platform_schema) + runner = generate_runner(env_runbook, platform_schema=platform_schema) await runner.start() self.verify_env_results( @@ -346,7 +346,7 @@ async def test_skipped_on_suite_failure(self) -> None: test_testsuite.fail_on_before_suite = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() self.verify_env_results( expected_prepared=[ @@ -377,7 +377,7 @@ async def test_env_skipped_no_prepared_env(self) -> None: platform_schema.return_prepared = False generate_cases_metadata() env_runbook = 
generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook, platform_schema=platform_schema) + runner = generate_runner(env_runbook, platform_schema=platform_schema) await runner.start() self.verify_env_results( expected_prepared=[ @@ -409,7 +409,7 @@ async def test_env_skipped_not_ready(self) -> None: platform_schema.deploy_is_ready = False generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) - runner = generate_lisarunner(env_runbook, platform_schema=platform_schema) + runner = generate_runner(env_runbook, platform_schema=platform_schema) await runner.start() self.verify_env_results( expected_prepared=[ @@ -443,7 +443,7 @@ async def test_env_skipped_no_case(self) -> None: # no case found, as not call generate_case_metadata # in this case, not deploy any env env_runbook = generate_env_runbook(is_single_env=True, remote=True) - runner = generate_lisarunner(env_runbook) + runner = generate_runner(env_runbook) await runner.start() # still prepare predefined, but not deploy self.verify_env_results( @@ -493,7 +493,7 @@ def verify_env_results( expected_prepared: List[str], expected_deployed_envs: List[str], expected_deleted_envs: List[str], - runner: LisaRunner, + runner: Runner, ) -> None: platform = cast(test_platform.MockPlatform, runner._latest_platform) platform_test_data = platform.test_data From 3e446e075a7256b2e9eef889a4e76edae83e870c Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 2 Oct 2020 18:09:05 -0700 Subject: [PATCH 10/16] Delete fluffy `_create_test_results` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since it’s really just a list comprehension. Also add a bunch of notes identifying things to investigate further. 
--- lisa/runner.py | 17 ++++++----------- lisa/testsuite.py | 8 +++++--- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/lisa/runner.py b/lisa/runner.py index 56e2762e71..a2c06a7ff5 100644 --- a/lisa/runner.py +++ b/lisa/runner.py @@ -7,7 +7,6 @@ from lisa.testselector import select_testcases from lisa.testsuite import ( TestCaseRequirement, - TestCaseRuntimeData, TestResult, TestStatus, TestSuite, @@ -24,8 +23,10 @@ def __init__(self, runbook: schema.Runbook) -> None: self._runbook = runbook self._log = get_logger("runner") + # TODO: This entire function is one long string of side-effects. + # We need to reduce this function's complexity to remove the + # disabled warning, and not rely solely on side effects. async def start(self) -> None: # noqa: C901 - # TODO: Reduce this function's complexity and remove the disabled warning. await super().start() self.status = ActionStatus.RUNNING @@ -33,7 +34,9 @@ async def start(self) -> None: # noqa: C901 selected_test_cases = select_testcases(self._runbook.testcase) # create test results - selected_test_results = self._create_test_results(selected_test_cases) + selected_test_results = [ + TestResult(runtime_data=case) for case in selected_test_cases + ] # load predefined environments candidate_environments = load_environments(self._runbook.environment) @@ -199,14 +202,6 @@ async def _run_suite( result.environment = environment await test_suite.start() - def _create_test_results( - self, cases: List[TestCaseRuntimeData] - ) -> List[TestResult]: - test_results: List[TestResult] = [] - for x in cases: - test_results.append(TestResult(runtime_data=x)) - return test_results - def _merge_test_requirements( self, test_results: List[TestResult], diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 8a31e3964d..603b36f7fc 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -220,7 +220,7 @@ def __init__( self.requirement = requirement def __getattr__(self, key: str) -> Any: - # inherit all attributes of test 
suite + # return attributes of test suite, if it's not redefined in case level assert self.suite, "suite is not set before use metadata" return getattr(self.suite, key) @@ -254,7 +254,7 @@ def __init__(self, metadata: TestCaseMetadata): self.environment_name: str = "" def __getattr__(self, key: str) -> Any: - # inherit all attributes of metadata + # return attributes of metadata for convenient assert self.metadata return getattr(self.metadata, key) @@ -299,8 +299,10 @@ def before_case(self) -> None: def after_case(self) -> None: pass + # TODO: This entire function is one long string of side-effects. + # We need to reduce this function's complexity to remove the + # disabled warning, and not rely solely on side effects. async def start(self) -> None: # noqa: C901 - # TODO: Reduce this function's complexity and remove the disabled warning. suite_error_message = "" is_suite_continue = True From 115c25133dadcdc30069ef071d5e4c1edee3dbcf Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 5 Oct 2020 18:59:44 -0700 Subject: [PATCH 11/16] clean up tests --- lisa/tests/test_runner.py | 51 +++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/lisa/tests/test_runner.py b/lisa/tests/test_runner.py index 59178ee97e..6d43e1a995 100644 --- a/lisa/tests/test_runner.py +++ b/lisa/tests/test_runner.py @@ -21,7 +21,7 @@ def generate_runner( platform_schema: Optional[test_platform.MockPlatformSchema] = None, ) -> Runner: platform_runbook = schema.Platform( - type=constants.PLATFORM_MOCK, admin_password="not use it" + type=constants.PLATFORM_MOCK, admin_password="do-not-use" ) if platform_schema: platform_runbook.extended_schemas = { @@ -45,7 +45,7 @@ def generate_runner( class RunnerTestCase(IsolatedAsyncioTestCase): def tearDown(self) -> None: - cleanup_cases_metadata() + cleanup_cases_metadata() # Necessary side effects! 
def test_merge_req_create_on_new(self) -> None: # if no predefined envs, can generate from requirement @@ -62,10 +62,10 @@ def test_merge_req_create_on_new(self) -> None: existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - # 3 cases create 3 envs + # 3 cases create 3 environments. self.assertListEqual( ["generated_0", "generated_1", "generated_2"], - [x for x in envs], + list(envs), ) self.verify_test_results( expected_envs=["", "", ""], @@ -81,7 +81,7 @@ def test_merge_req_run_not_create_on_equal(self) -> None: envs = load_environments(env_runbook) self.assertListEqual( ["customized_0"], - [x for x in envs], + list(envs), ) runner = generate_runner(env_runbook) @@ -91,11 +91,11 @@ def test_merge_req_run_not_create_on_equal(self) -> None: existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - - # 3 cases created only two req, as simple req meets on customized_0 + # 3 cases created only two required environments, as the + # simple requirement was met by runbook_0. self.assertListEqual( ["customized_0", "generated_1", "generated_2"], - [x for x in envs], + list(envs), ) self.assertListEqual( [TestStatus.NOTRUN, TestStatus.NOTRUN, TestStatus.NOTRUN], @@ -110,7 +110,7 @@ def test_merge_req_create_on_use_new(self) -> None: envs = load_environments(env_runbook) self.assertListEqual( ["customized_0"], - [x for x in envs], + list(envs), ) runner = generate_runner(env_runbook) test_results = generate_cases_result() @@ -121,10 +121,10 @@ def test_merge_req_create_on_use_new(self) -> None: existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - # every case need a new environment, so created 3 + # All 3 cases needed a new environment, so it created 3. 
self.assertListEqual( ["customized_0", "generated_1", "generated_2", "generated_3"], - [x for x in envs], + list(envs), ) self.verify_test_results( expected_envs=["", "", ""], @@ -141,7 +141,7 @@ def test_merge_req_not_allow_create(self) -> None: envs = load_environments(env_runbook) self.assertListEqual( [], - [x for x in envs], + list(envs), ) runner = generate_runner(None) test_results = generate_cases_result() @@ -152,7 +152,7 @@ def test_merge_req_not_allow_create(self) -> None: ) self.assertListEqual( [], - [x for x in envs], + list(envs), ) self.verify_test_results( @@ -175,21 +175,20 @@ def test_merge_req_platform_type_checked(self) -> None: envs = load_environments(env_runbook) self.assertListEqual( [], - [x for x in envs], + list(envs), ) runner = generate_runner(None) test_results = generate_cases_result() for test_result in test_results: metadata = test_result.runtime_data.metadata metadata.requirement = simple_requirement( - supported_platform_type=["notexists"] + supported_platform_type=["does-not-exist"] ) runner._merge_test_requirements( test_results=test_results, existing_environments=envs, platform_type=constants.PLATFORM_MOCK, ) - platform_unsupported = "capability cannot support some of requirement" self.verify_test_results( expected_envs=["", "", ""], @@ -231,6 +230,7 @@ async def test_fit_a_predefined_env(self) -> None: async def test_fit_a_bigger_env(self) -> None: # similar with test_fit_a_predefined_env, but predefined 2 nodes, # it doesn't equal to any case req, but reusable for all cases. 
+ generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) runner = generate_runner(env_runbook) @@ -281,6 +281,7 @@ async def test_case_new_env_run_only_1_needed(self) -> None: async def test_no_needed_env(self) -> None: # two 1 node env predefined, but only customized_0 go to deploy # no cases assigned to customized_1, as fit cases run on customized_0 already + generate_cases_metadata() env_runbook = generate_env_runbook(local=True, remote=True) runner = generate_runner(env_runbook) @@ -342,7 +343,7 @@ async def test_deploy_no_more_resource(self) -> None: ) async def test_skipped_on_suite_failure(self) -> None: - # first two cases skipped due to test suite setup failed + # First two tests were skipped because the setup is made to fail. test_testsuite.fail_on_before_suite = True generate_cases_metadata() env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True) @@ -359,6 +360,7 @@ async def test_skipped_on_suite_failure(self) -> None: expected_deleted_envs=["customized_0"], runner=runner, ) + before_suite_failed = "before_suite: failed" self.verify_test_results( expected_envs=["customized_0", "customized_0", "customized_0"], @@ -390,7 +392,8 @@ async def test_env_skipped_no_prepared_env(self) -> None: expected_deleted_envs=[], runner=runner, ) - no_avaiable_env = "no available environment" + + no_available_env = "no available environment" self.verify_test_results( expected_envs=["", "", ""], expected_status=[ @@ -398,7 +401,7 @@ async def test_env_skipped_no_prepared_env(self) -> None: TestStatus.SKIPPED, TestStatus.SKIPPED, ], - expected_message=[no_avaiable_env, no_avaiable_env, no_avaiable_env], + expected_message=[no_available_env, no_available_env, no_available_env], test_results=runner._latest_test_results, ) @@ -427,7 +430,7 @@ async def test_env_skipped_not_ready(self) -> None: expected_deleted_envs=[], runner=runner, ) - no_avaiable_env = "no available environment" + no_available_env = 
"no available environment" self.verify_test_results( expected_envs=["", "", ""], expected_status=[ @@ -435,7 +438,7 @@ async def test_env_skipped_not_ready(self) -> None: TestStatus.SKIPPED, TestStatus.SKIPPED, ], - expected_message=[no_avaiable_env, no_avaiable_env, no_avaiable_env], + expected_message=[no_available_env, no_available_env, no_available_env], test_results=runner._latest_test_results, ) @@ -500,13 +503,13 @@ def verify_env_results( self.assertListEqual( expected_prepared, - [x for x in platform_test_data.prepared_envs], + list(platform_test_data.prepared_envs), ) self.assertListEqual( expected_deployed_envs, - [x for x in platform_test_data.deployed_envs], + list(platform_test_data.deployed_envs), ) self.assertListEqual( expected_deleted_envs, - [x for x in platform_test_data.deleted_envs], + list(platform_test_data.deleted_envs), ) From 760060c7f9c638873df05f29b6dd29933718119a Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 6 Oct 2020 18:29:28 -0700 Subject: [PATCH 12/16] Fix some types in process and shell MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There’s more work to do because this was using duck typing. It’s a little better now, but it’s just a massive wrapper for a library that’s just another wrapper for Python’s actual APIs (which are just wrappers for the system’s APIs). --- lisa/util/process.py | 23 +++++++++++++---------- lisa/util/shell.py | 4 ++-- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/lisa/util/process.py b/lisa/util/process.py index 862b370d2d..8cc4bf085a 100644 --- a/lisa/util/process.py +++ b/lisa/util/process.py @@ -5,6 +5,7 @@ from dataclasses import dataclass from typing import Dict, Optional +import spur # type: ignore from spur.errors import NoSuchCommandError # type: ignore from lisa.util.logger import Logger, LogWriter, get_logger @@ -23,6 +24,7 @@ def __str__(self) -> str: return self.stdout +# TODO: So much cleanup here. 
It was using duck typing. class Process: def __init__( self, @@ -36,6 +38,8 @@ def __init__( self._is_linux = shell.is_linux self._running: bool = False self._log = get_logger("cmd", id_, parent=parent_logger) + self._process: Optional[spur.local.LocalProcess] = None + self._result: Optional[ExecutableResult] = None def start( self, @@ -107,7 +111,7 @@ def start( except (FileNotFoundError, NoSuchCommandError) as identifier: # FileNotFoundError: not found command on Windows # NoSuchCommandError: not found command on remote Linux - self._process = ExecutableResult( + self._result = ExecutableResult( "", identifier.strerror, 1, self._timer.elapsed() ) self._log.debug(f"not found command: {identifier}") @@ -123,30 +127,29 @@ def wait_result(self, timeout: float = 600) -> ExecutableResult: self._log.warning(f"timeout in {timeout} sec, and killed") self.kill() - if not isinstance(self._process, ExecutableResult): + if self._result is None: + # if not isinstance(self._process, ExecutableResult): assert self._process proces_result = self._process.wait_for_result() self.stdout_writer.close() self.stderr_writer.close() - result: ExecutableResult = ExecutableResult( + # cache for future queries, in case it's queried twice. + self._result = ExecutableResult( proces_result.output.strip(), proces_result.stderr_output.strip(), proces_result.return_code, self._timer.elapsed(), ) - # cache for future queries, in case it's queried twice. 
- self._process = result - else: - result = self._process + self._process = None self._log.debug(f"waited with {self._timer}") - return result + return self._result def kill(self) -> None: - if self._process and not isinstance(self._process, ExecutableResult): + if self._process: self._process.send_signal(9) def is_running(self) -> bool: - if self._running: + if self._running and self._process: self._running = self._process.is_running() return self._running diff --git a/lisa/util/shell.py b/lisa/util/shell.py index b51f7ab6a7..f695e8da6d 100644 --- a/lisa/util/shell.py +++ b/lisa/util/shell.py @@ -177,7 +177,7 @@ def spawn( encoding: str = "utf-8", use_pty: bool = False, allow_error: bool = True, - ) -> Any: + ) -> spur.ssh.SshProcess: self.initialize() assert self._inner_shell return self._inner_shell.spawn( @@ -307,7 +307,7 @@ def spawn( encoding: str = "utf-8", use_pty: bool = False, allow_error: bool = False, - ) -> Any: + ) -> spur.local.LocalProcess: return self._inner_shell.spawn( command=command, update_env=update_env, From 44753630cb658c62c8fc177bb399447f611121fd Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 6 Oct 2020 18:30:25 -0700 Subject: [PATCH 13/16] Manually close the fds which spur leaks Co-authored-by: Chi Song <27178119+squirrelsc@users.noreply.github.com> --- lisa/util/process.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/lisa/util/process.py b/lisa/util/process.py index 8cc4bf085a..798c1c7913 100644 --- a/lisa/util/process.py +++ b/lisa/util/process.py @@ -1,6 +1,7 @@ import logging import pathlib import shlex +import subprocess import time from dataclasses import dataclass from typing import Dict, Optional @@ -140,6 +141,25 @@ def wait_result(self, timeout: float = 600) -> ExecutableResult: proces_result.return_code, self._timer.elapsed(), ) + # TODO: The spur library is not very good and leaves open + # resources (probably due to it starting the process with + # `bufsize=0`). 
We need to replace it, but for now, we + # manually close the leaks. + if isinstance(self._process, spur.local.LocalProcess): + popen: subprocess.Popen[str] = self._process._subprocess + if popen.stdin: + popen.stdin.close() + if popen.stdout: + popen.stdout.close() + if popen.stderr: + popen.stderr.close() + elif isinstance(self._process, spur.ssh.SshProcess): + if self._process._stdin: + self._process._stdin.close() + if self._process._stdout: + self._process._stdout.close() + if self._process._stderr: + self._process._stderr.close() self._process = None self._log.debug(f"waited with {self._timer}") From 039e3feea5a70214ef9de9683a8dcdc90ead2a27 Mon Sep 17 00:00:00 2001 From: Chi Song <27178119+squirrelsc@users.noreply.github.com> Date: Mon, 26 Oct 2020 09:49:21 +0800 Subject: [PATCH 14/16] remove dead code set_suite --- lisa/testsuite.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 603b36f7fc..1b7cfa3b90 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -214,6 +214,8 @@ def __init__( priority: int = 2, requirement: Optional[TestCaseRequirement] = None, ) -> None: + self.suite: TestSuiteMetadata + self.priority = priority self.description = description if requirement: @@ -237,9 +239,6 @@ def wrapper(*args: object) -> None: return wrapper - def set_suite(self, suite: TestSuiteMetadata) -> None: - self.suite: TestSuiteMetadata = suite - class TestCaseRuntimeData: def __init__(self, metadata: TestCaseMetadata): From 4116850690b3aa46ab12d6d3cee52dd1fcef8231 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 7 Oct 2020 16:47:29 -0700 Subject: [PATCH 15/16] =?UTF-8?q?Don=E2=80=99t=20subclass=20ABCMeta=20when?= =?UTF-8?q?=20unneeded?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `LisaTestCase` is _not_ an abstract base class, it’s just a base class (with a lot of implementation) and defines no abstract methods. So we remove this. 
--- lisa/testsuite.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lisa/testsuite.py b/lisa/testsuite.py index 1b7cfa3b90..bbed2fcae4 100644 --- a/lisa/testsuite.py +++ b/lisa/testsuite.py @@ -1,6 +1,5 @@ from __future__ import annotations -from abc import ABCMeta from dataclasses import dataclass, field from enum import Enum from functools import wraps @@ -271,7 +270,7 @@ def clone(self) -> TestCaseRuntimeData: return cloned -class TestSuite(Action, metaclass=ABCMeta): +class TestSuite(Action): def __init__( self, environment: Environment, From b536d96b1958ff3ef98eebb8f8047b155982454e Mon Sep 17 00:00:00 2001 From: Chi Song <27178119+squirrelsc@users.noreply.github.com> Date: Mon, 26 Oct 2020 11:39:04 +0800 Subject: [PATCH 16/16] remove debug flag to avoid stderr --- lisa/main.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lisa/main.py b/lisa/main.py index e8b63fd646..5402da681e 100644 --- a/lisa/main.py +++ b/lisa/main.py @@ -69,8 +69,7 @@ async def main() -> int: if __name__ == "__main__": exit_code = 0 try: - # TODO: Turn off debugging when we ship this. - exit_code = asyncio.run(main(), debug=True) + exit_code = asyncio.run(main()) except Exception as exception: exit_code = -1 log = get_logger()