From eca598a9ecb6040e766dd190a2bdae10b71ecf23 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 9 Oct 2020 17:49:45 -0700 Subject: [PATCH 01/84] Demo LISAv3 as simply pytest This is a _working_ test. --- pytest/.editorconfig | 2 + pytest/.flake8 | 5 + pytest/CODE_OF_CONDUCT.md | 9 + pytest/CONTRIBUTING.md | 269 ++++++++++ pytest/Makefile | 13 + pytest/README.md | 105 ++++ pytest/mypy.ini | 20 + pytest/poetry.lock | 935 ++++++++++++++++++++++++++++++++++ pytest/pyproject.toml | 43 ++ pytest/pytest.ini | 4 + pytest/testsuites/test_lis.py | 30 ++ 11 files changed, 1435 insertions(+) create mode 100644 pytest/.editorconfig create mode 100644 pytest/.flake8 create mode 100644 pytest/CODE_OF_CONDUCT.md create mode 100644 pytest/CONTRIBUTING.md create mode 100644 pytest/Makefile create mode 100644 pytest/README.md create mode 100644 pytest/mypy.ini create mode 100644 pytest/poetry.lock create mode 100644 pytest/pyproject.toml create mode 100644 pytest/pytest.ini create mode 100644 pytest/testsuites/test_lis.py diff --git a/pytest/.editorconfig b/pytest/.editorconfig new file mode 100644 index 0000000000..15e6a1f149 --- /dev/null +++ b/pytest/.editorconfig @@ -0,0 +1,2 @@ +# Ignore parent project’s config +root = true diff --git a/pytest/.flake8 b/pytest/.flake8 new file mode 100644 index 0000000000..f855799a35 --- /dev/null +++ b/pytest/.flake8 @@ -0,0 +1,5 @@ +[flake8] +max-line-length = 88 +select = B,BLK,C90,E,F,I,W +max-complexity = 15 +extend-ignore = E203 diff --git a/pytest/CODE_OF_CONDUCT.md b/pytest/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..f9ba8cf65f --- /dev/null +++ b/pytest/CODE_OF_CONDUCT.md @@ -0,0 +1,9 @@ +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
+ +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns diff --git a/pytest/CONTRIBUTING.md b/pytest/CONTRIBUTING.md new file mode 100644 index 0000000000..f74461187d --- /dev/null +++ b/pytest/CONTRIBUTING.md @@ -0,0 +1,269 @@ +# Contributing Guidelines + +This document describes the existing developer tooling we have in place (and what to +expect of it), as well as our design and development philosophy. + +## Naming Conventions + +Naming conventions are not automatically enforced, so please read the [naming +conventions](https://www.python.org/dev/peps/pep-0008/#naming-conventions) +section of PEP 8, which describes what each of the different styles means. A +short summary of the most important parts: + +* Modules (and hence files) should have short, all-lowercase names. +* Class (and exception) names should normally use the `CapWords` convention + (also known as `CamelCase`). +* Function and variable names should be lowercase, with words separated by + underscores as necessary to improve readability (also known as `snake_case`). +* To avoid collisions with the standard library, an underscore can be appended, + such as `id_`. +* Always use `self` for the first argument to instance methods. +* Always use `cls` for the first argument to class methods. +* Use one leading underscore only for non-public methods and instance variables, + such as `_data`. Do not activate name mangling with `__` unless necessary. +* If there is a pair of `get_x` and `set_x` methods, they should instead be a + proper property, which is easy to do with the built-in `@property` decorator. +* Constants should be `CAPITALIZED_SNAKE_CASE`. 
+* When importing a function, try to avoid renaming it with `import as` because + it introduces cognitive overhead to track yet another name. +* When deriving another module’s class (such as `unittest.TestCase`), reuse the + class name to avoid confusion, such as `LisaTestCase`, instead of introducing + a different connotation like `TestSuite`. + +When in doubt, adhere to existing conventions, or check the style guide. + +## Automated Tooling + +If you have run pytest-lisa already, then you have installed and used the `poetry` +tool. [Poetry][] is a [PEP 518][] compliant and cross-platform build system +which handles our Python dependencies and environment. + +This project’s dependencies are found in the [`pyproject.toml`](pyproject.toml) +file. This is similar to but more powerful than the familiar `requirements.txt`, +thanks to [PEP 518][] and [PEP 621][]. + +[Poetry]: https://python-poetry.org/docs/ +[PEP 518]: https://www.python.org/dev/peps/pep-0518/ +[PEP 621]: https://www.python.org/dev/peps/pep-0621/ + +### Metadata + +The first section, `tool.poetry`, defines the project’s metadata (name, version, +description, authors, and license) which will be embedded in the final built +package. + +The chosen version follows [Semantic Versioning][], with the [Python specific +pre-release versioning suffix][pre-release] ‘.dev1’. Since this is “pytest-lisa” it +seemed appropriate to set our version to ‘3.0.0.dev1’, that is, “the first +development release of pytest-lisa.” + +[Semantic Versioning]: https://semver.org/ +[pre-release]: https://packaging.python.org/guides/distributing-packages-using-setuptools/#choosing-a-versioning-scheme + +### Package Dependencies + +The next section, `tool.poetry.dependencies`, is where `poetry add +` records our required packages. + +Poetry automatically creates and manages [isolated +environments](https://python-poetry.org/docs/managing-environments/). 
+ +From the documentation: + +> Poetry will first check if it’s currently running inside a virtual +> environment. If it is, it will use it directly without creating a new one. But +> if it’s not, it will use one that it has already created or create a brand new +> one for you. + +On Linux, your initial run of `poetry install` will cause Poetry to +automatically setup a new [virtualenv][] using [pyenv][]. If you are developing +on Windows, you will want to setup your own, perhaps using [Conda][]. + +[virtualenv]: https://docs.python-guide.org/dev/virtualenvs/ +[pyenv]: https://github.com/pyenv/pyenv +[Conda]: https://docs.conda.io/en/latest/ + +* python: We pinned Python to version 3.8 so everyone uses the same version. + +### Developer Dependencies + +Similar to the previous section, `tool.poetry.dev-dependencies` is where `poetry +add --dev ` records our _developer_ packages. These are not +necessary for LISAv3 to execute, but are used by developers to automatically +adhere to our coding standards. + +* [Black](https://github.com/psf/black), the opinionated code formatter which + settles all debates as to how our Python files should be formatted. It follows + [PEP 8](https://www.python.org/dev/peps/pep-0008/), the official Python style + guide, and where ambiguous makes the decision for us. + +* [Flake8](https://flake8.pycqa.org/en/latest/) (and integrations), the semantic + analyzer, used to coordinate most of the other tools. + +* [isort](https://timothycrosley.github.io/isort/), the `import` sorter, which + automatically splits imports into the expected, alphabetized sections. + +* [mypy](http://mypy-lang.org/), the static type checker, which coupled with + type annotations allows us to avoid the pitfalls of Python being a dynamically + typed language. + +* [python-language-server](https://github.com/palantir/python-language-server) + (and integrations), the de facto LSP server. 
While Microsoft is developing + their own LSP servers, they do not integrate with the existing ecosystem of + tools, and their latest tool, Pyright, simply does not support + `pyproject.toml`. Since pyls is used far more widely, and supports every + editor, we use it. + +* [rope](https://github.com/python-rope/rope), to provide completions and + renaming support to pyls. + +With these packages installed and a correctly set-up editor (see the readme and +feel free to reach out to us), your code should automatically follow all the +standards which we could automate. + +The final sections, `tool.black`, `tool.isort`, `build-system`, and the +`.flake8` file (Flake8 does not yet support `pyproject.toml`) configure the +tools per their recommendations. + +## Type Annotations + +We are using [mypy][] to enforce static type checking of our Python code. This +may surprise you as Python is not a statically typed language. While dynamic +typing can be useful, for a complex tool such as LISA it is more likely to +introduce bugs that are found only at runtime (which the user experiences as a +crash). For more information on why we (and others) do this, see [Dropbox’s +journey to type checking 4 million lines of Python][dropbox]. [PEP 484][] and +[PEP 526][] (among others) introduced and defined [type hints][] for the Python +language. You can probably figure out the syntax based on the surrounding +code, but you can also see this [Intro to Using Python Type Hints][intro] and +mypy’s [cheat sheet][]. 
+ +[mypy]: http://mypy-lang.org/ +[dropbox]: https://dropbox.tech/application/our-journey-to-type-checking-4-million-lines-of-python +[PEP 484]: https://www.python.org/dev/peps/pep-0484/ +[PEP 526]: https://www.python.org/dev/peps/pep-0526/ +[type hints]: https://docs.python.org/3/library/typing.html +[intro]: https://kishstats.com/python/2019/01/07/python-type-hinting.html +[cheat sheet]: https://mypy.readthedocs.io/en/latest/cheat_sheet_py3.html + +## Runbook schema + +Plugins such as Platform need to follow this section to extend the runbook schema. A runbook holds the configuration of a LISA run. Every LISA run needs a runbook. + +The runbook uses [dataclass](https://docs.python.org/3/library/dataclasses.html) to define, [dataclass-json](https://github.com/lidatong/dataclasses-json/) to deserialize, and [marshmallow](https://marshmallow.readthedocs.io/en/3.0/api_reference.html) to validate the schema. + +See more examples in [schema.py](lisa/schema.py) if you need to extend the runbook schema. + +## Committing Guidelines + +A best practice when using [Git](https://git-scm.com/book/en/v2) is to create a +series of independent and well-documented commits. Each commit should “do one +thing” and do it correctly. If a mistake is made (you need to fix a bug or +adjust formatting), you should amend it (or use an [interactive +rebase](https://thoughtbot.com/blog/git-interactive-rebase-squash-amend-rewriting-history) +to edit it). If you’re using Emacs, the [Magit](https://magit.vc/) package makes +all of this easy. One of the reasons for making each commit polished is that it +aids immensely in future debugging. It lets us use tools like [`git +bisect`](https://git-scm.com/docs/git-bisect) to automatically find bugs, and +understand why prior code was written. Although some of it has gone out of date, +see this otherwise great essay on [Git best +practices](http://sethrobertson.github.io/GitBestPractices/). 
For how Git works, +read [Git from the Bottom +Up](https://jwiegley.github.io/git-from-the-bottom-up/). + +For writing your commit messages, see this modification of [Tim Pope’s +example](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html): + +> Capitalized, short (72 chars or less) summary +> +> More detailed explanatory text, if necessary. Wrap it to about 72 +> characters or so. In some contexts, the first line is treated as the +> subject of an email and the rest of the text as the body. The blank line +> separating the summary from the body is critical (unless you omit the +> body entirely); tools like rebase can get confused if you run the two +> together. +> +> Write your commit message in the imperative: “Fix bug” and not “Fixed +> bug” or “Fixes bug.” This convention matches up with commit messages +> generated by commands like git merge and git revert. +> +> Further paragraphs come after blank lines. +> +> * Bullet points are okay, too +> +> * Typically a hyphen or asterisk is used for the bullet, followed by a +> single space, with blank lines in between, but conventions vary here +> +> * Use a hanging indent + +You should also feel free to use Markdown in the commit messages, as our project +is hosted on GitHub which renders it (and Markdown is human readable). + +## Design Patterns + +The most important goal we are attempting to accomplish with LISAv3 is for it to +be “simple, clean, and with a low maintenance cost.” + +We should use caution when using Object Oriented Design, because when it is used +without critical analysis, it creates unmaintainable code. A great talk on this +subject is [Stop Writing Classes](https://www.youtube.com/watch?v=o9pEzgHorH0), +by Jack Diederich. As he says, “classes are great but they are also overused.” + +This [Python Design Patterns](https://python-patterns.guide/) is a fantastic +collection of material for writing maintainable Python code. 
It specifically +details many of the common “Object Oriented” patterns from the Gang of Four book +(which, in fact, were patterns geared toward languages like C++, and no longer +apply to modern languages like Python), what lessons can be learned from them, +and how to apply them (or their modern alternatives) today. It also serves as an +easy-to-read guide to the Gang of Four book itself, as its principles still +serve us well today. + +Every time a developer chooses to use a design pattern, that person needs to +reason through and document why it was chosen, and what alternatives were +considered. We will recreate the problems with LISAv2 unless we take our time to +carefully create a well-designed and maintainable framework. + +Several popular patterns that actually _do not_ work well in Python are: + +* [The Abstract Factory Pattern](https://python-patterns.guide/gang-of-four/abstract-factory/) +* [The Factory Method Pattern](https://python-patterns.guide/gang-of-four/factory-method/) +* [The Prototype Pattern](https://python-patterns.guide/gang-of-four/prototype/) +* [The Singleton Pattern](https://python-patterns.guide/gang-of-four/singleton/) + +Conversely, patterns that are a natural fit to Python include: + +* [The Composite Pattern](https://python-patterns.guide/gang-of-four/composite/) +* [The Iterator Pattern](https://python-patterns.guide/gang-of-four/iterator/) + (caution: it is actually better to implement these with `yield`!) + +Finally, a high-level guide to all things Python is [The Hitchhiker’s Guide to +Python](https://docs.python-guide.org/). It covers just about everything in the +Python world. If you make it through even some of these guides, you will be well +on your way to being a “Pythonista” (a Python developer) writing “Pythonic” +(canonically correct Python) code left and right. 
+ +### Async IO + +With Python 3.4, the Async IO pattern found in languages such as C# and Go is +available through the keywords `async` and `await`, along with the Python module +`asyncio`. Please read [Async IO in Python: A Complete +Walkthrough](https://realpython.com/async-io-python/) to understand at a high +level how asynchronous programming works. As of Python 3.7, One major “gotcha” +is that `asyncio.run(...)` should be used [exactly once in +`main`](https://docs.python.org/3/library/asyncio-task.html), it starts the +event loop. Everything else should be a coroutine or task which the event loop +schedules. + +## Future Sections + +Just a collection of reminders for the author to expand on later. + +* [unittest](https://docs.python.org/3/library/unittest.html) +* [doctest](https://docs.python.org/3/library/doctest.html) +* [subprocess](https://pymotw.com/3/subprocess/index.html) +* [GitHub Actions](https://github.com/LIS/LISAv2/actions) +* [ShellCheck](https://www.shellcheck.net/) +* [Governance](https://opensource.guide/leadership-and-governance/) +* [Maintenance Cost](https://web.archive.org/web/20120313070806/http://users.jyu.fi/~koskinen/smcosts.htm) +* Parallelism and multi-plexing +* Versioned inputs and outputs diff --git a/pytest/Makefile b/pytest/Makefile new file mode 100644 index 0000000000..5c052c4707 --- /dev/null +++ b/pytest/Makefile @@ -0,0 +1,13 @@ +all: setup run + +# Install Python packages +setup: + @poetry install --no-ansi --remove-untracked + +# Run Pytest +run: + @poetry run python -X dev -m pytest --flake8 --mypy -rA + +# Print current Python virtualenv +venv: + @poetry env list --no-ansi --full-path diff --git a/pytest/README.md b/pytest/README.md new file mode 100644 index 0000000000..38c1ae798b --- /dev/null +++ b/pytest/README.md @@ -0,0 +1,105 @@ +# LISAv3 via pytest-lisa + +[Pytest](https://docs.pytest.org/en/stable/) is an [incredibly +popular](https://docs.pytest.org/en/stable/talks.html) MIT licensed open source +Python 
testing framework. It has a thriving community and plugin framework, with +[over 750 plugins](https://plugincompat.herokuapp.com/). There is even a YAML +example of writing a Domain Specific Language +[DSL](https://docs.pytest.org/en/stable/example/nonpython.html#yaml-plugin) for +specifying tests. Instead of writing yet another test framework, LISAv3 could be +written as pytest-lisa, a [plugin for +Pytest](https://docs.pytest.org/en/stable/writing_plugins.html) which implements +our requirements. In fact, most of Pytest itself is implemented via [built-in +plugins](https://docs.pytest.org/en/stable/plugins.html), providing us with a +lot to leverage. + +The [fundamental features](https://www.youtube.com/watch?v=CMuSn9cofbI) of +Pytest match our needs very well: + +* Automatic test discovery, no boiler-plate test code +* Useful information when a test fails (assertions are introspected) +* Test parameterization +* Modular setup/teardown via fixtures +* Customizable (as detailed above) + +So all the logic for discovering, running, skipping based on requirements, and +reporting the tests is already written and maintained by the greater open source +community, leaving us to focus on the hard and unique problem: creating an API +to launch the necessary nodes. It would also allow us the space to abstract the +installation of tools required by tests. In this way, LISAv3 could solve the +difficulties we have at hand without creating yet another unit test framework. + +## Design + +### pytest-mark + +The [pytest-mark](https://docs.pytest.org/en/stable/mark.html) already provides +functionality for adding metadata to tests, where we specifically want: + +* Owner +* Category +* Area +* Tags +* Priority + +We could simply reuse this built-in plugin with minimal logic to enforce our +required metadata, with sane defaults (such as setting the area to the name of +the module), and to list statistics about our test coverage. 
+ +It also through pytest-mark that [skipping +functionality](https://docs.pytest.org/en/stable/skipping.html) exists, which we +would leverage for ensuring our environmental requirements are met. + +Note that Pytest leverages Python’s docstrings for built-in documentation (and +can even run tests discovered in such strings, like doctest). + +### Fixtures + +Pytest supports [fixtures](https://docs.pytest.org/en/stable/fixture.html), +which are the primary way of setting up test requirements. They replace less +flexible alternatives like setup/teardown functions. It is through fixtures that +pytest-lisa would implement remote node setup/teardown. Our node fixture would +implement (with more as found to be required): + +* Provision a node based on parameterized requirements +* Reboot the node if requested +* Run a command (perhaps asynchronously) on the node using SSH +* Download and upload files to the node (with retries and timeouts) + +Our abstraction would leverage +[Fabric](https://docs.fabfile.org/en/stable/index.html), which uses +[paramiko](https://docs.paramiko.org/en/stable/) underneath, directly to +implement the SSH commands, and it would use existing modules to deploy +[Azure](https://aka.ms/azsdk/python/all) and AWS nodes. We would need implement +specific logic for Hyper-V and similar platforms where APIs do not currently +exist, and this would be the bulk of our work instead of rewriting a unit test +framework. + +Other test specific requirements, such as installing software and daemons or +downloading files from remote storage, would similarly be implemented via +fixtures and shared among tests. + +Note that Paramiko is less complex (smaller library footprint) than Fabric, but +is a bit more difficult to use, and doesn’t support reading existing SSH config +files, nor does it support “ProxyJump” which we use heavily. 
+ +## pytest-xdist + +With the [pytest-xdist plugin](https://github.com/pytest-dev/pytest-xdist) there +already exists support for running a folder of tests on an arbitrary remote host +via SSH. + +The LISA tests could be written as Python code suitable for running on the +target test system, which means direct access to the system in the test code +itself (subprocesses are still available, without having to use SSH within the +test, but would become far less necessary), something that is not possible with +the current prototype. Where the pytest-xdist plugin copies the package of code +to the target node and runs it, the pytest-lisa plugin could instantiate that +node (boot the necessary image on a remote machine or launch a new Hyper-V or +Azure VM, etc.) for the tests. YAML playbooks (AKA “runbooks” in the current +prototype) could be interpreted by the pytest-lisa plugin to determine how to +create those nodes. + +However, this is only one approach, and we may prefer to run the Python code on +the user’s machine, with pytest-lisa instead providing the previously mentioned +node fixtures, default marks, and requirements logic. 
diff --git a/pytest/mypy.ini b/pytest/mypy.ini new file mode 100644 index 0000000000..b5a41dd5b5 --- /dev/null +++ b/pytest/mypy.ini @@ -0,0 +1,20 @@ +[mypy] +namespace_packages = True +pretty = True + +warn_unused_configs = True +disallow_any_generics = True +disallow_subclassing_any = True +disallow_untyped_calls = True +disallow_untyped_defs = True +disallow_incomplete_defs = True +check_untyped_defs = True +disallow_untyped_decorators = True +no_implicit_optional = True +warn_redundant_casts = True +warn_unused_ignores = True +warn_return_any = True +no_implicit_reexport = True +strict_equality = True + +warn_unreachable = True diff --git a/pytest/poetry.lock b/pytest/poetry.lock new file mode 100644 index 0000000000..a0a87e8d83 --- /dev/null +++ b/pytest/poetry.lock @@ -0,0 +1,935 @@ +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "atomicwrites" +version = "1.4.0" +description = "Atomic file writes." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "attrs" +version = "20.2.0" +description = "Classes Without Boilerplate" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.extras] +dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "sphinx", "sphinx-rtd-theme", "pre-commit"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface"] +tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six"] + +[[package]] +name = "bcrypt" +version = "3.2.0" +description = "Modern password hashing for your software and your servers" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +cffi = ">=1.1" +six = ">=1.4.1" + +[package.extras] +tests = ["pytest (>=3.2.1,<3.3.0 || >3.3.0)"] +typecheck = ["mypy"] + +[[package]] +name = "black" +version = "20.8b1" +description = "The uncompromising code formatter." +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +appdirs = "*" +click = ">=7.1.2" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.6,<1" +regex = ">=2020.1.8" +toml = ">=0.10.1" +typed-ast = ">=1.4.0" +typing-extensions = ">=3.7.4" + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] + +[[package]] +name = "cffi" +version = "1.14.3" +description = "Foreign Function Interface for Python calling C code." 
+category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "click" +version = "7.1.2" +description = "Composable command line interface toolkit" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "colorama" +version = "0.4.3" +description = "Cross-platform colored terminal text." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "cryptography" +version = "3.1.1" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +category = "main" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" + +[package.dependencies] +cffi = ">=1.8,<1.11.3 || >1.11.3" +six = ">=1.4.1" + +[package.extras] +docs = ["sphinx (>=1.6.5,<1.8.0 || >1.8.0,<3.1.0 || >3.1.0,<3.1.1 || >3.1.1)", "sphinx-rtd-theme"] +docstest = ["doc8", "pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] +pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["pytest (>=3.6.0,<3.9.0 || >3.9.0,<3.9.1 || >3.9.1,<3.9.2 || >3.9.2)", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,<3.79.2 || >3.79.2)"] + +[[package]] +name = "fabric" +version = "2.5.0" +description = "High level SSH command execution" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +invoke = ">=1.3,<2.0" +paramiko = ">=2.4" + +[package.extras] +pytest = ["mock (>=2.0.0,<3.0)", "pytest (>=3.2.5,<4.0)"] +testing = ["mock (>=2.0.0,<3.0)"] + +[[package]] +name = "filelock" +version = "3.0.12" +description = "A platform independent file lock." 
+category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "flake8" +version = "3.8.4" +description = "the modular source code checker: pep8 pyflakes and co" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" + +[package.dependencies] +mccabe = ">=0.6.0,<0.7.0" +pycodestyle = ">=2.6.0a1,<2.7.0" +pyflakes = ">=2.2.0,<2.3.0" + +[[package]] +name = "flake8-black" +version = "0.2.1" +description = "flake8 plugin to call black as a code style validator" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +black = "*" +flake8 = ">=3.0.0" + +[[package]] +name = "flake8-bugbear" +version = "20.1.4" +description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle." +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +attrs = ">=19.2.0" +flake8 = ">=3.0.0" + +[[package]] +name = "flake8-isort" +version = "4.0.0" +description = "flake8 plugin that integrates isort ." +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +flake8 = ">=3.2.1,<4" +isort = ">=4.3.5,<6" +testfixtures = ">=6.8.0,<7" + +[package.extras] +test = ["pytest (>=4.0.2,<6)", "toml"] + +[[package]] +name = "iniconfig" +version = "1.0.1" +description = "iniconfig: brain-dead simple config-ini parsing" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "invoke" +version = "1.4.1" +description = "Pythonic task execution" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "isort" +version = "5.6.1" +description = "A Python utility / library to sort Python imports." 
+category = "dev" +optional = false +python-versions = ">=3.6,<4.0" + +[package.extras] +pipfile_deprecated_finder = ["pipreqs", "requirementslib"] +requirements_deprecated_finder = ["pipreqs", "pip-api"] +colors = ["colorama (>=0.4.3,<0.5.0)"] + +[[package]] +name = "jedi" +version = "0.17.2" +description = "An autocompletion tool for Python that can be used for text editors." +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +parso = ">=0.7.0,<0.8.0" + +[package.extras] +qa = ["flake8 (3.7.9)"] +testing = ["Django (<3.1)", "colorama", "docopt", "pytest (>=3.9.0,<5.0.0)"] + +[[package]] +name = "mccabe" +version = "0.6.1" +description = "McCabe checker, plugin for flake8" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "mypy" +version = "0.782" +description = "Optional static typing for Python" +category = "dev" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +mypy-extensions = ">=0.4.3,<0.5.0" +typed-ast = ">=1.4.0,<1.5.0" +typing-extensions = ">=3.7.4" + +[package.extras] +dmypy = ["psutil (>=4.0)"] + +[[package]] +name = "mypy-extensions" +version = "0.4.3" +description = "Experimental type system extensions for programs checked with the mypy typechecker." 
+category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "packaging" +version = "20.4" +description = "Core utilities for Python packages" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +pyparsing = ">=2.0.2" +six = "*" + +[[package]] +name = "paramiko" +version = "2.7.2" +description = "SSH2 protocol library" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +bcrypt = ">=3.1.3" +cryptography = ">=2.5" +pynacl = ">=1.0.1" + +[package.extras] +all = ["pyasn1 (>=0.1.7)", "pynacl (>=1.0.1)", "bcrypt (>=3.1.3)", "invoke (>=1.3)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] +ed25519 = ["pynacl (>=1.0.1)", "bcrypt (>=3.1.3)"] +gssapi = ["pyasn1 (>=0.1.7)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] +invoke = ["invoke (>=1.3)"] + +[[package]] +name = "parso" +version = "0.7.1" +description = "A Python Parser" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.extras] +testing = ["docopt", "pytest (>=3.0.7)"] + +[[package]] +name = "pathspec" +version = "0.8.0" +description = "Utility library for gitignore style pattern matching of file paths." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "pluggy" +version = "0.13.1" +description = "plugin and hook calling mechanisms for python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.extras] +dev = ["pre-commit", "tox"] + +[[package]] +name = "py" +version = "1.9.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pycodestyle" +version = "2.6.0" +description = "Python style guide checker" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pycparser" +version = "2.20" +description = "C parser in Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pyflakes" +version = "2.2.0" +description = "passive checker of Python programs" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pyls-black" +version = "0.4.6" +description = "Black plugin for the Python Language Server" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +black = ">=19.3b0" +python-language-server = "*" +toml = "*" + +[package.extras] +dev = ["isort (>=5.0)", "flake8", "pytest", "mypy"] + +[[package]] +name = "pyls-isort" +version = "0.2.0" +description = "Isort plugin for python-language-server" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +isort = "*" +python-language-server = "*" + +[[package]] +name = "pyls-mypy" +version = "0.1.8" +description = "Mypy linter for the Python Language Server" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +mypy = "*" 
+python-language-server = "*" + +[package.extras] +test = ["tox", "versioneer", "pytest", "pytest-cov", "coverage"] + +[[package]] +name = "pynacl" +version = "1.4.0" +description = "Python binding to the Networking and Cryptography (NaCl) library" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +cffi = ">=1.4.1" +six = "*" + +[package.extras] +docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] +tests = ["pytest (>=3.2.1,<3.3.0 || >3.3.0)", "hypothesis (>=3.27.0)"] + +[[package]] +name = "pyparsing" +version = "2.4.7" +description = "Python parsing module" +category = "main" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "pytest" +version = "6.1.1" +description = "pytest: simple powerful testing with Python" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} +attrs = ">=17.4.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<1.0" +py = ">=1.8.2" +toml = "*" + +[package.extras] +checkqa_mypy = ["mypy (0.780)"] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] + +[[package]] +name = "pytest-flake8" +version = "1.0.6" +description = "pytest plugin to check FLAKE8 requirements" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +flake8 = ">=3.5" +pytest = ">=3.5" + +[[package]] +name = "pytest-mypy" +version = "0.7.0" +description = "Mypy static type checker plugin for Pytest" +category = "dev" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +filelock = ">=3.0" +mypy = {version = ">=0.700", markers = "python_version >= \"3.8\""} +pytest = ">=3.5" + +[[package]] +name = "python-jsonrpc-server" +version = "0.4.0" +description = "JSON RPC 2.0 server library" +category = 
"dev" +optional = false +python-versions = "*" + +[package.dependencies] +ujson = ">=3.0.0" + +[package.extras] +test = ["versioneer", "pylint", "pycodestyle", "pyflakes", "pytest", "mock", "pytest-cov", "coverage"] + +[[package]] +name = "python-language-server" +version = "0.35.1" +description = "Python Language Server for the Language Server Protocol" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +jedi = ">=0.17.0,<0.18.0" +pluggy = "*" +python-jsonrpc-server = ">=0.4.0" + +[package.extras] +all = ["autopep8", "flake8 (>=3.8.0)", "mccabe (>=0.6.0,<0.7.0)", "pycodestyle (>=2.6.0,<2.7.0)", "pydocstyle (>=2.0.0)", "pyflakes (>=2.2.0,<2.3.0)", "pylint (>=2.5.0)", "rope (>=0.10.5)", "yapf"] +autopep8 = ["autopep8"] +flake8 = ["flake8 (>=3.8.0)"] +mccabe = ["mccabe (>=0.6.0,<0.7.0)"] +pycodestyle = ["pycodestyle (>=2.6.0,<2.7.0)"] +pydocstyle = ["pydocstyle (>=2.0.0)"] +pyflakes = ["pyflakes (>=2.2.0,<2.3.0)"] +pylint = ["pylint (>=2.5.0)"] +rope = ["rope (>0.10.5)"] +test = ["versioneer", "pylint (>=2.5.0)", "pytest", "mock", "pytest-cov", "coverage", "numpy", "pandas", "matplotlib", "flaky", "pyqt5"] +yapf = ["yapf"] + +[[package]] +name = "regex" +version = "2020.9.27" +description = "Alternative regular expression module, to replace re." +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "rope" +version = "0.18.0" +description = "a python refactoring library..." +category = "dev" +optional = false +python-versions = "*" + +[package.extras] +dev = ["pytest"] + +[[package]] +name = "six" +version = "1.15.0" +description = "Python 2 and 3 compatibility utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "testfixtures" +version = "6.15.0" +description = "A collection of helpers and mock objects for unit tests and doc tests." 
+category = "dev" +optional = false +python-versions = "*" + +[package.extras] +build = ["setuptools-git", "wheel", "twine"] +docs = ["sphinx", "zope.component", "sybil", "twisted", "mock", "django (<2)", "django"] +test = ["pytest (>=3.6)", "pytest-cov", "pytest-django", "zope.component", "sybil", "twisted", "mock", "django (<2)", "django"] + +[[package]] +name = "toml" +version = "0.10.1" +description = "Python Library for Tom's Obvious, Minimal Language" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "typed-ast" +version = "1.4.1" +description = "a fork of Python 2 and 3 ast modules with type comment support" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "typing-extensions" +version = "3.7.4.3" +description = "Backported and Experimental Type Hints for Python 3.5+" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "ujson" +version = "4.0.1" +description = "Ultra fast JSON encoder and decoder for Python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[metadata] +lock-version = "1.1" +python-versions = "^3.8" +content-hash = "40a1da4f76e519932e1c86deaa81cb3331cc282c37ff78f39607936730cbb322" + +[metadata.files] +appdirs = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] +atomicwrites = [ + {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, + {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, +] +attrs = [ + {file = "attrs-20.2.0-py2.py3-none-any.whl", hash = "sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc"}, + {file = "attrs-20.2.0.tar.gz", hash = 
"sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594"}, +] +bcrypt = [ + {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6"}, + {file = "bcrypt-3.2.0-cp36-abi3-manylinux1_x86_64.whl", hash = "sha256:63d4e3ff96188e5898779b6057878fecf3f11cfe6ec3b313ea09955d587ec7a7"}, + {file = "bcrypt-3.2.0-cp36-abi3-manylinux2010_x86_64.whl", hash = "sha256:cd1ea2ff3038509ea95f687256c46b79f5fc382ad0aa3664d200047546d511d1"}, + {file = "bcrypt-3.2.0-cp36-abi3-manylinux2014_aarch64.whl", hash = "sha256:cdcdcb3972027f83fe24a48b1e90ea4b584d35f1cc279d76de6fc4b13376239d"}, + {file = "bcrypt-3.2.0-cp36-abi3-win32.whl", hash = "sha256:a67fb841b35c28a59cebed05fbd3e80eea26e6d75851f0574a9273c80f3e9b55"}, + {file = "bcrypt-3.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:81fec756feff5b6818ea7ab031205e1d323d8943d237303baca2c5f9c7846f34"}, + {file = "bcrypt-3.2.0.tar.gz", hash = "sha256:5b93c1726e50a93a033c36e5ca7fdcd29a5c7395af50a6892f5d9e7c6cfbfb29"}, +] +black = [ + {file = "black-20.8b1.tar.gz", hash = "sha256:1c02557aa099101b9d21496f8a914e9ed2222ef70336404eeeac8edba836fbea"}, +] +cffi = [ + {file = "cffi-1.14.3-2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3eeeb0405fd145e714f7633a5173318bd88d8bbfc3dd0a5751f8c4f70ae629bc"}, + {file = "cffi-1.14.3-2-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:cb763ceceae04803adcc4e2d80d611ef201c73da32d8f2722e9d0ab0c7f10768"}, + {file = "cffi-1.14.3-2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:44f60519595eaca110f248e5017363d751b12782a6f2bd6a7041cba275215f5d"}, + {file = "cffi-1.14.3-2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c53af463f4a40de78c58b8b2710ade243c81cbca641e34debf3396a9640d6ec1"}, + {file = "cffi-1.14.3-2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:33c6cdc071ba5cd6d96769c8969a0531be2d08c2628a0143a10a7dcffa9719ca"}, + {file = "cffi-1.14.3-2-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:c11579638288e53fc94ad60022ff1b67865363e730ee41ad5e6f0a17188b327a"}, + {file = "cffi-1.14.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:3cb3e1b9ec43256c4e0f8d2837267a70b0e1ca8c4f456685508ae6106b1f504c"}, + {file = "cffi-1.14.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f0620511387790860b249b9241c2f13c3a80e21a73e0b861a2df24e9d6f56730"}, + {file = "cffi-1.14.3-cp27-cp27m-win32.whl", hash = "sha256:005f2bfe11b6745d726dbb07ace4d53f057de66e336ff92d61b8c7e9c8f4777d"}, + {file = "cffi-1.14.3-cp27-cp27m-win_amd64.whl", hash = "sha256:2f9674623ca39c9ebe38afa3da402e9326c245f0f5ceff0623dccdac15023e05"}, + {file = "cffi-1.14.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:09e96138280241bd355cd585148dec04dbbedb4f46128f340d696eaafc82dd7b"}, + {file = "cffi-1.14.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:3363e77a6176afb8823b6e06db78c46dbc4c7813b00a41300a4873b6ba63b171"}, + {file = "cffi-1.14.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0ef488305fdce2580c8b2708f22d7785ae222d9825d3094ab073e22e93dfe51f"}, + {file = "cffi-1.14.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:0b1ad452cc824665ddc682400b62c9e4f5b64736a2ba99110712fdee5f2505c4"}, + {file = "cffi-1.14.3-cp35-cp35m-win32.whl", hash = "sha256:85ba797e1de5b48aa5a8427b6ba62cf69607c18c5d4eb747604b7302f1ec382d"}, + {file = "cffi-1.14.3-cp35-cp35m-win_amd64.whl", hash = "sha256:e66399cf0fc07de4dce4f588fc25bfe84a6d1285cc544e67987d22663393926d"}, + {file = "cffi-1.14.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:15f351bed09897fbda218e4db5a3d5c06328862f6198d4fb385f3e14e19decb3"}, + {file = "cffi-1.14.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:4d7c26bfc1ea9f92084a1d75e11999e97b62d63128bcc90c3624d07813c52808"}, + {file = "cffi-1.14.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:23e5d2040367322824605bc29ae8ee9175200b92cb5483ac7d466927a9b3d537"}, + {file = "cffi-1.14.3-cp36-cp36m-win32.whl", hash = "sha256:a624fae282e81ad2e4871bdb767e2c914d0539708c0f078b5b355258293c98b0"}, + 
{file = "cffi-1.14.3-cp36-cp36m-win_amd64.whl", hash = "sha256:de31b5164d44ef4943db155b3e8e17929707cac1e5bd2f363e67a56e3af4af6e"}, + {file = "cffi-1.14.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:f92cdecb618e5fa4658aeb97d5eb3d2f47aa94ac6477c6daf0f306c5a3b9e6b1"}, + {file = "cffi-1.14.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:22399ff4870fb4c7ef19fff6eeb20a8bbf15571913c181c78cb361024d574579"}, + {file = "cffi-1.14.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:f4eae045e6ab2bb54ca279733fe4eb85f1effda392666308250714e01907f394"}, + {file = "cffi-1.14.3-cp37-cp37m-win32.whl", hash = "sha256:b0358e6fefc74a16f745afa366acc89f979040e0cbc4eec55ab26ad1f6a9bfbc"}, + {file = "cffi-1.14.3-cp37-cp37m-win_amd64.whl", hash = "sha256:6642f15ad963b5092d65aed022d033c77763515fdc07095208f15d3563003869"}, + {file = "cffi-1.14.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:2791f68edc5749024b4722500e86303a10d342527e1e3bcac47f35fbd25b764e"}, + {file = "cffi-1.14.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:529c4ed2e10437c205f38f3691a68be66c39197d01062618c55f74294a4a4828"}, + {file = "cffi-1.14.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8f0f1e499e4000c4c347a124fa6a27d37608ced4fe9f7d45070563b7c4c370c9"}, + {file = "cffi-1.14.3-cp38-cp38-win32.whl", hash = "sha256:3b8eaf915ddc0709779889c472e553f0d3e8b7bdf62dab764c8921b09bf94522"}, + {file = "cffi-1.14.3-cp38-cp38-win_amd64.whl", hash = "sha256:bbd2f4dfee1079f76943767fce837ade3087b578aeb9f69aec7857d5bf25db15"}, + {file = "cffi-1.14.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:cc75f58cdaf043fe6a7a6c04b3b5a0e694c6a9e24050967747251fb80d7bce0d"}, + {file = "cffi-1.14.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:bf39a9e19ce7298f1bd6a9758fa99707e9e5b1ebe5e90f2c3913a47bc548747c"}, + {file = "cffi-1.14.3-cp39-cp39-win32.whl", hash = "sha256:d80998ed59176e8cba74028762fbd9b9153b9afc71ea118e63bbf5d4d0f9552b"}, + {file = "cffi-1.14.3-cp39-cp39-win_amd64.whl", hash = 
"sha256:c150eaa3dadbb2b5339675b88d4573c1be3cb6f2c33a6c83387e10cc0bf05bd3"}, + {file = "cffi-1.14.3.tar.gz", hash = "sha256:f92f789e4f9241cd262ad7a555ca2c648a98178a953af117ef7fad46aa1d5591"}, +] +click = [ + {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"}, + {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"}, +] +colorama = [ + {file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"}, + {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"}, +] +cryptography = [ + {file = "cryptography-3.1.1-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:65beb15e7f9c16e15934569d29fb4def74ea1469d8781f6b3507ab896d6d8719"}, + {file = "cryptography-3.1.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:983c0c3de4cb9fcba68fd3f45ed846eb86a2a8b8d8bc5bb18364c4d00b3c61fe"}, + {file = "cryptography-3.1.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:e97a3b627e3cb63c415a16245d6cef2139cca18bb1183d1b9375a1c14e83f3b3"}, + {file = "cryptography-3.1.1-cp27-cp27m-win32.whl", hash = "sha256:cb179acdd4ae1e4a5a160d80b87841b3d0e0be84af46c7bb2cd7ece57a39c4ba"}, + {file = "cryptography-3.1.1-cp27-cp27m-win_amd64.whl", hash = "sha256:b372026ebf32fe2523159f27d9f0e9f485092e43b00a5adacf732192a70ba118"}, + {file = "cryptography-3.1.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:680da076cad81cdf5ffcac50c477b6790be81768d30f9da9e01960c4b18a66db"}, + {file = "cryptography-3.1.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:5d52c72449bb02dd45a773a203196e6d4fae34e158769c896012401f33064396"}, + {file = "cryptography-3.1.1-cp35-abi3-macosx_10_10_x86_64.whl", hash = "sha256:f0e099fc4cc697450c3dd4031791559692dd941a95254cb9aeded66a7aa8b9bc"}, + {file = "cryptography-3.1.1-cp35-abi3-manylinux1_x86_64.whl", hash = 
"sha256:a7597ffc67987b37b12e09c029bd1dc43965f75d328076ae85721b84046e9ca7"}, + {file = "cryptography-3.1.1-cp35-abi3-manylinux2010_x86_64.whl", hash = "sha256:4549b137d8cbe3c2eadfa56c0c858b78acbeff956bd461e40000b2164d9167c6"}, + {file = "cryptography-3.1.1-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:89aceb31cd5f9fc2449fe8cf3810797ca52b65f1489002d58fe190bfb265c536"}, + {file = "cryptography-3.1.1-cp35-cp35m-win32.whl", hash = "sha256:559d622aef2a2dff98a892eef321433ba5bc55b2485220a8ca289c1ecc2bd54f"}, + {file = "cryptography-3.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:451cdf60be4dafb6a3b78802006a020e6cd709c22d240f94f7a0696240a17154"}, + {file = "cryptography-3.1.1-cp36-abi3-win32.whl", hash = "sha256:762bc5a0df03c51ee3f09c621e1cee64e3a079a2b5020de82f1613873d79ee70"}, + {file = "cryptography-3.1.1-cp36-abi3-win_amd64.whl", hash = "sha256:b12e715c10a13ca1bd27fbceed9adc8c5ff640f8e1f7ea76416352de703523c8"}, + {file = "cryptography-3.1.1-cp36-cp36m-win32.whl", hash = "sha256:21b47c59fcb1c36f1113f3709d37935368e34815ea1d7073862e92f810dc7499"}, + {file = "cryptography-3.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:48ee615a779ffa749d7d50c291761dc921d93d7cf203dca2db663b4f193f0e49"}, + {file = "cryptography-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:b2bded09c578d19e08bd2c5bb8fed7f103e089752c9cf7ca7ca7de522326e921"}, + {file = "cryptography-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f99317a0fa2e49917689b8cf977510addcfaaab769b3f899b9c481bbd76730c2"}, + {file = "cryptography-3.1.1-cp38-cp38-win32.whl", hash = "sha256:ab010e461bb6b444eaf7f8c813bb716be2d78ab786103f9608ffd37a4bd7d490"}, + {file = "cryptography-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:99d4984aabd4c7182050bca76176ce2dbc9fa9748afe583a7865c12954d714ba"}, + {file = "cryptography-3.1.1.tar.gz", hash = "sha256:9d9fc6a16357965d282dd4ab6531013935425d0dc4950df2e0cf2a1b1ac1017d"}, +] +fabric = [ + {file = "fabric-2.5.0-py2.py3-none-any.whl", hash = 
"sha256:160331934ea60036604928e792fa8e9f813266b098ef5562aa82b88527740389"}, + {file = "fabric-2.5.0.tar.gz", hash = "sha256:24842d7d51556adcabd885ac3cf5e1df73fc622a1708bf3667bf5927576cdfa6"}, +] +filelock = [ + {file = "filelock-3.0.12-py3-none-any.whl", hash = "sha256:929b7d63ec5b7d6b71b0fa5ac14e030b3f70b75747cef1b10da9b879fef15836"}, + {file = "filelock-3.0.12.tar.gz", hash = "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59"}, +] +flake8 = [ + {file = "flake8-3.8.4-py2.py3-none-any.whl", hash = "sha256:749dbbd6bfd0cf1318af27bf97a14e28e5ff548ef8e5b1566ccfb25a11e7c839"}, + {file = "flake8-3.8.4.tar.gz", hash = "sha256:aadae8761ec651813c24be05c6f7b4680857ef6afaae4651a4eccaef97ce6c3b"}, +] +flake8-black = [ + {file = "flake8-black-0.2.1.tar.gz", hash = "sha256:f26651bc10db786c03f4093414f7c9ea982ed8a244cec323c984feeffdf4c118"}, +] +flake8-bugbear = [ + {file = "flake8-bugbear-20.1.4.tar.gz", hash = "sha256:bd02e4b009fb153fe6072c31c52aeab5b133d508095befb2ffcf3b41c4823162"}, + {file = "flake8_bugbear-20.1.4-py36.py37.py38-none-any.whl", hash = "sha256:a3ddc03ec28ba2296fc6f89444d1c946a6b76460f859795b35b77d4920a51b63"}, +] +flake8-isort = [ + {file = "flake8-isort-4.0.0.tar.gz", hash = "sha256:2b91300f4f1926b396c2c90185844eb1a3d5ec39ea6138832d119da0a208f4d9"}, + {file = "flake8_isort-4.0.0-py2.py3-none-any.whl", hash = "sha256:729cd6ef9ba3659512dee337687c05d79c78e1215fdf921ed67e5fe46cce2f3c"}, +] +iniconfig = [ + {file = "iniconfig-1.0.1-py3-none-any.whl", hash = "sha256:80cf40c597eb564e86346103f609d74efce0f6b4d4f30ec8ce9e2c26411ba437"}, + {file = "iniconfig-1.0.1.tar.gz", hash = "sha256:e5f92f89355a67de0595932a6c6c02ab4afddc6fcdc0bfc5becd0d60884d3f69"}, +] +invoke = [ + {file = "invoke-1.4.1-py2-none-any.whl", hash = "sha256:93e12876d88130c8e0d7fd6618dd5387d6b36da55ad541481dfa5e001656f134"}, + {file = "invoke-1.4.1-py3-none-any.whl", hash = "sha256:87b3ef9d72a1667e104f89b159eaf8a514dbf2f3576885b2bbdefe74c3fb2132"}, + {file = "invoke-1.4.1.tar.gz", 
hash = "sha256:de3f23bfe669e3db1085789fd859eb8ca8e0c5d9c20811e2407fa042e8a5e15d"}, +] +isort = [ + {file = "isort-5.6.1-py3-none-any.whl", hash = "sha256:dd3211f513f4a92ec1ec1876fc1dc3c686649c349d49523f5b5adbb0814e5960"}, + {file = "isort-5.6.1.tar.gz", hash = "sha256:2f510f34ae18a8d0958c53eec51ef84fd099f07c4c639676525acbcd7b5bd3ff"}, +] +jedi = [ + {file = "jedi-0.17.2-py2.py3-none-any.whl", hash = "sha256:98cc583fa0f2f8304968199b01b6b4b94f469a1f4a74c1560506ca2a211378b5"}, + {file = "jedi-0.17.2.tar.gz", hash = "sha256:86ed7d9b750603e4ba582ea8edc678657fb4007894a12bcf6f4bb97892f31d20"}, +] +mccabe = [ + {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, + {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, +] +mypy = [ + {file = "mypy-0.782-cp35-cp35m-macosx_10_6_x86_64.whl", hash = "sha256:2c6cde8aa3426c1682d35190b59b71f661237d74b053822ea3d748e2c9578a7c"}, + {file = "mypy-0.782-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9c7a9a7ceb2871ba4bac1cf7217a7dd9ccd44c27c2950edbc6dc08530f32ad4e"}, + {file = "mypy-0.782-cp35-cp35m-win_amd64.whl", hash = "sha256:c05b9e4fb1d8a41d41dec8786c94f3b95d3c5f528298d769eb8e73d293abc48d"}, + {file = "mypy-0.782-cp36-cp36m-macosx_10_6_x86_64.whl", hash = "sha256:6731603dfe0ce4352c555c6284c6db0dc935b685e9ce2e4cf220abe1e14386fd"}, + {file = "mypy-0.782-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f05644db6779387ccdb468cc47a44b4356fc2ffa9287135d05b70a98dc83b89a"}, + {file = "mypy-0.782-cp36-cp36m-win_amd64.whl", hash = "sha256:b7fbfabdbcc78c4f6fc4712544b9b0d6bf171069c6e0e3cb82440dd10ced3406"}, + {file = "mypy-0.782-cp37-cp37m-macosx_10_6_x86_64.whl", hash = "sha256:3fdda71c067d3ddfb21da4b80e2686b71e9e5c72cca65fa216d207a358827f86"}, + {file = "mypy-0.782-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:d7df6eddb6054d21ca4d3c6249cae5578cb4602951fd2b6ee2f5510ffb098707"}, + {file = 
"mypy-0.782-cp37-cp37m-win_amd64.whl", hash = "sha256:a4a2cbcfc4cbf45cd126f531dedda8485671545b43107ded25ce952aac6fb308"}, + {file = "mypy-0.782-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6bb93479caa6619d21d6e7160c552c1193f6952f0668cdda2f851156e85186fc"}, + {file = "mypy-0.782-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:81c7908b94239c4010e16642c9102bfc958ab14e36048fa77d0be3289dda76ea"}, + {file = "mypy-0.782-cp38-cp38-win_amd64.whl", hash = "sha256:5dd13ff1f2a97f94540fd37a49e5d255950ebcdf446fb597463a40d0df3fac8b"}, + {file = "mypy-0.782-py3-none-any.whl", hash = "sha256:e0b61738ab504e656d1fe4ff0c0601387a5489ca122d55390ade31f9ca0e252d"}, + {file = "mypy-0.782.tar.gz", hash = "sha256:eff7d4a85e9eea55afa34888dfeaccde99e7520b51f867ac28a48492c0b1130c"}, +] +mypy-extensions = [ + {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, + {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, +] +packaging = [ + {file = "packaging-20.4-py2.py3-none-any.whl", hash = "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"}, + {file = "packaging-20.4.tar.gz", hash = "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8"}, +] +paramiko = [ + {file = "paramiko-2.7.2-py2.py3-none-any.whl", hash = "sha256:4f3e316fef2ac628b05097a637af35685183111d4bc1b5979bd397c2ab7b5898"}, + {file = "paramiko-2.7.2.tar.gz", hash = "sha256:7f36f4ba2c0d81d219f4595e35f70d56cc94f9ac40a6acdf51d6ca210ce65035"}, +] +parso = [ + {file = "parso-0.7.1-py2.py3-none-any.whl", hash = "sha256:97218d9159b2520ff45eb78028ba8b50d2bc61dcc062a9682666f2dc4bd331ea"}, + {file = "parso-0.7.1.tar.gz", hash = "sha256:caba44724b994a8a5e086460bb212abc5a8bc46951bf4a9a1210745953622eb9"}, +] +pathspec = [ + {file = "pathspec-0.8.0-py2.py3-none-any.whl", hash = "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0"}, 
+ {file = "pathspec-0.8.0.tar.gz", hash = "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061"}, +] +pluggy = [ + {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"}, + {file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"}, +] +py = [ + {file = "py-1.9.0-py2.py3-none-any.whl", hash = "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2"}, + {file = "py-1.9.0.tar.gz", hash = "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342"}, +] +pycodestyle = [ + {file = "pycodestyle-2.6.0-py2.py3-none-any.whl", hash = "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367"}, + {file = "pycodestyle-2.6.0.tar.gz", hash = "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"}, +] +pycparser = [ + {file = "pycparser-2.20-py2.py3-none-any.whl", hash = "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"}, + {file = "pycparser-2.20.tar.gz", hash = "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0"}, +] +pyflakes = [ + {file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"}, + {file = "pyflakes-2.2.0.tar.gz", hash = "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"}, +] +pyls-black = [ + {file = "pyls-black-0.4.6.tar.gz", hash = "sha256:33700e5ed605636ea7ba39188a1362d2f8602f7301f8f2b8544773886f965663"}, + {file = "pyls_black-0.4.6-py3-none-any.whl", hash = "sha256:8f5fb8fed503588c10435d2d48e2c3751437f1bdb8116134b05a4591c4899940"}, +] +pyls-isort = [ + {file = "pyls-isort-0.2.0.tar.gz", hash = "sha256:a6c292332746d3dc690f2a3dcdb9a01d913b9ee8444defe3cbffcddb7e3874eb"}, +] +pyls-mypy = [ + {file = "pyls-mypy-0.1.8.tar.gz", hash = "sha256:3fd83028961f0ca9eb3048b7a01cf42a9e3d46d8ea4935c1424c33da22c3eb03"}, +] +pynacl = 
[ + {file = "PyNaCl-1.4.0-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff"}, + {file = "PyNaCl-1.4.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:d452a6746f0a7e11121e64625109bc4468fc3100452817001dbe018bb8b08514"}, + {file = "PyNaCl-1.4.0-cp27-cp27m-win32.whl", hash = "sha256:2fe0fc5a2480361dcaf4e6e7cea00e078fcda07ba45f811b167e3f99e8cff574"}, + {file = "PyNaCl-1.4.0-cp27-cp27m-win_amd64.whl", hash = "sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80"}, + {file = "PyNaCl-1.4.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:7757ae33dae81c300487591c68790dfb5145c7d03324000433d9a2c141f82af7"}, + {file = "PyNaCl-1.4.0-cp35-abi3-macosx_10_10_x86_64.whl", hash = "sha256:757250ddb3bff1eecd7e41e65f7f833a8405fede0194319f87899690624f2122"}, + {file = "PyNaCl-1.4.0-cp35-abi3-manylinux1_x86_64.whl", hash = "sha256:30f9b96db44e09b3304f9ea95079b1b7316b2b4f3744fe3aaecccd95d547063d"}, + {file = "PyNaCl-1.4.0-cp35-abi3-win32.whl", hash = "sha256:4e10569f8cbed81cb7526ae137049759d2a8d57726d52c1a000a3ce366779634"}, + {file = "PyNaCl-1.4.0-cp35-abi3-win_amd64.whl", hash = "sha256:c914f78da4953b33d4685e3cdc7ce63401247a21425c16a39760e282075ac4a6"}, + {file = "PyNaCl-1.4.0-cp35-cp35m-win32.whl", hash = "sha256:06cbb4d9b2c4bd3c8dc0d267416aaed79906e7b33f114ddbf0911969794b1cc4"}, + {file = "PyNaCl-1.4.0-cp35-cp35m-win_amd64.whl", hash = "sha256:511d269ee845037b95c9781aa702f90ccc36036f95d0f31373a6a79bd8242e25"}, + {file = "PyNaCl-1.4.0-cp36-cp36m-win32.whl", hash = "sha256:11335f09060af52c97137d4ac54285bcb7df0cef29014a1a4efe64ac065434c4"}, + {file = "PyNaCl-1.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:cd401ccbc2a249a47a3a1724c2918fcd04be1f7b54eb2a5a71ff915db0ac51c6"}, + {file = "PyNaCl-1.4.0-cp37-cp37m-win32.whl", hash = "sha256:8122ba5f2a2169ca5da936b2e5a511740ffb73979381b4229d9188f6dcb22f1f"}, + {file = "PyNaCl-1.4.0-cp37-cp37m-win_amd64.whl", hash = 
"sha256:537a7ccbea22905a0ab36ea58577b39d1fa9b1884869d173b5cf111f006f689f"}, + {file = "PyNaCl-1.4.0-cp38-cp38-win32.whl", hash = "sha256:9c4a7ea4fb81536c1b1f5cc44d54a296f96ae78c1ebd2311bd0b60be45a48d96"}, + {file = "PyNaCl-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7c6092102219f59ff29788860ccb021e80fffd953920c4a8653889c029b2d420"}, + {file = "PyNaCl-1.4.0.tar.gz", hash = "sha256:54e9a2c849c742006516ad56a88f5c74bf2ce92c9f67435187c3c5953b346505"}, +] +pyparsing = [ + {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, + {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, +] +pytest = [ + {file = "pytest-6.1.1-py3-none-any.whl", hash = "sha256:7a8190790c17d79a11f847fba0b004ee9a8122582ebff4729a082c109e81a4c9"}, + {file = "pytest-6.1.1.tar.gz", hash = "sha256:8f593023c1a0f916110285b6efd7f99db07d59546e3d8c36fc60e2ab05d3be92"}, +] +pytest-flake8 = [ + {file = "pytest-flake8-1.0.6.tar.gz", hash = "sha256:1b82bb58c88eb1db40524018d3fcfd0424575029703b4e2d8e3ee873f2b17027"}, + {file = "pytest_flake8-1.0.6-py2.py3-none-any.whl", hash = "sha256:2e91578ecd9b200066f99c1e1de0f510fbb85bcf43712d46ea29fe47607cc234"}, +] +pytest-mypy = [ + {file = "pytest-mypy-0.7.0.tar.gz", hash = "sha256:5a667d9a2b66bf98b3a494411f221923a6e2c3eafbe771104951aaec8985673d"}, + {file = "pytest_mypy-0.7.0-py3-none-any.whl", hash = "sha256:e0505ace48d2b19fe686366fce6b4a2ac0d090423736bb6aa2e39554d18974b7"}, +] +python-jsonrpc-server = [ + {file = "python-jsonrpc-server-0.4.0.tar.gz", hash = "sha256:62c543e541f101ec5b57dc654efc212d2c2e3ea47ff6f54b2e7dcb36ecf20595"}, + {file = "python_jsonrpc_server-0.4.0-py3-none-any.whl", hash = "sha256:e5a908ff182e620aac07db5f57887eeb0afe33993008f57dc1b85b594cea250c"}, +] +python-language-server = [ + {file = "python-language-server-0.35.1.tar.gz", hash = 
"sha256:6e0c9a3b2ae98e0eb22e98ed6b3c4e190a6bf9e27af53efd2396da60cd92b221"}, + {file = "python_language_server-0.35.1-py2.py3-none-any.whl", hash = "sha256:7051090259e3e81c0cdb140de8e32b8f11219808cda4427e6faf61f9ff9a3bf4"}, +] +regex = [ + {file = "regex-2020.9.27-cp27-cp27m-win32.whl", hash = "sha256:d23a18037313714fb3bb5a94434d3151ee4300bae631894b1ac08111abeaa4a3"}, + {file = "regex-2020.9.27-cp27-cp27m-win_amd64.whl", hash = "sha256:84e9407db1b2eb368b7ecc283121b5e592c9aaedbe8c78b1a2f1102eb2e21d19"}, + {file = "regex-2020.9.27-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:5f18875ac23d9aa2f060838e8b79093e8bb2313dbaaa9f54c6d8e52a5df097be"}, + {file = "regex-2020.9.27-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ae91972f8ac958039920ef6e8769277c084971a142ce2b660691793ae44aae6b"}, + {file = "regex-2020.9.27-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:9a02d0ae31d35e1ec12a4ea4d4cca990800f66a917d0fb997b20fbc13f5321fc"}, + {file = "regex-2020.9.27-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:ebbe29186a3d9b0c591e71b7393f1ae08c83cb2d8e517d2a822b8f7ec99dfd8b"}, + {file = "regex-2020.9.27-cp36-cp36m-win32.whl", hash = "sha256:4707f3695b34335afdfb09be3802c87fa0bc27030471dbc082f815f23688bc63"}, + {file = "regex-2020.9.27-cp36-cp36m-win_amd64.whl", hash = "sha256:9bc13e0d20b97ffb07821aa3e113f9998e84994fe4d159ffa3d3a9d1b805043b"}, + {file = "regex-2020.9.27-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:f1b3afc574a3db3b25c89161059d857bd4909a1269b0b3cb3c904677c8c4a3f7"}, + {file = "regex-2020.9.27-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5533a959a1748a5c042a6da71fe9267a908e21eded7a4f373efd23a2cbdb0ecc"}, + {file = "regex-2020.9.27-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:1fe0a41437bbd06063aa184c34804efa886bcc128222e9916310c92cd54c3b4c"}, + {file = "regex-2020.9.27-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:c570f6fa14b9c4c8a4924aaad354652366577b4f98213cf76305067144f7b100"}, + {file = "regex-2020.9.27-cp37-cp37m-win32.whl", hash = 
"sha256:eda4771e0ace7f67f58bc5b560e27fb20f32a148cbc993b0c3835970935c2707"}, + {file = "regex-2020.9.27-cp37-cp37m-win_amd64.whl", hash = "sha256:60b0e9e6dc45683e569ec37c55ac20c582973841927a85f2d8a7d20ee80216ab"}, + {file = "regex-2020.9.27-cp38-cp38-manylinux1_i686.whl", hash = "sha256:088afc8c63e7bd187a3c70a94b9e50ab3f17e1d3f52a32750b5b77dbe99ef5ef"}, + {file = "regex-2020.9.27-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:eaf548d117b6737df379fdd53bdde4f08870e66d7ea653e230477f071f861121"}, + {file = "regex-2020.9.27-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:41bb65f54bba392643557e617316d0d899ed5b4946dccee1cb6696152b29844b"}, + {file = "regex-2020.9.27-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:8d69cef61fa50c8133382e61fd97439de1ae623fe943578e477e76a9d9471637"}, + {file = "regex-2020.9.27-cp38-cp38-win32.whl", hash = "sha256:f2388013e68e750eaa16ccbea62d4130180c26abb1d8e5d584b9baf69672b30f"}, + {file = "regex-2020.9.27-cp38-cp38-win_amd64.whl", hash = "sha256:4318d56bccfe7d43e5addb272406ade7a2274da4b70eb15922a071c58ab0108c"}, + {file = "regex-2020.9.27-cp39-cp39-manylinux1_i686.whl", hash = "sha256:84cada8effefe9a9f53f9b0d2ba9b7b6f5edf8d2155f9fdbe34616e06ececf81"}, + {file = "regex-2020.9.27-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:816064fc915796ea1f26966163f6845de5af78923dfcecf6551e095f00983650"}, + {file = "regex-2020.9.27-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:5d892a4f1c999834eaa3c32bc9e8b976c5825116cde553928c4c8e7e48ebda67"}, + {file = "regex-2020.9.27-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:c9443124c67b1515e4fe0bb0aa18df640965e1030f468a2a5dc2589b26d130ad"}, + {file = "regex-2020.9.27-cp39-cp39-win32.whl", hash = "sha256:49f23ebd5ac073765ecbcf046edc10d63dcab2f4ae2bce160982cb30df0c0302"}, + {file = "regex-2020.9.27-cp39-cp39-win_amd64.whl", hash = "sha256:3d20024a70b97b4f9546696cbf2fd30bae5f42229fbddf8661261b1eaff0deb7"}, + {file = "regex-2020.9.27.tar.gz", hash = 
"sha256:a6f32aea4260dfe0e55dc9733ea162ea38f0ea86aa7d0f77b15beac5bf7b369d"}, +] +rope = [ + {file = "rope-0.18.0.tar.gz", hash = "sha256:786b5c38c530d4846aa68a42604f61b4e69a493390e3ca11b88df0fbfdc3ed04"}, +] +six = [ + {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, + {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, +] +testfixtures = [ + {file = "testfixtures-6.15.0-py2.py3-none-any.whl", hash = "sha256:e17f4f526fc90b0ac9bc7f8ca62b7dec17d9faf3d721f56bda4f0fd94d02f85a"}, + {file = "testfixtures-6.15.0.tar.gz", hash = "sha256:409f77cfbdad822d12a8ce5c4aa8fb4d0bb38073f4a5444fede3702716a2cec2"}, +] +toml = [ + {file = "toml-0.10.1-py2.py3-none-any.whl", hash = "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88"}, + {file = "toml-0.10.1.tar.gz", hash = "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f"}, +] +typed-ast = [ + {file = "typed_ast-1.4.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3"}, + {file = "typed_ast-1.4.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb"}, + {file = "typed_ast-1.4.1-cp35-cp35m-win32.whl", hash = "sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919"}, + {file = "typed_ast-1.4.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01"}, + {file = "typed_ast-1.4.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75"}, + {file = "typed_ast-1.4.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652"}, + {file = "typed_ast-1.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = 
"sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7"}, + {file = "typed_ast-1.4.1-cp36-cp36m-win32.whl", hash = "sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1"}, + {file = "typed_ast-1.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa"}, + {file = "typed_ast-1.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614"}, + {file = "typed_ast-1.4.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41"}, + {file = "typed_ast-1.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b"}, + {file = "typed_ast-1.4.1-cp37-cp37m-win32.whl", hash = "sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe"}, + {file = "typed_ast-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355"}, + {file = "typed_ast-1.4.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6"}, + {file = "typed_ast-1.4.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907"}, + {file = "typed_ast-1.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d"}, + {file = "typed_ast-1.4.1-cp38-cp38-win32.whl", hash = "sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c"}, + {file = "typed_ast-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4"}, + {file = "typed_ast-1.4.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34"}, + {file = "typed_ast-1.4.1.tar.gz", hash = 
"sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b"}, +] +typing-extensions = [ + {file = "typing_extensions-3.7.4.3-py2-none-any.whl", hash = "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"}, + {file = "typing_extensions-3.7.4.3-py3-none-any.whl", hash = "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918"}, + {file = "typing_extensions-3.7.4.3.tar.gz", hash = "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c"}, +] +ujson = [ + {file = "ujson-4.0.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:5fe1536465b1c86e32a47113abd3178001b7c2dcd61f95f336fe2febf4661e74"}, + {file = "ujson-4.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0f412c3f59b1ab0f40018235224ca0cf29232d0201ff5085618565a8a9c810ed"}, + {file = "ujson-4.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:4f12b0b4e235b35d49f15227b0a827e614c52dda903c58a8f5523936c233dfc7"}, + {file = "ujson-4.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:7a1545ac2476db4cc1f0f236603ccbb50991fc1bba480cda1bc06348cc2a2bf0"}, + {file = "ujson-4.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:078808c385036cba73cad96f498310c61e9b5ae5ac9ea01e7c3996ece544b556"}, + {file = "ujson-4.0.1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:4fe8c6112b732cba5a722f7cbe22f18d405f6f44415794a5b46473a477635233"}, + {file = "ujson-4.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:71703a269f074ff65b9d7746662e4b3e76a4af443e532218af1e8ce15d9b1e7b"}, + {file = "ujson-4.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:b87379a3f8046d6d111762d81f3384bf38ab24b1535c841fe867a4a097d84523"}, + {file = "ujson-4.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:a79bca47eafb31c74b38e68623bc9b2bb930cb48fab1af31c8f2cb68cf473421"}, + {file = "ujson-4.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:e7ab24942b2d57920d75b817b8eead293026db003247e26f99506bdad86c61b4"}, + {file = "ujson-4.0.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = 
"sha256:51480048373cf97a6b97fcd70c3586ca0a31f27e22ab680fb14c1f22bedbf743"}, + {file = "ujson-4.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c604024bd853b5df6be7d933e934da8dd139e6159564db7c55b92a9937678093"}, + {file = "ujson-4.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:568bb3e7f035006147af4ce3a9ced7d126c92e1a8607c7b2266007b1c1162c53"}, + {file = "ujson-4.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:bd4c77aee3ffb920e2dbc21a9e0c7945a400557ce671cfd57dbd569f5ebc619d"}, + {file = "ujson-4.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:c354c1617b0a4378b6279d0cd511b769500cf3fa7c42e8e004cbbbb6b4c2a875"}, + {file = "ujson-4.0.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:a5200a68f1dcf3ce275e1cefbcfa3914b70c2b5e2f71c2e31556aa1f7244c845"}, + {file = "ujson-4.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:a618af22407baeadb3f046f81e7a5ee5e9f8b0b716d2b564f92276a54d26a823"}, + {file = "ujson-4.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:0a2e1b211714eb1ec0772a013ec9967f8f95f21c84e8f46382e9f8a32ae781fe"}, + {file = "ujson-4.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:2b2d9264ac76aeb11f590f7a1ccff0689ba1313adacbb6d38d3b15f21a392897"}, + {file = "ujson-4.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f8a60928737a9a47e692fcd661ef2b5d75ba22c7c930025bd95e338f2a6e15bc"}, + {file = "ujson-4.0.1.tar.gz", hash = "sha256:26cf6241b36ff5ce4539ae687b6b02673109c5e3efc96148806a7873eaa229d3"}, +] diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml new file mode 100644 index 0000000000..10f302440b --- /dev/null +++ b/pytest/pyproject.toml @@ -0,0 +1,43 @@ +[tool.poetry] +name = "pytest-lisa" +version = "0.1.0" +description = "LISA plugin for pytest" +authors = ["Andrew Schwartzmeyer "] +license = "MIT License" + +[tool.poetry.dependencies] +python = "^3.8" +pytest = "^6.1.1" +fabric = "^2.5.0" + +[tool.poetry.dev-dependencies] +black = "^20.8b1" +flake8 = "^3.8.4" +flake8-black = "^0.2.1" +flake8-bugbear = "^20.1.4" +flake8-isort = 
"^4.0.0" +isort = "^5.6.1" +mypy = "^0.782" +python-language-server = "^0.35.1" +pyls-black = "^0.4.6" +pyls-isort = "^0.2.0" +pyls-mypy = "^0.1.8" +rope = "^0.18.0" +pytest-flake8 = "^1.0.6" +pytest-mypy = "^0.7.0" + +[tool.black] +line-length = 88 +target-version = ['py38'] + +[tool.isort] +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true +line_length = 88 + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/pytest/pytest.ini b/pytest/pytest.ini new file mode 100644 index 0000000000..91a21cac60 --- /dev/null +++ b/pytest/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +filterwarnings = + error + ignore:the imp module is deprecated in favour of importlib:DeprecationWarning diff --git a/pytest/testsuites/test_lis.py b/pytest/testsuites/test_lis.py new file mode 100644 index 0000000000..7fe5c022d0 --- /dev/null +++ b/pytest/testsuites/test_lis.py @@ -0,0 +1,30 @@ +from pathlib import Path + +from fabric import Config, Connection # type: ignore + +import pytest + +LINUX_SCRIPTS = Path("../Testscripts/Linux") + + +# TODO: Make the hostname a parameter. +@pytest.fixture +def node() -> Connection: + config = Config(overrides={"run": {"in_stream": False}}) + with Connection("centos", config=config) as connection: + yield connection + + +def test_lis_version(node: Connection) -> None: + # TODO: Include “utils.sh” automatically? Or something... + for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: + node.put(LINUX_SCRIPTS / f) + node.run(f"chmod +x {f}") + node.sudo("yum install -y bc") + # TODO: Fix this PATH issue. 
+ node.run( + "PATH=$PATH:/usr/local/sbin:/usr/sbin ./LIS-VERSION-CHECK.sh", + ) + node.get("state.txt") + with open("state.txt") as f: + assert f.readline().strip() == "TestCompleted" From a5fd9a836aae445baa8ea4b0b588aa56d410b250 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 9 Oct 2020 23:28:13 -0700 Subject: [PATCH 02/84] Make Node fixture more reusable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extend Fabric’s Connection class with a new (and simple) command “cat” which return the value of a remote file as a string. Setup a Config for the Connection when creating it that echoes every command, disables the stdin forwarding (since we’re running under Pytest), and fixes the PATH since the remote commands don’t run under a login shell. --- pytest/mypy.ini | 2 +- pytest/testsuites/test_lis.py | 43 +++++++++++++++++++++++++---------- 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/pytest/mypy.ini b/pytest/mypy.ini index b5a41dd5b5..85e42268e0 100644 --- a/pytest/mypy.ini +++ b/pytest/mypy.ini @@ -4,7 +4,7 @@ pretty = True warn_unused_configs = True disallow_any_generics = True -disallow_subclassing_any = True +disallow_subclassing_any = False disallow_untyped_calls = True disallow_untyped_defs = True disallow_incomplete_defs = True diff --git a/pytest/testsuites/test_lis.py b/pytest/testsuites/test_lis.py index 7fe5c022d0..f36423caa1 100644 --- a/pytest/testsuites/test_lis.py +++ b/pytest/testsuites/test_lis.py @@ -1,4 +1,6 @@ +from io import BytesIO from pathlib import Path +from typing import Iterator from fabric import Config, Connection # type: ignore @@ -7,24 +9,41 @@ LINUX_SCRIPTS = Path("../Testscripts/Linux") +class Node(Connection): + """Extends 'fabric.Connection' with our own utilities.""" + + def cat(self, path: str) -> str: + """Gets the value of a remote file without a temporary file.""" + with BytesIO() as buf: + self.get(path, buf) + return 
buf.getvalue().decode("utf-8").strip() + + # TODO: Make the hostname a parameter. @pytest.fixture -def node() -> Connection: - config = Config(overrides={"run": {"in_stream": False}}) - with Connection("centos", config=config) as connection: - yield connection +def node() -> Iterator[Node]: + """Yields a safe remote Node on which to run commands.""" + config = Config( + overrides={ + "run": { + # Show each command as its run. + "echo": True, + # Disable stdin forwarding. + "in_stream": False, + # Set PATH since it’s not a login shell. + "env": {"PATH": "$PATH:/usr/local/sbin:/usr/sbin"}, + } + } + ) + with Node("centos", config=config, inline_ssh_env=True) as n: + yield n -def test_lis_version(node: Connection) -> None: +def test_lis_driver_version(node: Node) -> None: # TODO: Include “utils.sh” automatically? Or something... for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: node.put(LINUX_SCRIPTS / f) node.run(f"chmod +x {f}") node.sudo("yum install -y bc") - # TODO: Fix this PATH issue. - node.run( - "PATH=$PATH:/usr/local/sbin:/usr/sbin ./LIS-VERSION-CHECK.sh", - ) - node.get("state.txt") - with open("state.txt") as f: - assert f.readline().strip() == "TestCompleted" + node.run("./LIS-VERSION-CHECK.sh") + assert node.cat("state.txt") == "TestCompleted" From 1a0463d56d98a666e0ca11514807b311c7518e1d Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 9 Oct 2020 23:48:37 -0700 Subject: [PATCH 03/84] Add 300 second timeout to all tests --- pytest/poetry.lock | 17 ++++++++++++++++- pytest/pyproject.toml | 1 + pytest/pytest.ini | 1 + 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/pytest/poetry.lock b/pytest/poetry.lock index a0a87e8d83..82eabae938 100644 --- a/pytest/poetry.lock +++ b/pytest/poetry.lock @@ -472,6 +472,17 @@ filelock = ">=3.0" mypy = {version = ">=0.700", markers = "python_version >= \"3.8\""} pytest = ">=3.5" +[[package]] +name = "pytest-timeout" +version = "1.4.2" +description = "py.test plugin to abort hanging tests" +category 
= "main" +optional = false +python-versions = "*" + +[package.dependencies] +pytest = ">=3.6.0" + [[package]] name = "python-jsonrpc-server" version = "0.4.0" @@ -587,7 +598,7 @@ python-versions = ">=3.6" [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "40a1da4f76e519932e1c86deaa81cb3331cc282c37ff78f39607936730cbb322" +content-hash = "307896057c574edcbf704e9060caabb3eb14b01c06ba841b8de5c7715ce86ecb" [metadata.files] appdirs = [ @@ -830,6 +841,10 @@ pytest-mypy = [ {file = "pytest-mypy-0.7.0.tar.gz", hash = "sha256:5a667d9a2b66bf98b3a494411f221923a6e2c3eafbe771104951aaec8985673d"}, {file = "pytest_mypy-0.7.0-py3-none-any.whl", hash = "sha256:e0505ace48d2b19fe686366fce6b4a2ac0d090423736bb6aa2e39554d18974b7"}, ] +pytest-timeout = [ + {file = "pytest-timeout-1.4.2.tar.gz", hash = "sha256:20b3113cf6e4e80ce2d403b6fb56e9e1b871b510259206d40ff8d609f48bda76"}, + {file = "pytest_timeout-1.4.2-py2.py3-none-any.whl", hash = "sha256:541d7aa19b9a6b4e475c759fd6073ef43d7cdc9a92d95644c260076eb257a063"}, +] python-jsonrpc-server = [ {file = "python-jsonrpc-server-0.4.0.tar.gz", hash = "sha256:62c543e541f101ec5b57dc654efc212d2c2e3ea47ff6f54b2e7dcb36ecf20595"}, {file = "python_jsonrpc_server-0.4.0-py3-none-any.whl", hash = "sha256:e5a908ff182e620aac07db5f57887eeb0afe33993008f57dc1b85b594cea250c"}, diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml index 10f302440b..c083e43bef 100644 --- a/pytest/pyproject.toml +++ b/pytest/pyproject.toml @@ -9,6 +9,7 @@ license = "MIT License" python = "^3.8" pytest = "^6.1.1" fabric = "^2.5.0" +pytest-timeout = "^1.4.2" [tool.poetry.dev-dependencies] black = "^20.8b1" diff --git a/pytest/pytest.ini b/pytest/pytest.ini index 91a21cac60..f3efd3b4f3 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -1,4 +1,5 @@ [pytest] +timeout = 300 filterwarnings = error ignore:the imp module is deprecated in favour of importlib:DeprecationWarning From f3a26e69670224f44ad540ceb0d4b384ac3e9a69 Mon Sep 17 00:00:00 2001 From: 
Andrew Schwartzmeyer Date: Sun, 11 Oct 2020 20:21:10 -0700 Subject: [PATCH 04/84] Move node implementation to custom plugin --- pytest/conftest.py | 6 ++++++ pytest/node_plugin.py | 37 +++++++++++++++++++++++++++++++++++ pytest/testsuites/test_lis.py | 37 ++--------------------------------- 3 files changed, 45 insertions(+), 35 deletions(-) create mode 100644 pytest/conftest.py create mode 100644 pytest/node_plugin.py diff --git a/pytest/conftest.py b/pytest/conftest.py new file mode 100644 index 0000000000..5f84e8864c --- /dev/null +++ b/pytest/conftest.py @@ -0,0 +1,6 @@ +"""This file sets up custom plugins. + +https://docs.pytest.org/en/stable/writing_plugins.html + +""" +pytest_plugins = "node_plugin" diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py new file mode 100644 index 0000000000..509b7450d4 --- /dev/null +++ b/pytest/node_plugin.py @@ -0,0 +1,37 @@ +"""Pytest plugin implementing a Node fixture for running remote commands.""" +from io import BytesIO +from typing import Iterator + +from fabric import Config, Connection # type: ignore + +import pytest + + +class Node(Connection): + """Extends 'fabric.Connection' with our own utilities.""" + + def cat(self, path: str) -> str: + """Gets the value of a remote file without a temporary file.""" + with BytesIO() as buf: + self.get(path, buf) + return buf.getvalue().decode("utf-8").strip() + + +# TODO: Make the hostname a parameter. +@pytest.fixture +def node() -> Iterator[Node]: + """Yields a safe remote Node on which to run commands.""" + config = Config( + overrides={ + "run": { + # Show each command as its run. + "echo": True, + # Disable stdin forwarding. + "in_stream": False, + # Set PATH since it’s not a login shell. 
+ "env": {"PATH": "$PATH:/usr/local/sbin:/usr/sbin"}, + } + } + ) + with Node("centos", config=config, inline_ssh_env=True) as n: + yield n diff --git a/pytest/testsuites/test_lis.py b/pytest/testsuites/test_lis.py index f36423caa1..a2eeba74a2 100644 --- a/pytest/testsuites/test_lis.py +++ b/pytest/testsuites/test_lis.py @@ -1,44 +1,11 @@ -from io import BytesIO +"""Runs 'LIS-Tests.xml' using Pytest.""" from pathlib import Path -from typing import Iterator -from fabric import Config, Connection # type: ignore - -import pytest +from node_plugin import Node LINUX_SCRIPTS = Path("../Testscripts/Linux") -class Node(Connection): - """Extends 'fabric.Connection' with our own utilities.""" - - def cat(self, path: str) -> str: - """Gets the value of a remote file without a temporary file.""" - with BytesIO() as buf: - self.get(path, buf) - return buf.getvalue().decode("utf-8").strip() - - -# TODO: Make the hostname a parameter. -@pytest.fixture -def node() -> Iterator[Node]: - """Yields a safe remote Node on which to run commands.""" - config = Config( - overrides={ - "run": { - # Show each command as its run. - "echo": True, - # Disable stdin forwarding. - "in_stream": False, - # Set PATH since it’s not a login shell. - "env": {"PATH": "$PATH:/usr/local/sbin:/usr/sbin"}, - } - } - ) - with Node("centos", config=config, inline_ssh_env=True) as n: - yield n - - def test_lis_driver_version(node: Node) -> None: # TODO: Include “utils.sh” automatically? Or something... 
for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: From 3f8d15e0349db06a323322e61382bf1f5968efbd Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Sun, 11 Oct 2020 20:21:20 -0700 Subject: [PATCH 05/84] Shorten tracebacks --- pytest/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest/Makefile b/pytest/Makefile index 5c052c4707..a30b69a904 100644 --- a/pytest/Makefile +++ b/pytest/Makefile @@ -6,7 +6,7 @@ setup: # Run Pytest run: - @poetry run python -X dev -m pytest --flake8 --mypy -rA + @poetry run python -X dev -X tracemalloc -m pytest --flake8 --mypy -rA --tb=short # Print current Python virtualenv venv: From e1a1333b1d6f1f622b79b1613214743cfabf2a1a Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Sun, 11 Oct 2020 21:37:33 -0700 Subject: [PATCH 06/84] Demo connecting to host based on mark MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This could be extended to instead deploy a host of the specified distro. Most likely we’ll want a command-line parameter that the fixture uses to create a Node with the given requirements, and then tests will be skipped if their requirements aren’t met. Further more, the mark here is very simple. It can instead take keyword arguments, which would map to our metadata. --- pytest/node_plugin.py | 11 ++++++++--- pytest/pytest.ini | 2 ++ pytest/testsuites/test_lis.py | 2 ++ 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 509b7450d4..4fb2d17eca 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -2,6 +2,7 @@ from io import BytesIO from typing import Iterator +import _pytest from fabric import Config, Connection # type: ignore import pytest @@ -17,9 +18,8 @@ def cat(self, path: str) -> str: return buf.getvalue().decode("utf-8").strip() -# TODO: Make the hostname a parameter. 
@pytest.fixture -def node() -> Iterator[Node]: +def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: """Yields a safe remote Node on which to run commands.""" config = Config( overrides={ @@ -33,5 +33,10 @@ def node() -> Iterator[Node]: } } ) - with Node("centos", config=config, inline_ssh_env=True) as n: + # Get the host from the test’s marker. + host = "localhost" + marker = request.node.get_closest_marker("host") + if marker is not None: + host = marker.args[0] + with Node(host, config=config, inline_ssh_env=True) as n: yield n diff --git a/pytest/pytest.ini b/pytest/pytest.ini index f3efd3b4f3..68d0e9d973 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -1,4 +1,6 @@ [pytest] +markers = + host timeout = 300 filterwarnings = error diff --git a/pytest/testsuites/test_lis.py b/pytest/testsuites/test_lis.py index a2eeba74a2..b5c6f29204 100644 --- a/pytest/testsuites/test_lis.py +++ b/pytest/testsuites/test_lis.py @@ -1,11 +1,13 @@ """Runs 'LIS-Tests.xml' using Pytest.""" from pathlib import Path +import pytest from node_plugin import Node LINUX_SCRIPTS = Path("../Testscripts/Linux") +@pytest.mark.host("centos") # type: ignore def test_lis_driver_version(node: Node) -> None: # TODO: Include “utils.sh” automatically? Or something... for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: From bc7c4999c270a33c4261c62a4eb016cde471a414 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Sun, 11 Oct 2020 21:46:00 -0700 Subject: [PATCH 07/84] Add considered early alternatives --- pytest/README.md | 55 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 3 deletions(-) diff --git a/pytest/README.md b/pytest/README.md index 38c1ae798b..c538711d39 100644 --- a/pytest/README.md +++ b/pytest/README.md @@ -79,9 +79,7 @@ Other test specific requirements, such as installing software and daemons or downloading files from remote storage, would similarly be implemented via fixtures and shared among tests. 
-Note that Paramiko is less complex (smaller library footprint) than Fabric, but -is a bit more difficult to use, and doesn’t support reading existing SSH config -files, nor does it support “ProxyJump” which we use heavily. +### Alternatives considered ## pytest-xdist @@ -103,3 +101,54 @@ create those nodes. However, this is only one approach, and we may prefer to run the Python code on the user’s machine, with pytest-lisa instead providing the previously mentioned node fixtures, default marks, and requirements logic. + +## Paramiko instead of Fabric + +The Paramiko library is less complex (smaller library footprint) than Fabric, as +the latter wraps the former, but it is a bit more difficult to use, and doesn’t +support reading existing SSH config files, nor does it support “ProxyJump” which +we use heavily. Fabric instead provides a clean high-level interface for +existing shell commands, handling all the connection abstractions for us. + +It looked a like this: + +```python +from pathlib import Path +from typing import List + +from paramiko import SSHClient + +import pytest + +@pytest.fixture +def node() -> SSHClient: + with SSHClient() as client: + client.load_system_host_keys() + client.connect(hostname="...") + yield client + + +def test_lis_version(node: SSHClient) -> None: + with node.open_sftp() as sftp: + for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: + sftp.put(LINUX_SCRIPTS / f, f) + _, stdout, stderr = node.exec_command("./LIS-VERSION-CHECK.sh") + sftp.get("state.txt", "state.txt") + with Path("state.txt").open as f: + assert f.readline() == "TestCompleted" +``` +## StringIO + +For `Node.cat()` it would seem we could use `StringIO` like so: + +```python +from io import StringIO + +with StringIO() as result: + node.get("state.txt", result) + assert result.getvalue().strip() == "TestCompleted" +``` + +However, the data returned by Paramiko is in bytes, which in Python 3 are not +equivalent to strings, hence the existing implementation which uses 
`BytesIO` +and decodes the bytes to a string. From cc62e1ac0518749bfb3a55830114a4672d5dc3da Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 12 Oct 2020 11:56:42 -0700 Subject: [PATCH 08/84] Allow untyped decorators As supplying types for these would be supremely annoying. --- pytest/mypy.ini | 2 +- pytest/testsuites/test_lis.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pytest/mypy.ini b/pytest/mypy.ini index 85e42268e0..6b513808fb 100644 --- a/pytest/mypy.ini +++ b/pytest/mypy.ini @@ -9,7 +9,7 @@ disallow_untyped_calls = True disallow_untyped_defs = True disallow_incomplete_defs = True check_untyped_defs = True -disallow_untyped_decorators = True +disallow_untyped_decorators = False no_implicit_optional = True warn_redundant_casts = True warn_unused_ignores = True diff --git a/pytest/testsuites/test_lis.py b/pytest/testsuites/test_lis.py index b5c6f29204..bd1bf0d598 100644 --- a/pytest/testsuites/test_lis.py +++ b/pytest/testsuites/test_lis.py @@ -7,7 +7,7 @@ LINUX_SCRIPTS = Path("../Testscripts/Linux") -@pytest.mark.host("centos") # type: ignore +@pytest.mark.host("centos") def test_lis_driver_version(node: Node) -> None: # TODO: Include “utils.sh” automatically? Or something... 
for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: From b88ee580e314bdfd6a08105135a77f4de909cb86 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 12 Oct 2020 13:22:45 -0700 Subject: [PATCH 09/84] Setup markers for generation from XML --- pytest/conftest.py | 4 ++++ pytest/node_plugin.py | 3 ++- pytest/pytest.ini | 4 +++- pytest/testsuites/test_lis.py | 13 +++++++------ pytest/testsuites/test_xdp.py | 27 +++++++++++++++++++++++++++ 5 files changed, 43 insertions(+), 8 deletions(-) create mode 100644 pytest/testsuites/test_xdp.py diff --git a/pytest/conftest.py b/pytest/conftest.py index 5f84e8864c..fb5ebaeec6 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -3,4 +3,8 @@ https://docs.pytest.org/en/stable/writing_plugins.html """ +from pathlib import Path + pytest_plugins = "node_plugin" + +LINUX_SCRIPTS = Path("../Testscripts/Linux") diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 4fb2d17eca..888af4358c 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -21,6 +21,7 @@ def cat(self, path: str) -> str: @pytest.fixture def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: """Yields a safe remote Node on which to run commands.""" + # TODO: If test has ‘deploy’ marker, do so. config = Config( overrides={ "run": { @@ -35,7 +36,7 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: ) # Get the host from the test’s marker. 
host = "localhost" - marker = request.node.get_closest_marker("host") + marker = request.node.get_closest_marker("connect") if marker is not None: host = marker.args[0] with Node(host, config=config, inline_ssh_env=True) as n: diff --git a/pytest/pytest.ini b/pytest/pytest.ini index 68d0e9d973..ca01a9b06f 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -1,6 +1,8 @@ [pytest] markers = - host + lisa + deploy + connect timeout = 300 filterwarnings = error diff --git a/pytest/testsuites/test_lis.py b/pytest/testsuites/test_lis.py index bd1bf0d598..bb050d7d0f 100644 --- a/pytest/testsuites/test_lis.py +++ b/pytest/testsuites/test_lis.py @@ -1,17 +1,18 @@ """Runs 'LIS-Tests.xml' using Pytest.""" -from pathlib import Path - +import conftest import pytest from node_plugin import Node -LINUX_SCRIPTS = Path("../Testscripts/Linux") - -@pytest.mark.host("centos") +@pytest.mark.lisa( + platform="Azure", category="Functional", area="LIS_DEPLOY", tags=["lis"], priority=0 +) +# @pytest.mark.deploy(setup="OneVM") +@pytest.mark.connect("centos") def test_lis_driver_version(node: Node) -> None: # TODO: Include “utils.sh” automatically? Or something... 
for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: - node.put(LINUX_SCRIPTS / f) + node.put(conftest.LINUX_SCRIPTS / f) node.run(f"chmod +x {f}") node.sudo("yum install -y bc") node.run("./LIS-VERSION-CHECK.sh") diff --git a/pytest/testsuites/test_xdp.py b/pytest/testsuites/test_xdp.py new file mode 100644 index 0000000000..cad6d97b76 --- /dev/null +++ b/pytest/testsuites/test_xdp.py @@ -0,0 +1,27 @@ +"""Runs 'FunctionalTests-XDP.xml' using Pytest.""" + + +import conftest +import pytest +from node_plugin import Node + + +@pytest.mark.lisa( + platform="Azure", + category="Functional", + area="XDP", + tags=["xdp", "network", "hv_netvsc", "sriov"], + priority=0, +) +@pytest.mark.deploy(setup="OneVM2NIC", networking="SRIOV", vm_size="Standard_DS4_v2") +@pytest.mark.skip(reason="Not Implemented") +def test_verify_xdp_compliance(node: Node) -> None: + for f in [ + "xdpdumpsetup.sh", + "xdputils.sh", + "utils.sh", + "enable_passwordless_root.sh", + "enable_root.sh", + ]: + node.put(conftest.LINUX_SCRIPTS / f) + node.run(f"chmod +x {f}") From 38626e72c22b1210e9a7100915a22e4a27b1fbf2 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 12 Oct 2020 13:26:35 -0700 Subject: [PATCH 10/84] Add libvirt note --- pytest/README.md | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/pytest/README.md b/pytest/README.md index c538711d39..90b7052530 100644 --- a/pytest/README.md +++ b/pytest/README.md @@ -69,15 +69,14 @@ implement (with more as found to be required): Our abstraction would leverage [Fabric](https://docs.fabfile.org/en/stable/index.html), which uses [paramiko](https://docs.paramiko.org/en/stable/) underneath, directly to -implement the SSH commands, and it would use existing modules to deploy -[Azure](https://aka.ms/azsdk/python/all) and AWS nodes. 
We would need implement -specific logic for Hyper-V and similar platforms where APIs do not currently -exist, and this would be the bulk of our work instead of rewriting a unit test -framework. - -Other test specific requirements, such as installing software and daemons or -downloading files from remote storage, would similarly be implemented via -fixtures and shared among tests. +implement the SSH commands. For deployment logic, it would use existing Python APIs to deploy +[Azure](https://aka.ms/azsdk/python/all) nodes, and for Hyper-V (and other +virtualization platforms), it would use +[libvirt](https://libvirt.org/python.html). + +Other test specific requirements, such as installing software and daemons, +downloading files from remote storage, or checking the state of our Bash test +scripts, would similarly be implemented via fixtures and shared among tests. ### Alternatives considered From b7d0f73f5f9270605e05d5f00d7c2ad099e77629 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 12 Oct 2020 13:36:18 -0700 Subject: [PATCH 11/84] Add pytest-azurepipelines note --- pytest/README.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/pytest/README.md b/pytest/README.md index 90b7052530..5dc023742b 100644 --- a/pytest/README.md +++ b/pytest/README.md @@ -78,9 +78,17 @@ Other test specific requirements, such as installing software and daemons, downloading files from remote storage, or checking the state of our Bash test scripts, would similarly be implemented via fixtures and shared among tests. -### Alternatives considered +### Test result output -## pytest-xdist +Instead of writing our own test result output, we can leverage existing plugins. +For instance, there already exists +[pytest-azurepipelines](https://pypi.org/project/pytest-azurepipelines/) which +transforms results into the format consumed by ADO. It has over 90,000 downloads +a month. We don’t need to rewrite this. 
+ +## Alternatives considered + +### pytest-xdist With the [pytest-xdist plugin](https://github.com/pytest-dev/pytest-xdist) there already exists support for running a folder of tests on an arbitrary remote host From 5285e1c35dbac6a9386a0aaf0461a9f9eba96aca Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 12 Oct 2020 13:36:46 -0700 Subject: [PATCH 12/84] Add skeleton to parse deploy marker --- pytest/node_plugin.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 888af4358c..e493ff793e 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -21,7 +21,21 @@ def cat(self, path: str) -> str: @pytest.fixture def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: """Yields a safe remote Node on which to run commands.""" - # TODO: If test has ‘deploy’ marker, do so. + # TODO: The deploy and connect markers should be mutually + # exclusive. + host = "localhost" + + # Deploy a node. + deploy_marker = request.node.get_closest_marker("deploy") + if deploy_marker: + pass + + # Get the host from the test’s marker. + connect_marker = request.node.get_closest_marker("connect") + if connect_marker: + host = connect_marker.args[0] + + # Yield the configured Node connection. config = Config( overrides={ "run": { @@ -34,10 +48,5 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: } } ) - # Get the host from the test’s marker. 
- host = "localhost" - marker = request.node.get_closest_marker("connect") - if marker is not None: - host = marker.args[0] with Node(host, config=config, inline_ssh_env=True) as n: yield n From 91a54d8dfd6ed746e4dc71438478786f38108aae Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Mon, 12 Oct 2020 16:10:02 -0700 Subject: [PATCH 13/84] Create and delete VM resource --- pytest/node_plugin.py | 61 ++++++++++++++++++++++++++++++++++- pytest/testsuites/test_xdp.py | 18 ++++++++--- 2 files changed, 74 insertions(+), 5 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index e493ff793e..7e3ae9499a 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -1,13 +1,66 @@ """Pytest plugin implementing a Node fixture for running remote commands.""" +import json from io import BytesIO from typing import Iterator +from uuid import uuid4 import _pytest +import invoke # type: ignore from fabric import Config, Connection # type: ignore +from invoke.runners import Result # type: ignore import pytest +def install_az_cli() -> None: + if not invoke.run("which az", warn=True, echo=False, in_stream=False): + # TODO: Use Invoke for pipes. + invoke.run( + "curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash", + echo=True, + in_stream=False, + ) + # TODO: Login with service principal (az login) and set + # default subscription (az account set -s) using secrets. 
+ + +def deploy_vm( + name: str, + location="westus2", + vm_image="UbuntuLTS", + vm_size="Standard_DS1_v2", + setup="", + networking="", +) -> str: + install_az_cli() + invoke.run( + f"az group create --name {name}-rg --location {location}", + echo=True, + in_stream=False, + ) + vm_command = [ + "az vm create", + f"--resource-group {name}-rg", + f"--name {name}", + f"--image {vm_image}", + f"--size {vm_size}", + "--generate-ssh-keys", + ] + if networking == "SRIOV": + vm_command.append("--accelerated-networking true") + vm_result: Result = invoke.run( + " ".join(vm_command), + echo=True, + in_stream=False, + ) + vm_data = json.loads(vm_result.stdout) + return vm_data["publicIpAddress"] + + +def delete_vm(name: str) -> None: + invoke.run(f"az group delete --name {name}-rg --yes", echo=True) + + class Node(Connection): """Extends 'fabric.Connection' with our own utilities.""" @@ -18,6 +71,7 @@ def cat(self, path: str) -> str: return buf.getvalue().decode("utf-8").strip() +# TODO: Scope this to a module. @pytest.fixture def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: """Yields a safe remote Node on which to run commands.""" @@ -26,9 +80,10 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: host = "localhost" # Deploy a node. + name = f"pytest-{uuid4()}" deploy_marker = request.node.get_closest_marker("deploy") if deploy_marker: - pass + host = deploy_vm(name, **deploy_marker.kwargs) # Get the host from the test’s marker. connect_marker = request.node.get_closest_marker("connect") @@ -48,5 +103,9 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: } } ) + print(f"Host is {host}") with Node(host, config=config, inline_ssh_env=True) as n: yield n + # Clean up! 
+ if deploy_marker: + delete_vm(name) diff --git a/pytest/testsuites/test_xdp.py b/pytest/testsuites/test_xdp.py index cad6d97b76..d386ff2bfc 100644 --- a/pytest/testsuites/test_xdp.py +++ b/pytest/testsuites/test_xdp.py @@ -13,15 +13,25 @@ tags=["xdp", "network", "hv_netvsc", "sriov"], priority=0, ) -@pytest.mark.deploy(setup="OneVM2NIC", networking="SRIOV", vm_size="Standard_DS4_v2") -@pytest.mark.skip(reason="Not Implemented") +@pytest.mark.deploy( + setup="OneVM2NIC", + networking="SRIOV", + vm_image="Canonical:0001-com-ubuntu-server-focal:20_04-lts:latest", + vm_size="Standard_DS4_v2", +) +@pytest.mark.skip(reason="Not Finished") def test_verify_xdp_compliance(node: Node) -> None: for f in [ - "xdpdumpsetup.sh", - "xdputils.sh", "utils.sh", + "XDPDumpSetup.sh", + "XDPUtils.sh", "enable_passwordless_root.sh", "enable_root.sh", ]: node.put(conftest.LINUX_SCRIPTS / f) node.run(f"chmod +x {f}") + node.run("./enable_root.sh") + node.run("./enable_passwordless_root.sh") + synth_interface = node.run("source XDPUtils.sh ; get_extra_synth_nic").stdout + node.run(f"./XDPDumpSetup.sh {node.internal_address} {synth_interface}") + assert node.cat("state.txt") == "TestCompleted" From 1bfad2c54b4f641c3c3974cadae9e0125bc712af Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 12:54:31 -0700 Subject: [PATCH 14/84] Move semantic analysis testing to separate target --- pytest/Makefile | 8 ++++++-- pytest/node_plugin.py | 14 +++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/pytest/Makefile b/pytest/Makefile index a30b69a904..3ca09385c5 100644 --- a/pytest/Makefile +++ b/pytest/Makefile @@ -1,4 +1,4 @@ -all: setup run +all: setup test run # Install Python packages setup: @@ -6,7 +6,11 @@ setup: # Run Pytest run: - @poetry run python -X dev -X tracemalloc -m pytest --flake8 --mypy -rA --tb=short + @poetry run python -m pytest --setup-show -rA --tb=short + +# Run semantic analysis +test: + @poetry run python -X dev -X tracemalloc 
-m pytest --flake8 --mypy -m 'flake8 or mypy' # Print current Python virtualenv venv: diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 7e3ae9499a..2900100567 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -1,7 +1,7 @@ """Pytest plugin implementing a Node fixture for running remote commands.""" import json from io import BytesIO -from typing import Iterator +from typing import Dict, Iterator from uuid import uuid4 import _pytest @@ -26,11 +26,11 @@ def install_az_cli() -> None: def deploy_vm( name: str, - location="westus2", - vm_image="UbuntuLTS", - vm_size="Standard_DS1_v2", - setup="", - networking="", + location: str = "westus2", + vm_image: str = "UbuntuLTS", + vm_size: str = "Standard_DS1_v2", + setup: str = "", + networking: str = "", ) -> str: install_az_cli() invoke.run( @@ -53,7 +53,7 @@ def deploy_vm( echo=True, in_stream=False, ) - vm_data = json.loads(vm_result.stdout) + vm_data: Dict[str, str] = json.loads(vm_result.stdout) return vm_data["publicIpAddress"] From 3d2b9deb875e7df1729814a3253c60e69c199922 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 15:20:12 -0700 Subject: [PATCH 15/84] Cache deployed VM --- pytest/node_plugin.py | 50 +++++++++++++++++++++++++++++-------------- 1 file changed, 34 insertions(+), 16 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 2900100567..1897299274 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -1,13 +1,12 @@ """Pytest plugin implementing a Node fixture for running remote commands.""" import json from io import BytesIO -from typing import Dict, Iterator +from typing import Dict, Iterator, Optional, Tuple from uuid import uuid4 import _pytest import invoke # type: ignore from fabric import Config, Connection # type: ignore -from invoke.runners import Result # type: ignore import pytest @@ -25,19 +24,32 @@ def install_az_cli() -> None: def deploy_vm( - name: str, + request: _pytest.fixtures.FixtureRequest, 
location: str = "westus2", vm_image: str = "UbuntuLTS", vm_size: str = "Standard_DS1_v2", setup: str = "", networking: str = "", -) -> str: +) -> Tuple[str, Dict[str, str]]: + + key = f"{location}/{vm_image}/{vm_size}" + name: Optional[str] = request.config.cache.get(key, None) + if name: + result: Dict[str, str] = request.config.cache.get(name, {}) + assert result, "There was a cache problem, use --cache-clear and try again." + return name, result + + name = f"pytest-{uuid4()}" + request.config.cache.set(key, name) + install_az_cli() + invoke.run( f"az group create --name {name}-rg --location {location}", echo=True, in_stream=False, ) + vm_command = [ "az vm create", f"--resource-group {name}-rg", @@ -48,17 +60,20 @@ def deploy_vm( ] if networking == "SRIOV": vm_command.append("--accelerated-networking true") - vm_result: Result = invoke.run( - " ".join(vm_command), - echo=True, - in_stream=False, + + result: Dict[str, str] = json.loads( + invoke.run( + " ".join(vm_command), + echo=True, + in_stream=False, + ).stdout ) - vm_data: Dict[str, str] = json.loads(vm_result.stdout) - return vm_data["publicIpAddress"] + request.config.cache.set(name, result) + return name, result def delete_vm(name: str) -> None: - invoke.run(f"az group delete --name {name}-rg --yes", echo=True) + invoke.run(f"az group delete --name {name}-rg --yes", echo=True, in_stream=False) class Node(Connection): @@ -71,23 +86,24 @@ def cat(self, path: str) -> str: return buf.getvalue().decode("utf-8").strip() -# TODO: Scope this to a module. @pytest.fixture def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: """Yields a safe remote Node on which to run commands.""" + # TODO: The deploy and connect markers should be mutually # exclusive. host = "localhost" # Deploy a node. 
- name = f"pytest-{uuid4()}" deploy_marker = request.node.get_closest_marker("deploy") if deploy_marker: - host = deploy_vm(name, **deploy_marker.kwargs) + name, result = deploy_vm(request, **deploy_marker.kwargs) + host = result["publicIpAddress"] # Get the host from the test’s marker. connect_marker = request.node.get_closest_marker("connect") if connect_marker: + name = "local" host = connect_marker.args[0] # Yield the configured Node connection. @@ -103,9 +119,11 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: } } ) - print(f"Host is {host}") + with Node(host, config=config, inline_ssh_env=True) as n: yield n + # Clean up! - if deploy_marker: + # TODO: This logic is wrong. + if request.config.getoption("cacheclear") and name: delete_vm(name) From 099840bf4ac9cf0b877e60e1cb6465b21f04e0b9 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 17:04:52 -0700 Subject: [PATCH 16/84] Enable boot diagnostics when creating a VM --- pytest/node_plugin.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 1897299274..50c1d96445 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -45,18 +45,21 @@ def deploy_vm( install_az_cli() invoke.run( - f"az group create --name {name}-rg --location {location}", + f"az group create -n {name}-rg --location {location}", echo=True, in_stream=False, ) vm_command = [ "az vm create", - f"--resource-group {name}-rg", - f"--name {name}", + f"-g {name}-rg", + f"-n {name}", f"--image {vm_image}", f"--size {vm_size}", "--generate-ssh-keys", + # TODO: Create unique boot diagnostics storage account. 
+ # `az storage account create -g {name}-rg -n pytestbootdiag` + f"--boot-diagnostics-storage pytestbootdiag", ] if networking == "SRIOV": vm_command.append("--accelerated-networking true") @@ -73,7 +76,7 @@ def deploy_vm( def delete_vm(name: str) -> None: - invoke.run(f"az group delete --name {name}-rg --yes", echo=True, in_stream=False) + invoke.run(f"az group delete -n {name}-rg --yes", echo=True, in_stream=False) class Node(Connection): From 3031368440ab91b0538103d25ceecec9b576f193 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 17:05:25 -0700 Subject: [PATCH 17/84] Include /usr/bin etc. in remote path --- pytest/node_plugin.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 50c1d96445..66871d37a5 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -118,7 +118,9 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: # Disable stdin forwarding. "in_stream": False, # Set PATH since it’s not a login shell. 
- "env": {"PATH": "$PATH:/usr/local/sbin:/usr/sbin"}, + "env": { + "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" + }, } } ) From badb77e6afe3923411a0c80af139e4cbcd472a22 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 17:05:54 -0700 Subject: [PATCH 18/84] Add node functions to restart and get boot diagnostics --- pytest/node_plugin.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 66871d37a5..49901aa0d8 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -82,6 +82,18 @@ def delete_vm(name: str) -> None: class Node(Connection): """Extends 'fabric.Connection' with our own utilities.""" + name: str + + def get_boot_diagnostics(self): + """Gets the serial console logs.""" + return self.local( + f"az vm boot-diagnostics get-boot-log -n {self.name} -g {self.name}-rg" + ) + + def platform_restart(self): + """TODO: Should this '--force' and redeploy?""" + return self.local(f"az vm restart -n {self.name} -g {self.name}-rg") + def cat(self, path: str) -> str: """Gets the value of a remote file without a temporary file.""" with BytesIO() as buf: @@ -126,6 +138,7 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: ) with Node(host, config=config, inline_ssh_env=True) as n: + n.name = name yield n # Clean up! 
From deb469550f3ebee2a59344dd20dd99c2d627052c Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 17:06:11 -0700 Subject: [PATCH 19/84] Add a basic smoke test --- pytest/Makefile | 3 ++ pytest/testsuites/test_smoke.py | 51 +++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 pytest/testsuites/test_smoke.py diff --git a/pytest/Makefile b/pytest/Makefile index 3ca09385c5..fd931c8170 100644 --- a/pytest/Makefile +++ b/pytest/Makefile @@ -12,6 +12,9 @@ run: test: @poetry run python -X dev -X tracemalloc -m pytest --flake8 --mypy -m 'flake8 or mypy' +smoke: + @poetry run python -m pytest -rA -k smoke + # Print current Python virtualenv venv: @poetry env list --no-ansi --full-path diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py new file mode 100644 index 0000000000..cbc8dfac82 --- /dev/null +++ b/pytest/testsuites/test_smoke.py @@ -0,0 +1,51 @@ +"""Runs a 'smoke' test for an Azure Linux VM deployment.""" +import socket + +from invoke.runners import Result # type: ignore +from paramiko import SSHException + +import pytest +from node_plugin import Node + + +@pytest.mark.deploy(setup="OneVM", vm_size="Standard_DS2_v2") +def test_smoke(node: Node) -> None: + """Check that a VM can be deployed and is responsive. + + 1. Deploy the VM (via 'node' fixture) and log it. + 2. Ping the VM. + 3. Connect to the VM via SSH. + 4. Attempt to reboot via SSH, otherwise use the platform. + 5. Fetch the serial console logs. + + For commands where we expect a possible non-zero exit code, we + pass 'warn=True' to prevent it from throwing 'UnexpectedExit' and + we instead check its result at the end. + + SSH failures DO NOT fail this test. + TODO: Log warnings instead of printing. + """ + # TODO: Can’t ping by default, need to enable. + ping1_result: Result = node.local(f"ping {node.host} -c 1", warn=True) + + try: + node.run("uptime") # If SSH fails, we catch it. 
+ reboot_result: Result = node.sudo("reboot", warn=True) # Expect -1 + except (TimeoutError, SSHException, socket.error) as e: + print(f"SSH failed '{e}', using platform to reboot...") + node.platform_restart() + + # Try pinging and SSH again. + ping2_result: Result = node.local(f"ping {node.host} -c 1", warn=True) + + try: + node.run("uptime") + except (TimeoutError, SSHException, socket.error) as e: + print(f"SSH failed '{e}' after the reboot.") + + # Always download the serial console logs. + node.get_boot_diagnostics() + + assert ping1_result.ok + assert reboot_result.exited == -1, "Reboot failed, used platform instead" + assert ping2_result.ok From 7f035a4b7bbf7b580fc7f8ccba92df277b9146a2 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 18:59:04 -0700 Subject: [PATCH 20/84] Set default node command timeout to 1 minute --- pytest/node_plugin.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 49901aa0d8..64a179f067 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -133,6 +133,9 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: "env": { "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" }, + # Don’t let remote commands take longer than a minute + # (unless later overridden). 
+ "timeout": 60, } } ) From f4a979a4c89a4a9a63a41e70689542d782a45cd8 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 19:15:59 -0700 Subject: [PATCH 21/84] Clean up Invoke configuration --- pytest/node_plugin.py | 61 +++++++++++++++++++++---------------------- 1 file changed, 30 insertions(+), 31 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 64a179f067..28379ba89f 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -5,19 +5,40 @@ from uuid import uuid4 import _pytest -import invoke # type: ignore -from fabric import Config, Connection # type: ignore +from fabric import Connection # type: ignore +from invoke import Config, Context # type: ignore import pytest +# Setup a sane configuration for local and remote commands. +config = Config( + overrides={ + "run": { + # Show each command as its run. + "echo": True, + # Disable stdin forwarding. + "in_stream": False, + # Set PATH since it’s not a login shell. + "env": { + "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" + }, + # Don’t let remote commands take longer than a minute + # (unless later overridden). + "timeout": 60, + } + } +) + +# Provide a configured local Invoke context for running commands +# before establishing a connection. (Use like `local.run(...)`). +local = Context(config=config) + def install_az_cli() -> None: - if not invoke.run("which az", warn=True, echo=False, in_stream=False): + if not local.run("which az", warn=True, echo=False): # TODO: Use Invoke for pipes. - invoke.run( + local.run( "curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash", - echo=True, - in_stream=False, ) # TODO: Login with service principal (az login) and set # default subscription (az account set -s) using secrets. 
@@ -44,10 +65,8 @@ def deploy_vm( install_az_cli() - invoke.run( + local.run( f"az group create -n {name}-rg --location {location}", - echo=True, - in_stream=False, ) vm_command = [ @@ -65,10 +84,8 @@ def deploy_vm( vm_command.append("--accelerated-networking true") result: Dict[str, str] = json.loads( - invoke.run( + local.run( " ".join(vm_command), - echo=True, - in_stream=False, ).stdout ) request.config.cache.set(name, result) @@ -76,7 +93,7 @@ def deploy_vm( def delete_vm(name: str) -> None: - invoke.run(f"az group delete -n {name}-rg --yes", echo=True, in_stream=False) + local.run(f"az group delete -n {name}-rg --yes") class Node(Connection): @@ -122,24 +139,6 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: host = connect_marker.args[0] # Yield the configured Node connection. - config = Config( - overrides={ - "run": { - # Show each command as its run. - "echo": True, - # Disable stdin forwarding. - "in_stream": False, - # Set PATH since it’s not a login shell. - "env": { - "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" - }, - # Don’t let remote commands take longer than a minute - # (unless later overridden). - "timeout": 60, - } - } - ) - with Node(host, config=config, inline_ssh_env=True) as n: n.name = name yield n From b23d53719837dc730063269905bcea1e886836d9 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 19:24:59 -0700 Subject: [PATCH 22/84] Create boot storage account and resource group automatically --- pytest/node_plugin.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 28379ba89f..112b9ec2cd 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -44,6 +44,17 @@ def install_az_cli() -> None: # default subscription (az account set -s) using secrets. 
+def create_boot_storage(location: str) -> str: + """Create a separate resource group and storage account for boot diagnostics.""" + account = "pytestbootdiag" + # This command always exits with 0 but returns a string. + if local.run("az group exists -n pytest-lisa").stdout.strip() == "false": + local.run(f"az group create -n pytest-lisa --location {location}") + if not local.run(f"az storage account show -g pytest-lisa -n {account}", warn=True): + local.run(f"az storage account create -g pytest-lisa -n {account}") + return account + + def deploy_vm( request: _pytest.fixtures.FixtureRequest, location: str = "westus2", @@ -64,6 +75,7 @@ def deploy_vm( request.config.cache.set(key, name) install_az_cli() + boot_storage = create_boot_storage(location) local.run( f"az group create -n {name}-rg --location {location}", @@ -75,10 +87,8 @@ def deploy_vm( f"-n {name}", f"--image {vm_image}", f"--size {vm_size}", + f"--boot-diagnostics-storage {boot_storage}", "--generate-ssh-keys", - # TODO: Create unique boot diagnostics storage account. 
- # `az storage account create -g {name}-rg -n pytestbootdiag` - f"--boot-diagnostics-storage pytestbootdiag", ] if networking == "SRIOV": vm_command.append("--accelerated-networking true") From 0c8e5c598ccbcf7cc51ee4bbd81434606de23b35 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 19:59:49 -0700 Subject: [PATCH 23/84] Check that az cli is logged in with a default subscription --- pytest/node_plugin.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 112b9ec2cd..8573f47488 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -7,6 +7,7 @@ import _pytest from fabric import Connection # type: ignore from invoke import Config, Context # type: ignore +from invoke.runners import Result # type: ignore import pytest @@ -34,14 +35,19 @@ local = Context(config=config) -def install_az_cli() -> None: - if not local.run("which az", warn=True, echo=False): +def check_az_cli() -> None: + if not local.run("which az", warn=True): # TODO: Use Invoke for pipes. local.run( "curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash", ) - # TODO: Login with service principal (az login) and set - # default subscription (az account set -s) using secrets. + + # TODO: Login with service principal (az login) and set + # default subscription (az account set -s) using secrets. + account: Result = local.run("az account show") + assert account.ok, "Please `az login`!" + subs = json.loads(account.stdout) + assert subs["isDefault"], "Please `az account set -s `!" 
def create_boot_storage(location: str) -> str: @@ -74,7 +80,7 @@ def deploy_vm( name = f"pytest-{uuid4()}" request.config.cache.set(key, name) - install_az_cli() + check_az_cli() boot_storage = create_boot_storage(location) local.run( From 57f05cc0c950a35849d0912ffc9d9862db1241c0 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 20:42:46 -0700 Subject: [PATCH 24/84] Set default node command timeout to 5 minutes instead --- pytest/node_plugin.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 8573f47488..ecc55df97a 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -23,9 +23,9 @@ "env": { "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" }, - # Don’t let remote commands take longer than a minute - # (unless later overridden). - "timeout": 60, + # Don’t let remote commands take longer than five minutes + # (unless later overridden). This is to prevent hangs. 
+ "timeout": 300, } } ) From e782e09e8731fff38838611ca7d9a1967db5c53f Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 21:03:09 -0700 Subject: [PATCH 25/84] Display logged stderr/stdout as it happens --- pytest/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytest/Makefile b/pytest/Makefile index fd931c8170..1c071a30b6 100644 --- a/pytest/Makefile +++ b/pytest/Makefile @@ -6,14 +6,14 @@ setup: # Run Pytest run: - @poetry run python -m pytest --setup-show -rA --tb=short + @poetry run python -m pytest -rA --capture=tee-sys --tb=short # Run semantic analysis test: @poetry run python -X dev -X tracemalloc -m pytest --flake8 --mypy -m 'flake8 or mypy' smoke: - @poetry run python -m pytest -rA -k smoke + @poetry run python -m pytest -rA --capture=tee-sys -k smoke # Print current Python virtualenv venv: From 61b54a22eae23a80c443826494e65a772f6c0b63 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 14 Oct 2020 21:17:59 -0700 Subject: [PATCH 26/84] Fix subtle break in Fabric MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Accidentally eliminated Fabric’s default overrides of Invoke by supplying my own config based on `invoke.Config` to Fabric. Oops. --- pytest/node_plugin.py | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index ecc55df97a..99c591abb2 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -5,34 +5,35 @@ from uuid import uuid4 import _pytest +import fabric +import invoke from fabric import Connection # type: ignore -from invoke import Config, Context # type: ignore +from invoke import Context # type: ignore from invoke.runners import Result # type: ignore import pytest -# Setup a sane configuration for local and remote commands. -config = Config( - overrides={ - "run": { - # Show each command as its run. 
- "echo": True, - # Disable stdin forwarding. - "in_stream": False, - # Set PATH since it’s not a login shell. - "env": { - "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" - }, - # Don’t let remote commands take longer than five minutes - # (unless later overridden). This is to prevent hangs. - "timeout": 300, - } +# Setup a sane configuration for local and remote commands. Note that +# the defaults between Fabric and Invoke are different, so we use +# their Config classes explicitly. +config = { + "run": { + # Show each command as its run. + "echo": True, + # Disable stdin forwarding. + "in_stream": False, + # Set PATH since it’s not a login shell. + "env": {"PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin"}, + # Don’t let remote commands take longer than five minutes + # (unless later overridden). This is to prevent hangs. + "timeout": 300, } -) +} # Provide a configured local Invoke context for running commands # before establishing a connection. (Use like `local.run(...)`). -local = Context(config=config) +invoke_config = invoke.Config(overrides=config) +local = Context(config=invoke_config) def check_az_cli() -> None: @@ -155,7 +156,8 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: host = connect_marker.args[0] # Yield the configured Node connection. 
- with Node(host, config=config, inline_ssh_env=True) as n: + fabric_config = fabric.Config(overrides=config) + with Node(host, config=fabric_config, inline_ssh_env=True) as n: n.name = name yield n From 7bdda2b5433a6014d77edf58026d977df499d199 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 15 Oct 2020 12:07:36 -0700 Subject: [PATCH 27/84] Note use of az CLI instead of Azure Python APIs --- pytest/README.md | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/pytest/README.md b/pytest/README.md index 5dc023742b..dd7d3cd183 100644 --- a/pytest/README.md +++ b/pytest/README.md @@ -66,12 +66,12 @@ implement (with more as found to be required): * Run a command (perhaps asynchronously) on the node using SSH * Download and upload files to the node (with retries and timeouts) -Our abstraction would leverage +Our abstraction leverages [Fabric](https://docs.fabfile.org/en/stable/index.html), which uses [paramiko](https://docs.paramiko.org/en/stable/) underneath, directly to -implement the SSH commands. For deployment logic, it would use existing Python APIs to deploy -[Azure](https://aka.ms/azsdk/python/all) nodes, and for Hyper-V (and other -virtualization platforms), it would use +implement the SSH commands. For deployment logic, it uses the [`az` +CLI](https://aka.ms/azureclidocs), wrapped by Fabric. For Hyper-V (and other +virtualization platforms), it could use [libvirt](https://libvirt.org/python.html). Other test specific requirements, such as installing software and daemons, @@ -88,6 +88,16 @@ a month. We don’t need to rewrite this. ## Alternatives considered +### Azure Python APIs instead of `az` CLI + +We do not use the [Azure Python APIs](https://aka.ms/azsdk/python/all) directly +because they are more complicated (and less documented) than the `az` CLI. Given +Fabric (and its underlying Invoke library), the CLI becomes incredibly easy to +work with. 
The `az` CLI lead developer states that they have [feature +parity](https://stackoverflow.com/a/50005660/1028665) and that the CLI is more +straightforward to use. Considering our ease-of-maintenance requirement, this +seems the apt choice. + ### pytest-xdist With the [pytest-xdist plugin](https://github.com/pytest-dev/pytest-xdist) there From de022b99debebe394e7fa0eae3ecb552046c633a Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 15 Oct 2020 15:50:29 -0700 Subject: [PATCH 28/84] Add a basic self test --- pytest/Makefile | 6 +++++- pytest/node_plugin.py | 3 ++- pytest/selftests/test_basic.py | 7 +++++++ 3 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 pytest/selftests/test_basic.py diff --git a/pytest/Makefile b/pytest/Makefile index 1c071a30b6..b823ce5adb 100644 --- a/pytest/Makefile +++ b/pytest/Makefile @@ -8,8 +8,12 @@ setup: run: @poetry run python -m pytest -rA --capture=tee-sys --tb=short -# Run semantic analysis +# Run local tests test: + @poetry run python -m pytest -rA --capture=tee-sys --tb=short selftests/ + +# Run semantic analysis +check: @poetry run python -X dev -X tracemalloc -m pytest --flake8 --mypy -m 'flake8 or mypy' smoke: diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 99c591abb2..77a6abab40 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -141,6 +141,7 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: # TODO: The deploy and connect markers should be mutually # exclusive. + name = "local" host = "localhost" # Deploy a node. @@ -152,8 +153,8 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: # Get the host from the test’s marker. connect_marker = request.node.get_closest_marker("connect") if connect_marker: - name = "local" host = connect_marker.args[0] + name = host # Yield the configured Node connection. 
fabric_config = fabric.Config(overrides=config) diff --git a/pytest/selftests/test_basic.py b/pytest/selftests/test_basic.py new file mode 100644 index 0000000000..a644b137ae --- /dev/null +++ b/pytest/selftests/test_basic.py @@ -0,0 +1,7 @@ +"""These tests are meant to run in a CI environment.""" +from node_plugin import Node + + +def test_basic(node: Node) -> None: + """Basic test which creates a Node connection to 'localhost'.""" + node.local("echo Hello World") From 33ea2dc98710134ab2ef3d72ecc245c3f461a1bb Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 15 Oct 2020 15:51:38 -0700 Subject: [PATCH 29/84] Add a GitHub Action based CI workflow --- .github/.editorconfig | 2 ++ .github/workflows/ci-workflow.yaml | 43 ++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 .github/.editorconfig create mode 100644 .github/workflows/ci-workflow.yaml diff --git a/.github/.editorconfig b/.github/.editorconfig new file mode 100644 index 0000000000..15e6a1f149 --- /dev/null +++ b/.github/.editorconfig @@ -0,0 +1,2 @@ +# Ignore parent project’s config +root = true diff --git a/.github/workflows/ci-workflow.yaml b/.github/workflows/ci-workflow.yaml new file mode 100644 index 0000000000..f852e60dee --- /dev/null +++ b/.github/workflows/ci-workflow.yaml @@ -0,0 +1,43 @@ +name: CI Workflow for LISAv3 via Pytest + +on: + pull_request: + branches: + - pytest/main + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-20.04, windows-2019] + fail-fast: false + steps: + - name: Checkout repository to $GITHUB_WORKSPACE + uses: actions/checkout@v2 + + - name: Setup bootstrap Python + uses: actions/setup-python@v2 + with: + python-version: "3.8" + + - name: Install Poetry for Linux + if: runner.os == 'Linux' + run: | + curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python - --preview --version 1.1.0b4 + echo "::add-path::$HOME/.poetry/bin" + + - name: Install Poetry for Windows 
+ if: runner.os == 'Windows' + run: | + (Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py -UseBasicParsing).Content | python - --preview --version 1.1.0b4 + echo "::add-path::$env:USERPROFILE\.poetry\bin" + + - name: Install Python dependencies + run: cd pytest && make setup + + - name: Run self tests + run: cd pytest && make test + + - name: Run semantic analysis + run: cd pytest && make check From 6976545e1bfc120295b1cf561cbca3c299821560 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 15 Oct 2020 16:05:56 -0700 Subject: [PATCH 30/84] Make az CLI check compatible with Windows --- pytest/node_plugin.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 77a6abab40..bdb008f75b 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -37,12 +37,8 @@ def check_az_cli() -> None: - if not local.run("which az", warn=True): - # TODO: Use Invoke for pipes. - local.run( - "curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash", - ) - + # E.g. on Ubuntu: `curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash` + assert local.run("az --version", warn=True), "Please install the `az` CLI!" # TODO: Login with service principal (az login) and set # default subscription (az account set -s) using secrets. account: Result = local.run("az account show") From fafa30343a72d92373052ce288366289fd575a6d Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 15 Oct 2020 16:10:07 -0700 Subject: [PATCH 31/84] Only set PATH for SSH commands MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This also required patching the `local()` method of Fabric’s `Connection` class to _not_ replace the environment for local commands, since a Linux PATH on Windows does not work out. 
--- pytest/node_plugin.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index bdb008f75b..d933a347fc 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -22,14 +22,13 @@ "echo": True, # Disable stdin forwarding. "in_stream": False, - # Set PATH since it’s not a login shell. - "env": {"PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin"}, # Don’t let remote commands take longer than five minutes # (unless later overridden). This is to prevent hangs. "timeout": 300, } } + # Provide a configured local Invoke context for running commands # before establishing a connection. (Use like `local.run(...)`). invoke_config = invoke.Config(overrides=config) @@ -114,6 +113,10 @@ class Node(Connection): name: str + def local(self, *args, **kwargs): + """This patches Fabric's 'local()' function to ignore SSH environment.""" + return super(Connection, self).run(replace_env=False, env={}, *args, **kwargs) + def get_boot_diagnostics(self): """Gets the serial console logs.""" return self.local( @@ -153,7 +156,12 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: name = host # Yield the configured Node connection. - fabric_config = fabric.Config(overrides=config) + ssh_config = config.copy() + ssh_config["run"]["env"] = { + # Set PATH since it’s not a login shell. 
+ "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" + } + fabric_config = fabric.Config(overrides=ssh_config) with Node(host, config=fabric_config, inline_ssh_env=True) as n: n.name = name yield n From 9e48f4042c5d7b01394358671ae5978559676c59 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 15 Oct 2020 16:59:42 -0700 Subject: [PATCH 32/84] =?UTF-8?q?Make=20=E2=80=98ping=E2=80=99=20cross-pla?= =?UTF-8?q?tform?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pytest/testsuites/test_smoke.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index cbc8dfac82..d52ed6082b 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -1,4 +1,5 @@ """Runs a 'smoke' test for an Azure Linux VM deployment.""" +import platform import socket from invoke.runners import Result # type: ignore @@ -25,8 +26,10 @@ def test_smoke(node: Node) -> None: SSH failures DO NOT fail this test. TODO: Log warnings instead of printing. """ + # TODO: Move to ‘Node.ping()’ + ping_flag = "-c 1" if platform.system() == "Linux" else "-n 1" # TODO: Can’t ping by default, need to enable. - ping1_result: Result = node.local(f"ping {node.host} -c 1", warn=True) + ping1_result: Result = node.local(f"ping {ping_flag} {node.host}", warn=True) try: node.run("uptime") # If SSH fails, we catch it. @@ -36,7 +39,7 @@ def test_smoke(node: Node) -> None: node.platform_restart() # Try pinging and SSH again. 
- ping2_result: Result = node.local(f"ping {node.host} -c 1", warn=True) + ping2_result: Result = node.local(f"ping {ping_flag} {node.host}", warn=True) try: node.run("uptime") From b700a0d8139df49f4bc5570bfd0c0ea6fcd82ce8 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 15 Oct 2020 17:48:38 -0700 Subject: [PATCH 33/84] Fix types --- pytest/node_plugin.py | 29 +++++++++++++++-------------- pytest/testsuites/test_smoke.py | 2 +- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index d933a347fc..38cfe6f81f 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -1,14 +1,14 @@ """Pytest plugin implementing a Node fixture for running remote commands.""" import json from io import BytesIO -from typing import Dict, Iterator, Optional, Tuple +from typing import Any, Dict, Iterator, Optional, Tuple from uuid import uuid4 import _pytest -import fabric -import invoke -from fabric import Connection # type: ignore -from invoke import Context # type: ignore +import fabric # type: ignore +import invoke # type: ignore +from fabric import Connection +from invoke import Context from invoke.runners import Result # type: ignore import pytest @@ -67,14 +67,15 @@ def deploy_vm( ) -> Tuple[str, Dict[str, str]]: key = f"{location}/{vm_image}/{vm_size}" - name: Optional[str] = request.config.cache.get(key, None) + name: Optional[str] = request.config.cache.get(key, None) # type: ignore + result: Dict[str, str] = dict() if name: - result: Dict[str, str] = request.config.cache.get(name, {}) + result = request.config.cache.get(name, {}) # type: ignore assert result, "There was a cache problem, use --cache-clear and try again." 
return name, result name = f"pytest-{uuid4()}" - request.config.cache.set(key, name) + request.config.cache.set(key, name) # type: ignore check_az_cli() boot_storage = create_boot_storage(location) @@ -95,12 +96,12 @@ def deploy_vm( if networking == "SRIOV": vm_command.append("--accelerated-networking true") - result: Dict[str, str] = json.loads( + result = json.loads( local.run( " ".join(vm_command), ).stdout ) - request.config.cache.set(name, result) + request.config.cache.set(name, result) # type: ignore return name, result @@ -113,17 +114,17 @@ class Node(Connection): name: str - def local(self, *args, **kwargs): + def local(self, *args: Any, **kwargs: Any) -> Result: """This patches Fabric's 'local()' function to ignore SSH environment.""" return super(Connection, self).run(replace_env=False, env={}, *args, **kwargs) - def get_boot_diagnostics(self): + def get_boot_diagnostics(self) -> Result: """Gets the serial console logs.""" return self.local( f"az vm boot-diagnostics get-boot-log -n {self.name} -g {self.name}-rg" ) - def platform_restart(self): + def platform_restart(self) -> Result: """TODO: Should this '--force' and redeploy?""" return self.local(f"az vm restart -n {self.name} -g {self.name}-rg") @@ -157,7 +158,7 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: # Yield the configured Node connection. ssh_config = config.copy() - ssh_config["run"]["env"] = { + ssh_config["run"]["env"] = { # type: ignore # Set PATH since it’s not a login shell. 
"PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" } diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index d52ed6082b..1fe16cad04 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -3,7 +3,7 @@ import socket from invoke.runners import Result # type: ignore -from paramiko import SSHException +from paramiko import SSHException # type: ignore import pytest from node_plugin import Node From e9e8c5cfc6e278a5650a653bbe69992a06f28ad8 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 15 Oct 2020 18:11:24 -0700 Subject: [PATCH 34/84] Add tenacity package --- pytest/poetry.lock | 20 +++++++++++++++++++- pytest/pyproject.toml | 1 + 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/pytest/poetry.lock b/pytest/poetry.lock index 82eabae938..ac379eee62 100644 --- a/pytest/poetry.lock +++ b/pytest/poetry.lock @@ -550,6 +550,20 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +[[package]] +name = "tenacity" +version = "6.2.0" +description = "Retry code until it succeeds" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +six = ">=1.9.0" + +[package.extras] +doc = ["reno", "sphinx", "tornado (>=4.5)"] + [[package]] name = "testfixtures" version = "6.15.0" @@ -598,7 +612,7 @@ python-versions = ">=3.6" [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "307896057c574edcbf704e9060caabb3eb14b01c06ba841b8de5c7715ce86ecb" +content-hash = "42ece43921b68dfda0693eb450c7da2797f9971ee0824cc4ab7752691fd71552" [metadata.files] appdirs = [ @@ -889,6 +903,10 @@ six = [ {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, ] +tenacity = [ + {file = "tenacity-6.2.0-py2.py3-none-any.whl", hash = 
"sha256:5a5d3dcd46381abe8b4f82b5736b8726fd3160c6c7161f53f8af7f1eb9b82173"}, + {file = "tenacity-6.2.0.tar.gz", hash = "sha256:29ae90e7faf488a8628432154bb34ace1cca58244c6ea399fd33f066ac71339a"}, +] testfixtures = [ {file = "testfixtures-6.15.0-py2.py3-none-any.whl", hash = "sha256:e17f4f526fc90b0ac9bc7f8ca62b7dec17d9faf3d721f56bda4f0fd94d02f85a"}, {file = "testfixtures-6.15.0.tar.gz", hash = "sha256:409f77cfbdad822d12a8ce5c4aa8fb4d0bb38073f4a5444fede3702716a2cec2"}, diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml index c083e43bef..68b90e4371 100644 --- a/pytest/pyproject.toml +++ b/pytest/pyproject.toml @@ -10,6 +10,7 @@ python = "^3.8" pytest = "^6.1.1" fabric = "^2.5.0" pytest-timeout = "^1.4.2" +tenacity = "^6.2.0" [tool.poetry.dev-dependencies] black = "^20.8b1" From 7811037e835815e0edb9eea7e4d9514f3561593f Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 15 Oct 2020 18:11:31 -0700 Subject: [PATCH 35/84] Add retry to get boot diagnostics with exponential wait --- pytest/node_plugin.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 38cfe6f81f..4d4bbc91c0 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -10,6 +10,7 @@ from fabric import Connection from invoke import Context from invoke.runners import Result # type: ignore +from tenacity import retry, stop_after_delay, wait_exponential import pytest @@ -118,6 +119,7 @@ def local(self, *args: Any, **kwargs: Any) -> Result: """This patches Fabric's 'local()' function to ignore SSH environment.""" return super(Connection, self).run(replace_env=False, env={}, *args, **kwargs) + @retry(wait=wait_exponential(), stop=stop_after_delay(60)) def get_boot_diagnostics(self) -> Result: """Gets the serial console logs.""" return self.local( From e88cdae2708b1db5af5dddcbbc6542c3e5f757b4 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 15 Oct 2020 18:25:11 -0700 Subject: [PATCH 36/84] Ignore unclosed file/socket 
resource warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Need to figure out the unclosed files at some point, but punting. The unclosed socket happens because Paramiko doesn’t close sockets when connections abruptly end, which we’ll need to fix upstream. --- pytest/pytest.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/pytest/pytest.ini b/pytest/pytest.ini index ca01a9b06f..eb42e41f02 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -6,4 +6,5 @@ markers = timeout = 300 filterwarnings = error + ignore:unclosed:ResourceWarning ignore:the imp module is deprecated in favour of importlib:DeprecationWarning From 62a87f0422021d85a989e244c98846ca76721474 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 10:57:35 -0700 Subject: [PATCH 37/84] Fix VM caching --- pytest/conftest.py | 17 +++++++++ pytest/node_plugin.py | 83 ++++++++++++++++++++++++++----------------- 2 files changed, 68 insertions(+), 32 deletions(-) diff --git a/pytest/conftest.py b/pytest/conftest.py index fb5ebaeec6..80269b41ab 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -5,6 +5,23 @@ """ from pathlib import Path +from _pytest.config.argparsing import Parser + pytest_plugins = "node_plugin" + +def pytest_addoption(parser: Parser) -> None: + """Pytest hook for adding arbitrary CLI options. + + https://docs.pytest.org/en/latest/example/simple.html + + """ + parser.addoption( + "--keep-vms", + action="store_true", + default=False, + help="Keeps deployed VMs cached between test runs, useful for developers.", + ) + + LINUX_SCRIPTS = Path("../Testscripts/Linux") diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 4d4bbc91c0..bd9618e49c 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -37,6 +37,7 @@ def check_az_cli() -> None: + """Assert that the `az` CLI is installed and logged in.""" # E.g. 
on Ubuntu: `curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash` assert local.run("az --version", warn=True), "Please install the `az` CLI!" # TODO: Login with service principal (az login) and set @@ -59,25 +60,22 @@ def create_boot_storage(location: str) -> str: def deploy_vm( - request: _pytest.fixtures.FixtureRequest, + name: str, location: str = "westus2", vm_image: str = "UbuntuLTS", vm_size: str = "Standard_DS1_v2", setup: str = "", networking: str = "", ) -> Tuple[str, Dict[str, str]]: + """Given deployment info, deploy a new VM. - key = f"{location}/{vm_image}/{vm_size}" - name: Optional[str] = request.config.cache.get(key, None) # type: ignore - result: Dict[str, str] = dict() - if name: - result = request.config.cache.get(name, {}) # type: ignore - assert result, "There was a cache problem, use --cache-clear and try again." - return name, result - - name = f"pytest-{uuid4()}" - request.config.cache.set(key, name) # type: ignore + TODO: This along with the functions it calls are Azure specific + and so would be refactored to support other platforms. Hence it + returns both the host and the deployment data so that calling + functions don't have to know which field in the data corresponds + to the host. + """ check_az_cli() boot_storage = create_boot_storage(location) @@ -97,16 +95,14 @@ def deploy_vm( if networking == "SRIOV": vm_command.append("--accelerated-networking true") - result = json.loads( - local.run( - " ".join(vm_command), - ).stdout - ) - request.config.cache.set(name, result) # type: ignore - return name, result + data: Dict[str, str] = json.loads(local.run(" ".join(vm_command)).stdout) + host = data["publicIpAddress"] + return host, data def delete_vm(name: str) -> None: + """Delete the entire allocated resource group.""" + # TODO: Maybe don’t wait for this command to complete. 
local.run(f"az group delete -n {name}-rg --yes") @@ -114,6 +110,7 @@ class Node(Connection): """Extends 'fabric.Connection' with our own utilities.""" name: str + data: Dict[str, str] def local(self, *args: Any, **kwargs: Any) -> Result: """This patches Fabric's 'local()' function to ignore SSH environment.""" @@ -139,23 +136,44 @@ def cat(self, path: str) -> str: @pytest.fixture def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: - """Yields a safe remote Node on which to run commands.""" + """Yields a safe remote Node on which to run commands. - # TODO: The deploy and connect markers should be mutually - # exclusive. - name = "local" - host = "localhost" + TODO: Currently this also manages the caching of the deployed VMs. + However, we should make a node pool (perhaps a session-scoped + fixture) which caches and deploys VMs, leaving this to perform its + original work as a connection creator. - # Deploy a node. + """ deploy_marker = request.node.get_closest_marker("deploy") - if deploy_marker: - name, result = deploy_vm(request, **deploy_marker.kwargs) - host = result["publicIpAddress"] - - # Get the host from the test’s marker. connect_marker = request.node.get_closest_marker("connect") - if connect_marker: + + data: Dict[str, str] = dict() + name: Optional[str] = None + host: Optional[str] = None + + # TODO: The deploy and connect markers should be mutually + # exclusive. + if deploy_marker: + # NOTE: https://docs.pytest.org/en/stable/cache.html + key = "/".join(["node"] + list(filter(None, deploy_marker.kwargs.values()))) + data = request.config.cache.get(key, None) # type: ignore + if not data: + # Cache miss, deploy new node... + name = f"pytest-{uuid4()}" + host, data = deploy_vm(name, **deploy_marker.kwargs) + data["name"] = name + data["host"] = host + request.config.cache.set(key, data) # type: ignore + name = data["name"] + host = data["host"] + elif connect_marker: + # Get the host from the test’s marker. 
host = connect_marker.args[0] + name = f"pre-deployed:{host}" + else: + # NOTE: This still uses SSH so the localhost must be + # connectable. + host = "localhost" name = host # Yield the configured Node connection. @@ -167,9 +185,10 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: fabric_config = fabric.Config(overrides=ssh_config) with Node(host, config=fabric_config, inline_ssh_env=True) as n: n.name = name + n.data = data yield n # Clean up! - # TODO: This logic is wrong. - if request.config.getoption("cacheclear") and name: + if not request.config.getoption("keep_vms") and key: delete_vm(name) + request.config.cache.set(key, None) # type: ignore From e7e4d25daab8f806ccfab04936bc3e09baa6199d Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 11:20:14 -0700 Subject: [PATCH 38/84] Clean up types --- pytest/node_plugin.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index bd9618e49c..e94b04ffc0 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -1,19 +1,25 @@ """Pytest plugin implementing a Node fixture for running remote commands.""" +from __future__ import annotations + import json +import typing from io import BytesIO -from typing import Any, Dict, Iterator, Optional, Tuple from uuid import uuid4 -import _pytest import fabric # type: ignore import invoke # type: ignore from fabric import Connection from invoke import Context from invoke.runners import Result # type: ignore -from tenacity import retry, stop_after_delay, wait_exponential +from tenacity import retry, stop_after_delay, wait_exponential # type: ignore import pytest +if typing.TYPE_CHECKING: + from typing import Any, Dict, Iterator, Optional, Tuple + + from _pytest.fixtures import FixtureRequest + # Setup a sane configuration for local and remote commands. 
Note that # the defaults between Fabric and Invoke are different, so we use # their Config classes explicitly. @@ -135,7 +141,7 @@ def cat(self, path: str) -> str: @pytest.fixture -def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: +def node(request: FixtureRequest) -> Iterator[Node]: """Yields a safe remote Node on which to run commands. TODO: Currently this also manages the caching of the deployed VMs. @@ -156,14 +162,15 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: if deploy_marker: # NOTE: https://docs.pytest.org/en/stable/cache.html key = "/".join(["node"] + list(filter(None, deploy_marker.kwargs.values()))) - data = request.config.cache.get(key, None) # type: ignore + assert request.config.cache is not None + data = request.config.cache.get(key, None) if not data: # Cache miss, deploy new node... name = f"pytest-{uuid4()}" host, data = deploy_vm(name, **deploy_marker.kwargs) data["name"] = name data["host"] = host - request.config.cache.set(key, data) # type: ignore + request.config.cache.set(key, data) name = data["name"] host = data["host"] elif connect_marker: @@ -177,8 +184,8 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: name = host # Yield the configured Node connection. - ssh_config = config.copy() - ssh_config["run"]["env"] = { # type: ignore + ssh_config: Dict[str, Any] = config.copy() + ssh_config["run"]["env"] = { # Set PATH since it’s not a login shell. "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" } @@ -191,4 +198,5 @@ def node(request: _pytest.fixtures.FixtureRequest) -> Iterator[Node]: # Clean up! 
if not request.config.getoption("keep_vms") and key: delete_vm(name) - request.config.cache.set(key, None) # type: ignore + assert request.config.cache is not None + request.config.cache.set(key, None) From ba26897a81c4839ad282d7895ae1f2ac5f8f20ed Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 15:02:54 -0700 Subject: [PATCH 39/84] Enable all junit logging --- pytest/pytest.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/pytest/pytest.ini b/pytest/pytest.ini index eb42e41f02..73894c5bf1 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -3,6 +3,7 @@ markers = lisa deploy connect +junit_logging = all timeout = 300 filterwarnings = error From 013b9c4029b6d8d281b9b16a16fb3239c156818f Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 15:03:25 -0700 Subject: [PATCH 40/84] Add logging to node plugin --- pytest/node_plugin.py | 20 +++++++++++++++----- pytest/pytest.ini | 2 ++ 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index e94b04ffc0..63082a00f3 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -2,6 +2,7 @@ from __future__ import annotations import json +import logging import typing from io import BytesIO from uuid import uuid4 @@ -50,8 +51,11 @@ def check_az_cli() -> None: # default subscription (az account set -s) using secrets. account: Result = local.run("az account show") assert account.ok, "Please `az login`!" - subs = json.loads(account.stdout) - assert subs["isDefault"], "Please `az account set -s `!" + sub = json.loads(account.stdout) + assert sub["isDefault"], "Please `az account set -s `!" 
+ logging.info( + f"Using account '{sub['user']['name']}' with subscription '{sub['name']}'" + ) def create_boot_storage(location: str) -> str: @@ -85,10 +89,13 @@ def deploy_vm( check_az_cli() boot_storage = create_boot_storage(location) - local.run( - f"az group create -n {name}-rg --location {location}", + logging.info( + f"Deploying VM to resource group '{name}-rg' in '{location}' " + "with image '{vm_image}' and size '{vm_size}'" ) + local.run(f"az group create -n {name}-rg --location {location}") + vm_command = [ "az vm create", f"-g {name}-rg", @@ -109,6 +116,7 @@ def deploy_vm( def delete_vm(name: str) -> None: """Delete the entire allocated resource group.""" # TODO: Maybe don’t wait for this command to complete. + logging.info(f"Deleting resource group '{name}-rg'") local.run(f"az group delete -n {name}-rg --yes") @@ -164,7 +172,9 @@ def node(request: FixtureRequest) -> Iterator[Node]: key = "/".join(["node"] + list(filter(None, deploy_marker.kwargs.values()))) assert request.config.cache is not None data = request.config.cache.get(key, None) - if not data: + if data: + logging.info(f"Reusing node for cached key '{key}'") + else: # Cache miss, deploy new node... 
name = f"pytest-{uuid4()}" host, data = deploy_vm(name, **deploy_marker.kwargs) diff --git a/pytest/pytest.ini b/pytest/pytest.ini index 73894c5bf1..890ddea1a0 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -3,6 +3,8 @@ markers = lisa deploy connect +log_cli = true +log_cli_level = INFO junit_logging = all timeout = 300 filterwarnings = From dfb6885b07d46e62175ee20fe3a523030613abf3 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 15:05:40 -0700 Subject: [PATCH 41/84] Split Node scopes to function and class fixtures --- pytest/node_plugin.py | 49 ++++++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 63082a00f3..ff31286cc5 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -91,7 +91,7 @@ def deploy_vm( logging.info( f"Deploying VM to resource group '{name}-rg' in '{location}' " - "with image '{vm_image}' and size '{vm_size}'" + f"with image '{vm_image}' and size '{vm_size}'" ) local.run(f"az group create -n {name}-rg --location {location}") @@ -148,8 +148,40 @@ def cat(self, path: str) -> str: return buf.getvalue().decode("utf-8").strip() -@pytest.fixture +@pytest.fixture(scope="function") def node(request: FixtureRequest) -> Iterator[Node]: + key, name, host, data, fabric_config = get_node(request) + with Node(host, config=fabric_config, inline_ssh_env=True) as n: + n.name = name + n.data = data + yield n + + # Clean up! + if not request.config.getoption("keep_vms") and key: + delete_vm(name) + assert request.config.cache is not None + request.config.cache.set(key, None) + + +@pytest.fixture(scope="class") +def class_node(request: FixtureRequest) -> Iterator[None]: + key, name, host, data, fabric_config = get_node(request) + with Node(host, config=fabric_config, inline_ssh_env=True) as n: + n.name = name + n.data = data + request.cls.n = n + yield + + # Clean up! 
+ if not request.config.getoption("keep_vms") and key: + delete_vm(name) + assert request.config.cache is not None + request.config.cache.set(key, None) + + +def get_node( + request: FixtureRequest, +) -> Tuple[Optional[str], str, Optional[str], Dict[str, str], fabric.Config]: """Yields a safe remote Node on which to run commands. TODO: Currently this also manages the caching of the deployed VMs. @@ -157,10 +189,12 @@ def node(request: FixtureRequest) -> Iterator[Node]: fixture) which caches and deploys VMs, leaving this to perform its original work as a connection creator. + TODO: It's return type is garbage. """ deploy_marker = request.node.get_closest_marker("deploy") connect_marker = request.node.get_closest_marker("connect") + key: Optional[str] = None data: Dict[str, str] = dict() name: Optional[str] = None host: Optional[str] = None @@ -200,13 +234,4 @@ def node(request: FixtureRequest) -> Iterator[Node]: "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" } fabric_config = fabric.Config(overrides=ssh_config) - with Node(host, config=fabric_config, inline_ssh_env=True) as n: - n.name = name - n.data = data - yield n - - # Clean up! 
- if not request.config.getoption("keep_vms") and key: - delete_vm(name) - assert request.config.cache is not None - request.config.cache.set(key, None) + return key, name, host, data, fabric_config From 308d80c20d7e920042aa6a637d81ebc99d1b143f Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 15:05:55 -0700 Subject: [PATCH 42/84] Split smoke test into component tests with less verbose output --- pytest/Makefile | 2 +- pytest/testsuites/test_smoke.py | 70 ++++++++++++++++++--------------- 2 files changed, 40 insertions(+), 32 deletions(-) diff --git a/pytest/Makefile b/pytest/Makefile index b823ce5adb..90ffb1ca4e 100644 --- a/pytest/Makefile +++ b/pytest/Makefile @@ -17,7 +17,7 @@ check: @poetry run python -X dev -X tracemalloc -m pytest --flake8 --mypy -m 'flake8 or mypy' smoke: - @poetry run python -m pytest -rA --capture=tee-sys -k smoke + @poetry run python -m pytest --quiet --junit-xml=tests.xml --tb=no -rA --show-capture=log -k smoke # Print current Python virtualenv venv: diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 1fe16cad04..5f33d6c24d 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -1,6 +1,8 @@ """Runs a 'smoke' test for an Azure Linux VM deployment.""" +import logging import platform import socket +import time from invoke.runners import Result # type: ignore from paramiko import SSHException # type: ignore @@ -10,7 +12,8 @@ @pytest.mark.deploy(setup="OneVM", vm_size="Standard_DS2_v2") -def test_smoke(node: Node) -> None: +@pytest.mark.usefixtures("class_node") +class TestSmoke: """Check that a VM can be deployed and is responsive. 1. Deploy the VM (via 'node' fixture) and log it. @@ -19,36 +22,41 @@ def test_smoke(node: Node) -> None: 4. Attempt to reboot via SSH, otherwise use the platform. 5. Fetch the serial console logs. 
- For commands where we expect a possible non-zero exit code, we - pass 'warn=True' to prevent it from throwing 'UnexpectedExit' and - we instead check its result at the end. - - SSH failures DO NOT fail this test. - TODO: Log warnings instead of printing. """ + + n: Node + # TODO: Move to ‘Node.ping()’ ping_flag = "-c 1" if platform.system() == "Linux" else "-n 1" - # TODO: Can’t ping by default, need to enable. - ping1_result: Result = node.local(f"ping {ping_flag} {node.host}", warn=True) - - try: - node.run("uptime") # If SSH fails, we catch it. - reboot_result: Result = node.sudo("reboot", warn=True) # Expect -1 - except (TimeoutError, SSHException, socket.error) as e: - print(f"SSH failed '{e}', using platform to reboot...") - node.platform_restart() - - # Try pinging and SSH again. - ping2_result: Result = node.local(f"ping {ping_flag} {node.host}", warn=True) - - try: - node.run("uptime") - except (TimeoutError, SSHException, socket.error) as e: - print(f"SSH failed '{e}' after the reboot.") - - # Always download the serial console logs. - node.get_boot_diagnostics() - - assert ping1_result.ok - assert reboot_result.exited == -1, "Reboot failed, used platform instead" - assert ping2_result.ok + + def test_ping_1(self) -> None: + # TODO: Can’t ping by default, need to enable. 
+ logging.warning("Expecting ping to fail because it's not enabled yet") + r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) + assert r.ok, f"Pinging {self.n.host} failed" + + def test_ssh_1(self) -> None: + self.n.run("uptime") + + def test_reboot(self) -> None: + try: + # If this succeeds, we should expect the exit code to be -1 + r: Result = self.n.sudo("reboot", warn=True) + except (TimeoutError, SSHException, socket.error) as e: + logging.warning(f"SSH failed '{e}', using platform to reboot") + self.n.platform_restart() + logging.info("Waiting 10 seconds for reboot to finish") + time.sleep(10) + assert r.exited == -1, "While SSH worked, reboot failed" + + def test_ping_2(self) -> None: + # TODO: Can’t ping by default, need to enable. + logging.warning("Expecting ping to fail for the same reason as above") + r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) + assert r.ok, f"Pinging {self.n.host} failed" + + def test_ssh_2(self) -> None: + self.n.run("uptime") + + def test_serial_log(self) -> None: + self.n.get_boot_diagnostics() From a1ed37bcbd09f640083b7c3f1d17506f2420263e Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 16:06:13 -0700 Subject: [PATCH 43/84] =?UTF-8?q?Add=20a=20=E2=80=98clean=E2=80=99=20make?= =?UTF-8?q?=20target=20to=20clear=20the=20cache=20and=20show=20the=20setup?= =?UTF-8?q?=20plan?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pytest/Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pytest/Makefile b/pytest/Makefile index 90ffb1ca4e..a1c74c8246 100644 --- a/pytest/Makefile +++ b/pytest/Makefile @@ -16,6 +16,9 @@ test: check: @poetry run python -X dev -X tracemalloc -m pytest --flake8 --mypy -m 'flake8 or mypy' +clean: + @poetry run python -m pytest --cache-clear --setup-plan + smoke: @poetry run python -m pytest --quiet --junit-xml=tests.xml --tb=no -rA --show-capture=log -k smoke From 
d42ed9fbf52b8f37c01dc274d9188e43472d9639 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 16:19:50 -0700 Subject: [PATCH 44/84] =?UTF-8?q?Don=E2=80=99t=20wait=20for=20deletion=20o?= =?UTF-8?q?f=20resource=20group?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pytest/node_plugin.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index ff31286cc5..3336e88acd 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -115,9 +115,8 @@ def deploy_vm( def delete_vm(name: str) -> None: """Delete the entire allocated resource group.""" - # TODO: Maybe don’t wait for this command to complete. logging.info(f"Deleting resource group '{name}-rg'") - local.run(f"az group delete -n {name}-rg --yes") + local.run(f"az group delete -n {name}-rg --yes --no-wait") class Node(Connection): @@ -148,6 +147,8 @@ def cat(self, path: str) -> str: return buf.getvalue().decode("utf-8").strip() +# TODO: The fixtures need to be fixed up since we now have a pair, one +# for each scope. They need documentation and de-duplication too. @pytest.fixture(scope="function") def node(request: FixtureRequest) -> Iterator[Node]: key, name, host, data, fabric_config = get_node(request) @@ -158,9 +159,9 @@ def node(request: FixtureRequest) -> Iterator[Node]: # Clean up! if not request.config.getoption("keep_vms") and key: - delete_vm(name) assert request.config.cache is not None request.config.cache.set(key, None) + delete_vm(name) @pytest.fixture(scope="class") @@ -174,9 +175,9 @@ def class_node(request: FixtureRequest) -> Iterator[None]: # Clean up! 
if not request.config.getoption("keep_vms") and key: - delete_vm(name) assert request.config.cache is not None request.config.cache.set(key, None) + delete_vm(name) def get_node( From f415e4b16f9cc3352ad8b7f35a7e73119037a0c5 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 17:14:57 -0700 Subject: [PATCH 45/84] Better output --- pytest/Makefile | 2 +- pytest/node_plugin.py | 15 +++++++++++++-- pytest/pytest.ini | 2 ++ pytest/testsuites/test_smoke.py | 2 -- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/pytest/Makefile b/pytest/Makefile index a1c74c8246..2548396019 100644 --- a/pytest/Makefile +++ b/pytest/Makefile @@ -20,7 +20,7 @@ clean: @poetry run python -m pytest --cache-clear --setup-plan smoke: - @poetry run python -m pytest --quiet --junit-xml=tests.xml --tb=no -rA --show-capture=log -k smoke + @poetry run python -m pytest --quiet --junit-xml=tests.xml --tb=line --show-capture=log -k smoke # Print current Python virtualenv venv: diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 3336e88acd..32dfe89f67 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -90,8 +90,11 @@ def deploy_vm( boot_storage = create_boot_storage(location) logging.info( - f"Deploying VM to resource group '{name}-rg' in '{location}' " - f"with image '{vm_image}' and size '{vm_size}'" + f"""Deploying VM... + Resource Group: '{name}-rg' + Region: '{location}' + Image: '{vm_image}' + Size: '{vm_size}'""" ) local.run(f"az group create -n {name}-rg --location {location}") @@ -171,6 +174,14 @@ def class_node(request: FixtureRequest) -> Iterator[None]: n.name = name n.data = data request.cls.n = n + logging.info(f"Using VM at: '{host}'") + try: + r: Result = n.run("uname -r") + except Exception as e: + logging.warning(f"Kernel Version: Unknown due to '{e}'") + else: + assert r.ok + logging.info(f"Kernel Version: '{r.stdout.strip()}'") yield # Clean up! 
diff --git a/pytest/pytest.ini b/pytest/pytest.ini index 890ddea1a0..5c6d5212b2 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -5,6 +5,8 @@ markers = connect log_cli = true log_cli_level = INFO +log_cli_format = %(asctime)s %(levelname)s %(message)s +log_cli_date_format = %Y-%m-%d %H:%M:%S junit_logging = all timeout = 300 filterwarnings = diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 5f33d6c24d..0e1f157468 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -31,7 +31,6 @@ class TestSmoke: def test_ping_1(self) -> None: # TODO: Can’t ping by default, need to enable. - logging.warning("Expecting ping to fail because it's not enabled yet") r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) assert r.ok, f"Pinging {self.n.host} failed" @@ -51,7 +50,6 @@ def test_reboot(self) -> None: def test_ping_2(self) -> None: # TODO: Can’t ping by default, need to enable. - logging.warning("Expecting ping to fail for the same reason as above") r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) assert r.ok, f"Pinging {self.n.host} failed" From a26b3d9b5eddd22e0f2901a4152d79dc6115034d Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 22:27:09 -0700 Subject: [PATCH 46/84] Enable ICMP on deployed Azure VMs --- pytest/conftest.py | 2 ++ pytest/node_plugin.py | 26 ++++++++++++++++++++++++++ pytest/testsuites/test_smoke.py | 2 -- 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/pytest/conftest.py b/pytest/conftest.py index 80269b41ab..be29b17567 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -22,6 +22,8 @@ def pytest_addoption(parser: Parser) -> None: default=False, help="Keeps deployed VMs cached between test runs, useful for developers.", ) + # TODO: Add “--lisa” (and “--debug” etc.) 
options which set up our + # defaults, instead of encoding them in the Makefile LINUX_SCRIPTS = Path("../Testscripts/Linux") diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 32dfe89f67..5c9f12979d 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -69,6 +69,27 @@ def create_boot_storage(location: str) -> str: return account +def allow_ping(name: str) -> None: + """Create NSG rules to enable ICMP ping. + + ICMP ping is disallowed by the Azure load balancer by default, but + there’s strong debate about if this is necessary, and our tests + like to check if the host is up using ping, so we create inbound + and outbound rules in the VM's network security group to allow it. + + """ + try: + for d in ["Inbound", "Outbound"]: + local.run( + f"az network nsg rule create --name allow{d}ICMP " + f"--nsg-name {name}NSG --priority 100 --resource-group {name}-rg " + f"--access Allow --direction '{d}' --protocol Icmp " + "--source-port-ranges '*' --destination-port-ranges '*'" + ) + except Exception as e: + logging.warning(f"Failed to create ICMP allow rules in NSG due to '{e}'") + + def deploy_vm( name: str, location: str = "westus2", @@ -108,11 +129,16 @@ def deploy_vm( f"--boot-diagnostics-storage {boot_storage}", "--generate-ssh-keys", ] + # TODO: Support setting up to NICs. if networking == "SRIOV": vm_command.append("--accelerated-networking true") data: Dict[str, str] = json.loads(local.run(" ".join(vm_command)).stdout) host = data["publicIpAddress"] + + allow_ping(name) + # TODO: Enable auto-shutdown 4 hours from deployment. + return host, data diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 0e1f157468..70bbe75af6 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -30,7 +30,6 @@ class TestSmoke: ping_flag = "-c 1" if platform.system() == "Linux" else "-n 1" def test_ping_1(self) -> None: - # TODO: Can’t ping by default, need to enable. 
r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) assert r.ok, f"Pinging {self.n.host} failed" @@ -49,7 +48,6 @@ def test_reboot(self) -> None: assert r.exited == -1, "While SSH worked, reboot failed" def test_ping_2(self) -> None: - # TODO: Can’t ping by default, need to enable. r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) assert r.ok, f"Pinging {self.n.host} failed" From 981559d9f46da8e161a6b14b8bd8abb0d9094623 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 16 Oct 2020 23:40:43 -0700 Subject: [PATCH 47/84] Add retry with exponential backoff after reboot --- pytest/testsuites/test_smoke.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 70bbe75af6..948818a5f6 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -2,10 +2,10 @@ import logging import platform import socket -import time from invoke.runners import Result # type: ignore from paramiko import SSHException # type: ignore +from tenacity import Retrying, stop_after_delay, wait_exponential # type: ignore import pytest from node_plugin import Node @@ -43,16 +43,24 @@ def test_reboot(self) -> None: except (TimeoutError, SSHException, socket.error) as e: logging.warning(f"SSH failed '{e}', using platform to reboot") self.n.platform_restart() - logging.info("Waiting 10 seconds for reboot to finish") - time.sleep(10) assert r.exited == -1, "While SSH worked, reboot failed" def test_ping_2(self) -> None: - r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) - assert r.ok, f"Pinging {self.n.host} failed" + for attempt in Retrying( + wait=wait_exponential(), stop=stop_after_delay(30) + ): # type: ignore + with attempt: + r: Result = self.n.local( + f"ping {self.ping_flag} {self.n.host}", warn=True + ) + assert r.ok, f"Pinging {self.n.host} failed" def test_ssh_2(self) -> None: - 
self.n.run("uptime") + for attempt in Retrying( + wait=wait_exponential(), stop=stop_after_delay(30) + ): # type: ignore + with attempt: + self.n.run("uptime") def test_serial_log(self) -> None: self.n.get_boot_diagnostics() From 310732e8d7dccb98c74462dfe5f24dfedb3129a6 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Sat, 17 Oct 2020 18:16:21 -0700 Subject: [PATCH 48/84] Replace tenacity with pytest-rerunfailures MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tenacity’s `stop_after_delay` seems to get confused underneath pytest, applying the delay as a total to all code using it, meaning the last test `get_boot_diagnostics` started always failing due to being canceled. There appears to be a similar issue (though fixed) in the library for async functions, where the state on their `RetryState` object doesn’t get reset properly. --- pytest/node_plugin.py | 2 -- pytest/poetry.lock | 35 +++++++++++++++------------------ pytest/pyproject.toml | 2 +- pytest/testsuites/test_smoke.py | 22 ++++++++------------- 4 files changed, 25 insertions(+), 36 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 5c9f12979d..1c4514876e 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -12,7 +12,6 @@ from fabric import Connection from invoke import Context from invoke.runners import Result # type: ignore -from tenacity import retry, stop_after_delay, wait_exponential # type: ignore import pytest @@ -158,7 +157,6 @@ def local(self, *args: Any, **kwargs: Any) -> Result: """This patches Fabric's 'local()' function to ignore SSH environment.""" return super(Connection, self).run(replace_env=False, env={}, *args, **kwargs) - @retry(wait=wait_exponential(), stop=stop_after_delay(60)) def get_boot_diagnostics(self) -> Result: """Gets the serial console logs.""" return self.local( diff --git a/pytest/poetry.lock b/pytest/poetry.lock index ac379eee62..35ffe4d457 100644 --- a/pytest/poetry.lock +++ 
b/pytest/poetry.lock @@ -472,6 +472,17 @@ filelock = ">=3.0" mypy = {version = ">=0.700", markers = "python_version >= \"3.8\""} pytest = ">=3.5" +[[package]] +name = "pytest-rerunfailures" +version = "9.1.1" +description = "pytest plugin to re-run tests to eliminate flaky failures" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +pytest = ">=5.0" + [[package]] name = "pytest-timeout" version = "1.4.2" @@ -550,20 +561,6 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -[[package]] -name = "tenacity" -version = "6.2.0" -description = "Retry code until it succeeds" -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -six = ">=1.9.0" - -[package.extras] -doc = ["reno", "sphinx", "tornado (>=4.5)"] - [[package]] name = "testfixtures" version = "6.15.0" @@ -612,7 +609,7 @@ python-versions = ">=3.6" [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "42ece43921b68dfda0693eb450c7da2797f9971ee0824cc4ab7752691fd71552" +content-hash = "700617dcc49319fa2ef80157ce05864824e81482bd31f36306be622aff7f385c" [metadata.files] appdirs = [ @@ -855,6 +852,10 @@ pytest-mypy = [ {file = "pytest-mypy-0.7.0.tar.gz", hash = "sha256:5a667d9a2b66bf98b3a494411f221923a6e2c3eafbe771104951aaec8985673d"}, {file = "pytest_mypy-0.7.0-py3-none-any.whl", hash = "sha256:e0505ace48d2b19fe686366fce6b4a2ac0d090423736bb6aa2e39554d18974b7"}, ] +pytest-rerunfailures = [ + {file = "pytest-rerunfailures-9.1.1.tar.gz", hash = "sha256:1cb11a17fc121b3918414eb5eaf314ee325f2e693ac7cb3f6abf7560790827f2"}, + {file = "pytest_rerunfailures-9.1.1-py3-none-any.whl", hash = "sha256:2eb7d0ad651761fbe80e064b0fd415cf6730cdbc53c16a145fd84b66143e609f"}, +] pytest-timeout = [ {file = "pytest-timeout-1.4.2.tar.gz", hash = "sha256:20b3113cf6e4e80ce2d403b6fb56e9e1b871b510259206d40ff8d609f48bda76"}, {file = "pytest_timeout-1.4.2-py2.py3-none-any.whl", hash = 
"sha256:541d7aa19b9a6b4e475c759fd6073ef43d7cdc9a92d95644c260076eb257a063"}, @@ -903,10 +904,6 @@ six = [ {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, ] -tenacity = [ - {file = "tenacity-6.2.0-py2.py3-none-any.whl", hash = "sha256:5a5d3dcd46381abe8b4f82b5736b8726fd3160c6c7161f53f8af7f1eb9b82173"}, - {file = "tenacity-6.2.0.tar.gz", hash = "sha256:29ae90e7faf488a8628432154bb34ace1cca58244c6ea399fd33f066ac71339a"}, -] testfixtures = [ {file = "testfixtures-6.15.0-py2.py3-none-any.whl", hash = "sha256:e17f4f526fc90b0ac9bc7f8ca62b7dec17d9faf3d721f56bda4f0fd94d02f85a"}, {file = "testfixtures-6.15.0.tar.gz", hash = "sha256:409f77cfbdad822d12a8ce5c4aa8fb4d0bb38073f4a5444fede3702716a2cec2"}, diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml index 68b90e4371..c14a068ac7 100644 --- a/pytest/pyproject.toml +++ b/pytest/pyproject.toml @@ -10,7 +10,7 @@ python = "^3.8" pytest = "^6.1.1" fabric = "^2.5.0" pytest-timeout = "^1.4.2" -tenacity = "^6.2.0" +pytest-rerunfailures = "^9.1.1" [tool.poetry.dev-dependencies] black = "^20.8b1" diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 948818a5f6..588f8b0a94 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -5,7 +5,6 @@ from invoke.runners import Result # type: ignore from paramiko import SSHException # type: ignore -from tenacity import Retrying, stop_after_delay, wait_exponential # type: ignore import pytest from node_plugin import Node @@ -29,10 +28,12 @@ class TestSmoke: # TODO: Move to ‘Node.ping()’ ping_flag = "-c 1" if platform.system() == "Linux" else "-n 1" + @pytest.mark.flaky(reruns=5, reruns_delay=5) def test_ping_1(self) -> None: r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) assert r.ok, f"Pinging {self.n.host} failed" + 
@pytest.mark.flaky(reruns=5, reruns_delay=5) def test_ssh_1(self) -> None: self.n.run("uptime") @@ -45,22 +46,15 @@ def test_reboot(self) -> None: self.n.platform_restart() assert r.exited == -1, "While SSH worked, reboot failed" + @pytest.mark.flaky(reruns=5, reruns_delay=5) def test_ping_2(self) -> None: - for attempt in Retrying( - wait=wait_exponential(), stop=stop_after_delay(30) - ): # type: ignore - with attempt: - r: Result = self.n.local( - f"ping {self.ping_flag} {self.n.host}", warn=True - ) - assert r.ok, f"Pinging {self.n.host} failed" + r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) + assert r.ok, f"Pinging {self.n.host} failed" + @pytest.mark.flaky(reruns=5, reruns_delay=5) def test_ssh_2(self) -> None: - for attempt in Retrying( - wait=wait_exponential(), stop=stop_after_delay(30) - ): # type: ignore - with attempt: - self.n.run("uptime") + self.n.run("uptime") + @pytest.mark.flaky(reruns=5, reruns_delay=5) def test_serial_log(self) -> None: self.n.get_boot_diagnostics() From 13329149660a87785c6e3b9b89eb014f8f524a8a Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Sat, 17 Oct 2020 20:50:23 -0700 Subject: [PATCH 49/84] Generate HTML report instead of JUnit (XML) --- pytest/Makefile | 6 +++--- pytest/conftest.py | 4 ++++ pytest/poetry.lock | 33 ++++++++++++++++++++++++++++++++- pytest/pyproject.toml | 1 + pytest/pytest.ini | 1 + 5 files changed, 41 insertions(+), 4 deletions(-) diff --git a/pytest/Makefile b/pytest/Makefile index 2548396019..72cb97df8b 100644 --- a/pytest/Makefile +++ b/pytest/Makefile @@ -10,17 +10,17 @@ run: # Run local tests test: - @poetry run python -m pytest -rA --capture=tee-sys --tb=short selftests/ + @poetry run python -m pytest --html=test.html -rA --capture=tee-sys --tb=short selftests/ # Run semantic analysis check: - @poetry run python -X dev -X tracemalloc -m pytest --flake8 --mypy -m 'flake8 or mypy' + @poetry run python -X dev -X tracemalloc -m pytest --html=check.html --flake8 
--mypy -m 'flake8 or mypy' clean: @poetry run python -m pytest --cache-clear --setup-plan smoke: - @poetry run python -m pytest --quiet --junit-xml=tests.xml --tb=line --show-capture=log -k smoke + @poetry run python -m pytest --quiet --html=smoke.html --self-contained-html --tb=line --show-capture=log -k smoke # Print current Python virtualenv venv: diff --git a/pytest/conftest.py b/pytest/conftest.py index be29b17567..68b1af6ba7 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -26,4 +26,8 @@ def pytest_addoption(parser: Parser) -> None: # defaults, instead of encoding them in the Makefile +def pytest_html_report_title(report): # type: ignore + report.title = "LISAv3 (Using Pytest) Results" + + LINUX_SCRIPTS = Path("../Testscripts/Linux") diff --git a/pytest/poetry.lock b/pytest/poetry.lock index 35ffe4d457..9e8582d0cd 100644 --- a/pytest/poetry.lock +++ b/pytest/poetry.lock @@ -459,6 +459,29 @@ python-versions = "*" flake8 = ">=3.5" pytest = ">=3.5" +[[package]] +name = "pytest-html" +version = "2.1.1" +description = "pytest plugin for generating HTML reports" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +pytest = ">=5.0" +pytest-metadata = "*" + +[[package]] +name = "pytest-metadata" +version = "1.10.0" +description = "pytest plugin for test session metadata" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" + +[package.dependencies] +pytest = ">=2.9.0" + [[package]] name = "pytest-mypy" version = "0.7.0" @@ -609,7 +632,7 @@ python-versions = ">=3.6" [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "700617dcc49319fa2ef80157ce05864824e81482bd31f36306be622aff7f385c" +content-hash = "42edf55535a6b5670f6c14a231f00ae5740743eac5d8599b7e32304b9983049e" [metadata.files] appdirs = [ @@ -848,6 +871,14 @@ pytest-flake8 = [ {file = "pytest-flake8-1.0.6.tar.gz", hash = 
"sha256:1b82bb58c88eb1db40524018d3fcfd0424575029703b4e2d8e3ee873f2b17027"}, {file = "pytest_flake8-1.0.6-py2.py3-none-any.whl", hash = "sha256:2e91578ecd9b200066f99c1e1de0f510fbb85bcf43712d46ea29fe47607cc234"}, ] +pytest-html = [ + {file = "pytest-html-2.1.1.tar.gz", hash = "sha256:6a4ac391e105e391208e3eb9bd294a60dd336447fd8e1acddff3a6de7f4e57c5"}, + {file = "pytest_html-2.1.1-py2.py3-none-any.whl", hash = "sha256:9e4817e8be8ddde62e8653c8934d0f296b605da3d2277a052f762c56a8b32df2"}, +] +pytest-metadata = [ + {file = "pytest-metadata-1.10.0.tar.gz", hash = "sha256:b7e6e0a45adacb17a03a97bf7a2ef60cc1f4e172bcce9732ce5e814191932315"}, + {file = "pytest_metadata-1.10.0-py2.py3-none-any.whl", hash = "sha256:fcbcc5781aee450107c620c79c57e50796b6777b82b3c504be9cbc3017201169"}, +] pytest-mypy = [ {file = "pytest-mypy-0.7.0.tar.gz", hash = "sha256:5a667d9a2b66bf98b3a494411f221923a6e2c3eafbe771104951aaec8985673d"}, {file = "pytest_mypy-0.7.0-py3-none-any.whl", hash = "sha256:e0505ace48d2b19fe686366fce6b4a2ac0d090423736bb6aa2e39554d18974b7"}, diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml index c14a068ac7..4e8b6ef2e0 100644 --- a/pytest/pyproject.toml +++ b/pytest/pyproject.toml @@ -11,6 +11,7 @@ pytest = "^6.1.1" fabric = "^2.5.0" pytest-timeout = "^1.4.2" pytest-rerunfailures = "^9.1.1" +pytest-html = "^2.1.1" [tool.poetry.dev-dependencies] black = "^20.8b1" diff --git a/pytest/pytest.ini b/pytest/pytest.ini index 5c6d5212b2..ee05d83097 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -7,6 +7,7 @@ log_cli = true log_cli_level = INFO log_cli_format = %(asctime)s %(levelname)s %(message)s log_cli_date_format = %Y-%m-%d %H:%M:%S +render_collapsed = true junit_logging = all timeout = 300 filterwarnings = From 5cba575afc001202bec507e4644d663293b5d276 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Sat, 17 Oct 2020 20:58:15 -0700 Subject: [PATCH 50/84] Add reports to gitignore --- pytest/.gitignore | 4 ++++ 1 file changed, 4 insertions(+) create mode 
100644 pytest/.gitignore diff --git a/pytest/.gitignore b/pytest/.gitignore new file mode 100644 index 0000000000..e1711c78f3 --- /dev/null +++ b/pytest/.gitignore @@ -0,0 +1,4 @@ +# Pytest report files +/*.xml +/*.html +/assets From 645785ca62bce01c57281c845a280bc75edad1b8 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Sat, 17 Oct 2020 22:19:26 -0700 Subject: [PATCH 51/84] Revert "Replace tenacity with pytest-rerunfailures" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit a9e4afffa26cc0d7275e6ef593225419afbaf716. The plugin is incompatible with class fixtures, so when the last test in a test class fails, the fixture is torn down (in this case, deleting the node) before the test is retried, which doesn’t work for us. --- pytest/node_plugin.py | 2 ++ pytest/poetry.lock | 35 ++++++++++++++++++--------------- pytest/pyproject.toml | 2 +- pytest/testsuites/test_smoke.py | 22 +++++++++++++-------- 4 files changed, 36 insertions(+), 25 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 1c4514876e..5c9f12979d 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -12,6 +12,7 @@ from fabric import Connection from invoke import Context from invoke.runners import Result # type: ignore +from tenacity import retry, stop_after_delay, wait_exponential # type: ignore import pytest @@ -157,6 +158,7 @@ def local(self, *args: Any, **kwargs: Any) -> Result: """This patches Fabric's 'local()' function to ignore SSH environment.""" return super(Connection, self).run(replace_env=False, env={}, *args, **kwargs) + @retry(wait=wait_exponential(), stop=stop_after_delay(60)) def get_boot_diagnostics(self) -> Result: """Gets the serial console logs.""" return self.local( diff --git a/pytest/poetry.lock b/pytest/poetry.lock index 9e8582d0cd..a665674e39 100644 --- a/pytest/poetry.lock +++ b/pytest/poetry.lock @@ -495,17 +495,6 @@ filelock = ">=3.0" mypy = {version = ">=0.700", markers = 
"python_version >= \"3.8\""} pytest = ">=3.5" -[[package]] -name = "pytest-rerunfailures" -version = "9.1.1" -description = "pytest plugin to re-run tests to eliminate flaky failures" -category = "main" -optional = false -python-versions = ">=3.5" - -[package.dependencies] -pytest = ">=5.0" - [[package]] name = "pytest-timeout" version = "1.4.2" @@ -584,6 +573,20 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +[[package]] +name = "tenacity" +version = "6.2.0" +description = "Retry code until it succeeds" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +six = ">=1.9.0" + +[package.extras] +doc = ["reno", "sphinx", "tornado (>=4.5)"] + [[package]] name = "testfixtures" version = "6.15.0" @@ -632,7 +635,7 @@ python-versions = ">=3.6" [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "42edf55535a6b5670f6c14a231f00ae5740743eac5d8599b7e32304b9983049e" +content-hash = "b46f526aeb728c87b2bb8e3cfb91d66634e9c81e82c0f70c2e48045eba56c915" [metadata.files] appdirs = [ @@ -883,10 +886,6 @@ pytest-mypy = [ {file = "pytest-mypy-0.7.0.tar.gz", hash = "sha256:5a667d9a2b66bf98b3a494411f221923a6e2c3eafbe771104951aaec8985673d"}, {file = "pytest_mypy-0.7.0-py3-none-any.whl", hash = "sha256:e0505ace48d2b19fe686366fce6b4a2ac0d090423736bb6aa2e39554d18974b7"}, ] -pytest-rerunfailures = [ - {file = "pytest-rerunfailures-9.1.1.tar.gz", hash = "sha256:1cb11a17fc121b3918414eb5eaf314ee325f2e693ac7cb3f6abf7560790827f2"}, - {file = "pytest_rerunfailures-9.1.1-py3-none-any.whl", hash = "sha256:2eb7d0ad651761fbe80e064b0fd415cf6730cdbc53c16a145fd84b66143e609f"}, -] pytest-timeout = [ {file = "pytest-timeout-1.4.2.tar.gz", hash = "sha256:20b3113cf6e4e80ce2d403b6fb56e9e1b871b510259206d40ff8d609f48bda76"}, {file = "pytest_timeout-1.4.2-py2.py3-none-any.whl", hash = "sha256:541d7aa19b9a6b4e475c759fd6073ef43d7cdc9a92d95644c260076eb257a063"}, @@ -935,6 +934,10 @@ six = [ {file = 
"six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, ] +tenacity = [ + {file = "tenacity-6.2.0-py2.py3-none-any.whl", hash = "sha256:5a5d3dcd46381abe8b4f82b5736b8726fd3160c6c7161f53f8af7f1eb9b82173"}, + {file = "tenacity-6.2.0.tar.gz", hash = "sha256:29ae90e7faf488a8628432154bb34ace1cca58244c6ea399fd33f066ac71339a"}, +] testfixtures = [ {file = "testfixtures-6.15.0-py2.py3-none-any.whl", hash = "sha256:e17f4f526fc90b0ac9bc7f8ca62b7dec17d9faf3d721f56bda4f0fd94d02f85a"}, {file = "testfixtures-6.15.0.tar.gz", hash = "sha256:409f77cfbdad822d12a8ce5c4aa8fb4d0bb38073f4a5444fede3702716a2cec2"}, diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml index 4e8b6ef2e0..dfa4d7df93 100644 --- a/pytest/pyproject.toml +++ b/pytest/pyproject.toml @@ -10,8 +10,8 @@ python = "^3.8" pytest = "^6.1.1" fabric = "^2.5.0" pytest-timeout = "^1.4.2" -pytest-rerunfailures = "^9.1.1" pytest-html = "^2.1.1" +tenacity = "^6.2.0" [tool.poetry.dev-dependencies] black = "^20.8b1" diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 588f8b0a94..948818a5f6 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -5,6 +5,7 @@ from invoke.runners import Result # type: ignore from paramiko import SSHException # type: ignore +from tenacity import Retrying, stop_after_delay, wait_exponential # type: ignore import pytest from node_plugin import Node @@ -28,12 +29,10 @@ class TestSmoke: # TODO: Move to ‘Node.ping()’ ping_flag = "-c 1" if platform.system() == "Linux" else "-n 1" - @pytest.mark.flaky(reruns=5, reruns_delay=5) def test_ping_1(self) -> None: r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) assert r.ok, f"Pinging {self.n.host} failed" - @pytest.mark.flaky(reruns=5, reruns_delay=5) def test_ssh_1(self) -> None: 
self.n.run("uptime") @@ -46,15 +45,22 @@ def test_reboot(self) -> None: self.n.platform_restart() assert r.exited == -1, "While SSH worked, reboot failed" - @pytest.mark.flaky(reruns=5, reruns_delay=5) def test_ping_2(self) -> None: - r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) - assert r.ok, f"Pinging {self.n.host} failed" + for attempt in Retrying( + wait=wait_exponential(), stop=stop_after_delay(30) + ): # type: ignore + with attempt: + r: Result = self.n.local( + f"ping {self.ping_flag} {self.n.host}", warn=True + ) + assert r.ok, f"Pinging {self.n.host} failed" - @pytest.mark.flaky(reruns=5, reruns_delay=5) def test_ssh_2(self) -> None: - self.n.run("uptime") + for attempt in Retrying( + wait=wait_exponential(), stop=stop_after_delay(30) + ): # type: ignore + with attempt: + self.n.run("uptime") - @pytest.mark.flaky(reruns=5, reruns_delay=5) def test_serial_log(self) -> None: self.n.get_boot_diagnostics() From bfd45f63efa36c9336428e642f3be5acd3c7123c Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Sun, 18 Oct 2020 15:10:43 -0700 Subject: [PATCH 52/84] Document use of Tenacity over pytest-rerunfailures --- pytest/README.md | 16 ++++++++++++++-- pytest/node_plugin.py | 4 ++-- pytest/testsuites/test_smoke.py | 6 +++--- 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/pytest/README.md b/pytest/README.md index dd7d3cd183..b82fbfb7f5 100644 --- a/pytest/README.md +++ b/pytest/README.md @@ -119,7 +119,7 @@ However, this is only one approach, and we may prefer to run the Python code on the user’s machine, with pytest-lisa instead providing the previously mentioned node fixtures, default marks, and requirements logic. 
-## Paramiko instead of Fabric +### Paramiko instead of Fabric The Paramiko library is less complex (smaller library footprint) than Fabric, as the latter wraps the former, but it is a bit more difficult to use, and doesn’t @@ -154,7 +154,8 @@ def test_lis_version(node: SSHClient) -> None: with Path("state.txt").open as f: assert f.readline() == "TestCompleted" ``` -## StringIO + +### StringIO For `Node.cat()` it would seem we could use `StringIO` like so: @@ -169,3 +170,14 @@ with StringIO() as result: However, the data returned by Paramiko is in bytes, which in Python 3 are not equivalent to strings, hence the existing implementation which uses `BytesIO` and decodes the bytes to a string. + +### pytest-rerunfailures instead of Tenacity + +Due to an open +[bug](https://github.com/pytest-dev/pytest-rerunfailures/issues/51) this popular +Pytest plugin is incompatible with module/class/session fixtures. What this +means is given a class of tests with a class fixture (say a shared `Node`), if +the last test is marked as flaky and is rerun, the class fixture is unexpectedly +torn down and then the test is rerun. That is, the rerun happens too late, and +the test is then performed against a new `Node`. So while slightly more verbose, +we’re back to using [Tenacity](https://github.com/jd/tenacity). 
diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 5c9f12979d..2eb642ba4d 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -12,7 +12,7 @@ from fabric import Connection from invoke import Context from invoke.runners import Result # type: ignore -from tenacity import retry, stop_after_delay, wait_exponential # type: ignore +from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore import pytest @@ -158,7 +158,7 @@ def local(self, *args: Any, **kwargs: Any) -> Result: """This patches Fabric's 'local()' function to ignore SSH environment.""" return super(Connection, self).run(replace_env=False, env={}, *args, **kwargs) - @retry(wait=wait_exponential(), stop=stop_after_delay(60)) + @retry(wait=wait_exponential(), stop=stop_after_attempt(5)) def get_boot_diagnostics(self) -> Result: """Gets the serial console logs.""" return self.local( diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 948818a5f6..279858c72b 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -5,7 +5,7 @@ from invoke.runners import Result # type: ignore from paramiko import SSHException # type: ignore -from tenacity import Retrying, stop_after_delay, wait_exponential # type: ignore +from tenacity import Retrying, stop_after_attempt, wait_exponential # type: ignore import pytest from node_plugin import Node @@ -47,7 +47,7 @@ def test_reboot(self) -> None: def test_ping_2(self) -> None: for attempt in Retrying( - wait=wait_exponential(), stop=stop_after_delay(30) + wait=wait_exponential(), stop=stop_after_attempt(5) ): # type: ignore with attempt: r: Result = self.n.local( @@ -57,7 +57,7 @@ def test_ping_2(self) -> None: def test_ssh_2(self) -> None: for attempt in Retrying( - wait=wait_exponential(), stop=stop_after_delay(30) + wait=wait_exponential(), stop=stop_after_attempt(5) ): # type: ignore with attempt: self.n.run("uptime") From 629e6c4c42ea4064821ea650e2ce4f333c5f425c Mon Sep 
17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 20 Oct 2020 11:29:29 -0700 Subject: [PATCH 53/84] Add `ping()` to Node --- pytest/node_plugin.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 2eb642ba4d..46be85a6c7 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -3,6 +3,7 @@ import json import logging +import platform import typing from io import BytesIO from uuid import uuid4 @@ -12,7 +13,7 @@ from fabric import Connection from invoke import Context from invoke.runners import Result # type: ignore -from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore +from tenacity import retry, stop_after_delay, wait_exponential # type: ignore import pytest @@ -158,13 +159,18 @@ def local(self, *args: Any, **kwargs: Any) -> Result: """This patches Fabric's 'local()' function to ignore SSH environment.""" return super(Connection, self).run(replace_env=False, env={}, *args, **kwargs) - @retry(wait=wait_exponential(), stop=stop_after_attempt(5)) + @retry(wait=wait_exponential(), stop=stop_after_delay(60)) def get_boot_diagnostics(self) -> Result: """Gets the serial console logs.""" return self.local( f"az vm boot-diagnostics get-boot-log -n {self.name} -g {self.name}-rg" ) + @retry(wait=wait_exponential(), stop=stop_after_delay(30)) + def ping(self, **kwargs: Any) -> Result: + flag = "-c 1" if platform.system() == "Linux" else "-n 1" + return self.local(f"ping {flag} {self.host}", **kwargs) + def platform_restart(self) -> Result: """TODO: Should this '--force' and redeploy?""" return self.local(f"az vm restart -n {self.name} -g {self.name}-rg") From 08007f38053a74f4c3a4d0db65080a9ffb96a50a Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 20 Oct 2020 13:52:42 -0700 Subject: [PATCH 54/84] Redo smoke test using single function --- pytest/README.md | 16 ++++++- pytest/testsuites/test_smoke.py | 78 +++++++++++++++++---------------- 2 files 
changed, 55 insertions(+), 39 deletions(-) diff --git a/pytest/README.md b/pytest/README.md index b82fbfb7f5..33770ab600 100644 --- a/pytest/README.md +++ b/pytest/README.md @@ -119,6 +119,8 @@ However, this is only one approach, and we may prefer to run the Python code on the user’s machine, with pytest-lisa instead providing the previously mentioned node fixtures, default marks, and requirements logic. +Note that pytest-dist can still be useful for locally running tests in parallel. + ### Paramiko instead of Fabric The Paramiko library is less complex (smaller library footprint) than Fabric, as @@ -171,7 +173,7 @@ However, the data returned by Paramiko is in bytes, which in Python 3 are not equivalent to strings, hence the existing implementation which uses `BytesIO` and decodes the bytes to a string. -### pytest-rerunfailures instead of Tenacity +### Tenacity instead of pytest-rerunfailures Due to an open [bug](https://github.com/pytest-dev/pytest-rerunfailures/issues/51) this popular @@ -181,3 +183,15 @@ the last test is marked as flaky and is rerun, the class fixture is unexpectedly torn down and then the test is rerun. That is, the rerun happens too late, and the test is then performed against a new `Node`. So while slightly more verbose, we’re back to using [Tenacity](https://github.com/jd/tenacity). + +### Function per test instead of class + +An option I explored to make an “executive summary” of the smoke test was to use +a class where each functionality was tested as individual function (meaning they +could fail independently without failing the whole smoke test), accompanied by a +class-scoped node fixture. This had its advantages, however, it was difficult to +parameterize and also overly verbose. We should instead keep each test as Pytest +intends: as a function. 
This allows the fixtures to be written in a simpler +manner (not rely on caching between functions) and allows parameterization using +the built-in decorator +[`@pytest.mark.parametrize`](https://docs.pytest.org/en/stable/parametrize.html). diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 279858c72b..315f500c15 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -1,19 +1,16 @@ """Runs a 'smoke' test for an Azure Linux VM deployment.""" import logging -import platform import socket from invoke.runners import Result # type: ignore from paramiko import SSHException # type: ignore -from tenacity import Retrying, stop_after_attempt, wait_exponential # type: ignore import pytest from node_plugin import Node @pytest.mark.deploy(setup="OneVM", vm_size="Standard_DS2_v2") -@pytest.mark.usefixtures("class_node") -class TestSmoke: +def test_smoke(urn: str, node: Node) -> None: """Check that a VM can be deployed and is responsive. 1. Deploy the VM (via 'node' fixture) and log it. @@ -22,45 +19,50 @@ class TestSmoke: 4. Attempt to reboot via SSH, otherwise use the platform. 5. Fetch the serial console logs. - """ + For commands where we expect a possible non-zero exit code, we + pass 'warn=True' to prevent it from throwing 'UnexpectedExit' and + we instead check its result at the end. + + SSH failures DO NOT fail this test. 
- n: Node + """ + logging.info("Pinging before reboot...") + ping1: Result = node.ping(warn=True) - # TODO: Move to ‘Node.ping()’ - ping_flag = "-c 1" if platform.system() == "Linux" else "-n 1" + ssh_errors = (TimeoutError, SSHException, socket.error) - def test_ping_1(self) -> None: - r: Result = self.n.local(f"ping {self.ping_flag} {self.n.host}", warn=True) - assert r.ok, f"Pinging {self.n.host} failed" + try: + logging.info("SSHing before reboot...") + ssh1: Result = node.run("uptime", warn=True) + except ssh_errors as e: + logging.warning(f"SSH before reboot failed: '{e}'") - def test_ssh_1(self) -> None: - self.n.run("uptime") + try: + logging.info("Rebooting...") + # If this succeeds, we should expect the exit code to be -1 + reboot: Result = node.sudo("reboot", warn=True) + except ssh_errors as e: + logging.warning(f"SSH failed, using platform to reboot: '{e}'") + node.platform_restart() + else: + if reboot.exited != -1: + logging.warning("While SSH worked, 'reboot' command failed") - def test_reboot(self) -> None: - try: - # If this succeeds, we should expect the exit code to be -1 - r: Result = self.n.sudo("reboot", warn=True) - except (TimeoutError, SSHException, socket.error) as e: - logging.warning(f"SSH failed '{e}', using platform to reboot") - self.n.platform_restart() - assert r.exited == -1, "While SSH worked, reboot failed" + logging.info("Pinging after reboot...") + ping2: Result = node.ping(warn=True) - def test_ping_2(self) -> None: - for attempt in Retrying( - wait=wait_exponential(), stop=stop_after_attempt(5) - ): # type: ignore - with attempt: - r: Result = self.n.local( - f"ping {self.ping_flag} {self.n.host}", warn=True - ) - assert r.ok, f"Pinging {self.n.host} failed" + try: + logging.info("SSHing after reboot...") + ssh2: Result = node.run("uptime", warn=True) + except ssh_errors as e: + logging.warning(f"SSH after reboot failed: '{e}'") - def test_ssh_2(self) -> None: - for attempt in Retrying( - wait=wait_exponential(), 
stop=stop_after_attempt(5) - ): # type: ignore - with attempt: - self.n.run("uptime") + logging.info("Retrieving boot diagnostics...") + node.get_boot_diagnostics() - def test_serial_log(self) -> None: - self.n.get_boot_diagnostics() + assert ping1.ok, f"Pinging {node.host} before reboot failed" + if not ssh1.ok: + logging.warning(f"SSH command '{ssh1.command}' before reboot failed") + assert ping2.ok, f"Pinging {node.host} after reboot failed" + if not ssh2.ok: + logging.warning(f"SSH command '{ssh2.command}' after reboot failed") From e228031e9011ee676a4300ded11d5e733c997648 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 20 Oct 2020 14:53:18 -0700 Subject: [PATCH 55/84] Demo test parameterization using smoke test --- pytest/node_plugin.py | 10 +++++++++- pytest/pytest.ini | 2 +- pytest/testsuites/test_smoke.py | 15 ++++++++++++++- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 46be85a6c7..57897ead64 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -120,6 +120,12 @@ def deploy_vm( ) local.run(f"az group create -n {name}-rg --location {location}") + # TODO: Accept EULA terms when necessary. Like: + # + # local.run(f"az vm image terms accept --urn {vm_image}") + # + # However, this command fails unless the terms exist and have yet + # to be accepted. vm_command = [ "az vm create", @@ -166,7 +172,7 @@ def get_boot_diagnostics(self) -> Result: f"az vm boot-diagnostics get-boot-log -n {self.name} -g {self.name}-rg" ) - @retry(wait=wait_exponential(), stop=stop_after_delay(30)) + @retry(wait=wait_exponential(), stop=stop_after_delay(60)) def ping(self, **kwargs: Any) -> Result: flag = "-c 1" if platform.system() == "Linux" else "-n 1" return self.local(f"ping {flag} {self.host}", **kwargs) @@ -199,6 +205,7 @@ def node(request: FixtureRequest) -> Iterator[Node]: delete_vm(name) +# TODO: Delete this and resurrect at a later date if we need it again. 
@pytest.fixture(scope="class") def class_node(request: FixtureRequest) -> Iterator[None]: key, name, host, data, fabric_config = get_node(request) @@ -278,4 +285,5 @@ def get_node( "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" } fabric_config = fabric.Config(overrides=ssh_config) + logging.info(f"Using VM at: '{host}'") return key, name, host, data, fabric_config diff --git a/pytest/pytest.ini b/pytest/pytest.ini index ee05d83097..6ade0166fe 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -9,7 +9,7 @@ log_cli_format = %(asctime)s %(levelname)s %(message)s log_cli_date_format = %Y-%m-%d %H:%M:%S render_collapsed = true junit_logging = all -timeout = 300 +timeout = 600 filterwarnings = error ignore:unclosed:ResourceWarning diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 315f500c15..98736d5f8a 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -8,8 +8,21 @@ import pytest from node_plugin import Node +# TODO: This is an example of leveraging Pytest’s parameterization +# support. We can implement a small YAML parser to read a playbook at +# runtime to generate this instead of using the below list. +params = [ + pytest.param(i, marks=pytest.mark.deploy(vm_image=i, vm_size="Standard_DS2_v2")) + for i in [ + "citrix:netscalervpx-130:netscalerbyol:latest", + "audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest", + "credativ:Debian:9:9.0.201706190", + "github:github-enterprise:github-enterprise:latest", + ] +] -@pytest.mark.deploy(setup="OneVM", vm_size="Standard_DS2_v2") + +@pytest.mark.parametrize("urn", params) def test_smoke(urn: str, node: Node) -> None: """Check that a VM can be deployed and is responsive. 
From ee14936f5eadd7f725451a579d480a00331f3c97 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 20 Oct 2020 16:34:16 -0700 Subject: [PATCH 56/84] Change SSH test to just connecting --- pytest/testsuites/test_smoke.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 98736d5f8a..6e7d523c78 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -46,7 +46,7 @@ def test_smoke(urn: str, node: Node) -> None: try: logging.info("SSHing before reboot...") - ssh1: Result = node.run("uptime", warn=True) + node.open() except ssh_errors as e: logging.warning(f"SSH before reboot failed: '{e}'") @@ -66,7 +66,7 @@ def test_smoke(urn: str, node: Node) -> None: try: logging.info("SSHing after reboot...") - ssh2: Result = node.run("uptime", warn=True) + node.open() except ssh_errors as e: logging.warning(f"SSH after reboot failed: '{e}'") @@ -74,8 +74,4 @@ def test_smoke(urn: str, node: Node) -> None: node.get_boot_diagnostics() assert ping1.ok, f"Pinging {node.host} before reboot failed" - if not ssh1.ok: - logging.warning(f"SSH command '{ssh1.command}' before reboot failed") assert ping2.ok, f"Pinging {node.host} after reboot failed" - if not ssh2.ok: - logging.warning(f"SSH command '{ssh2.command}' after reboot failed") From 91efe4e52e6c797317d1f99d713072b0d84104dd Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 20 Oct 2020 17:02:45 -0700 Subject: [PATCH 57/84] Fix timeout of reboot command MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Note that `warn=True` causes timeouts to be ignored. On some images, the invoked program (it’s not even a shell) just hangs waiting for manual input. So we set a proper timeout. 
--- pytest/node_plugin.py | 5 ++++- pytest/testsuites/test_smoke.py | 18 ++++++++++++------ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index 57897ead64..b711f7b4a7 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -33,7 +33,7 @@ "in_stream": False, # Don’t let remote commands take longer than five minutes # (unless later overridden). This is to prevent hangs. - "timeout": 300, + "command_timeout": 300, } } @@ -168,6 +168,9 @@ def local(self, *args: Any, **kwargs: Any) -> Result: @retry(wait=wait_exponential(), stop=stop_after_delay(60)) def get_boot_diagnostics(self) -> Result: """Gets the serial console logs.""" + # NOTE: Some images can cause the `az` CLI to crash because + # their logs aren’t UTF-8 encoded. I’ve filed a bug: + # https://github.com/Azure/azure-cli/issues/15590 return self.local( f"az vm boot-diagnostics get-boot-log -n {self.name} -g {self.name}-rg" ) diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 6e7d523c78..e443355e40 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -2,7 +2,7 @@ import logging import socket -from invoke.runners import Result # type: ignore +from invoke.runners import CommandTimedOut, Result, UnexpectedExit # type: ignore from paramiko import SSHException # type: ignore import pytest @@ -42,7 +42,7 @@ def test_smoke(urn: str, node: Node) -> None: logging.info("Pinging before reboot...") ping1: Result = node.ping(warn=True) - ssh_errors = (TimeoutError, SSHException, socket.error) + ssh_errors = (TimeoutError, CommandTimedOut, SSHException, socket.error) try: logging.info("SSHing before reboot...") @@ -50,15 +50,18 @@ def test_smoke(urn: str, node: Node) -> None: except ssh_errors as e: logging.warning(f"SSH before reboot failed: '{e}'") + reboot_exit = 0 try: logging.info("Rebooting...") # If this succeeds, we should expect the exit code to be -1 - reboot: Result = 
node.sudo("reboot", warn=True) + reboot_exit = node.sudo("reboot", timeout=5).exited except ssh_errors as e: logging.warning(f"SSH failed, using platform to reboot: '{e}'") node.platform_restart() - else: - if reboot.exited != -1: + except UnexpectedExit: + # TODO: How do we differentiate reboot working and the SSH + # connection disconnecting for other reasons? + if reboot_exit != -1: logging.warning("While SSH worked, 'reboot' command failed") logging.info("Pinging after reboot...") @@ -71,7 +74,10 @@ def test_smoke(urn: str, node: Node) -> None: logging.warning(f"SSH after reboot failed: '{e}'") logging.info("Retrieving boot diagnostics...") - node.get_boot_diagnostics() + if node.get_boot_diagnostics(warn=True).ok: + logging.info("See full report for boot diagnostics.") + else: + logging.warning("Retrieving boot diagnostics failed.") assert ping1.ok, f"Pinging {node.host} before reboot failed" assert ping2.ok, f"Pinging {node.host} after reboot failed" From 60b4dda054a052e7fb7fbfafd8e8b519cf15b7a0 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 21 Oct 2020 10:29:10 -0700 Subject: [PATCH 58/84] Improve retry logic and increase command timeouts --- pytest/node_plugin.py | 13 +++++++------ pytest/pytest.ini | 2 +- pytest/testsuites/test_smoke.py | 29 ++++++++++++++++++++++------- 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index b711f7b4a7..b5453eca1f 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -13,7 +13,7 @@ from fabric import Connection from invoke import Context from invoke.runners import Result # type: ignore -from tenacity import retry, stop_after_delay, wait_exponential # type: ignore +from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore import pytest @@ -33,7 +33,7 @@ "in_stream": False, # Don’t let remote commands take longer than five minutes # (unless later overridden). This is to prevent hangs. 
- "command_timeout": 300, + "command_timeout": 1200, } } @@ -165,17 +165,18 @@ def local(self, *args: Any, **kwargs: Any) -> Result: """This patches Fabric's 'local()' function to ignore SSH environment.""" return super(Connection, self).run(replace_env=False, env={}, *args, **kwargs) - @retry(wait=wait_exponential(), stop=stop_after_delay(60)) - def get_boot_diagnostics(self) -> Result: + @retry(reraise=True, wait=wait_exponential(), stop=stop_after_attempt(3)) + def get_boot_diagnostics(self, **kwargs: Any) -> Result: """Gets the serial console logs.""" # NOTE: Some images can cause the `az` CLI to crash because # their logs aren’t UTF-8 encoded. I’ve filed a bug: # https://github.com/Azure/azure-cli/issues/15590 return self.local( - f"az vm boot-diagnostics get-boot-log -n {self.name} -g {self.name}-rg" + f"az vm boot-diagnostics get-boot-log -n {self.name} -g {self.name}-rg", + **kwargs, ) - @retry(wait=wait_exponential(), stop=stop_after_delay(60)) + @retry(reraise=True, wait=wait_exponential(), stop=stop_after_attempt(3)) def ping(self, **kwargs: Any) -> Result: flag = "-c 1" if platform.system() == "Linux" else "-n 1" return self.local(f"ping {flag} {self.host}", **kwargs) diff --git a/pytest/pytest.ini b/pytest/pytest.ini index 6ade0166fe..37ec5db1b4 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -9,7 +9,7 @@ log_cli_format = %(asctime)s %(levelname)s %(message)s log_cli_date_format = %Y-%m-%d %H:%M:%S render_collapsed = true junit_logging = all -timeout = 600 +timeout = 1200 filterwarnings = error ignore:unclosed:ResourceWarning diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index e443355e40..4ee97e0bd8 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -1,6 +1,7 @@ """Runs a 'smoke' test for an Azure Linux VM deployment.""" import logging import socket +import time from invoke.runners import CommandTimedOut, Result, UnexpectedExit # type: ignore from paramiko import SSHException 
# type: ignore @@ -40,7 +41,11 @@ def test_smoke(urn: str, node: Node) -> None: """ logging.info("Pinging before reboot...") - ping1: Result = node.ping(warn=True) + ping1 = Result() + try: + ping1 = node.ping() + except UnexpectedExit: + logging.warning(f"Pinging {node.host} before reboot failed") ssh_errors = (TimeoutError, CommandTimedOut, SSHException, socket.error) @@ -64,8 +69,15 @@ def test_smoke(urn: str, node: Node) -> None: if reboot_exit != -1: logging.warning("While SSH worked, 'reboot' command failed") + logging.info("Sleeping for 10 seconds after reboot...") + time.sleep(10) + logging.info("Pinging after reboot...") - ping2: Result = node.ping(warn=True) + ping2 = Result() + try: + ping2 = node.ping() + except UnexpectedExit: + logging.warning(f"Pinging {node.host} after reboot failed") try: logging.info("SSHing after reboot...") @@ -74,10 +86,13 @@ def test_smoke(urn: str, node: Node) -> None: logging.warning(f"SSH after reboot failed: '{e}'") logging.info("Retrieving boot diagnostics...") - if node.get_boot_diagnostics(warn=True).ok: - logging.info("See full report for boot diagnostics.") - else: + try: + node.get_boot_diagnostics() + except UnexpectedExit: logging.warning("Retrieving boot diagnostics failed.") + else: + logging.info("See full report for boot diagnostics.") - assert ping1.ok, f"Pinging {node.host} before reboot failed" - assert ping2.ok, f"Pinging {node.host} after reboot failed" + # NOTE: The test criteria is to fail only if ping fails. + assert ping1.ok + assert ping2.ok From 004d80aaed8e58182a11ac89759ebcf6a79d6120 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 21 Oct 2020 10:40:05 -0700 Subject: [PATCH 59/84] Use pytest-rerunfailures in addition to Tenacity They can serve completely different purposes. In this case, re-running the whole test and reporting it as a second test run. 
--- pytest/README.md | 34 ++++++++++++++++++++++----------- pytest/poetry.lock | 17 ++++++++++++++++- pytest/pyproject.toml | 1 + pytest/testsuites/test_smoke.py | 1 + 4 files changed, 41 insertions(+), 12 deletions(-) diff --git a/pytest/README.md b/pytest/README.md index 33770ab600..778372dbba 100644 --- a/pytest/README.md +++ b/pytest/README.md @@ -173,17 +173,6 @@ However, the data returned by Paramiko is in bytes, which in Python 3 are not equivalent to strings, hence the existing implementation which uses `BytesIO` and decodes the bytes to a string. -### Tenacity instead of pytest-rerunfailures - -Due to an open -[bug](https://github.com/pytest-dev/pytest-rerunfailures/issues/51) this popular -Pytest plugin is incompatible with module/class/session fixtures. What this -means is given a class of tests with a class fixture (say a shared `Node`), if -the last test is marked as flaky and is rerun, the class fixture is unexpectedly -torn down and then the test is rerun. That is, the rerun happens too late, and -the test is then performed against a new `Node`. So while slightly more verbose, -we’re back to using [Tenacity](https://github.com/jd/tenacity). - ### Function per test instead of class An option I explored to make an “executive summary” of the smoke test was to use @@ -195,3 +184,26 @@ intends: as a function. This allows the fixtures to be written in a simpler manner (not rely on caching between functions) and allows parameterization using the built-in decorator [`@pytest.mark.parametrize`](https://docs.pytest.org/en/stable/parametrize.html). + +### Tenacity _and_ pytest-rerunfailures + +Due to an open +[bug](https://github.com/pytest-dev/pytest-rerunfailures/issues/51) this popular +Pytest plugin is incompatible with module/class/session fixtures. What this +means is given a class of tests with a class fixture (say a shared `Node`), if +the last test is marked as flaky and is rerun, the class fixture is unexpectedly +torn down and then the test is rerun. 
That is, the rerun happens too late, and +the test is then performed against a new `Node`. For this reason, to use this +plugin effectively tests would need to be contained to one function per test, +but as written above, that seems to be the best route. + +However, this plugin is otherwise very useful for marking tests as flaky, and is +already integrated with pytest-html such that reruns are reported correctly in +the report. + +For instances where particular parts of code are flaky and need to be rerun, +such as `ping`, we use the modern Python retry library, +[Tenacity](https://github.com/jd/tenacity), which has easy-to-use decorators to +retry functions (and context managers to use within functions), as well as good +wait and timeout support. The `ping()` function currently uses it with +exponential back-off to great effect. diff --git a/pytest/poetry.lock b/pytest/poetry.lock index a665674e39..6c4d1291ed 100644 --- a/pytest/poetry.lock +++ b/pytest/poetry.lock @@ -495,6 +495,17 @@ filelock = ">=3.0" mypy = {version = ">=0.700", markers = "python_version >= \"3.8\""} pytest = ">=3.5" +[[package]] +name = "pytest-rerunfailures" +version = "9.1.1" +description = "pytest plugin to re-run tests to eliminate flaky failures" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +pytest = ">=5.0" + [[package]] name = "pytest-timeout" version = "1.4.2" @@ -635,7 +646,7 @@ python-versions = ">=3.6" [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "b46f526aeb728c87b2bb8e3cfb91d66634e9c81e82c0f70c2e48045eba56c915" +content-hash = "d41a721b35ca455e53b898026dcd034d40c64d8837141393e335d4829e293c71" [metadata.files] appdirs = [ @@ -886,6 +897,10 @@ pytest-mypy = [ {file = "pytest-mypy-0.7.0.tar.gz", hash = "sha256:5a667d9a2b66bf98b3a494411f221923a6e2c3eafbe771104951aaec8985673d"}, {file = "pytest_mypy-0.7.0-py3-none-any.whl", hash = "sha256:e0505ace48d2b19fe686366fce6b4a2ac0d090423736bb6aa2e39554d18974b7"}, ] 
+pytest-rerunfailures = [ + {file = "pytest-rerunfailures-9.1.1.tar.gz", hash = "sha256:1cb11a17fc121b3918414eb5eaf314ee325f2e693ac7cb3f6abf7560790827f2"}, + {file = "pytest_rerunfailures-9.1.1-py3-none-any.whl", hash = "sha256:2eb7d0ad651761fbe80e064b0fd415cf6730cdbc53c16a145fd84b66143e609f"}, +] pytest-timeout = [ {file = "pytest-timeout-1.4.2.tar.gz", hash = "sha256:20b3113cf6e4e80ce2d403b6fb56e9e1b871b510259206d40ff8d609f48bda76"}, {file = "pytest_timeout-1.4.2-py2.py3-none-any.whl", hash = "sha256:541d7aa19b9a6b4e475c759fd6073ef43d7cdc9a92d95644c260076eb257a063"}, diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml index dfa4d7df93..a6499d9d10 100644 --- a/pytest/pyproject.toml +++ b/pytest/pyproject.toml @@ -12,6 +12,7 @@ fabric = "^2.5.0" pytest-timeout = "^1.4.2" pytest-html = "^2.1.1" tenacity = "^6.2.0" +pytest-rerunfailures = "^9.1.1" [tool.poetry.dev-dependencies] black = "^20.8b1" diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 4ee97e0bd8..bb807b07ab 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -24,6 +24,7 @@ @pytest.mark.parametrize("urn", params) +@pytest.mark.flaky(reruns=1) def test_smoke(urn: str, node: Node) -> None: """Check that a VM can be deployed and is responsive. 
From e1068c26fa297df1bfa74620467d54e3f6284af8 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 21 Oct 2020 10:40:36 -0700 Subject: [PATCH 60/84] Use East US 2 Azure region by default --- pytest/node_plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index b5453eca1f..f118f64843 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -93,7 +93,7 @@ def allow_ping(name: str) -> None: def deploy_vm( name: str, - location: str = "westus2", + location: str = "eastus2", vm_image: str = "UbuntuLTS", vm_size: str = "Standard_DS1_v2", setup: str = "", From eb91f8115071c949dc305de6ab9352e8b63b1cd7 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 21 Oct 2020 13:31:48 -0700 Subject: [PATCH 61/84] Move pytest/Makefile to root directory For simpler use. --- .github/workflows/ci-workflow.yaml | 6 +++--- Makefile | 27 +++++++++++++++++++++++++++ pytest/Makefile | 27 --------------------------- 3 files changed, 30 insertions(+), 30 deletions(-) create mode 100644 Makefile delete mode 100644 pytest/Makefile diff --git a/.github/workflows/ci-workflow.yaml b/.github/workflows/ci-workflow.yaml index f852e60dee..b37a79c046 100644 --- a/.github/workflows/ci-workflow.yaml +++ b/.github/workflows/ci-workflow.yaml @@ -34,10 +34,10 @@ jobs: echo "::add-path::$env:USERPROFILE\.poetry\bin" - name: Install Python dependencies - run: cd pytest && make setup + run: make setup - name: Run self tests - run: cd pytest && make test + run: make test - name: Run semantic analysis - run: cd pytest && make check + run: make check diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..bbb3479c4e --- /dev/null +++ b/Makefile @@ -0,0 +1,27 @@ +all: setup test run + +# Install Python packages +setup: + cd pytest && poetry install --no-ansi --remove-untracked + +# Run Pytest +run: + cd pytest && poetry run python -m pytest -rA --capture=tee-sys --tb=short + +# Run local tests +test: + 
cd pytest && poetry run python -m pytest --html=test.html -rA --capture=tee-sys --tb=short selftests/ + +# Run semantic analysis +check: + cd pytest && poetry run python -X dev -X tracemalloc -m pytest --html=check.html --flake8 --mypy -m 'flake8 or mypy' + +clean: + cd pytest && poetry run python -m pytest --cache-clear --setup-plan + +smoke: + cd pytest && poetry run python -m pytest --quiet --html=smoke.html --self-contained-html --tb=line --show-capture=log -k smoke + +# Print current Python virtualenv +venv: + cd pytest && poetry env list --no-ansi --full-path diff --git a/pytest/Makefile b/pytest/Makefile deleted file mode 100644 index 72cb97df8b..0000000000 --- a/pytest/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -all: setup test run - -# Install Python packages -setup: - @poetry install --no-ansi --remove-untracked - -# Run Pytest -run: - @poetry run python -m pytest -rA --capture=tee-sys --tb=short - -# Run local tests -test: - @poetry run python -m pytest --html=test.html -rA --capture=tee-sys --tb=short selftests/ - -# Run semantic analysis -check: - @poetry run python -X dev -X tracemalloc -m pytest --html=check.html --flake8 --mypy -m 'flake8 or mypy' - -clean: - @poetry run python -m pytest --cache-clear --setup-plan - -smoke: - @poetry run python -m pytest --quiet --html=smoke.html --self-contained-html --tb=line --show-capture=log -k smoke - -# Print current Python virtualenv -venv: - @poetry env list --no-ansi --full-path From d977b387b5984ee72c64ec1c8140828f8c830687 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 21 Oct 2020 17:19:24 -0700 Subject: [PATCH 62/84] Add PyYAML package --- pytest/poetry.lock | 23 ++++++++++++++++++++++- pytest/pyproject.toml | 1 + 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/pytest/poetry.lock b/pytest/poetry.lock index 6c4d1291ed..5d7c68ca9b 100644 --- a/pytest/poetry.lock +++ b/pytest/poetry.lock @@ -557,6 +557,14 @@ rope = ["rope (>0.10.5)"] test = ["versioneer", "pylint (>=2.5.0)", 
"pytest", "mock", "pytest-cov", "coverage", "numpy", "pandas", "matplotlib", "flaky", "pyqt5"] yapf = ["yapf"] +[[package]] +name = "pyyaml" +version = "5.3.1" +description = "YAML parser and emitter for Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + [[package]] name = "regex" version = "2020.9.27" @@ -646,7 +654,7 @@ python-versions = ">=3.6" [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "d41a721b35ca455e53b898026dcd034d40c64d8837141393e335d4829e293c71" +content-hash = "42b398ae9b15852176c7d822f2e27cfb2a50892e031b1e187475ffa0deabcef9" [metadata.files] appdirs = [ @@ -913,6 +921,19 @@ python-language-server = [ {file = "python-language-server-0.35.1.tar.gz", hash = "sha256:6e0c9a3b2ae98e0eb22e98ed6b3c4e190a6bf9e27af53efd2396da60cd92b221"}, {file = "python_language_server-0.35.1-py2.py3-none-any.whl", hash = "sha256:7051090259e3e81c0cdb140de8e32b8f11219808cda4427e6faf61f9ff9a3bf4"}, ] +pyyaml = [ + {file = "PyYAML-5.3.1-cp27-cp27m-win32.whl", hash = "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f"}, + {file = "PyYAML-5.3.1-cp27-cp27m-win_amd64.whl", hash = "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win32.whl", hash = "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win_amd64.whl", hash = "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win_amd64.whl", hash = 
"sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf"}, + {file = "PyYAML-5.3.1-cp38-cp38-win32.whl", hash = "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97"}, + {file = "PyYAML-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee"}, + {file = "PyYAML-5.3.1.tar.gz", hash = "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d"}, +] regex = [ {file = "regex-2020.9.27-cp27-cp27m-win32.whl", hash = "sha256:d23a18037313714fb3bb5a94434d3151ee4300bae631894b1ac08111abeaa4a3"}, {file = "regex-2020.9.27-cp27-cp27m-win_amd64.whl", hash = "sha256:84e9407db1b2eb368b7ecc283121b5e592c9aaedbe8c78b1a2f1102eb2e21d19"}, diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml index a6499d9d10..c75adfb2d8 100644 --- a/pytest/pyproject.toml +++ b/pytest/pyproject.toml @@ -13,6 +13,7 @@ pytest-timeout = "^1.4.2" pytest-html = "^2.1.1" tenacity = "^6.2.0" pytest-rerunfailures = "^9.1.1" +PyYAML = "^5.3.1" [tool.poetry.dev-dependencies] black = "^20.8b1" From 934827043193b14bd82384e5ed20e5a8ebe3ef45 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 21 Oct 2020 17:20:00 -0700 Subject: [PATCH 63/84] Add proof-of-concept YAML playbook parsing --- Makefile | 2 + pytest/conftest.py | 91 +++++++++++++++++++++++++++++++-- pytest/criteria.yaml | 18 +++++++ pytest/testsuites/test_smoke.py | 1 + 4 files changed, 109 insertions(+), 3 deletions(-) create mode 100644 pytest/criteria.yaml diff --git a/Makefile b/Makefile index bbb3479c4e..de8b29b295 100644 --- a/Makefile +++ b/Makefile @@ -19,6 +19,8 @@ check: clean: cd pytest && poetry run python -m pytest --cache-clear --setup-plan +yaml: + cd pytest && poetry run python -m pytest --collect-only --playbook=criteria.yaml smoke: cd pytest && poetry run python -m pytest --quiet --html=smoke.html --self-contained-html --tb=line --show-capture=log -k smoke diff --git a/pytest/conftest.py b/pytest/conftest.py index 
68b1af6ba7..274af6e3f3 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -3,9 +3,26 @@ https://docs.pytest.org/en/stable/writing_plugins.html """ +from __future__ import annotations + +import typing +from functools import partial from pathlib import Path -from _pytest.config.argparsing import Parser +import yaml + +try: + from yaml import CLoader as Loader +except ImportError: + from yaml import Loader # type: ignore + +if typing.TYPE_CHECKING: + from typing import List, Optional + + from _pytest.config import Config + from _pytest.config.argparsing import Parser + + from pytest import Item, Session pytest_plugins = "node_plugin" @@ -16,14 +33,82 @@ def pytest_addoption(parser: Parser) -> None: https://docs.pytest.org/en/latest/example/simple.html """ + # TODO: Add “--lisa” (and “--debug” etc.) options which set up our + # defaults, instead of encoding them in the Makefile parser.addoption( "--keep-vms", action="store_true", default=False, help="Keeps deployed VMs cached between test runs, useful for developers.", ) - # TODO: Add “--lisa” (and “--debug” etc.) options which set up our - # defaults, instead of encoding them in the Makefile + parser.addoption( + "--playbook", type=Path, help="Path to playbook of test selection criteria." + ) + + +def pytest_collection_modifyitems( + session: Session, config: Config, items: List[Item] +) -> None: + """Pytest hook for modifying the selected items (tests). 
+ + https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_collection_modifyitems + + """ + playbook_path: Optional[Path] = config.getoption("--playbook") + new_items: List[Item] = [] + force_exclude: List[Item] = [] + + def select_item(action: Optional[str], times: int, item: Item) -> None: + """Includes or excludes the item as appropriate.""" + if action == "forceExclude": + print(f" Forcing exclusion of item {item}") + force_exclude.append(item) + else: + print(f" Keeping {item} selected {times} times") + for _ in range(times - new_items.count(item)): + new_items.append(item) + + # TODO: Review, refactor, and fix logging. If we do schema + # validation and have reasonable defaults we can delete most of + # the `is not None` checks. Suggest using: + # https://pypi.org/project/schema/ + if playbook_path: + playbook = dict() + with open(playbook_path) as f: + playbook = yaml.load(f, Loader=Loader) + for play in playbook: + criteria = play.get("criteria") + if criteria is None: + print(f"Criteria missing, cannot parse play {play}") + continue + else: + print(f"Parsing criteria {criteria}") + select_action = play.get("select_action", "forceInclude") + times = play.get("times", 1) + select = partial(select_item, select_action, times) + + name = criteria.get("name") + priority = criteria.get("priority") + area = criteria.get("area") + for i in items: + marker = i.get_closest_marker("lisa") + if marker is None: + # TODO: This should be a warning. 
+ continue + lisa = marker.kwargs + if name is not None: + if i.name.startswith(name): + print(f" Selecting test {i} because name is {name}!") + select(i) + if priority is not None: + if lisa.get("priority") == priority: + print(f" Selecting test {i} because priority is {priority}!") + select(i) + if area and lisa.get("area"): + if lisa["area"].lower() == area: + print(f" Selecting test {i} because area is {area}!") + select(i) + items[:] = [i for i in new_items if i not in force_exclude] def pytest_html_report_title(report): # type: ignore diff --git a/pytest/criteria.yaml b/pytest/criteria.yaml new file mode 100644 index 0000000000..0758fa3f0a --- /dev/null +++ b/pytest/criteria.yaml @@ -0,0 +1,18 @@ +# NOTE: This is a proof-of-concept ask from Chi. + +# select all p0 cases +# for example, selected three cases: a,b,c +- criteria: + priority: 0 +# drop all cases of xdp, +# because it's not ready on a tested distro. +# for example, dropped c, so now is: a,b +- criteria: + area: xdp + # forceExclude means not to pick it up again in next rules. 
+ select_action: forceExclude +# run smoke_test cases twice, to prove a distro stable enough +# after this rule, the picked test cases is like a,b,b +- criteria: + name: test_smoke + times: 2 diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index bb807b07ab..fa0108493b 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -23,6 +23,7 @@ ] +@pytest.mark.lisa(priority=0) @pytest.mark.parametrize("urn", params) @pytest.mark.flaky(reruns=1) def test_smoke(urn: str, node: Node) -> None: From 92253e4e707888696483693ac9a63d966b676d8a Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 27 Oct 2020 12:12:55 -0700 Subject: [PATCH 64/84] Add basic user modes --- Makefile | 22 +++++++++++++--------- pytest/conftest.py | 43 +++++++++++++++++++++++++++++++------------ pytest/pytest.ini | 8 +++++++- 3 files changed, 51 insertions(+), 22 deletions(-) diff --git a/Makefile b/Makefile index de8b29b295..cd7c2ec581 100644 --- a/Makefile +++ b/Makefile @@ -5,24 +5,28 @@ setup: cd pytest && poetry install --no-ansi --remove-untracked # Run Pytest -run: - cd pytest && poetry run python -m pytest -rA --capture=tee-sys --tb=short +run: setup + cd pytest && poetry run pytest # Run local tests -test: - cd pytest && poetry run python -m pytest --html=test.html -rA --capture=tee-sys --tb=short selftests/ +test: setup + cd pytest && poetry run pytest --debug selftests/ # Run semantic analysis -check: - cd pytest && poetry run python -X dev -X tracemalloc -m pytest --html=check.html --flake8 --mypy -m 'flake8 or mypy' +check: setup + cd pytest && poetry run pytest --check +# Clear cache and show when each fixture would be setup and torn down. clean: - cd pytest && poetry run python -m pytest --cache-clear --setup-plan + cd pytest && poetry run pytest --cache-clear --setup-plan +# Demonstrate test selection via YAML playbook. 
yaml: - cd pytest && poetry run python -m pytest --collect-only --playbook=criteria.yaml + cd pytest && poetry run pytest --collect-only --playbook=criteria.yaml + +# Run the smoke test demo. smoke: - cd pytest && poetry run python -m pytest --quiet --html=smoke.html --self-contained-html --tb=line --show-capture=log -k smoke + cd pytest && poetry run pytest --demo -k smoke # Print current Python virtualenv venv: diff --git a/pytest/conftest.py b/pytest/conftest.py index 274af6e3f3..01f043e022 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -17,7 +17,7 @@ from yaml import Loader # type: ignore if typing.TYPE_CHECKING: - from typing import List, Optional + from typing import Any, Dict, List, Optional from _pytest.config import Config from _pytest.config.argparsing import Parser @@ -33,17 +33,36 @@ def pytest_addoption(parser: Parser) -> None: https://docs.pytest.org/en/latest/example/simple.html """ - # TODO: Add “--lisa” (and “--debug” etc.) options which set up our - # defaults, instead of encoding them in the Makefile - parser.addoption( - "--keep-vms", - action="store_true", - default=False, - help="Keeps deployed VMs cached between test runs, useful for developers.", - ) - parser.addoption( - "--playbook", type=Path, help="Path to playbook of test selection criteria." - ) + parser.addoption("--keep-vms", action="store_true", help="Keeps deployed VMs.") + parser.addoption("--check", action="store_true", help="Run semantic analysis.") + parser.addoption("--demo", action="store_true", help="Run in demo mode.") + parser.addoption("--playbook", type=Path, help="Path to test playbook.") + + +def pytest_configure(config: Config) -> None: + """Set default configurations passed on custom flags.""" + # Search ‘_pytest’ for ‘addoption’ to find these. + options: Dict[str, Any] = {} # See ‘pytest.ini’ for defaults. 
+ if config.getoption("--check"): + options.update( + { + "flake8": True, + "mypy": True, + "markexpr": "flake8 or mypy", + "reportchars": "fE", + } + ) + if config.getoption("--demo"): + options.update( + { + "html": "demo.html", + "no_header": True, + "showcapture": "log", + "tb": "line", + } + ) + for attr, value in options.items(): + setattr(config.option, attr, value) def pytest_collection_modifyitems( diff --git a/pytest/pytest.ini b/pytest/pytest.ini index 37ec5db1b4..543a90e16a 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -1,10 +1,16 @@ [pytest] +addopts = + --strict-markers + --self-contained-html + --capture=tee-sys + --tb=short + -rA markers = lisa deploy connect log_cli = true -log_cli_level = INFO +log_cli_level = WARNING log_cli_format = %(asctime)s %(levelname)s %(message)s log_cli_date_format = %Y-%m-%d %H:%M:%S render_collapsed = true From f8f73bf546923e12773ae70d5937bb21b6ec9c3d Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 27 Oct 2020 16:13:06 -0700 Subject: [PATCH 65/84] Draft the Technical Specification Document --- pytest/DESIGN.md | 704 ++++++++++++++++++++++++++++++++++++++++++ pytest/README.md | 218 ++----------- pytest/node_plugin.py | 1 + 3 files changed, 725 insertions(+), 198 deletions(-) create mode 100644 pytest/DESIGN.md diff --git a/pytest/DESIGN.md b/pytest/DESIGN.md new file mode 100644 index 0000000000..57acb4848b --- /dev/null +++ b/pytest/DESIGN.md @@ -0,0 +1,704 @@ +# LISAv3 Technical Specification Document + +This document outlines the technical specifications for LISAv3. We are +evaluating the feasibility of leveraging +[Pytest](https://docs.pytest.org/en/stable/) as our test runner. + +Please see [PR #1065](https://github.com/LIS/LISAv2/pull/1065) for a working, +proof-of-concept prototype. + +Authored by Andrew Schwartzmeyer (he/him), version 0.1.0. + +## Why Pytest? 
+ +Pytest is an [incredibly popular](https://docs.pytest.org/en/stable/talks.html) +MIT licensed open source Python testing framework. It has a thriving community +and plugin framework, with over 750 +[plugins](https://plugincompat.herokuapp.com/). Instead of writing (and +therefore maintaining) yet another test framework, we would do less with more by +reusing Pytest and existing plugins. This will allow us to focus on our unique +problems: organizing and understanding our tests, deploying necessary resources +(such as Azure or Hyper-V virtual machines), and analyzing our results. + +In fact, most of Pytest itself is implemented via [built-in +plugins](https://docs.pytest.org/en/stable/plugins.html), providing us with many +useful and well-documented examples. Furthermore, when others were confronted +with a problem similar to our own they also chose to use Pytest. +[Labgrid](https://github.com/labgrid-project/labgrid) is an open source embedded +board control library that delegated the testing framework logic to Pytest in +their [design](https://labgrid.readthedocs.io/en/latest/design_decisions.html), +and [U-Boot](https://github.com/u-boot/u-boot), an embedded board boot loader, +similarly leveraged Pytest in their +[tests](https://github.com/u-boot/u-boot/tree/master/test/py). KernelCI and +Avocado were also evaluated by the Labgrid developers at an [Embedded Linux +Conference](https://youtu.be/S0EJJM5bVUY) and both ruled out for reasons similar +to our own before they settled on Pytest. 
+ +The [fundamental features](https://youtu.be/CMuSn9cofbI) of Pytest match our +needs very well: + +* Automatic test discovery, no boiler-plate test code +* Useful information when a test fails (assertions are introspected) +* Test and fixture parameterization +* Modular setup/teardown via fixtures +* Incredibly customizable (as detailed above) + +So all the logic for describing, discovering, running, skipping based on +requirements, and reporting results of the tests is already written and +maintained by the greater open source community, leaving us to focus on our hard +and specific problem: creating an abstraction to launch the necessary nodes in +our environments. Using Pytest would also allow us the space to abstract other +commonalities in our specific tests. In this way, LISAv3 could solve the +difficulties we have at hand without creating yet another test framework. + +## High-Level Design Decisions + +### What are the User Modes? + +Because Pytest is infinitely customizable, we want to provide a few sets of +reasonable default configurations for some common scenarios. We will add a flag +like `--mode=[dev,debug,ci,demo]` to change the default options and output of +Pytest. Doing so is readily supported by Pytest via the `pytest_addoption` and +`pytest_configure` hooks. We call these the provided “user modes.” + +* The dev(eloper) mode is intended for use by test developers while writing a + new test. It is verbose, caches the deployed VMs between runs, and generates a + digestible [HTML](https://pypi.org/project/pytest-html/) report. + +* The debug mode is like dev mode but with all possible information shown, and + will open the Python debugger automatically on failures (which is provided by + Pytest with the `--pdb` flag). 
+ +* The CI mode will be fairly quiet on the console, showing all test results, but + putting the full info output into the generated report file (HTML for sharing + with humans and + [JUnit](https://docs.pytest.org/en/stable/_modules/_pytest/junitxml.html) for + the associated CI environment, which presents as native test results). + +* The demo mode will show the “executive summary” (a lot like CI, but finely + tuned for demos). For example, what `make smoke` currently shows. + +### How Are Tests Described? + +The built-in [pytest-mark](https://docs.pytest.org/en/stable/mark.html) plugin +already provides functionality for adding metadata to tests, where we +specifically want: + +* Platform: used to skip tests inapplicable to the current system-under-test +* Category: our high-level test organization +* Area: feature being tested (could default to module name) +* Priority: self-explanatory +* Tags: optional additional metadata for test organization + +We simply reuse this with minimal logic to enforce our required metadata, with +sane defaults (perhaps setting the area to the name of the module), and to list +statistics about our test coverage. This is already included in the prototype. +It looks like this: + +```python +import pytest + +@pytest.mark.lisa( + platform="Azure", category="Functional", area="LIS_DEPLOY", priority=0, tags=["lis"] +) +def test_lis_driver_version(node: Node) -> None: + """Checks that the installed drivers have the correct version.""" + ... +``` + +This is a functional example, which takes zero implementation. With this simple +decorator, all test collection hooks can introspect the metadata, enforce +required parameters and set defaults, select tests based on arbitrary criteria, +and list test coverage statistics. + +Note that Pytest leverages Python’s docstrings for built-in documentation (and +can even run tests discovered in such strings, like doctest). 
Being just Python +code, this decorator need not be `@pytest.mark.lisa(...)` but can trivially be +provided as simply `@lisa(...)`. + +This mark also does not need to be repeated for each test, as marks can be scoped to +a module, and so one line could describe defaults for every test in a file, with +individual tests overriding parameters as needed. We may also introduce marks +such as `@pytest.mark.slow` to allow for easier test selection. + +We even have a prototype +[generator](https://github.com/LIS/LISAv2/tree/pytest/generator) which parses +LISAv2 XML test descriptions and generates stubs with this mark filled in +correctly. + +### How Are Tests Selected? + +Pytest already allows a user to specify which exact tests to run: + +* Listing folders on the CLI (see below on where tests should live) +* Specifying a name expression on the CLI (e.g. `-k smoke and xdp`) +* Specifying a mark expression on the CLI (e.g. `-m functional and not slow`) + +We can also implement any other mechanism via the +`pytest_collection_modifyitems` hook. There’s already a +[proof-of-concept](https://github.com/LIS/LISAv2/blob/ab01c33f1f1e1ffac7100f6a69beda07192f05bb/pytest/conftest.py#L49) +which uses selection criteria read from a YAML file: + +```yaml +# Select all Priority 0 tests +- criteria: + priority: 0 +# Exclude all tests in Area "xdp" +- criteria: + area: xdp + select_action: forceExclude +# Run test with name `test_smoke` twice +- criteria: + name: test_smoke + times: 2 +``` + +However, before we settle on the basic schema understood by the +proof-of-concept, we should write and _review_ a full schema. + +### How Are Results Reported? + +Parsing the results of a large test suite can be difficult. Fortunately, because +Pytest is a testing framework, there already exists support for generating +excellent reports. 
For developers, the +[HTML](https://pypi.org/project/pytest-html/) report is easy to read: it is +self-contained, holds all the results and logs, and each test can be expanded +and collapsed. Tests which were rerun are recorded separately. For CI pipelines, +Pytest has integrated +[JUnit](https://docs.pytest.org/en/stable/_modules/_pytest/junitxml.html) XML +test report support. This is the standard method of reporting results to CI +servers like Jenkins and are natively parsed into the CI system’s built-in test +display page. Finally, Azure DevOps pipelines are even supported with a +community plugin +[pytest-azurepipelines](https://pypi.org/project/pytest-azurepipelines/) which +enhances the standard JUnit report for ADO. + +### How Are Nodes Provided and Accessed? + +First we need to define “node” as an instance of a system-under-test. That is, +given some environment requirements, such an Azure image (URN) and image (SKU), +a node would be a virtual machine deployed by Pytest with SSH access provided to +the tests. A node could optionally be deployed outside Pytest. + +Pytest uses [fixtures](https://docs.pytest.org/en/stable/fixture.html), which +are the primary way of setting up test requirements. They replace less flexible +alternatives like setup/teardown functions. It is through fixtures that we +implement remote node setup/teardown. 
Our node fixture currently provides: + +* Automatic provisioning of an Azure VM given URN and SKU +* Remote shell access via SSH +* Data including hostname / IP address for local tools +* Cross-platform ping functionality with exponential back-off +* Allowing ICMP ping via Azure firewall rules +* Platform API reboot +* Uploading of local files to arbitrary remote destinations +* Downloading of remote file contents into local string variable +* Downloading boot diagnostics (serial console log) from platform +* Asynchronous remote command execution with promises + +The prototype demonstrates how easy it is to quickly implement these features. +As we need more features, they can be readily added and shared among tests. + +Our abstraction leverages [Fabric](https://www.fabfile.org/) which is a popular +high-level Python library for executing shell commands on remote systems over +SSH. Underneath the covers it uses +[paramiko](https://docs.paramiko.org/en/stable/), the most popular low-level +Python SSH library. Fabric does the heavy lifting of safely connecting and +disconnecting from the node, executing the shell command (synchronously or +asynchronously), reporting the exit status, gathering the stdout and stderr, +providing stdin (or interactive auto-responses, similar to `expect`), uploading +and downloading files, and much more. In fact, these APIs are all available and +implemented for the local machine by the underlying +[Invoke](https://www.pyinvoke.org/) library, which is essentially a Python +`subprocess` wrapper with “a powerful and clean feature set.” + +Other test specific requirements, such as installing software and daemons, +downloading files from remote storage, or checking the state of our Bash test +scripts, would similarly be implemented by methods on the `Node` class or via +additional fixtures and thus shared among tests. + +For Azure, we use the [Azure CLI](https://aka.ms/azureclidocs) to deploy a +virtual machine. 
For Hyper-V (and other virtualization platforms), we would like +to use [libvirt](https://libvirt.org/python.html), and for embedded +environments we are evaluating +[labgrid](https://github.com/labgrid-project/labgrid). + +Tests do not need to explicitly call for a node to be provided, and we do not +need to write much code to setup this resource-provider logic. We simply define +a `Node` class and a Pytest fixture which returns one: + +```python +@pytest.fixture(scope="session") +def node(request: FixtureRequest) -> Iterator[Node]: + """Return the current node for any test which requests it.""" + with Node() as n: + yield n + +@pytest.mark.lisa(...) +def test_uptime(node: Node) -> None + """Automatically has access to the current node because of the argument.""" + # Runs `uname` via SSH and asserts it's Linux. + assert node.run("uname").stdout.strip() == "Linux" +``` + +When created, the `Node` instance either uses a cached node or deploys a new one +based on the given parameters (which can be provided at runtime). When the scope +of the fixture is exited (in this example, the test session), the `Node` +instance deletes its deployed resource unless requested not to by the user, +which is currently controlled by the `--keep-vms` flag. + +To provide the parameters to the node fixture, the prototype currently +implements a simple `@pytest.mark.deploy(...)` mark which takes `vm_image`, +`vm_size`, etc., and it’s applied to each function. This worked for the demo, +and proved the concept; however, we will want to provide a mechanism for +specifying lists of environments and their required resources to the tests at +runtime. This will likely be a YAML file that is parsed at initialization and +used to parameterize the node fixture itself, causing all the tests to be +executed for each environment. For more details, see the section “Where Does +Parameterization Happen?” + +See the Detailed Design Decisions below for what the `Node` class looks like. 
+ +#### Interaction with Azure + +We do not use the [Azure Python APIs](https://aka.ms/azsdk/python/all) directly +because they are more complicated (and less documented) than the [Azure +CLI](https://aka.ms/azureclidocs). With Invoke (as discussed above), `az` +becomes incredibly easy to work with. The Azure CLI lead developer states that +they have [feature parity](https://stackoverflow.com/a/50005660/1028665) and +that the CLI is more straightforward to use. Considering our ease-of-maintenance +requirement, this seems the apt choice. If it later becomes necessary to use the +Python APIs directly, that is, of course, still allowed by our design. + +### How Are Tests Timed Out? + +The [pytest-timeout](https://pypi.org/project/pytest-timeout/) plugin provides +integrated timeouts via `@pytest.mark.timeout()`, a configuration +file option, environment variable, and CLI flag. The Fabric library provides +timeouts in both the configuration and per-command usage. These are already used +to satisfaction in the prototype. + +### How Are Tests Organized? + +That is, what does a folder of tests map to: a platform, feature, or owner? + +In my opinion it is likely to be both. Tests which are common to a platform and +written by our team are probably best placed in a folder like `tests/azure` +whereas tests for a particular scenario which limits their image and SKU +applicability should be in a folder like `tests/acc`. It’s going to depend on +how often the tests are run together. + +Because Pytest can run tests and `conftest.py` files from arbitrary folders, +maintaining sets of tests and plugins separately from the base LISA repository +is easy. Custom repositories with new tests, plugins, fixtures, +platform-specific support, etc. can simply be cloned anywhere, and provided on +the command-line to Pytest. + +Test authors should keep tests which share requirements and are otherwise +similar to a single module (Python file). 
Not only is this well-organized, but +because marks can be applied at the module level, setting all the tests to be +skipped or expected to fail (with the built-in `skip` and `xfail` Pytest marks) +becomes even easier. + +An open question is if we really want to bring every test from LISAv2 directly +over, or if we should carefully analyze our tests to craft a new set of +high-level scenarios. An interesting result of reorganizing and rewriting the +tests would be the ability to have test layers, where the result of a high-level +test dictates if the tests below it should be skipped. If it passes, it implies +the tests underneath it would pass, and so skips them; but if it fails, the next +test below it runs and so on until a passing layer is found. + +### How Will We Port LISAv2 Tests? + +Given the above, we still must decide if we want to put the engineering effort +into porting _every_ LISAv2 test. However, the prototype started by porting the +`LIS-DRIVER-VERSION-CHECK` test, proving that tests which exclusively use Bash +scripts are trivially portable. Unfortunately, most tests use an associated +PowerShell script which is tightly coupled to the LISAv2 framework. + +We believe that it is _possible_ to port these tests without untoward +modifications. We would need to write a mock library that implements (or stubs +where appropriate) LISAv2 framework functionality such as +`Provision-VMsForLisa`, `Copy-RemoteFiles`, `Run-LinuxCmd`, etc., and provides +both the expected “global” objects and the test function parameters `AllVmData` +and `CurrentTestData`. + +This work needs to be done regardless of the approach we take with our framework +(leveraging Pytest or writing our own), and it is not inconsequential work. It +needs to be thoroughly planned and executed, and is certainly a ways off. + +### What Do Parallel Tests Mean? 
+ +While our original list of goals stated that we want to run tests “in parallel” +we were not specific about what was meant, and the topic of parallelism and +concurrency is understandably complex. We certainly don’t mean running two tests +at once on the same node, as this would undoubtedly lead to flaky tests. + +Assuming that we care about a set of tests passing on a particular image and +size combination, but not necessarily on a particular deployed instance, then we +can run tests concurrently by deploying multiple “identical” nodes and splitting +the tests across them. The tests would still run in isolation on each node. This +sounds hard, but actually it’s practically free with Pytest if the node fixture +is session scoped and we use +[pytest-xdist](https://pypi.org/project/pytest-xdist/) as described below. + +It’s also unlikely that we want to write our tests using the Async I/O pattern, +because we do not want tests to accidentally conflict with each other. While +[pytest-asyncio](https://pypi.org/project/pytest-asyncio/) exists, our +concurrency model is probably as described above: split the tests among multiple +identical nodes. + +### How Are Tests and Functions Retried? + +Testing remote instances is inherently flaky, so we take a two-pronged approach +to dealing with the flakiness. + +The [pytest-rerunfailures](https://pypi.org/project/pytest-rerunfailures/) +plugin will be used to easily mark a test itself as flaky. It has the nice +feature of recording each rerun in the produced report. It looks like this: + +```python +@pytest.mark.flaky(reruns=5) +def test_something_flaky(...): + """This fails most of the time.""" + ... +``` + +> Note that there is an open +> [bug](https://github.com/pytest-dev/pytest-rerunfailures/issues/51) in this +> plugin which can cause issues with fixtures using scopes other than “function” +> but it can be worked around. 
+ +The [Tenacity](https://tenacity.readthedocs.io/en/latest/) library should be +used to retry flaky functions that are not tests, such as downloading boot +diagnostics or pinging a node. As the modern Python retry library it has +easy-to-use decorators to retry functions (and context managers to use within +functions), as well as excellent wait and timeout support. It looks like this: + +```python +from tenacity import retry, stop_after_attempt, wait_exponential + +class Node: + ... + @retry(reraise=True, wait=wait_exponential(), stop=stop_after_attempt(3)) + def ping(self, **kwargs): + """Ping the node from the local system in a cross-platform manner.""" + flag = "-c 1" if platform.system() == "Linux" else "-n 1" + return self.local(f"ping {flag} {self.host}", **kwargs) + ... +``` + +We can additionally list a test twice when modifying the items collection, as +implemented in the criteria proof-of-concept. However, given the above +abilities, this may not be desired. + +### Where Does Parameterization Happen? + +Do we parameterize +[tests](https://docs.pytest.org/en/stable/parametrize.html#parametrizemark) or +[fixtures](https://docs.pytest.org/en/stable/fixture.html#fixture-parametrize)? + +This all comes down to how we want to use LISA. If we want to put a single +system under test at a time, and run all possible tests against it, then it +would make sense to parameterize the node fixture across the set of images to +test. I believe this to likely be the case. + +A parameterized node fixture would be session-scoped. This would enable us to +take advantage of [pytest-xdist](https://pypi.org/project/pytest-xdist/) for +running the tests concurrently against multiple nodes, where each forked runner +has its own node. Note that the cache key for deployed nodes will need to +include an identifier to separate the parallel runs, but this is available. 
+ +This approach would let us list a number of images and sizes (or a matrix +combination of them) and then run all requested tests against each of those. +However, it means that tests will need to be intelligent enough to [skip or +xfail](https://docs.pytest.org/en/stable/skipping.html) on systems where they do +not apply. This can be done in test code to start with. As commonalities are +realized they can be refactored into simple, reusable feature checks. + +Finally, while the base (and most common) case of tests which require one node +becomes trivially solved, we still have to deal with the edge cases of tests +which use two or three nodes. Determining the best course of action here +requires investigating how and when those tests are run, and if the node pair or +triple all use the same image and size. An easy solution would be to have a test +which requires a second or third node to simply deploy them through a +function-scoped fixture, and tear them down at the end. This may be costly in +terms of time if there are many of these tests and they run frequently, but for +long “performance” tests it would be an adequate option. Alternatively, we could +have a node pool that the session-scoped node fixture uses, where each node is +locked while in use. While this would take more engineering effort, it means we +could use the nodes for running tests concurrently, and “borrow” a runner when a +test needs another. + +Other ideas are welcome, but what we don’t want to do is change the environment +a user is expecting their tests to run in. I do not think that we should use a +“least common denominator” approach that collects feature requests and deploys +nodes which match those features, as the user will lose control over their +environment. We still want to enumerate features so tests can check if they’re +applicable, but the user’s environment request should be respected. 
+ +Alternatively, parameterizing tests means that each test (or module, or class, +as the fixture could no longer be session-scoped) specifies in some way (whether +in code or read at runtime from a file) what image/size combinations it should +run against. This generally eliminates having to check if it should skip, but +means that running the test suite will put multiple systems under test at once, +the results of which may be difficult to interpret. While this is a viable +route, it means maintaining a comprehensive list of which environments each +tests use, and I think feature-checking is more scalable. + +This is an open question which we need to settle as the two methods can +technically be combined, but we will want to be careful if we do this. + +Regardless of approach, we will want to write and _review_ a simple YAML schema +for specifying the system-under-test targets. As described above, the prototype +currently reads this information from a mark, but if we move forward with the +suggestion above, the scope of the node fixture will change to session and it +will become parameterized. Those parameters would be set at runtime by reading a +given YAML file. + +### When Do We Export a Plugin? + +The current prototype is simply using Pytest. All the implementation is in the files +`conftest.py` and `node_plugin.py`, the former of which is Pytest’s default +“user plugin” file. We likely want to create a proper `pytest-lisa` package +which provides our marks, fixtures, command-line parameters, user modes, and +hook modifications for reading YAML files. + +This requires more research as doing so is obviously not necessary but is nice. + +## Detailed Design Decisions + +This section contains truly technical specifications of our current plans to +bring the prototype to production. + +### Planned `Node` Class Refactor + +#### Basic Shape + +`Node` should still subclass `fabric.Connection`. 
It should be a partially +abstract class with platform-specific subclasses (Azure, libvirt, an embedded +device, etc.). However, the initializer and context manager methods _should not_ +need to be reimplemented by a platform subclass. Most added methods like +`ping()` and `reboot()` should also be shared. This is where static type +checking will help. + +An `Environment` class will be a collection of nodes in a group, for tests which +require multiple nodes. It is important that `Node` is self-contained and does +not require an `Environment` instance because the base case of most tests is to +use a `Node`. + +#### Caching + +A `Node` should be able to be cached. If `--keep-vms` is given to Pytest, it +should not delete the deployed VM resource and should instead cache its data so +that a subsequent invocation can connect directly to it. A `Node` should also be +able to connect directly to a system deployed outside Pytest, reusing the cache +hydration logic. The `init()` and `__exit__()` methods will handle checking and +updating the cache so that this logic is shared. + +Note that cross-session [caching](https://docs.pytest.org/en/stable/cache.html) +is provided by Pytest, and very easy to work with. The existing prototype +already implements `--keep-vms`. + +#### Initializing + +The `init()` method does the following: + +* Takes an optional group ID (provided by Environment for instance so that it’s + easy to create/deploy multiple nodes into one group) to generate its name and + deduce its group. + +* Checks the cache for the node’s key. + +* On a cache miss, calls `deploy()` and saves the returned host to the field + inherited from `Connection` and the rest of the platform-specific information + to a `data` dictionary field. Caches the data dictionary for the node’s key. + +* On a cache hit, saves the cached host and data to the instance. + +* Calls `super()` to setup `Connection` with our default Fabric configuration. 
+ +#### Deploy and Delete + +* The `deploy()` and `delete()` methods are abstract and implemented by + platform-specific node classes to actually deploy the VM. For Azure, note that + `deploy()` will check if the resource group exists, and if not, creates it. + For `delete()` it will check if it is the last VM in the group, and if so + deletes the group too. Again this is to keep `Environment` from being a + requirement. + +* The group ID is `pytest-{uuid4()}` (maybe with `pytest` being replaced by a + user- or run-specific short identifier). The ID should be returned by a static + method so that when an `Environment` creates a collection of nodes, it can + simply use the static method to generate a shared group ID. + +* The context manager’s `__exit__()` method calls `super()` to disconnect and + potentially `delete()` the VM. If it’s to be deleted, the key/value pair is + also removed from the cache. + +* Because of how Python’s context managers work, we may not need to reimplement + `__enter__()` but will want to check its inherited implementation. + +#### Common Tasks + +Common tasks for systems under tests like rebooting and pinging should be +implemented on the `Node` class. + +* Methods inherited from `Connection` include `run()`, `sudo()` and `local()` + which are used to easily run arbitrary commands, and `get()` and `put()` to + download and upload arbitrary files. + +* The `cat()` method (already implemented in the prototype) wraps `get()` and + returns the file as data in a string. This makes test code like this possible: + + ```python + assert node.cat("state.txt") == "TestCompleted" + ``` + +* Reboot should first try to use `self.sudo("reboot", timeout=5)` (with a short + timeout to avoid a hung SSH session). It should retry with an exponential + back-off to see if the machine has rebooted by checking either `uptime` or the + existence of a file created before the reboot. 
This is to avoid having to + `sleep()` and just guess the amount of time it takes to reboot. + +* Restart should “power cycle” the machine using the platform’s API, and thus is + in abstract method. It should optionally be able to redeploy the node too, + which can be used by tests which require a completely fresh node. + +* Note that the `local()` method is already overridden to patch Fabric so as to + ignore the provided SSH environment. This demonstrates that we can easily + provide necessary changes to users while still leveraging the library. For + instance, we may want an alternative to `run()` which, instead of taking a + string, takes a list of arguments and quotes them correctly so as to deal with + difficult shell quoting edge cases. + +* One new method we’ve already identified is `copy_scripts()` which will copy a + list of scripts to the node and mark them executable. It could even be a + context manager which deletes the scripts when exited. + +## Alternatives Considered + +### Writing Another Framework + +I believe the above set of technical specifications clearly describes how we can +leverage Pytest for our needs. Furthermore, the existing prototype proves this +is a viable option. Therefore I do not think we should consider writing and +maintaining a _new_ Python testing framework. We should avoid falling for “not +invented here” syndrome. The alternative prototype which does implement a new +framework required over five thousand lines of code, the Pytest-based prototype +used less than two hundred, or less than three percent. We do not want to take +on the maintenance cost of yet another framework, the maintenance cost of LISAv2 +already caused this mess in the first place. I think the work of prototyping +said new framework was valuable, as it provided insight into the eventual +technical design of LISAv3. 
+ +### Using Remote Capabilities of pytest-xdist + +With the [pytest-xdist plugin](https://github.com/pytest-dev/pytest-xdist) there +already exists support for running a folder of tests on an arbitrary remote host +via SSH. + +The LISA tests could be written as Python code suitable for running on the +target test system, which means direct access to the system in the test code +itself (subprocesses are still available, without having to use SSH within the +test, but would become far less necessary), something that is not possible with +any current prototype. Where the pytest-xdist plugin copies the package of code +to the target node and runs it, the pytest-lisa plugin could instantiate that +node (boot the necessary image on a remote machine or launch a new Hyper-V or +Azure VM, etc.) for the tests. + +However, this use of pytest-dist requires full Python support on the target +machines, and drastically changes how developers write tests. Furthermore, it +would not support running local commands against the remote node (like ping) or +running the test across a reboot of the node. Thus we do not want to use this +functionality of pytest-xdist. That said, pytest-xdist will still be useful for +running tests concurrently, as described above. + +### Using Paramiko Instead of Fabric + +The Paramiko library is less complex (smaller library footprint) than Fabric, as +the latter wraps the former, but it is a bit more difficult to use, and doesn’t +support reading existing SSH config files, nor does it support “ProxyJump” which +we use heavily. Fabric instead provides a clean high-level interface for +existing shell commands, handling all the connection abstractions for us. 
+ +Using Paramiko looked like this: + +```python +from pathlib import Path +from typing import List + +from paramiko import SSHClient + +import pytest + +@pytest.fixture +def node() -> SSHClient: + with SSHClient() as client: + client.load_system_host_keys() + client.connect(hostname="...") + yield client + + +def test_lis_version(node: SSHClient) -> None: + with node.open_sftp() as sftp: + for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: + sftp.put(LINUX_SCRIPTS / f, f) + _, stdout, stderr = node.exec_command("./LIS-VERSION-CHECK.sh") + sftp.get("state.txt", "state.txt") + with Path("state.txt").open as f: + assert f.readline() == "TestCompleted" +``` + +It is more verbose than necessary when compared to Fabric. + +### StringIO + +For `Node.cat()` it would seem we could use `StringIO` like so: + +```python +from io import StringIO + +with StringIO() as result: + node.get("state.txt", result) + assert result.getvalue().strip() == "TestCompleted" +``` + +However, the data returned by Paramiko is in bytes, which in Python 3 are not +equivalent to strings, hence the existing implementation which uses `BytesIO` +and decodes the bytes to a string. + +### Writing a Class of Individual Test Methods + +An option I explored to make an “executive summary” of the smoke test was to use +a class where each functionality was tested as individual function (meaning they +could fail independently without failing the whole smoke test), accompanied by a +class-scoped node fixture. This had its advantages, however, it was difficult to +parameterize and also overly verbose. We should instead keep each test as Pytest +intends: as a function. This allows the fixtures to be written in a simpler +manner (not rely on caching between functions) and allows +[parameterization](https://docs.pytest.org/en/stable/parametrize.html) using the +built-in decorator `@pytest.mark.parametrize`. 
+ +However, this decision may be reconsidered if we session-scope and parameterize +the `Node` fixture, in which case these issues are resolved. + +## What Else? + +There’s still a lot more to think about and design. A non-exhaustive list of +future topics (some touched on above): + +* Tests inventory (generating statistics from metadata) +* ARM template support (with Azure CLI) +* Servicing Azure CLI (how stable is their API?) +* libvirt driver support (gives us Hyper-V and more) +* Duration reporting (built-in) +* Self-documentation (via Pydoc) +* Environment class design +* Feature requests (NICs in particular) +* Selection and targets YAML schema +* Secret management +* External results reporting (database and emails) +* Embedded systems / bare metal support +* Managing Python `logging` records +* Managing shell command stdout/stderr diff --git a/pytest/README.md b/pytest/README.md index 778372dbba..c564d256cf 100644 --- a/pytest/README.md +++ b/pytest/README.md @@ -1,209 +1,31 @@ # LISAv3 via pytest-lisa -[Pytest](https://docs.pytest.org/en/stable/) is an [incredibly -popular](https://docs.pytest.org/en/stable/talks.html) MIT licensed open source -Python testing framework. It has a thriving community and plugin framework, with -[over 750 plugins](https://plugincompat.herokuapp.com/). There is even a YAML -example of writing a Domain Specific Language -[DSL](https://docs.pytest.org/en/stable/example/nonpython.html#yaml-plugin) for -specifying tests. Instead of writing yet another test framework, LISAv3 could be -written as pytest-lisa, a [plugin for -Pytest](https://docs.pytest.org/en/stable/writing_plugins.html) which implements -our requirements. In fact, most of Pytest itself is implemented via [built-in -plugins](https://docs.pytest.org/en/stable/plugins.html), providing us with a -lot to leverage. 
+Basic instructions for testing the prototype: -The [fundamental features](https://www.youtube.com/watch?v=CMuSn9cofbI) of -Pytest match our needs very well: +```bash +# Install Poetry, make sure `poetry` is in your `PATH` +curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python -* Automatic test discovery, no boiler-plate test code -* Useful information when a test fails (assertions are introspected) -* Test parameterization -* Modular setup/teardown via fixtures -* Customizable (as detailed above) +# Install Azure CLI, make sure `az` is in your `PATH` +curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash -So all the logic for discovering, running, skipping based on requirements, and -reporting the tests is already written and maintained by the greater open source -community, leaving us to focus on the hard and unique problem: creating an API -to launch the necessary nodes. It would also allow us the space to abstract the -installation of tools required by tests. In this way, LISAv3 could solve the -difficulties we have at hand without creating yet another unit test framework. +# Login and set subscription +az login +az account set -s -## Design +# Clone LISAv2 with the Pytest prototype +git clone -b pytest/main https://github.com/LIS/LISAv2.git +cd LISAv2 -### pytest-mark +# Install Python packages +make setup -The [pytest-mark](https://docs.pytest.org/en/stable/mark.html) already provides -functionality for adding metadata to tests, where we specifically want: +# Run some local demos +make test +make yaml -* Owner -* Category -* Area -* Tags -* Priority - -We could simply reuse this built-in plugin with minimal logic to enforce our -required metadata, with sane defaults (such as setting the area to the name of -the module), and to list statistics about our test coverage. 
- -It also through pytest-mark that [skipping -functionality](https://docs.pytest.org/en/stable/skipping.html) exists, which we -would leverage for ensuring our environmental requirements are met. - -Note that Pytest leverages Python’s docstrings for built-in documentation (and -can even run tests discovered in such strings, like doctest). - -### Fixtures - -Pytest supports [fixtures](https://docs.pytest.org/en/stable/fixture.html), -which are the primary way of setting up test requirements. They replace less -flexible alternatives like setup/teardown functions. It is through fixtures that -pytest-lisa would implement remote node setup/teardown. Our node fixture would -implement (with more as found to be required): - -* Provision a node based on parameterized requirements -* Reboot the node if requested -* Run a command (perhaps asynchronously) on the node using SSH -* Download and upload files to the node (with retries and timeouts) - -Our abstraction leverages -[Fabric](https://docs.fabfile.org/en/stable/index.html), which uses -[paramiko](https://docs.paramiko.org/en/stable/) underneath, directly to -implement the SSH commands. For deployment logic, it uses the [`az` -CLI](https://aka.ms/azureclidocs), wrapped by Fabric. For Hyper-V (and other -virtualization platforms), it could use -[libvirt](https://libvirt.org/python.html). - -Other test specific requirements, such as installing software and daemons, -downloading files from remote storage, or checking the state of our Bash test -scripts, would similarly be implemented via fixtures and shared among tests. - -### Test result output - -Instead of writing our own test result output, we can leverage existing plugins. -For instance, there already exists -[pytest-azurepipelines](https://pypi.org/project/pytest-azurepipelines/) which -transforms results into the format consumed by ADO. It has over 90,000 downloads -a month. We don’t need to rewrite this. 
- -## Alternatives considered - -### Azure Python APIs instead of `az` CLI - -We do not use the [Azure Python APIs](https://aka.ms/azsdk/python/all) directly -because they are more complicated (and less documented) than the `az` CLI. Given -Fabric (and its underlying Invoke library), the CLI becomes incredibly easy to -work with. The `az` CLI lead developer states that they have [feature -parity](https://stackoverflow.com/a/50005660/1028665) and that the CLI is more -straightforward to use. Considering our ease-of-maintenance requirement, this -seems the apt choice. - -### pytest-xdist - -With the [pytest-xdist plugin](https://github.com/pytest-dev/pytest-xdist) there -already exists support for running a folder of tests on an arbitrary remote host -via SSH. - -The LISA tests could be written as Python code suitable for running on the -target test system, which means direct access to the system in the test code -itself (subprocesses are still available, without having to use SSH within the -test, but would become far less necessary), something that is not possible with -the current prototype. Where the pytest-xdist plugin copies the package of code -to the target node and runs it, the pytest-lisa plugin could instantiate that -node (boot the necessary image on a remote machine or launch a new Hyper-V or -Azure VM, etc.) for the tests. YAML playbooks (AKA “runbooks” in the current -prototype) could be interpreted by the pytest-lisa plugin to determine how to -create those nodes. - -However, this is only one approach, and we may prefer to run the Python code on -the user’s machine, with pytest-lisa instead providing the previously mentioned -node fixtures, default marks, and requirements logic. - -Note that pytest-dist can still be useful for locally running tests in parallel. 
- -### Paramiko instead of Fabric - -The Paramiko library is less complex (smaller library footprint) than Fabric, as -the latter wraps the former, but it is a bit more difficult to use, and doesn’t -support reading existing SSH config files, nor does it support “ProxyJump” which -we use heavily. Fabric instead provides a clean high-level interface for -existing shell commands, handling all the connection abstractions for us. - -It looked a like this: - -```python -from pathlib import Path -from typing import List - -from paramiko import SSHClient - -import pytest - -@pytest.fixture -def node() -> SSHClient: - with SSHClient() as client: - client.load_system_host_keys() - client.connect(hostname="...") - yield client - - -def test_lis_version(node: SSHClient) -> None: - with node.open_sftp() as sftp: - for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: - sftp.put(LINUX_SCRIPTS / f, f) - _, stdout, stderr = node.exec_command("./LIS-VERSION-CHECK.sh") - sftp.get("state.txt", "state.txt") - with Path("state.txt").open as f: - assert f.readline() == "TestCompleted" +# Run a demo which deployes Azure resources +make smoke ``` -### StringIO - -For `Node.cat()` it would seem we could use `StringIO` like so: - -```python -from io import StringIO - -with StringIO() as result: - node.get("state.txt", result) - assert result.getvalue().strip() == "TestCompleted" -``` - -However, the data returned by Paramiko is in bytes, which in Python 3 are not -equivalent to strings, hence the existing implementation which uses `BytesIO` -and decodes the bytes to a string. - -### Function per test instead of class - -An option I explored to make an “executive summary” of the smoke test was to use -a class where each functionality was tested as individual function (meaning they -could fail independently without failing the whole smoke test), accompanied by a -class-scoped node fixture. This had its advantages, however, it was difficult to -parameterize and also overly verbose. 
We should instead keep each test as Pytest -intends: as a function. This allows the fixtures to be written in a simpler -manner (not rely on caching between functions) and allows parameterization using -the built-in decorator -[`@pytest.mark.parametrize`](https://docs.pytest.org/en/stable/parametrize.html). - -### Tenacity _and_ pytest-rerunfailures - -Due to an open -[bug](https://github.com/pytest-dev/pytest-rerunfailures/issues/51) this popular -Pytest plugin is incompatible with module/class/session fixtures. What this -means is given a class of tests with a class fixture (say a shared `Node`), if -the last test is marked as flaky and is rerun, the class fixture is unexpectedly -torn down and then the test is rerun. That is, the rerun happens too late, and -the test is then performed against a new `Node`. For this reason, to use this -plugin effectively tests would need to be contained to one function per test, -but as written above, that seems to be the best route. - -However, this plugin is otherwise very useful for marking tests as flaky, and is -already integrated with pytest-html such that reruns are reported correctly in -the report. - -For instances where particular parts of code are flaky and need to be rerun, -such as `ping`, we use the modern Python retry library, -[Tenacity](https://github.com/jd/tenacity), which has easy-to-use decorators to -retry functions (and context managers to use within functions), as well as good -wait and timeout support. The `ping()` function currently uses it with -exponential back-off to great effect. +See the [design document](DESIGN.md) for details. 
diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py index f118f64843..b2b3cc0a3d 100644 --- a/pytest/node_plugin.py +++ b/pytest/node_plugin.py @@ -178,6 +178,7 @@ def get_boot_diagnostics(self, **kwargs: Any) -> Result: @retry(reraise=True, wait=wait_exponential(), stop=stop_after_attempt(3)) def ping(self, **kwargs: Any) -> Result: + """Ping the node from the local system in a cross-platform manner.""" flag = "-c 1" if platform.system() == "Linux" else "-n 1" return self.local(f"ping {flag} {self.host}", **kwargs) From bb28a64f7c5d8821d4f5f2b2d5606bb8aff87667 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 3 Nov 2020 12:54:11 -0800 Subject: [PATCH 66/84] Basic automatic grouping of tests based on feature requirement --- Makefile | 2 +- pytest/selftests/setup_plan/__init__.py | 0 pytest/selftests/setup_plan/conftest.py | 6 ++++++ pytest/selftests/setup_plan/test_plan_A.py | 16 ++++++++++++++++ pytest/selftests/setup_plan/test_plan_B.py | 16 ++++++++++++++++ 5 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 pytest/selftests/setup_plan/__init__.py create mode 100644 pytest/selftests/setup_plan/conftest.py create mode 100644 pytest/selftests/setup_plan/test_plan_A.py create mode 100644 pytest/selftests/setup_plan/test_plan_B.py diff --git a/Makefile b/Makefile index cd7c2ec581..bb12c2e28c 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ run: setup # Run local tests test: setup - cd pytest && poetry run pytest --debug selftests/ + cd pytest && poetry run pytest --debug --setup-show selftests/ # Run semantic analysis check: setup diff --git a/pytest/selftests/setup_plan/__init__.py b/pytest/selftests/setup_plan/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pytest/selftests/setup_plan/conftest.py b/pytest/selftests/setup_plan/conftest.py new file mode 100644 index 0000000000..86f2de80fc --- /dev/null +++ b/pytest/selftests/setup_plan/conftest.py @@ -0,0 +1,6 @@ +import pytest + + 
+@pytest.fixture(scope="session", params=["xdp", "gpu", "rdma"]) +def feature(request) -> str: + yield request.param diff --git a/pytest/selftests/setup_plan/test_plan_A.py b/pytest/selftests/setup_plan/test_plan_A.py new file mode 100644 index 0000000000..dcc713205d --- /dev/null +++ b/pytest/selftests/setup_plan/test_plan_A.py @@ -0,0 +1,16 @@ +import pytest + + +def test_xdp_a(feature) -> None: + if feature != "xdp": + pytest.skip("Required feature missing") + + +def test_gpu_a(feature) -> None: + if feature != "gpu": + pytest.skip("Required feature missing") + + +def test_rdma_a(feature) -> None: + if feature != "rdma": + pytest.skip("Required feature missing") diff --git a/pytest/selftests/setup_plan/test_plan_B.py b/pytest/selftests/setup_plan/test_plan_B.py new file mode 100644 index 0000000000..2104200ad5 --- /dev/null +++ b/pytest/selftests/setup_plan/test_plan_B.py @@ -0,0 +1,16 @@ +import pytest + + +def test_xdp_b(feature) -> None: + if feature != "xdp": + pytest.skip("Required feature missing") + + +def test_gpu_b(feature) -> None: + if feature != "gpu": + pytest.skip("Required feature missing") + + +def test_rdma_b(feature) -> None: + if feature != "rdma": + pytest.skip("Required feature missing") From 5eaf157a96cee732a25594c7656ab3678207d49a Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 3 Nov 2020 16:42:52 -0800 Subject: [PATCH 67/84] Slightly extended example which removes instead of skips tests --- pytest/pytest.ini | 1 + pytest/selftests/setup_plan/conftest.py | 29 +++++++++++++++++++++- pytest/selftests/setup_plan/test_plan_A.py | 18 +++++++------- pytest/selftests/setup_plan/test_plan_B.py | 18 +++++++------- pytest/selftests/setup_plan/test_plan_C.py | 16 ++++++++++++ 5 files changed, 63 insertions(+), 19 deletions(-) create mode 100644 pytest/selftests/setup_plan/test_plan_C.py diff --git a/pytest/pytest.ini b/pytest/pytest.ini index 543a90e16a..b242158aa3 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -9,6 +9,7 @@ 
markers = lisa deploy connect + feature log_cli = true log_cli_level = WARNING log_cli_format = %(asctime)s %(levelname)s %(message)s diff --git a/pytest/selftests/setup_plan/conftest.py b/pytest/selftests/setup_plan/conftest.py index 86f2de80fc..854916983e 100644 --- a/pytest/selftests/setup_plan/conftest.py +++ b/pytest/selftests/setup_plan/conftest.py @@ -1,6 +1,33 @@ +"""Proof-of-concept to schedule a comprehensive test plan.""" +from __future__ import annotations + +import typing + import pytest +if typing.TYPE_CHECKING: + from typing import List + + from _pytest.config import Config + + from pytest import Item, Session + + +def pytest_collection_modifyitems( + session: Session, config: Config, items: List[Item] +) -> None: + """For each item keep only instances using the feature.""" + keep: List[Item] = [] + for item in items: + marker = item.get_closest_marker("feature") + if marker is None: + continue + feature = marker.args[0] + if item.name.endswith(f"[{feature}]"): + keep.append(item) + items[:] = keep + -@pytest.fixture(scope="session", params=["xdp", "gpu", "rdma"]) +@pytest.fixture(scope="session", autouse=True, params=["xdp", "gpu", "rdma"]) def feature(request) -> str: yield request.param diff --git a/pytest/selftests/setup_plan/test_plan_A.py b/pytest/selftests/setup_plan/test_plan_A.py index dcc713205d..d0edba2ba1 100644 --- a/pytest/selftests/setup_plan/test_plan_A.py +++ b/pytest/selftests/setup_plan/test_plan_A.py @@ -1,16 +1,16 @@ import pytest -def test_xdp_a(feature) -> None: - if feature != "xdp": - pytest.skip("Required feature missing") +@pytest.mark.feature("xdp") +def test_xdp_a() -> None: + pass -def test_gpu_a(feature) -> None: - if feature != "gpu": - pytest.skip("Required feature missing") +@pytest.mark.feature("gpu") +def test_gpu_a() -> None: + pass -def test_rdma_a(feature) -> None: - if feature != "rdma": - pytest.skip("Required feature missing") +@pytest.mark.feature("rdma") +def test_rdma_a() -> None: + pass diff --git 
a/pytest/selftests/setup_plan/test_plan_B.py b/pytest/selftests/setup_plan/test_plan_B.py index 2104200ad5..c415d8bda7 100644 --- a/pytest/selftests/setup_plan/test_plan_B.py +++ b/pytest/selftests/setup_plan/test_plan_B.py @@ -1,16 +1,16 @@ import pytest -def test_xdp_b(feature) -> None: - if feature != "xdp": - pytest.skip("Required feature missing") +@pytest.mark.feature("xdp") +def test_xdp_b() -> None: + pass -def test_gpu_b(feature) -> None: - if feature != "gpu": - pytest.skip("Required feature missing") +@pytest.mark.feature("gpu") +def test_gpu_b() -> None: + pass -def test_rdma_b(feature) -> None: - if feature != "rdma": - pytest.skip("Required feature missing") +@pytest.mark.feature("rdma") +def test_rdma_b() -> None: + pass diff --git a/pytest/selftests/setup_plan/test_plan_C.py b/pytest/selftests/setup_plan/test_plan_C.py new file mode 100644 index 0000000000..ce9d2155fe --- /dev/null +++ b/pytest/selftests/setup_plan/test_plan_C.py @@ -0,0 +1,16 @@ +import pytest + + +@pytest.mark.feature("xdp") +def test_xdp_c() -> None: + pass + + +@pytest.mark.feature("gpu") +def test_gpu_c() -> None: + pass + + +@pytest.mark.feature("rdma") +def test_rdma_c() -> None: + pass From 23f4bb95120b21173c61b600d74ead2e0c41c3fe Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 3 Nov 2020 16:43:30 -0800 Subject: [PATCH 68/84] Add pytest-xdist --- Makefile | 2 +- pytest/poetry.lock | 69 ++++++++++++++++++++++++++++++++++++++++++- pytest/pyproject.toml | 1 + 3 files changed, 70 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index bb12c2e28c..4633afd681 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ run: setup # Run local tests test: setup - cd pytest && poetry run pytest --debug --setup-show selftests/ + cd pytest && poetry run pytest -n 4 --setup-show selftests/ # Run semantic analysis check: setup diff --git a/pytest/poetry.lock b/pytest/poetry.lock index 5d7c68ca9b..a4a4ef9725 100644 --- a/pytest/poetry.lock +++ b/pytest/poetry.lock @@ 
-1,3 +1,11 @@ +[[package]] +name = "apipkg" +version = "1.5" +description = "apipkg: namespace control and lazy-import mechanism" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + [[package]] name = "appdirs" version = "1.4.4" @@ -112,6 +120,20 @@ pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] ssh = ["bcrypt (>=3.1.5)"] test = ["pytest (>=3.6.0,<3.9.0 || >3.9.0,<3.9.1 || >3.9.1,<3.9.2 || >3.9.2)", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,<3.79.2 || >3.79.2)"] +[[package]] +name = "execnet" +version = "1.7.1" +description = "execnet: rapid multi-Python deployment" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +apipkg = ">=1.4" + +[package.extras] +testing = ["pre-commit"] + [[package]] name = "fabric" version = "2.5.0" @@ -459,6 +481,18 @@ python-versions = "*" flake8 = ">=3.5" pytest = ">=3.5" +[[package]] +name = "pytest-forked" +version = "1.3.0" +description = "run tests in isolated forked subprocesses" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +py = "*" +pytest = ">=3.10" + [[package]] name = "pytest-html" version = "2.1.1" @@ -517,6 +551,23 @@ python-versions = "*" [package.dependencies] pytest = ">=3.6.0" +[[package]] +name = "pytest-xdist" +version = "2.1.0" +description = "pytest xdist plugin for distributed testing and loop-on-failing modes" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +execnet = ">=1.1" +pytest = ">=6.0.0" +pytest-forked = "*" + +[package.extras] +psutil = ["psutil (>=3.0)"] +testing = ["filelock"] + [[package]] name = "python-jsonrpc-server" version = "0.4.0" @@ -654,9 +705,13 @@ python-versions = ">=3.6" [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = 
"42b398ae9b15852176c7d822f2e27cfb2a50892e031b1e187475ffa0deabcef9" +content-hash = "6b221105c12de9baa5fda48ab1839efcb4337e94f02560942d8fa165128fd74f" [metadata.files] +apipkg = [ + {file = "apipkg-1.5-py2.py3-none-any.whl", hash = "sha256:58587dd4dc3daefad0487f6d9ae32b4542b185e1c36db6993290e7c41ca2b47c"}, + {file = "apipkg-1.5.tar.gz", hash = "sha256:37228cda29411948b422fae072f57e31d3396d2ee1c9783775980ee9c9990af6"}, +] appdirs = [ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, @@ -751,6 +806,10 @@ cryptography = [ {file = "cryptography-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:99d4984aabd4c7182050bca76176ce2dbc9fa9748afe583a7865c12954d714ba"}, {file = "cryptography-3.1.1.tar.gz", hash = "sha256:9d9fc6a16357965d282dd4ab6531013935425d0dc4950df2e0cf2a1b1ac1017d"}, ] +execnet = [ + {file = "execnet-1.7.1-py2.py3-none-any.whl", hash = "sha256:d4efd397930c46415f62f8a31388d6be4f27a91d7550eb79bc64a756e0056547"}, + {file = "execnet-1.7.1.tar.gz", hash = "sha256:cacb9df31c9680ec5f95553976c4da484d407e85e41c83cb812aa014f0eddc50"}, +] fabric = [ {file = "fabric-2.5.0-py2.py3-none-any.whl", hash = "sha256:160331934ea60036604928e792fa8e9f813266b098ef5562aa82b88527740389"}, {file = "fabric-2.5.0.tar.gz", hash = "sha256:24842d7d51556adcabd885ac3cf5e1df73fc622a1708bf3667bf5927576cdfa6"}, @@ -893,6 +952,10 @@ pytest-flake8 = [ {file = "pytest-flake8-1.0.6.tar.gz", hash = "sha256:1b82bb58c88eb1db40524018d3fcfd0424575029703b4e2d8e3ee873f2b17027"}, {file = "pytest_flake8-1.0.6-py2.py3-none-any.whl", hash = "sha256:2e91578ecd9b200066f99c1e1de0f510fbb85bcf43712d46ea29fe47607cc234"}, ] +pytest-forked = [ + {file = "pytest-forked-1.3.0.tar.gz", hash = "sha256:6aa9ac7e00ad1a539c41bec6d21011332de671e938c7637378ec9710204e37ca"}, + {file = "pytest_forked-1.3.0-py2.py3-none-any.whl", hash = 
"sha256:dc4147784048e70ef5d437951728825a131b81714b398d5d52f17c7c144d8815"}, +] pytest-html = [ {file = "pytest-html-2.1.1.tar.gz", hash = "sha256:6a4ac391e105e391208e3eb9bd294a60dd336447fd8e1acddff3a6de7f4e57c5"}, {file = "pytest_html-2.1.1-py2.py3-none-any.whl", hash = "sha256:9e4817e8be8ddde62e8653c8934d0f296b605da3d2277a052f762c56a8b32df2"}, @@ -913,6 +976,10 @@ pytest-timeout = [ {file = "pytest-timeout-1.4.2.tar.gz", hash = "sha256:20b3113cf6e4e80ce2d403b6fb56e9e1b871b510259206d40ff8d609f48bda76"}, {file = "pytest_timeout-1.4.2-py2.py3-none-any.whl", hash = "sha256:541d7aa19b9a6b4e475c759fd6073ef43d7cdc9a92d95644c260076eb257a063"}, ] +pytest-xdist = [ + {file = "pytest-xdist-2.1.0.tar.gz", hash = "sha256:82d938f1a24186520e2d9d3a64ef7d9ac7ecdf1a0659e095d18e596b8cbd0672"}, + {file = "pytest_xdist-2.1.0-py3-none-any.whl", hash = "sha256:7c629016b3bb006b88ac68e2b31551e7becf173c76b977768848e2bbed594d90"}, +] python-jsonrpc-server = [ {file = "python-jsonrpc-server-0.4.0.tar.gz", hash = "sha256:62c543e541f101ec5b57dc654efc212d2c2e3ea47ff6f54b2e7dcb36ecf20595"}, {file = "python_jsonrpc_server-0.4.0-py3-none-any.whl", hash = "sha256:e5a908ff182e620aac07db5f57887eeb0afe33993008f57dc1b85b594cea250c"}, diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml index c75adfb2d8..7b24e9418d 100644 --- a/pytest/pyproject.toml +++ b/pytest/pyproject.toml @@ -13,6 +13,7 @@ pytest-timeout = "^1.4.2" pytest-html = "^2.1.1" tenacity = "^6.2.0" pytest-rerunfailures = "^9.1.1" +pytest-xdist = "^2.1.0" PyYAML = "^5.3.1" [tool.poetry.dev-dependencies] From f6acd78cbac51313b8cc823cf84844507e4a0436 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 3 Nov 2020 17:15:25 -0800 Subject: [PATCH 69/84] Add filelock package --- pytest/poetry.lock | 4 ++-- pytest/pyproject.toml | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pytest/poetry.lock b/pytest/poetry.lock index a4a4ef9725..fc2c2f3273 100644 --- a/pytest/poetry.lock +++ b/pytest/poetry.lock @@ -154,7 
+154,7 @@ testing = ["mock (>=2.0.0,<3.0)"] name = "filelock" version = "3.0.12" description = "A platform independent file lock." -category = "dev" +category = "main" optional = false python-versions = "*" @@ -705,7 +705,7 @@ python-versions = ">=3.6" [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "6b221105c12de9baa5fda48ab1839efcb4337e94f02560942d8fa165128fd74f" +content-hash = "ee86abdeec8b63e0ff22b16e7d9d6e8aedce399ada42a0cbe395ecd917a42703" [metadata.files] apipkg = [ diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml index 7b24e9418d..01ba6f0db0 100644 --- a/pytest/pyproject.toml +++ b/pytest/pyproject.toml @@ -8,6 +8,7 @@ license = "MIT License" [tool.poetry.dependencies] python = "^3.8" pytest = "^6.1.1" +filelock = "^3.0.12" fabric = "^2.5.0" pytest-timeout = "^1.4.2" pytest-html = "^2.1.1" From 31f0fa0b63c8c7a7b8e64f8912aa952a30f4599e Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 3 Nov 2020 17:45:30 -0800 Subject: [PATCH 70/84] Demonstrate parallelism with shared feature fixture MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Running with ‘-n 1’ takes 10 seconds, or 3 seconds for each feature in sequence plus overhead. Running with ‘-n 8’ takes 4 seconds, 3 seconds for each in parallel plus overhead. 
--- Makefile | 2 +- pytest/selftests/setup_plan/conftest.py | 29 +++++++++++++++++++++++-- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 4633afd681..9df6f350fa 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ run: setup # Run local tests test: setup - cd pytest && poetry run pytest -n 4 --setup-show selftests/ + cd pytest && poetry run pytest -n 8 --setup-show selftests/ # Run semantic analysis check: setup diff --git a/pytest/selftests/setup_plan/conftest.py b/pytest/selftests/setup_plan/conftest.py index 854916983e..fb9db52c51 100644 --- a/pytest/selftests/setup_plan/conftest.py +++ b/pytest/selftests/setup_plan/conftest.py @@ -1,14 +1,19 @@ """Proof-of-concept to schedule a comprehensive test plan.""" from __future__ import annotations +import time import typing +from filelock import FileLock # type: ignore + import pytest if typing.TYPE_CHECKING: from typing import List from _pytest.config import Config + from _pytest.fixtures import SubRequest + from _pytest.tmpdir import TempPathFactory from pytest import Item, Session @@ -21,6 +26,7 @@ def pytest_collection_modifyitems( for item in items: marker = item.get_closest_marker("feature") if marker is None: + keep.append(item) continue feature = marker.args[0] if item.name.endswith(f"[{feature}]"): @@ -29,5 +35,24 @@ def pytest_collection_modifyitems( @pytest.fixture(scope="session", autouse=True, params=["xdp", "gpu", "rdma"]) -def feature(request) -> str: - yield request.param +def feature( + request: SubRequest, tmp_path_factory: TempPathFactory, worker_id: str +) -> str: + """Pretend that this sets up the environment.""" + assert request.param + if worker_id == "master": + return str(request.param) + # Get the shared temp directory. 
+ tmp_dir = tmp_path_factory.getbasetemp().parent + fn = tmp_dir / request.param + data: str = "" + with FileLock(str(fn) + ".lock"): + print(f"Worker {worker_id} using feature {request.param}") + if fn.is_file(): + data = fn.read_text() + else: + # Pretend to do some expensive setup and cache it. + time.sleep(3) + data = request.param + fn.write_text(data) + return data From 91595c05640b8bec6e6a89179e2671638aa586b3 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Thu, 5 Nov 2020 17:43:35 -0800 Subject: [PATCH 71/84] Proof-of-concept redux with dynamic feature requests Example from `make test`: ``` Created target: set() / {'platform': 'Azure', 'image': 'citrix:netscalervpx-130:netscalerbyol:latest', 'sku': 'Standard_DS1_v2'} Created target: set() / {'platform': 'Azure', 'image': 'audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest', 'sku': 'Standard_DS1_v2'} Created target: set() / {'platform': 'Azure', 'image': 'credativ:Debian:9:9.0.201706190', 'sku': 'Standard_DS1_v2'} Created target: set() / {'platform': 'Azure', 'image': 'github:github-enterprise:github-enterprise:latest', 'sku': 'Standard_DS1_v2'} Created target: {'xdp'} / {'platform': 'Azure', 'image': 'citrix:netscalervpx-130:netscalerbyol:latest', 'sku': 'Standard_DS1_v2'} Created target: {'xdp'} / {'platform': 'Azure', 'image': 'audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest', 'sku': 'Standard_DS1_v2'} Created target: {'xdp'} / {'platform': 'Azure', 'image': 'credativ:Debian:9:9.0.201706190', 'sku': 'Standard_DS1_v2'} Created target: {'xdp'} / {'platform': 'Azure', 'image': 'github:github-enterprise:github-enterprise:latest', 'sku': 'Standard_DS1_v2'} Created target: {'gpu'} / {'platform': 'Azure', 'image': 'citrix:netscalervpx-130:netscalerbyol:latest', 'sku': 'Standard_DS1_v2'} Created target: {'gpu'} / {'platform': 'Azure', 'image': 'audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest', 'sku': 'Standard_DS1_v2'} Created target: 
{'gpu'} / {'platform': 'Azure', 'image': 'credativ:Debian:9:9.0.201706190', 'sku': 'Standard_DS1_v2'} Created target: {'gpu'} / {'platform': 'Azure', 'image': 'github:github-enterprise:github-enterprise:latest', 'sku': 'Standard_DS1_v2'} Created target: {'rdma'} / {'platform': 'Azure', 'image': 'citrix:netscalervpx-130:netscalerbyol:latest', 'sku': 'Standard_DS1_v2'} Created target: {'rdma'} / {'platform': 'Azure', 'image': 'audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest', 'sku': 'Standard_DS1_v2'} Created target: {'rdma'} / {'platform': 'Azure', 'image': 'credativ:Debian:9:9.0.201706190', 'sku': 'Standard_DS1_v2'} Created target: {'rdma'} / {'platform': 'Azure', 'image': 'github:github-enterprise:github-enterprise:latest', 'sku': 'Standard_DS1_v2'} ``` --- Makefile | 2 +- pytest/azure.py | 138 ++++++++++ pytest/conftest.py | 141 ++++++++-- pytest/criteria.yaml | 1 + pytest/lisa.py | 48 ++++ pytest/node_plugin.py | 294 --------------------- pytest/selftests/setup_plan/conftest.py | 58 ---- pytest/selftests/setup_plan/test_plan_A.py | 15 +- pytest/selftests/setup_plan/test_plan_B.py | 15 +- pytest/selftests/setup_plan/test_plan_C.py | 15 +- pytest/selftests/test_basic.py | 8 +- pytest/target.py | 95 +++++++ pytest/targets.yaml | 12 + pytest/testsuites/test_lis.py | 29 +- pytest/testsuites/test_smoke.py | 45 ++-- 15 files changed, 478 insertions(+), 438 deletions(-) create mode 100644 pytest/azure.py create mode 100644 pytest/lisa.py delete mode 100644 pytest/node_plugin.py delete mode 100644 pytest/selftests/setup_plan/conftest.py create mode 100644 pytest/target.py create mode 100644 pytest/targets.yaml diff --git a/Makefile b/Makefile index 9df6f350fa..fab052e9f9 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ run: setup # Run local tests test: setup - cd pytest && poetry run pytest -n 8 --setup-show selftests/ + cd pytest && poetry run pytest --keep-vms --targets=targets.yaml --setup-show selftests/ # Run semantic analysis check: 
setup diff --git a/pytest/azure.py b/pytest/azure.py new file mode 100644 index 0000000000..a20b4bbb32 --- /dev/null +++ b/pytest/azure.py @@ -0,0 +1,138 @@ +from __future__ import annotations + +import json +import logging +import typing + +from invoke.runners import Result # type: ignore +from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore + +from target import Target + +if typing.TYPE_CHECKING: + from typing import Any + + +class Azure(Target): + """Implements Azure-specific target methods.""" + + az_ok = False + + def check_az_cli(self) -> None: + """Assert that the `az` CLI is installed and logged in.""" + if Azure.az_ok: + return + # E.g. on Ubuntu: `curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash` + assert self.local("az --version", warn=True), "Please install the `az` CLI!" + # TODO: Login with service principal (az login) and set + # default subscription (az account set -s) using secrets. + account: Result = self.local("az account show") + assert account.ok, "Please `az login`!" + sub = json.loads(account.stdout) + assert sub["isDefault"], "Please `az account set -s `!" + logging.info( + f"Using account '{sub['user']['name']}' with subscription '{sub['name']}'" + ) + Azure.az_ok = True + + def create_boot_storage(self, location: str) -> str: + """Create a separate resource group and storage account for boot diagnostics.""" + account = "pytestbootdiag" + # This command always exits with 0 but returns a string. + if self.local("az group exists -n pytest-lisa").stdout.strip() == "false": + self.local(f"az group create -n pytest-lisa --location {location}") + if not self.local( + f"az storage account show -g pytest-lisa -n {account}", warn=True + ): + self.local(f"az storage account create -g pytest-lisa -n {account}") + return account + + def allow_ping(self) -> None: + """Create NSG rules to enable ICMP ping. 
+ + ICMP ping is disallowed by the Azure load balancer by default, but + there’s strong debate about if this is necessary, and our tests + like to check if the host is up using ping, so we create inbound + and outbound rules in the VM's network security group to allow it. + + """ + try: + for d in ["Inbound", "Outbound"]: + self.local( + f"az network nsg rule create --name allow{d}ICMP " + f"--nsg-name {self.name}NSG --priority 100 --resource-group {self.name}-rg " + f"--access Allow --direction '{d}' --protocol Icmp " + "--source-port-ranges '*' --destination-port-ranges '*'" + ) + except Exception as e: + logging.warning(f"Failed to create ICMP allow rules in NSG due to '{e}'") + + def deploy(self): + """Given deployment info, deploy a new VM.""" + image = self.params["image"] + sku = self.params["sku"] + location = self.params.get("location", "eastus2") + networking = self.params.get("networking", "") + + self.check_az_cli() + + logging.info( + f"""Deploying VM... + Resource Group: '{self.name}-rg' + Region: '{location}' + Image: '{image}' + SKU: '{sku}'""" + ) + + boot_storage = self.create_boot_storage(location) + + self.local(f"az group create -n {self.name}-rg --location {location}") + # TODO: Accept EULA terms when necessary. Like: + # + # local.run(f"az vm image terms accept --urn {vm_image}") + # + # However, this command fails unless the terms exist and have yet + # to be accepted. + + vm_command = [ + "az vm create", + f"-g {self.name}-rg", + f"-n {self.name}", + f"--image {image}", + f"--size {sku}", + f"--boot-diagnostics-storage {boot_storage}", + "--generate-ssh-keys", + ] + # TODO: Support setting up to NICs. + if networking == "SRIOV": + vm_command.append("--accelerated-networking true") + + self.data = json.loads(self.local(" ".join(vm_command)).stdout) + self.allow_ping(self.name) + # TODO: Enable auto-shutdown 4 hours from deployment. 
+ return self.data["publicIpAddress"] + + def delete(self) -> None: + """Delete the entire allocated resource group. + + TODO: Delete VM itself. Only if it was the last VM then delete + the entire resource group. + + """ + logging.info(f"Deleting resource group '{self.name}-rg'") + self.local(f"az group delete -n {self.name}-rg --yes --no-wait") + + @retry(reraise=True, wait=wait_exponential(), stop=stop_after_attempt(3)) + def get_boot_diagnostics(self, **kwargs: Any) -> Result: + """Gets the serial console logs.""" + # NOTE: Some images can cause the `az` CLI to crash because + # their logs aren’t UTF-8 encoded. I’ve filed a bug: + # https://github.com/Azure/azure-cli/issues/15590 + return self.local( + f"az vm boot-diagnostics get-boot-log -n {self.name} -g {self.name}-rg", + **kwargs, + ) + + def platform_restart(self) -> Result: + """TODO: Should this '--force' and redeploy?""" + return self.local(f"az vm restart -n {self.name} -g {self.name}-rg") diff --git a/pytest/conftest.py b/pytest/conftest.py index 01f043e022..0c80e01648 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -5,26 +5,92 @@ """ from __future__ import annotations +import sys import typing from functools import partial from pathlib import Path import yaml +import lisa + +# TODO: Use importlib instead +from azure import Azure +from target import Target + try: from yaml import CLoader as Loader except ImportError: from yaml import Loader # type: ignore +import pytest + if typing.TYPE_CHECKING: - from typing import Any, Dict, List, Optional + from typing import Any, Dict, Iterator, List, Optional from _pytest.config import Config from _pytest.config.argparsing import Parser + from _pytest.fixtures import FixtureRequest + from _pytest.python import Metafunc from pytest import Item, Session -pytest_plugins = "node_plugin" + +LISA = pytest.mark.lisa +LINUX_SCRIPTS = Path("../Testscripts/Linux") + + +@pytest.fixture(scope="session") +def pool(request: FixtureRequest) -> 
Iterator[List[Target]]: + """This fixture tracks all deployed target resources.""" + targets: List[Target] = [] + yield targets + for t in targets: + print(f"Created target: {t.features} / {t.params}") + if not request.config.getoption("keep_vms"): + t.delete() + + +@pytest.fixture +def target(pool, worker_id, request: FixtureRequest) -> Iterator[Target]: + """This fixture provides a connected target for each test. + + It is parametrized indirectly in 'pytest_generate_tests'. + + In this fixture we can check if any existing target matches all + the requirements. If so, we can re-use that target, and if not, we + can deallocate the currently running target and allocate a new + one. When all tests are finished, the pool fixture above will + delete all created VMs. Coupled with performing discrete + optimization in the test collection phase and ordering the tests + such that the test(s) with the lowest common denominator + requirements are executed first, we have the two-layer scheduling + as asked. + + However, this feels like putting the cart before the horse to me. + It would be much simpler in terms of design, implementation, and + usage that features are specified upfront when the targets are + specified. Then all this goes away, and tests are skipped when the + feature is missing, which also leaves users in full control of + their environments. + + """ + params = request.param + marker = request.node.get_closest_marker("lisa") + features = marker.kwargs["features"] + for t in pool: + # TODO: Implement full feature comparison, etc. and not just + # proof-of-concept string set comparison. + if params == t.params and features <= t.features: + yield t + break + else: + # TODO: Reimplement caching. + # TODO: Dynamically load platform module and use it. 
+ t = Azure(params, features) + pool.append(t) + yield t + t.connection.close() def pytest_addoption(parser: Parser) -> None: @@ -36,11 +102,39 @@ def pytest_addoption(parser: Parser) -> None: parser.addoption("--keep-vms", action="store_true", help="Keeps deployed VMs.") parser.addoption("--check", action="store_true", help="Run semantic analysis.") parser.addoption("--demo", action="store_true", help="Run in demo mode.") - parser.addoption("--playbook", type=Path, help="Path to test playbook.") + parser.addoption("--targets", type=Path, help="Path to targets playbook.") + parser.addoption("--criteria", type=Path, help="Path to criteria playbook.") + + +TARGETS = [] +TARGET_IDS = [] def pytest_configure(config: Config) -> None: - """Set default configurations passed on custom flags.""" + """Parse provided user inputs to setup configuration. + + Determines the targets based on the playbook and sets default + configurations based user mode. + """ + playbook_path: Optional[Path] = config.getoption("--targets") + if playbook_path: + playbook = dict() + with open(playbook_path) as f: + playbook = yaml.load(f, Loader=Loader) + for play in playbook: + t = play.get("target") + if t is None: + continue + else: + print(f"Parsing target {t}") + setup = { + "platform": t.get("platform", "Azure"), + "image": t.get("image", "UbuntuLTS"), + "sku": t.get("sku", "Standard_DS1_v2"), + } + TARGETS.append(setup) + TARGET_IDS.append("-".join(setup.values())) + # Search ‘_pytest’ for ‘addoption’ to find these. options: Dict[str, Any] = {} # See ‘pytest.ini’ for defaults. if config.getoption("--check"): @@ -65,6 +159,19 @@ def pytest_configure(config: Config) -> None: setattr(config.option, attr, value) +def pytest_generate_tests(metafunc: Metafunc): + """Parametrize the tests based on our inputs. + + Note that this hook is run for each test, so we do the file I/O in + 'pytest_configure' and save the results. + + """ + # TODO: Provide a default target? 
+ assert TARGETS, "No targets specified!" + if "target" in metafunc.fixturenames: + metafunc.parametrize("target", TARGETS, True, TARGET_IDS) + + def pytest_collection_modifyitems( session: Session, config: Config, items: List[Item] ) -> None: @@ -73,7 +180,14 @@ def pytest_collection_modifyitems( https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_collection_modifyitems """ - playbook_path: Optional[Path] = config.getoption("--playbook") + # Validate LISA mark on every item. + for item in items: + mark = item.get_closest_marker("lisa") + assert mark, f"{item} is missing required LISA marker!" + lisa.validate(mark) + + # Optionally select tests based on a playbook. + playbook_path: Optional[Path] = config.getoption("--criteria") new_items: List[Item] = [] force_exclude: List[Item] = [] @@ -98,7 +212,6 @@ def select_item(action: Optional[str], times: int, item: Item) -> None: for play in playbook: criteria = play.get("criteria") if criteria is None: - print(f"Criteria missing, cannot parse play {play}") continue else: print(f"Parsing criteria {criteria}") @@ -110,21 +223,18 @@ def select_item(action: Optional[str], times: int, item: Item) -> None: priority = criteria.get("priority") area = criteria.get("area") for i in items: - marker = i.get_closest_marker("lisa") - if marker is None: - # TODO: This should be a warning. 
- continue - lisa = marker.kwargs + marker = i.get_closest_marker("LISA") + args = marker.kwargs if name is not None: if i.name.startswith(name): print(f" Selecting test {i} because name is {name}!") select(i) if priority is not None: - if lisa.get("priority") == priority: + if args.get("priority") == priority: print(f" Selecting test {i} because priority is {priority}!") select(i) - if area and lisa.get("area"): - if lisa["area"].lower() == area: + if area and args.get("area"): + if args["area"].lower() == area: print(f" Selecting test {i} because area is {area}!") select(i) items[:] = [i for i in new_items if i not in force_exclude] @@ -132,6 +242,3 @@ def select_item(action: Optional[str], times: int, item: Item) -> None: def pytest_html_report_title(report): # type: ignore report.title = "LISAv3 (Using Pytest) Results" - - -LINUX_SCRIPTS = Path("../Testscripts/Linux") diff --git a/pytest/criteria.yaml b/pytest/criteria.yaml index 0758fa3f0a..3e08a20942 100644 --- a/pytest/criteria.yaml +++ b/pytest/criteria.yaml @@ -1,5 +1,6 @@ # NOTE: This is a proof-of-concept ask from Chi. + # select all p0 cases # for example, selected three cases: a,b,c - criteria: diff --git a/pytest/lisa.py b/pytest/lisa.py new file mode 100644 index 0000000000..d0d906a9c6 --- /dev/null +++ b/pytest/lisa.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +import typing + +if typing.TYPE_CHECKING: + from _pytest.mark.structures import Mark + +# Setup a sane configuration for local and remote commands. Note that +# the defaults between Fabric and Invoke are different, so we use +# their Config classes explicitly. +config = { + "run": { + # Show each command as its run. + "echo": True, + # Disable stdin forwarding. + "in_stream": False, + # Don’t let remote commands take longer than five minutes + # (unless later overridden). This is to prevent hangs. 
+ "command_timeout": 1200, + } +} + + +def validate(mark: Mark): + """Validate each test's LISA parameters.""" + assert not mark.args, "LISA marker cannot have positional arguments!" + args = mark.kwargs + + if args.get("platform"): + assert type(args["platform"]) is str, "Platform must be a string!" + + if args.get("priority") is not None: + assert type(args["priority"]) is int, "Priority must be an integer!" + + if args.get("features") is not None: + if type(args["features"]) is str: + # Convert single ‘str’ argument to ‘Set[str]’ + features = set() + features.add(args["features"]) + args["features"] = features + elif type(args["features"]) is list: + # Convert ‘list’ to ‘set’ + args["features"] = set(args["features"]) + assert type(args["features"]) is set, "Features must be a set!" + for feature in args["features"]: + assert type(feature) is str, "Features must be strings!" + else: + args["features"] = set() diff --git a/pytest/node_plugin.py b/pytest/node_plugin.py deleted file mode 100644 index b2b3cc0a3d..0000000000 --- a/pytest/node_plugin.py +++ /dev/null @@ -1,294 +0,0 @@ -"""Pytest plugin implementing a Node fixture for running remote commands.""" -from __future__ import annotations - -import json -import logging -import platform -import typing -from io import BytesIO -from uuid import uuid4 - -import fabric # type: ignore -import invoke # type: ignore -from fabric import Connection -from invoke import Context -from invoke.runners import Result # type: ignore -from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore - -import pytest - -if typing.TYPE_CHECKING: - from typing import Any, Dict, Iterator, Optional, Tuple - - from _pytest.fixtures import FixtureRequest - -# Setup a sane configuration for local and remote commands. Note that -# the defaults between Fabric and Invoke are different, so we use -# their Config classes explicitly. -config = { - "run": { - # Show each command as its run. 
- "echo": True, - # Disable stdin forwarding. - "in_stream": False, - # Don’t let remote commands take longer than five minutes - # (unless later overridden). This is to prevent hangs. - "command_timeout": 1200, - } -} - - -# Provide a configured local Invoke context for running commands -# before establishing a connection. (Use like `local.run(...)`). -invoke_config = invoke.Config(overrides=config) -local = Context(config=invoke_config) - - -def check_az_cli() -> None: - """Assert that the `az` CLI is installed and logged in.""" - # E.g. on Ubuntu: `curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash` - assert local.run("az --version", warn=True), "Please install the `az` CLI!" - # TODO: Login with service principal (az login) and set - # default subscription (az account set -s) using secrets. - account: Result = local.run("az account show") - assert account.ok, "Please `az login`!" - sub = json.loads(account.stdout) - assert sub["isDefault"], "Please `az account set -s `!" - logging.info( - f"Using account '{sub['user']['name']}' with subscription '{sub['name']}'" - ) - - -def create_boot_storage(location: str) -> str: - """Create a separate resource group and storage account for boot diagnostics.""" - account = "pytestbootdiag" - # This command always exits with 0 but returns a string. - if local.run("az group exists -n pytest-lisa").stdout.strip() == "false": - local.run(f"az group create -n pytest-lisa --location {location}") - if not local.run(f"az storage account show -g pytest-lisa -n {account}", warn=True): - local.run(f"az storage account create -g pytest-lisa -n {account}") - return account - - -def allow_ping(name: str) -> None: - """Create NSG rules to enable ICMP ping. - - ICMP ping is disallowed by the Azure load balancer by default, but - there’s strong debate about if this is necessary, and our tests - like to check if the host is up using ping, so we create inbound - and outbound rules in the VM's network security group to allow it. 
- - """ - try: - for d in ["Inbound", "Outbound"]: - local.run( - f"az network nsg rule create --name allow{d}ICMP " - f"--nsg-name {name}NSG --priority 100 --resource-group {name}-rg " - f"--access Allow --direction '{d}' --protocol Icmp " - "--source-port-ranges '*' --destination-port-ranges '*'" - ) - except Exception as e: - logging.warning(f"Failed to create ICMP allow rules in NSG due to '{e}'") - - -def deploy_vm( - name: str, - location: str = "eastus2", - vm_image: str = "UbuntuLTS", - vm_size: str = "Standard_DS1_v2", - setup: str = "", - networking: str = "", -) -> Tuple[str, Dict[str, str]]: - """Given deployment info, deploy a new VM. - - TODO: This along with the functions it calls are Azure specific - and so would be refactored to support other platforms. Hence it - returns both the host and the deployment data so that calling - functions don't have to know which field in the data corresponds - to the host. - - """ - check_az_cli() - boot_storage = create_boot_storage(location) - - logging.info( - f"""Deploying VM... - Resource Group: '{name}-rg' - Region: '{location}' - Image: '{vm_image}' - Size: '{vm_size}'""" - ) - - local.run(f"az group create -n {name}-rg --location {location}") - # TODO: Accept EULA terms when necessary. Like: - # - # local.run(f"az vm image terms accept --urn {vm_image}") - # - # However, this command fails unless the terms exist and have yet - # to be accepted. - - vm_command = [ - "az vm create", - f"-g {name}-rg", - f"-n {name}", - f"--image {vm_image}", - f"--size {vm_size}", - f"--boot-diagnostics-storage {boot_storage}", - "--generate-ssh-keys", - ] - # TODO: Support setting up to NICs. - if networking == "SRIOV": - vm_command.append("--accelerated-networking true") - - data: Dict[str, str] = json.loads(local.run(" ".join(vm_command)).stdout) - host = data["publicIpAddress"] - - allow_ping(name) - # TODO: Enable auto-shutdown 4 hours from deployment. 
- - return host, data - - -def delete_vm(name: str) -> None: - """Delete the entire allocated resource group.""" - logging.info(f"Deleting resource group '{name}-rg'") - local.run(f"az group delete -n {name}-rg --yes --no-wait") - - -class Node(Connection): - """Extends 'fabric.Connection' with our own utilities.""" - - name: str - data: Dict[str, str] - - def local(self, *args: Any, **kwargs: Any) -> Result: - """This patches Fabric's 'local()' function to ignore SSH environment.""" - return super(Connection, self).run(replace_env=False, env={}, *args, **kwargs) - - @retry(reraise=True, wait=wait_exponential(), stop=stop_after_attempt(3)) - def get_boot_diagnostics(self, **kwargs: Any) -> Result: - """Gets the serial console logs.""" - # NOTE: Some images can cause the `az` CLI to crash because - # their logs aren’t UTF-8 encoded. I’ve filed a bug: - # https://github.com/Azure/azure-cli/issues/15590 - return self.local( - f"az vm boot-diagnostics get-boot-log -n {self.name} -g {self.name}-rg", - **kwargs, - ) - - @retry(reraise=True, wait=wait_exponential(), stop=stop_after_attempt(3)) - def ping(self, **kwargs: Any) -> Result: - """Ping the node from the local system in a cross-platform manner.""" - flag = "-c 1" if platform.system() == "Linux" else "-n 1" - return self.local(f"ping {flag} {self.host}", **kwargs) - - def platform_restart(self) -> Result: - """TODO: Should this '--force' and redeploy?""" - return self.local(f"az vm restart -n {self.name} -g {self.name}-rg") - - def cat(self, path: str) -> str: - """Gets the value of a remote file without a temporary file.""" - with BytesIO() as buf: - self.get(path, buf) - return buf.getvalue().decode("utf-8").strip() - - -# TODO: The fixtures need to be fixed up since we now have a pair, one -# for each scope. They need documentation and de-duplication too. 
-@pytest.fixture(scope="function") -def node(request: FixtureRequest) -> Iterator[Node]: - key, name, host, data, fabric_config = get_node(request) - with Node(host, config=fabric_config, inline_ssh_env=True) as n: - n.name = name - n.data = data - yield n - - # Clean up! - if not request.config.getoption("keep_vms") and key: - assert request.config.cache is not None - request.config.cache.set(key, None) - delete_vm(name) - - -# TODO: Delete this and resurrect at a later date if we need it again. -@pytest.fixture(scope="class") -def class_node(request: FixtureRequest) -> Iterator[None]: - key, name, host, data, fabric_config = get_node(request) - with Node(host, config=fabric_config, inline_ssh_env=True) as n: - n.name = name - n.data = data - request.cls.n = n - logging.info(f"Using VM at: '{host}'") - try: - r: Result = n.run("uname -r") - except Exception as e: - logging.warning(f"Kernel Version: Unknown due to '{e}'") - else: - assert r.ok - logging.info(f"Kernel Version: '{r.stdout.strip()}'") - yield - - # Clean up! - if not request.config.getoption("keep_vms") and key: - assert request.config.cache is not None - request.config.cache.set(key, None) - delete_vm(name) - - -def get_node( - request: FixtureRequest, -) -> Tuple[Optional[str], str, Optional[str], Dict[str, str], fabric.Config]: - """Yields a safe remote Node on which to run commands. - - TODO: Currently this also manages the caching of the deployed VMs. - However, we should make a node pool (perhaps a session-scoped - fixture) which caches and deploys VMs, leaving this to perform its - original work as a connection creator. - - TODO: It's return type is garbage. - """ - deploy_marker = request.node.get_closest_marker("deploy") - connect_marker = request.node.get_closest_marker("connect") - - key: Optional[str] = None - data: Dict[str, str] = dict() - name: Optional[str] = None - host: Optional[str] = None - - # TODO: The deploy and connect markers should be mutually - # exclusive. 
- if deploy_marker: - # NOTE: https://docs.pytest.org/en/stable/cache.html - key = "/".join(["node"] + list(filter(None, deploy_marker.kwargs.values()))) - assert request.config.cache is not None - data = request.config.cache.get(key, None) - if data: - logging.info(f"Reusing node for cached key '{key}'") - else: - # Cache miss, deploy new node... - name = f"pytest-{uuid4()}" - host, data = deploy_vm(name, **deploy_marker.kwargs) - data["name"] = name - data["host"] = host - request.config.cache.set(key, data) - name = data["name"] - host = data["host"] - elif connect_marker: - # Get the host from the test’s marker. - host = connect_marker.args[0] - name = f"pre-deployed:{host}" - else: - # NOTE: This still uses SSH so the localhost must be - # connectable. - host = "localhost" - name = host - - # Yield the configured Node connection. - ssh_config: Dict[str, Any] = config.copy() - ssh_config["run"]["env"] = { - # Set PATH since it’s not a login shell. - "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" - } - fabric_config = fabric.Config(overrides=ssh_config) - logging.info(f"Using VM at: '{host}'") - return key, name, host, data, fabric_config diff --git a/pytest/selftests/setup_plan/conftest.py b/pytest/selftests/setup_plan/conftest.py deleted file mode 100644 index fb9db52c51..0000000000 --- a/pytest/selftests/setup_plan/conftest.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Proof-of-concept to schedule a comprehensive test plan.""" -from __future__ import annotations - -import time -import typing - -from filelock import FileLock # type: ignore - -import pytest - -if typing.TYPE_CHECKING: - from typing import List - - from _pytest.config import Config - from _pytest.fixtures import SubRequest - from _pytest.tmpdir import TempPathFactory - - from pytest import Item, Session - - -def pytest_collection_modifyitems( - session: Session, config: Config, items: List[Item] -) -> None: - """For each item keep only instances using the feature.""" - keep: 
List[Item] = [] - for item in items: - marker = item.get_closest_marker("feature") - if marker is None: - keep.append(item) - continue - feature = marker.args[0] - if item.name.endswith(f"[{feature}]"): - keep.append(item) - items[:] = keep - - -@pytest.fixture(scope="session", autouse=True, params=["xdp", "gpu", "rdma"]) -def feature( - request: SubRequest, tmp_path_factory: TempPathFactory, worker_id: str -) -> str: - """Pretend that this sets up the environment.""" - assert request.param - if worker_id == "master": - return str(request.param) - # Get the shared temp directory. - tmp_dir = tmp_path_factory.getbasetemp().parent - fn = tmp_dir / request.param - data: str = "" - with FileLock(str(fn) + ".lock"): - print(f"Worker {worker_id} using feature {request.param}") - if fn.is_file(): - data = fn.read_text() - else: - # Pretend to do some expensive setup and cache it. - time.sleep(3) - data = request.param - fn.write_text(data) - return data diff --git a/pytest/selftests/setup_plan/test_plan_A.py b/pytest/selftests/setup_plan/test_plan_A.py index d0edba2ba1..211361fad3 100644 --- a/pytest/selftests/setup_plan/test_plan_A.py +++ b/pytest/selftests/setup_plan/test_plan_A.py @@ -1,16 +1,17 @@ -import pytest +from conftest import LISA +from target import Target -@pytest.mark.feature("xdp") -def test_xdp_a() -> None: +@LISA(platform="Azure", features="xdp") +def test_xdp_a(target: Target) -> None: pass -@pytest.mark.feature("gpu") -def test_gpu_a() -> None: +@LISA(platform="Azure", features="gpu") +def test_gpu_a(target: Target) -> None: pass -@pytest.mark.feature("rdma") -def test_rdma_a() -> None: +@LISA(platform="Azure", features="rdma") +def test_rdma_a(target: Target) -> None: pass diff --git a/pytest/selftests/setup_plan/test_plan_B.py b/pytest/selftests/setup_plan/test_plan_B.py index c415d8bda7..4576d89bcd 100644 --- a/pytest/selftests/setup_plan/test_plan_B.py +++ b/pytest/selftests/setup_plan/test_plan_B.py @@ -1,16 +1,17 @@ -import pytest +from conftest 
import LISA +from target import Target -@pytest.mark.feature("xdp") -def test_xdp_b() -> None: +@LISA(platform="Azure", features="xdp") +def test_xdp_b(target: Target) -> None: pass -@pytest.mark.feature("gpu") -def test_gpu_b() -> None: +@LISA(platform="Azure", features="gpu") +def test_gpu_b(target: Target) -> None: pass -@pytest.mark.feature("rdma") -def test_rdma_b() -> None: +@LISA(platform="Azure", features="rdma") +def test_rdma_b(target: Target) -> None: pass diff --git a/pytest/selftests/setup_plan/test_plan_C.py b/pytest/selftests/setup_plan/test_plan_C.py index ce9d2155fe..5380ee00f2 100644 --- a/pytest/selftests/setup_plan/test_plan_C.py +++ b/pytest/selftests/setup_plan/test_plan_C.py @@ -1,16 +1,17 @@ -import pytest +from conftest import LISA +from target import Target -@pytest.mark.feature("xdp") -def test_xdp_c() -> None: +@LISA(platform="Azure", features="xdp") +def test_xdp_c(target: Target) -> None: pass -@pytest.mark.feature("gpu") -def test_gpu_c() -> None: +@LISA(platform="Azure", features="gpu") +def test_gpu_c(target: Target) -> None: pass -@pytest.mark.feature("rdma") -def test_rdma_c() -> None: +@LISA(platform="Azure", features="rdma") +def test_rdma_c(target: Target) -> None: pass diff --git a/pytest/selftests/test_basic.py b/pytest/selftests/test_basic.py index a644b137ae..b8ffdd4125 100644 --- a/pytest/selftests/test_basic.py +++ b/pytest/selftests/test_basic.py @@ -1,7 +1,9 @@ """These tests are meant to run in a CI environment.""" -from node_plugin import Node +from conftest import LISA +from target import Target -def test_basic(node: Node) -> None: +@LISA +def test_basic(target: Target) -> None: """Basic test which creates a Node connection to 'localhost'.""" - node.local("echo Hello World") + target.local("echo Hello World") diff --git a/pytest/target.py b/pytest/target.py new file mode 100644 index 0000000000..a20333df7e --- /dev/null +++ b/pytest/target.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import platform 
+import typing +from abc import ABC, abstractmethod +from io import BytesIO +from uuid import uuid4 + +from fabric import Config as FabricConfig +from fabric import Connection +from invoke import Config as InvokeConfig +from invoke import Context +from invoke.runners import Result # type: ignore +from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore + +import lisa + +if typing.TYPE_CHECKING: + from typing import Any, Dict, Set + + +class Target(ABC): + """Extends 'fabric.Connection' with our own utilities.""" + + local_context = Context(config=InvokeConfig(overrides=lisa.config)) + + def __init__( + self, + params: Dict[str, str], + features: Set[str], + name: str = f"pytest-{uuid4()}", + ): + """If not given a name, generates one uniquely. + + Name is a unique identifier for the group of associated + resources. Features is a list of requirements such as sriov, + rdma, gpu, xdp. + + """ + self.params: Dict[str, str] = params + self.features: Set[str] = features + self.name: str = name + + # TODO: Fix this. + self.host = self.deploy() + + config = lisa.config.copy() + config["run"]["env"] = { + # Set PATH since it’s not a login shell. + "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" + } + self.connection = Connection( + self.host, config=FabricConfig(overrides=config), inline_ssh_env=True + ) + + @abstractmethod + def deploy(self) -> str: + """Must deploy the target resources and return hostname.""" + ... + + @abstractmethod + def delete(self) -> None: + """Must delete the target resources.""" + ... + + @classmethod + def local(self, *args: Any, **kwargs: Any) -> Result: + """This patches Fabric's 'local()' function to ignore SSH environment.""" + return Target.local_context.run(*args, **kwargs) + + # TODO: Generate these automatically. There’s some weird bug with + # inheriting from ‘Connection’ that causes infinite recursion. 
+ def run(self, *args: Any, **kwargs: Any) -> Result: + return self.connection.run(*args, **kwargs) + + def sudo(self, *args: Any, **kwargs: Any) -> Result: + return self.connection.sudo(*args, **kwargs) + + def get(self, *args: Any, **kwargs: Any) -> Result: + return self.connection.get(*args, **kwargs) + + def put(self, *args: Any, **kwargs: Any) -> Result: + return self.connection.put(*args, **kwargs) + + @retry(reraise=True, wait=wait_exponential(), stop=stop_after_attempt(3)) + def ping(self, **kwargs: Any) -> Result: + """Ping the node from the local system in a cross-platform manner.""" + flag = "-c 1" if platform.system() == "Linux" else "-n 1" + return self.local(f"ping {flag} {self.host}", **kwargs) + + def cat(self, path: str) -> str: + """Gets the value of a remote file without a temporary file.""" + with BytesIO() as buf: + self.get(path, buf) + return buf.getvalue().decode("utf-8").strip() diff --git a/pytest/targets.yaml b/pytest/targets.yaml new file mode 100644 index 0000000000..58c3d04797 --- /dev/null +++ b/pytest/targets.yaml @@ -0,0 +1,12 @@ +# TODO: We need to actually think about the schema here. 
+- target: + image: "citrix:netscalervpx-130:netscalerbyol:latest" + +- target: + image: "audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest" + +- target: + image: "credativ:Debian:9:9.0.201706190" + +- target: + image: "github:github-enterprise:github-enterprise:latest" diff --git a/pytest/testsuites/test_lis.py b/pytest/testsuites/test_lis.py index bb050d7d0f..ea91f7840d 100644 --- a/pytest/testsuites/test_lis.py +++ b/pytest/testsuites/test_lis.py @@ -1,19 +1,20 @@ """Runs 'LIS-Tests.xml' using Pytest.""" -import conftest -import pytest -from node_plugin import Node +from __future__ import annotations +import typing -@pytest.mark.lisa( - platform="Azure", category="Functional", area="LIS_DEPLOY", tags=["lis"], priority=0 -) -# @pytest.mark.deploy(setup="OneVM") -@pytest.mark.connect("centos") -def test_lis_driver_version(node: Node) -> None: +if typing.TYPE_CHECKING: + from azure import Azure + +from conftest import LINUX_SCRIPTS, LISA + + +@LISA(platform="Azure", category="Functional", priority=0, area="LIS_DEPLOY") +def test_lis_driver_version(target: Azure) -> None: # TODO: Include “utils.sh” automatically? Or something... 
for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: - node.put(conftest.LINUX_SCRIPTS / f) - node.run(f"chmod +x {f}") - node.sudo("yum install -y bc") - node.run("./LIS-VERSION-CHECK.sh") - assert node.cat("state.txt") == "TestCompleted" + target.put(LINUX_SCRIPTS / f) + target.run(f"chmod +x {f}") + target.sudo("yum install -y bc") + target.run("./LIS-VERSION-CHECK.sh") + assert target.cat("state.txt") == "TestCompleted" diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index fa0108493b..28d1a14c35 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -6,27 +6,12 @@ from invoke.runners import CommandTimedOut, Result, UnexpectedExit # type: ignore from paramiko import SSHException # type: ignore -import pytest -from node_plugin import Node - -# TODO: This is an example of leveraging Pytest’s parameterization -# support. We can implement a small YAML parser to read a playbook at -# runtime to generate this instead of using the below list. -params = [ - pytest.param(i, marks=pytest.mark.deploy(vm_image=i, vm_size="Standard_DS2_v2")) - for i in [ - "citrix:netscalervpx-130:netscalerbyol:latest", - "audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest", - "credativ:Debian:9:9.0.201706190", - "github:github-enterprise:github-enterprise:latest", - ] -] - - -@pytest.mark.lisa(priority=0) -@pytest.mark.parametrize("urn", params) -@pytest.mark.flaky(reruns=1) -def test_smoke(urn: str, node: Node) -> None: +from azure import Azure +from conftest import LISA + + +@LISA(platform="Azure", priority=0, sku="Standard_DS2_v2") +def test_smoke(target: Azure) -> None: """Check that a VM can be deployed and is responsive. 1. Deploy the VM (via 'node' fixture) and log it. 
@@ -45,15 +30,15 @@ def test_smoke(urn: str, node: Node) -> None: logging.info("Pinging before reboot...") ping1 = Result() try: - ping1 = node.ping() + ping1 = target.ping() except UnexpectedExit: - logging.warning(f"Pinging {node.host} before reboot failed") + logging.warning(f"Pinging {target.host} before reboot failed") ssh_errors = (TimeoutError, CommandTimedOut, SSHException, socket.error) try: logging.info("SSHing before reboot...") - node.open() + target.connection.open() except ssh_errors as e: logging.warning(f"SSH before reboot failed: '{e}'") @@ -61,10 +46,10 @@ def test_smoke(urn: str, node: Node) -> None: try: logging.info("Rebooting...") # If this succeeds, we should expect the exit code to be -1 - reboot_exit = node.sudo("reboot", timeout=5).exited + reboot_exit = target.sudo("reboot", timeout=5).exited except ssh_errors as e: logging.warning(f"SSH failed, using platform to reboot: '{e}'") - node.platform_restart() + target.platform_restart() except UnexpectedExit: # TODO: How do we differentiate reboot working and the SSH # connection disconnecting for other reasons? 
@@ -77,19 +62,19 @@ def test_smoke(urn: str, node: Node) -> None: logging.info("Pinging after reboot...") ping2 = Result() try: - ping2 = node.ping() + ping2 = target.ping() except UnexpectedExit: - logging.warning(f"Pinging {node.host} after reboot failed") + logging.warning(f"Pinging {target.host} after reboot failed") try: logging.info("SSHing after reboot...") - node.open() + target.connection.open() except ssh_errors as e: logging.warning(f"SSH after reboot failed: '{e}'") logging.info("Retrieving boot diagnostics...") try: - node.get_boot_diagnostics() + target.get_boot_diagnostics() except UnexpectedExit: logging.warning("Retrieving boot diagnostics failed.") else: From 941dfebd3a82c1f0b9eb0ba7a2f50634cfa70d9c Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 6 Nov 2020 20:41:04 -0800 Subject: [PATCH 72/84] Use schema to massively simplify playbook --- Makefile | 2 +- pytest/conftest.py | 141 ++++++++++++++++++------------------------- pytest/criteria.yaml | 33 +++++----- pytest/lisa.py | 37 +++++------- pytest/playbook.py | 54 +++++++++++++++++ pytest/playbook.yaml | 15 +++++ pytest/target.py | 10 +-- 7 files changed, 162 insertions(+), 130 deletions(-) create mode 100644 pytest/playbook.py create mode 100644 pytest/playbook.yaml diff --git a/Makefile b/Makefile index fab052e9f9..a74e12f15a 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ run: setup # Run local tests test: setup - cd pytest && poetry run pytest --keep-vms --targets=targets.yaml --setup-show selftests/ + cd pytest && poetry run pytest --keep-vms --playbook=playbook.yaml --setup-show selftests/ # Run semantic analysis check: setup diff --git a/pytest/conftest.py b/pytest/conftest.py index 0c80e01648..749b213f95 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -5,26 +5,19 @@ """ from __future__ import annotations -import sys import typing -from functools import partial from pathlib import Path -import yaml +import schema import lisa +import playbook +import pytest 
# TODO: Use importlib instead from azure import Azure from target import Target -try: - from yaml import CLoader as Loader -except ImportError: - from yaml import Loader # type: ignore - -import pytest - if typing.TYPE_CHECKING: from typing import Any, Dict, Iterator, List, Optional @@ -77,7 +70,7 @@ def target(pool, worker_id, request: FixtureRequest) -> Iterator[Target]: """ params = request.param marker = request.node.get_closest_marker("lisa") - features = marker.kwargs["features"] + features = set(marker.kwargs["features"]) for t in pool: # TODO: Implement full feature comparison, etc. and not just # proof-of-concept string set comparison. @@ -102,38 +95,33 @@ def pytest_addoption(parser: Parser) -> None: parser.addoption("--keep-vms", action="store_true", help="Keeps deployed VMs.") parser.addoption("--check", action="store_true", help="Run semantic analysis.") parser.addoption("--demo", action="store_true", help="Run in demo mode.") - parser.addoption("--targets", type=Path, help="Path to targets playbook.") - parser.addoption("--criteria", type=Path, help="Path to criteria playbook.") + parser.addoption("--playbook", type=Path, help="Path to playbook.") TARGETS = [] TARGET_IDS = [] +def get_playbook(path: Optional[Path]) -> dict(): + book = dict() + if not path: + return book + with open(path) as f: + book = playbook.schema.validate(f) + return book + + def pytest_configure(config: Config) -> None: """Parse provided user inputs to setup configuration. Determines the targets based on the playbook and sets default configurations based user mode. 
- """ - playbook_path: Optional[Path] = config.getoption("--targets") - if playbook_path: - playbook = dict() - with open(playbook_path) as f: - playbook = yaml.load(f, Loader=Loader) - for play in playbook: - t = play.get("target") - if t is None: - continue - else: - print(f"Parsing target {t}") - setup = { - "platform": t.get("platform", "Azure"), - "image": t.get("image", "UbuntuLTS"), - "sku": t.get("sku", "Standard_DS1_v2"), - } - TARGETS.append(setup) - TARGET_IDS.append("-".join(setup.values())) + + configurations based user mode.""" + book = get_playbook(config.getoption("--playbook")) + for t in book.get("targets"): + TARGETS.append(t) + TARGET_IDS.append(t["name"]) # Search ‘_pytest’ for ‘addoption’ to find these. options: Dict[str, Any] = {} # See ‘pytest.ini’ for defaults. @@ -182,62 +170,49 @@ def pytest_collection_modifyitems( """ # Validate LISA mark on every item. for item in items: - mark = item.get_closest_marker("lisa") - assert mark, f"{item} is missing required LISA marker!" - lisa.validate(mark) + m = item.get_closest_marker("lisa") + assert m, f"{item} is missing required LISA marker!" + try: + lisa.validate(m) + except schema.SchemaMissingKeyError as e: + print(f"Test {item.name} failed LISA validation {e}!") + items[:] = [] + return # Optionally select tests based on a playbook. - playbook_path: Optional[Path] = config.getoption("--criteria") - new_items: List[Item] = [] - force_exclude: List[Item] = [] + included: List[Item] = [] + excluded: List[Item] = [] - def select_item(action: Optional[str], times: int, item: Item) -> None: + # TODO: Remove logging. 
+ def select(item: Item, times: int, exclude: bool) -> None: """Includes or excludes the item as appropriate.""" - if action == "forceExclude": - print(f" Forcing exclusion of item {item}") - force_exclude.append(item) + if exclude: + print(f" Excluding {item}") + excluded.append(item) else: - print(f" Keeping {item} selected {times} times") - for _ in range(times - new_items.count(item)): - new_items.append(item) - - # TODO: Review, refactor, and fix logging. If we do schema - # validation and have reasonable defaults we can delete most of - # the `is not None` checks. Suggest using: - # https://pypi.org/project/schema/ - if playbook_path: - playbook = dict() - with open(playbook_path) as f: - playbook = yaml.load(f, Loader=Loader) - for play in playbook: - criteria = play.get("criteria") - if criteria is None: - continue - else: - print(f"Parsing criteria {criteria}") - select_action = play.get("select_action", "forceInclude") - times = play.get("times", 1) - select = partial(select_item, select_action, times) - - name = criteria.get("name") - priority = criteria.get("priority") - area = criteria.get("area") - for i in items: - marker = i.get_closest_marker("LISA") - args = marker.kwargs - if name is not None: - if i.name.startswith(name): - print(f" Selecting test {i} because name is {name}!") - select(i) - if priority is not None: - if args.get("priority") == priority: - print(f" Selecting test {i} because priority is {priority}!") - select(i) - if area and args.get("area"): - if args["area"].lower() == area: - print(f" Selecting test {i} because area is {area}!") - select(i) - items[:] = [i for i in new_items if i not in force_exclude] + print(f" Including {item} {times} times") + for _ in range(times - included.count(item)): + included.append(item) + + book = get_playbook(config.getoption("--playbook")) + for c in book.get("criteria"): + print(f"Parsing criteria {c}") + for item in items: + m = item.get_closest_marker("lisa").kwargs + if any( + [ + c["name"] 
and c["name"] in item.name, + c["area"] and c["area"].casefold() == m["area"].casefold(), + c["category"] + and c["category"].casefold() == m["category"].casefold(), + c["priority"] and c["priority"] == m["priority"], + c["tags"] and set(c["tags"]) <= set(m["tags"]), + ] + ): + select(item, c["times"], c["exclude"]) + if not included: + included = items + items[:] = [i for i in included if i not in excluded] def pytest_html_report_title(report): # type: ignore diff --git a/pytest/criteria.yaml b/pytest/criteria.yaml index 3e08a20942..cec1b91c3f 100644 --- a/pytest/criteria.yaml +++ b/pytest/criteria.yaml @@ -1,19 +1,14 @@ -# NOTE: This is a proof-of-concept ask from Chi. - - -# select all p0 cases -# for example, selected three cases: a,b,c -- criteria: - priority: 0 -# drop all cases of xdp, -# because it's not ready on a tested distro. -# for example, droped c, so now is: a,b -- criteria: - area: xdp - # forceExclude means not to pick up it again in next rules. - select_action: forceExclude -# run smoke_test cases twice, to prove a distro stable enough -# after this rule, the picked test cases is like a,b,b -- criteria: - name: test_smoke - times: 2 +# NOTE: This is an adjusted proof-of-concept ask from Chi. +criteria: + # select all p0 cases + # for example, selected three cases: a,b,c + - priority: 0 + # run smoke_test cases twice, to prove a distro stable enough + # after this rule, the picked test cases is like a,b,b + - name: smoke + times: 2 + # drop all cases of xdp, + # because it's not ready on a tested distro. 
+ # for example, droped c, so now is: a,b + - area: xdp + exclude: true diff --git a/pytest/lisa.py b/pytest/lisa.py index d0d906a9c6..da98d92140 100644 --- a/pytest/lisa.py +++ b/pytest/lisa.py @@ -2,6 +2,8 @@ import typing +from schema import Optional, Or, Schema + if typing.TYPE_CHECKING: from _pytest.mark.structures import Mark @@ -20,29 +22,20 @@ } } +lisa_schema = Schema( + { + "platform": str, + "category": Or("Functional", "Performance", "Stress", "Community", "Longhaul"), + "area": str, + "priority": Or(0, 1, 2, 3), + Optional("features", default=list): [str], + Optional(object): object, + }, + ignore_extra_keys=True, +) + def validate(mark: Mark): """Validate each test's LISA parameters.""" assert not mark.args, "LISA marker cannot have positional arguments!" - args = mark.kwargs - - if args.get("platform"): - assert type(args["platform"]) is str, "Platform must be a string!" - - if args.get("priority") is not None: - assert type(args["priority"]) is int, "Priority must be an integer!" - - if args.get("features") is not None: - if type(args["features"]) is str: - # Convert single ‘str’ argument to ‘Set[str]’ - features = set() - features.add(args["features"]) - args["features"] = features - elif type(args["features"]) is list: - # Convert ‘list’ to ‘set’ - args["features"] = set(args["features"]) - assert type(args["features"]) is set, "Features must be a set!" - for feature in args["features"]: - assert type(feature) is str, "Features must be strings!" 
- else: - args["features"] = set() + mark.kwargs.update(lisa_schema.validate(mark.kwargs)) diff --git a/pytest/playbook.py b/pytest/playbook.py new file mode 100644 index 0000000000..ccbf34e44a --- /dev/null +++ b/pytest/playbook.py @@ -0,0 +1,54 @@ +import yaml + +try: + from yaml import CLoader as Loader +except ImportError: + from yaml import Loader # type: ignore + +from schema import And, Optional, Schema, Use + +criteria_schema = Schema( + { + # TODO: Validate that these strings are valid regular + # expressions if we change our matching logic. + Optional("name", default=None): str, + Optional("area", default=None): str, + Optional("category", default=None): str, + Optional("priority", default=None): int, + Optional("tags", default=list): [str], + Optional("times", default=1): int, + Optional("exclude", default=False): bool, + } +) + +# NOTE: We could have each platform register its own schema and +# “Or(...)” them together, so this is actually quite flexible. Again, +# so far just writing a proof-of-concept because we need to peer +# review our design. +target_schema = Schema( + { + # TODO: Maybe set name to image if unset. + "name": str, + # TODO: Use ‘Or([list of registered platforms])’ + "platform": str, + # TODO: Maybe validate as URN or path etc. + Optional("image", default=None): str, + Optional("sku", default=None): str, + } +) + +default_target = {"name": "Default", "platform": "Local"} + +schema = Schema( + And( + # NOTE: This is “magic” that automatically loads and validates + # YAML input. See https://pypi.org/project/schema/ and + # https://pyyaml.org/wiki/PyYAMLDocumentation for + # documentation. 
+ Use(lambda x: yaml.load(x, Loader=Loader)), + { + Optional("targets", default=[default_target]): [target_schema], + Optional("criteria", default=list): [criteria_schema], + }, + ) +) diff --git a/pytest/playbook.yaml b/pytest/playbook.yaml new file mode 100644 index 0000000000..1ab2a7a16a --- /dev/null +++ b/pytest/playbook.yaml @@ -0,0 +1,15 @@ +# NOTE: This is a suggested playbook example. See the schema. +targets: + - name: Ubuntu + platform: Azure + image: UbuntuLTS + sku: Standard_DS1_v2 + - name: Debian + platform: Azure + image: credativ:Debian:9:9.0.201706190 + - name: GitHub + platform: Azure + image: github:github-enterprise:github-enterprise:latest + +criteria: + - name: smoke diff --git a/pytest/target.py b/pytest/target.py index a20333df7e..d78f615aa5 100644 --- a/pytest/target.py +++ b/pytest/target.py @@ -16,7 +16,7 @@ import lisa if typing.TYPE_CHECKING: - from typing import Any, Dict, Set + from typing import Any, Mapping, Sequence, Set class Target(ABC): @@ -26,8 +26,8 @@ class Target(ABC): def __init__( self, - params: Dict[str, str], - features: Set[str], + params: Mapping[str, str], + features: Sequence[str], name: str = f"pytest-{uuid4()}", ): """If not given a name, generates one uniquely. @@ -37,8 +37,8 @@ def __init__( rdma, gpu, xdp. """ - self.params: Dict[str, str] = params - self.features: Set[str] = features + self.params: Mapping[str, str] = params + self.features: Set[str] = set(features) self.name: str = name # TODO: Fix this. 
From 8080af18393a453998a045cafc3b2326c7a31386 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 6 Nov 2020 20:41:38 -0800 Subject: [PATCH 73/84] Add schema package --- pytest/poetry.lock | 29 ++++++++++++++++++++++++++++- pytest/pyproject.toml | 1 + 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/pytest/poetry.lock b/pytest/poetry.lock index fc2c2f3273..ea01a1d5f2 100644 --- a/pytest/poetry.lock +++ b/pytest/poetry.lock @@ -101,6 +101,14 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +[[package]] +name = "contextlib2" +version = "0.6.0.post1" +description = "Backports and enhancements for the contextlib module" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + [[package]] name = "cryptography" version = "3.1.1" @@ -635,6 +643,17 @@ python-versions = "*" [package.extras] dev = ["pytest"] +[[package]] +name = "schema" +version = "0.7.3" +description = "Simple data validation library" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +contextlib2 = ">=0.5.5" + [[package]] name = "six" version = "1.15.0" @@ -705,7 +724,7 @@ python-versions = ">=3.6" [metadata] lock-version = "1.1" python-versions = "^3.8" -content-hash = "ee86abdeec8b63e0ff22b16e7d9d6e8aedce399ada42a0cbe395ecd917a42703" +content-hash = "ff9d853cf9f58598aa01e465e2c673172b9e573fd7a8569bf29236348884c748" [metadata.files] apipkg = [ @@ -782,6 +801,10 @@ colorama = [ {file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"}, {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"}, ] +contextlib2 = [ + {file = "contextlib2-0.6.0.post1-py2.py3-none-any.whl", hash = "sha256:3355078a159fbb44ee60ea80abd0d87b80b78c248643b49aa6d94673b413609b"}, + {file = "contextlib2-0.6.0.post1.tar.gz", hash = 
"sha256:01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e"}, +] cryptography = [ {file = "cryptography-3.1.1-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:65beb15e7f9c16e15934569d29fb4def74ea1469d8781f6b3507ab896d6d8719"}, {file = "cryptography-3.1.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:983c0c3de4cb9fcba68fd3f45ed846eb86a2a8b8d8bc5bb18364c4d00b3c61fe"}, @@ -1033,6 +1056,10 @@ regex = [ rope = [ {file = "rope-0.18.0.tar.gz", hash = "sha256:786b5c38c530d4846aa68a42604f61b4e69a493390e3ca11b88df0fbfdc3ed04"}, ] +schema = [ + {file = "schema-0.7.3-py2.py3-none-any.whl", hash = "sha256:c331438b60f634cab5664ab720d3083cc444f924d55269530c36b33e3354276f"}, + {file = "schema-0.7.3.tar.gz", hash = "sha256:4cf529318cfd1e844ecbe02f41f7e5aa027463e7403666a52746f31f04f47a5e"}, +] six = [ {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml index 01ba6f0db0..47db86c319 100644 --- a/pytest/pyproject.toml +++ b/pytest/pyproject.toml @@ -16,6 +16,7 @@ tenacity = "^6.2.0" pytest-rerunfailures = "^9.1.1" pytest-xdist = "^2.1.0" PyYAML = "^5.3.1" +schema = "^0.7.3" [tool.poetry.dev-dependencies] black = "^20.8b1" From ac019f90a4fb87db0bffc2c8b2baa63e3d963a53 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Fri, 6 Nov 2020 20:41:57 -0800 Subject: [PATCH 74/84] Fix up tests for schema validation --- pytest/selftests/setup_plan/test_plan_A.py | 14 ++++++++++---- pytest/selftests/setup_plan/test_plan_B.py | 14 ++++++++++---- pytest/selftests/setup_plan/test_plan_C.py | 14 ++++++++++---- pytest/selftests/test_basic.py | 4 +++- pytest/testsuites/test_smoke.py | 8 +++++++- pytest/testsuites/test_xdp.py | 22 +++++++++++----------- 6 files changed, 51 insertions(+), 25 deletions(-) diff --git 
a/pytest/selftests/setup_plan/test_plan_A.py b/pytest/selftests/setup_plan/test_plan_A.py index 211361fad3..6f5e004937 100644 --- a/pytest/selftests/setup_plan/test_plan_A.py +++ b/pytest/selftests/setup_plan/test_plan_A.py @@ -1,17 +1,23 @@ -from conftest import LISA +import functools + +import conftest from target import Target +LISA = functools.partial( + conftest.LISA, platform="Azure", category="Functional", area="self-test", priority=1 +) + -@LISA(platform="Azure", features="xdp") +@LISA(features=["xdp"]) def test_xdp_a(target: Target) -> None: pass -@LISA(platform="Azure", features="gpu") +@LISA(features=["gpu"]) def test_gpu_a(target: Target) -> None: pass -@LISA(platform="Azure", features="rdma") +@LISA(features=["rdma"]) def test_rdma_a(target: Target) -> None: pass diff --git a/pytest/selftests/setup_plan/test_plan_B.py b/pytest/selftests/setup_plan/test_plan_B.py index 4576d89bcd..b1e8fc7a26 100644 --- a/pytest/selftests/setup_plan/test_plan_B.py +++ b/pytest/selftests/setup_plan/test_plan_B.py @@ -1,17 +1,23 @@ -from conftest import LISA +import functools + +import conftest from target import Target +LISA = functools.partial( + conftest.LISA, platform="Azure", category="Functional", area="self-test", priority=1 +) + -@LISA(platform="Azure", features="xdp") +@LISA(features=["xdp"]) def test_xdp_b(target: Target) -> None: pass -@LISA(platform="Azure", features="gpu") +@LISA(features=["gpu"]) def test_gpu_b(target: Target) -> None: pass -@LISA(platform="Azure", features="rdma") +@LISA(features=["rdma"]) def test_rdma_b(target: Target) -> None: pass diff --git a/pytest/selftests/setup_plan/test_plan_C.py b/pytest/selftests/setup_plan/test_plan_C.py index 5380ee00f2..60698e3194 100644 --- a/pytest/selftests/setup_plan/test_plan_C.py +++ b/pytest/selftests/setup_plan/test_plan_C.py @@ -1,17 +1,23 @@ -from conftest import LISA +import functools + +import conftest from target import Target +LISA = functools.partial( + conftest.LISA, platform="Azure", 
category="Functional", area="self-test", priority=1 +) + -@LISA(platform="Azure", features="xdp") +@LISA(features=["xdp"]) def test_xdp_c(target: Target) -> None: pass -@LISA(platform="Azure", features="gpu") +@LISA(features=["gpu"]) def test_gpu_c(target: Target) -> None: pass -@LISA(platform="Azure", features="rdma") +@LISA(features=["rdma"]) def test_rdma_c(target: Target) -> None: pass diff --git a/pytest/selftests/test_basic.py b/pytest/selftests/test_basic.py index b8ffdd4125..92c835fcf1 100644 --- a/pytest/selftests/test_basic.py +++ b/pytest/selftests/test_basic.py @@ -2,8 +2,10 @@ from conftest import LISA from target import Target +pytestmark = [] -@LISA + +@LISA(platform="Local", category="Functional", area="self-test", priority=1) def test_basic(target: Target) -> None: """Basic test which creates a Node connection to 'localhost'.""" target.local("echo Hello World") diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 28d1a14c35..2867ee6317 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -10,7 +10,13 @@ from conftest import LISA -@LISA(platform="Azure", priority=0, sku="Standard_DS2_v2") +@LISA( + platform="Azure", + category="Functional", + area="deploy", + priority=0, + sku="Standard_DS2_v2", +) def test_smoke(target: Azure) -> None: """Check that a VM can be deployed and is responsive. 
diff --git a/pytest/testsuites/test_xdp.py b/pytest/testsuites/test_xdp.py index d386ff2bfc..c5f98a70d6 100644 --- a/pytest/testsuites/test_xdp.py +++ b/pytest/testsuites/test_xdp.py @@ -1,12 +1,12 @@ """Runs 'FunctionalTests-XDP.xml' using Pytest.""" -import conftest import pytest -from node_plugin import Node +from conftest import LINUX_SCRIPTS, LISA +from target import Target -@pytest.mark.lisa( +@LISA( platform="Azure", category="Functional", area="XDP", @@ -20,7 +20,7 @@ vm_size="Standard_DS4_v2", ) @pytest.mark.skip(reason="Not Finished") -def test_verify_xdp_compliance(node: Node) -> None: +def test_verify_xdp_compliance(target: Target) -> None: for f in [ "utils.sh", "XDPDumpSetup.sh", @@ -28,10 +28,10 @@ def test_verify_xdp_compliance(node: Node) -> None: "enable_passwordless_root.sh", "enable_root.sh", ]: - node.put(conftest.LINUX_SCRIPTS / f) - node.run(f"chmod +x {f}") - node.run("./enable_root.sh") - node.run("./enable_passwordless_root.sh") - synth_interface = node.run("source XDPUtils.sh ; get_extra_synth_nic").stdout - node.run(f"./XDPDumpSetup.sh {node.internal_address} {synth_interface}") - assert node.cat("state.txt") == "TestCompleted" + target.put(LINUX_SCRIPTS / f) + target.run(f"chmod +x {f}") + target.run("./enable_root.sh") + target.run("./enable_passwordless_root.sh") + synth_interface = target.run("source XDPUtils.sh ; get_extra_synth_nic").stdout + target.run(f"./XDPDumpSetup.sh {target.internal_address} {synth_interface}") + assert target.cat("state.txt") == "TestCompleted" From c17a5c09e933a3715d75b9644830771c3aec64e8 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 10 Nov 2020 15:04:44 -0800 Subject: [PATCH 75/84] Move LISA marker and update tests --- pytest/conftest.py | 4 ---- pytest/lisa.py | 8 +++++++- pytest/pytest.ini | 3 --- pytest/selftests/setup_plan/test_plan_A.py | 4 ++-- pytest/selftests/setup_plan/test_plan_B.py | 4 ++-- pytest/selftests/setup_plan/test_plan_C.py | 4 ++-- pytest/selftests/test_basic.py | 4 
+--- pytest/testsuites/test_lis.py | 2 +- pytest/testsuites/test_smoke.py | 2 +- pytest/testsuites/test_xdp.py | 17 ++++++++--------- 10 files changed, 24 insertions(+), 28 deletions(-) diff --git a/pytest/conftest.py b/pytest/conftest.py index 749b213f95..cb61607632 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -29,10 +29,6 @@ from pytest import Item, Session -LISA = pytest.mark.lisa -LINUX_SCRIPTS = Path("../Testscripts/Linux") - - @pytest.fixture(scope="session") def pool(request: FixtureRequest) -> Iterator[List[Target]]: """This fixture tracks all deployed target resources.""" diff --git a/pytest/lisa.py b/pytest/lisa.py index da98d92140..20996c5c51 100644 --- a/pytest/lisa.py +++ b/pytest/lisa.py @@ -1,12 +1,18 @@ from __future__ import annotations import typing +from pathlib import Path -from schema import Optional, Or, Schema +from schema import Optional, Or, Schema # type: ignore + +import pytest if typing.TYPE_CHECKING: from _pytest.mark.structures import Mark +LISA = pytest.mark.lisa +LINUX_SCRIPTS = Path("../Testscripts/Linux") + # Setup a sane configuration for local and remote commands. Note that # the defaults between Fabric and Invoke are different, so we use # their Config classes explicitly. 
diff --git a/pytest/pytest.ini b/pytest/pytest.ini index b242158aa3..c80e7c884d 100644 --- a/pytest/pytest.ini +++ b/pytest/pytest.ini @@ -7,9 +7,6 @@ addopts = -rA markers = lisa - deploy - connect - feature log_cli = true log_cli_level = WARNING log_cli_format = %(asctime)s %(levelname)s %(message)s diff --git a/pytest/selftests/setup_plan/test_plan_A.py b/pytest/selftests/setup_plan/test_plan_A.py index 6f5e004937..54f2e2543f 100644 --- a/pytest/selftests/setup_plan/test_plan_A.py +++ b/pytest/selftests/setup_plan/test_plan_A.py @@ -1,10 +1,10 @@ import functools -import conftest +import lisa from target import Target LISA = functools.partial( - conftest.LISA, platform="Azure", category="Functional", area="self-test", priority=1 + lisa.LISA, platform="Azure", category="Functional", area="self-test", priority=1 ) diff --git a/pytest/selftests/setup_plan/test_plan_B.py b/pytest/selftests/setup_plan/test_plan_B.py index b1e8fc7a26..90b214e58c 100644 --- a/pytest/selftests/setup_plan/test_plan_B.py +++ b/pytest/selftests/setup_plan/test_plan_B.py @@ -1,10 +1,10 @@ import functools -import conftest +import lisa from target import Target LISA = functools.partial( - conftest.LISA, platform="Azure", category="Functional", area="self-test", priority=1 + lisa.LISA, platform="Azure", category="Functional", area="self-test", priority=1 ) diff --git a/pytest/selftests/setup_plan/test_plan_C.py b/pytest/selftests/setup_plan/test_plan_C.py index 60698e3194..8aff95e2fb 100644 --- a/pytest/selftests/setup_plan/test_plan_C.py +++ b/pytest/selftests/setup_plan/test_plan_C.py @@ -1,10 +1,10 @@ import functools -import conftest +import lisa from target import Target LISA = functools.partial( - conftest.LISA, platform="Azure", category="Functional", area="self-test", priority=1 + lisa.LISA, platform="Azure", category="Functional", area="self-test", priority=1 ) diff --git a/pytest/selftests/test_basic.py b/pytest/selftests/test_basic.py index 92c835fcf1..5b3012d8a4 100644 --- 
a/pytest/selftests/test_basic.py +++ b/pytest/selftests/test_basic.py @@ -1,9 +1,7 @@ """These tests are meant to run in a CI environment.""" -from conftest import LISA +from lisa import LISA from target import Target -pytestmark = [] - @LISA(platform="Local", category="Functional", area="self-test", priority=1) def test_basic(target: Target) -> None: diff --git a/pytest/testsuites/test_lis.py b/pytest/testsuites/test_lis.py index ea91f7840d..86921f4fa1 100644 --- a/pytest/testsuites/test_lis.py +++ b/pytest/testsuites/test_lis.py @@ -6,7 +6,7 @@ if typing.TYPE_CHECKING: from azure import Azure -from conftest import LINUX_SCRIPTS, LISA +from lisa import LINUX_SCRIPTS, LISA @LISA(platform="Azure", category="Functional", priority=0, area="LIS_DEPLOY") diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py index 2867ee6317..cfc911707f 100644 --- a/pytest/testsuites/test_smoke.py +++ b/pytest/testsuites/test_smoke.py @@ -7,7 +7,7 @@ from paramiko import SSHException # type: ignore from azure import Azure -from conftest import LISA +from lisa import LISA @LISA( diff --git a/pytest/testsuites/test_xdp.py b/pytest/testsuites/test_xdp.py index c5f98a70d6..b526db1f0f 100644 --- a/pytest/testsuites/test_xdp.py +++ b/pytest/testsuites/test_xdp.py @@ -2,8 +2,8 @@ import pytest -from conftest import LINUX_SCRIPTS, LISA -from target import Target +from azure import Azure +from lisa import LINUX_SCRIPTS, LISA @LISA( @@ -13,14 +13,13 @@ tags=["xdp", "network", "hv_netvsc", "sriov"], priority=0, ) -@pytest.mark.deploy( - setup="OneVM2NIC", - networking="SRIOV", - vm_image="Canonical:0001-com-ubuntu-server-focal:20_04-lts:latest", - vm_size="Standard_DS4_v2", -) +# TODO: This example is pending an update. 
+# setup="OneVM2NIC", +# networking="SRIOV", +# vm_image="Canonical:0001-com-ubuntu-server-focal:20_04-lts:latest", +# vm_size="Standard_DS4_v2", @pytest.mark.skip(reason="Not Finished") -def test_verify_xdp_compliance(target: Target) -> None: +def test_verify_xdp_compliance(target: Azure) -> None: for f in [ "utils.sh", "XDPDumpSetup.sh", From febb4fbfae89a5aa10eecdacafa586c9a04632a4 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 10 Nov 2020 15:15:20 -0800 Subject: [PATCH 76/84] Cleanup types in Target and Azure modules --- pytest/azure.py | 38 +++++++++++++++++++++++--------------- pytest/target.py | 35 +++++++++++++++++++++++------------ 2 files changed, 46 insertions(+), 27 deletions(-) diff --git a/pytest/azure.py b/pytest/azure.py index a20b4bbb32..3eade8e4a9 100644 --- a/pytest/azure.py +++ b/pytest/azure.py @@ -16,24 +16,29 @@ class Azure(Target): """Implements Azure-specific target methods.""" + # Custom instance attribute(s). + internal_address: str + + # A class attribute because it’s defined. az_ok = False - def check_az_cli(self) -> None: + @classmethod + def check_az_cli(cls) -> None: """Assert that the `az` CLI is installed and logged in.""" - if Azure.az_ok: + if cls.az_ok: # Shortcut if we already checked. return # E.g. on Ubuntu: `curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash` - assert self.local("az --version", warn=True), "Please install the `az` CLI!" + assert cls.local("az --version", warn=True), "Please install the `az` CLI!" # TODO: Login with service principal (az login) and set # default subscription (az account set -s) using secrets. - account: Result = self.local("az account show") + account: Result = cls.local("az account show") assert account.ok, "Please `az login`!" sub = json.loads(account.stdout) assert sub["isDefault"], "Please `az account set -s `!" 
logging.info( f"Using account '{sub['user']['name']}' with subscription '{sub['name']}'" ) - Azure.az_ok = True + cls.az_ok = True def create_boot_storage(self, location: str) -> str: """Create a separate resource group and storage account for boot diagnostics.""" @@ -59,22 +64,23 @@ def allow_ping(self) -> None: try: for d in ["Inbound", "Outbound"]: self.local( - f"az network nsg rule create --name allow{d}ICMP " - f"--nsg-name {self.name}NSG --priority 100 --resource-group {self.name}-rg " + f"az network nsg rule create " + f"--name allow{d}ICMP --resource-group {self.name}-rg " + f"--nsg-name {self.name}NSG --priority 100 " f"--access Allow --direction '{d}' --protocol Icmp " "--source-port-ranges '*' --destination-port-ranges '*'" ) except Exception as e: logging.warning(f"Failed to create ICMP allow rules in NSG due to '{e}'") - def deploy(self): + def deploy(self) -> str: """Given deployment info, deploy a new VM.""" - image = self.params["image"] - sku = self.params["sku"] - location = self.params.get("location", "eastus2") - networking = self.params.get("networking", "") + image = self.parameters["image"] + sku = self.parameters["sku"] + location = self.parameters["location"] + networking = self.parameters["networking"] - self.check_az_cli() + Azure.check_az_cli() logging.info( f"""Deploying VM... @@ -108,9 +114,11 @@ def deploy(self): vm_command.append("--accelerated-networking true") self.data = json.loads(self.local(" ".join(vm_command)).stdout) - self.allow_ping(self.name) + self.allow_ping() # TODO: Enable auto-shutdown 4 hours from deployment. - return self.data["publicIpAddress"] + self.internal_address = self.data["internal_address"] + hostname: str = self.data["publicIpAddress"] + return hostname def delete(self) -> None: """Delete the entire allocated resource group. 
diff --git a/pytest/target.py b/pytest/target.py index d78f615aa5..e41da654b9 100644 --- a/pytest/target.py +++ b/pytest/target.py @@ -6,28 +6,34 @@ from io import BytesIO from uuid import uuid4 -from fabric import Config as FabricConfig +from fabric import Config as FabricConfig # type: ignore from fabric import Connection -from invoke import Config as InvokeConfig +from invoke import Config as InvokeConfig # type: ignore from invoke import Context from invoke.runners import Result # type: ignore +from schema import Schema # type: ignore from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore import lisa if typing.TYPE_CHECKING: - from typing import Any, Mapping, Sequence, Set + from typing import Any, Mapping, Set class Target(ABC): """Extends 'fabric.Connection' with our own utilities.""" - local_context = Context(config=InvokeConfig(overrides=lisa.config)) + # Typed instance attributes, not class attributes. + parameters: Mapping[str, str] + features: Set[str] + name: str + host: str + connection: Connection def __init__( self, - params: Mapping[str, str], - features: Sequence[str], + parameters: Mapping[str, str], + features: Set[str], name: str = f"pytest-{uuid4()}", ): """If not given a name, generates one uniquely. @@ -37,15 +43,17 @@ def __init__( rdma, gpu, xdp. """ - self.params: Mapping[str, str] = params - self.features: Set[str] = set(features) - self.name: str = name + # TODO: Do we need to re-validate the parameters here? + self.parameters = parameters + self.features = features + self.name = name - # TODO: Fix this. + # TODO: Review this thoroughly as currently it depends on + # parameters which is side-effecty. self.host = self.deploy() config = lisa.config.copy() - config["run"]["env"] = { + config["run"]["env"] = { # type: ignore # Set PATH since it’s not a login shell. 
"PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" } @@ -63,8 +71,11 @@ def delete(self) -> None: """Must delete the target resources.""" ... + # A class attribute because it’s defined. + local_context = Context(config=InvokeConfig(overrides=lisa.config)) + @classmethod - def local(self, *args: Any, **kwargs: Any) -> Result: + def local(cls, *args: Any, **kwargs: Any) -> Result: """This patches Fabric's 'local()' function to ignore SSH environment.""" return Target.local_context.run(*args, **kwargs) From 9a1cd98024fcff533dc256970172588f72a56790 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 10 Nov 2020 15:21:41 -0800 Subject: [PATCH 77/84] Handle LISA marker being optional --- pytest/conftest.py | 25 ++++++++++++++----------- pytest/lisa.py | 4 +++- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/pytest/conftest.py b/pytest/conftest.py index cb61607632..10b56849a5 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -8,7 +8,7 @@ import typing from pathlib import Path -import schema +from schema import SchemaMissingKeyError # type: ignore import lisa import playbook @@ -164,13 +164,11 @@ def pytest_collection_modifyitems( https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_collection_modifyitems """ - # Validate LISA mark on every item. + # Validate all LISA marks. for item in items: - m = item.get_closest_marker("lisa") - assert m, f"{item} is missing required LISA marker!" 
try: - lisa.validate(m) - except schema.SchemaMissingKeyError as e: + lisa.validate(item.get_closest_marker("lisa")) + except SchemaMissingKeyError as e: print(f"Test {item.name} failed LISA validation {e}!") items[:] = [] return @@ -194,15 +192,20 @@ def select(item: Item, times: int, exclude: bool) -> None: for c in book.get("criteria"): print(f"Parsing criteria {c}") for item in items: - m = item.get_closest_marker("lisa").kwargs + marker = item.get_closest_marker("lisa") + if not marker: + # Not all tests will have the LISA marker, such as + # static analysis tests. + continue + i = marker.kwargs if any( [ c["name"] and c["name"] in item.name, - c["area"] and c["area"].casefold() == m["area"].casefold(), + c["area"] and c["area"].casefold() == i["area"].casefold(), c["category"] - and c["category"].casefold() == m["category"].casefold(), - c["priority"] and c["priority"] == m["priority"], - c["tags"] and set(c["tags"]) <= set(m["tags"]), + and c["category"].casefold() == i["category"].casefold(), + c["priority"] and c["priority"] == i["priority"], + c["tags"] and set(c["tags"]) <= set(i["tags"]), ] ): select(item, c["times"], c["exclude"]) diff --git a/pytest/lisa.py b/pytest/lisa.py index 20996c5c51..b80346f9a8 100644 --- a/pytest/lisa.py +++ b/pytest/lisa.py @@ -41,7 +41,9 @@ ) -def validate(mark: Mark): +def validate(mark: Optional[Mark]) -> None: """Validate each test's LISA parameters.""" + if not mark: + return assert not mark.args, "LISA marker cannot have positional arguments!" 
mark.kwargs.update(lisa_schema.validate(mark.kwargs)) From e1f7b21973e5a779db439ce4bd9930595bcde2be Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 10 Nov 2020 15:25:58 -0800 Subject: [PATCH 78/84] Load platforms dynamically with their own parameters schema --- pytest/azure.py | 16 +++++++ pytest/conftest.py | 73 +++++++++++++++++++++----------- pytest/playbook.py | 82 +++++++++++++++++++++--------------- pytest/playbook.yaml | 26 ++++++++++-- pytest/selftests/__init__.py | 0 pytest/selftests/conftest.py | 17 ++++++++ pytest/target.py | 20 +++++++++ pytest/targets.yaml | 12 ------ 8 files changed, 171 insertions(+), 75 deletions(-) create mode 100644 pytest/selftests/__init__.py create mode 100644 pytest/selftests/conftest.py delete mode 100644 pytest/targets.yaml diff --git a/pytest/azure.py b/pytest/azure.py index 3eade8e4a9..aadb2b5eda 100644 --- a/pytest/azure.py +++ b/pytest/azure.py @@ -5,6 +5,7 @@ import typing from invoke.runners import Result # type: ignore +from schema import Optional, Schema # type: ignore from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore from target import Target @@ -19,6 +20,21 @@ class Azure(Target): # Custom instance attribute(s). internal_address: str + # @property + # @classmethod + # def schema(cls) -> Schema: + # return + + schema: Schema = Schema( + { + # TODO: Maybe validate as URN or path etc. + "image": str, + Optional("sku", default="Standard_DS1_v2"): str, + Optional("location", default="eastus2"): str, + Optional("networking", default=""): str, + } + ) + # A class attribute because it’s defined. 
az_ok = False diff --git a/pytest/conftest.py b/pytest/conftest.py index 10b56849a5..f5cd7f3bb8 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -10,38 +10,35 @@ from schema import SchemaMissingKeyError # type: ignore +import azure # noqa import lisa -import playbook import pytest - -# TODO: Use importlib instead -from azure import Azure from target import Target if typing.TYPE_CHECKING: - from typing import Any, Dict, Iterator, List, Optional + from typing import Any, Dict, Iterator, List, Optional, Type from _pytest.config import Config from _pytest.config.argparsing import Parser - from _pytest.fixtures import FixtureRequest + from _pytest.fixtures import SubRequest from _pytest.python import Metafunc from pytest import Item, Session @pytest.fixture(scope="session") -def pool(request: FixtureRequest) -> Iterator[List[Target]]: +def pool(request: SubRequest) -> Iterator[List[Target]]: """This fixture tracks all deployed target resources.""" targets: List[Target] = [] yield targets for t in targets: - print(f"Created target: {t.features} / {t.params}") + print(f"Created target: {t.features} / {t.parameters}") if not request.config.getoption("keep_vms"): t.delete() @pytest.fixture -def target(pool, worker_id, request: FixtureRequest) -> Iterator[Target]: +def target(pool: List[Target], request: SubRequest) -> Iterator[Target]: """This fixture provides a connected target for each test. It is parametrized indirectly in 'pytest_generate_tests'. @@ -64,19 +61,28 @@ def target(pool, worker_id, request: FixtureRequest) -> Iterator[Target]: their environments. """ - params = request.param + import playbook + + platform: Type[Target] = playbook.PLATFORMS[request.param["platform"]] + parameters: Dict[str, Any] = request.param["parameters"] marker = request.node.get_closest_marker("lisa") features = set(marker.kwargs["features"]) + for t in pool: # TODO: Implement full feature comparison, etc. and not just # proof-of-concept string set comparison. 
- if params == t.params and features <= t.features: + if all( + [ + isinstance(t, platform), + t.parameters == parameters, + t.features >= features, + ] + ): yield t break else: # TODO: Reimplement caching. - # TODO: Dynamically load platform module and use it. - t = Azure(params, features) + t = platform(parameters, features) pool.append(t) yield t t.connection.close() @@ -94,16 +100,34 @@ def pytest_addoption(parser: Parser) -> None: parser.addoption("--playbook", type=Path, help="Path to playbook.") -TARGETS = [] -TARGET_IDS = [] +TARGETS: List[Dict[str, Any]] = [] +TARGET_IDS: List[str] = [] -def get_playbook(path: Optional[Path]) -> dict(): +def get_playbook(path: Optional[Path]) -> Dict[str, Any]: + """Loads and validates the playbook file. + + This imports the playbook module at runtime to ensure all + subclasses of 'Target' (e.g. all supported platforms, including + those defined in arbitrary 'conftest.py' files) are defined. + + """ + import playbook + book = dict() - if not path: - return book - with open(path) as f: - book = playbook.schema.validate(f) + if path: + # See https://pyyaml.org/wiki/PyYAMLDocumentation + import yaml + + try: + from yaml import CLoader as Loader + except ImportError: + from yaml import Loader # type: ignore + + with open(path) as f: + book = playbook.schema.validate(yaml.load(f, Loader=Loader)) + else: + book = playbook.schema.validate({}) return book @@ -115,7 +139,7 @@ def pytest_configure(config: Config) -> None: configurations based user mode.""" book = get_playbook(config.getoption("--playbook")) - for t in book.get("targets"): + for t in book.get("targets", []): TARGETS.append(t) TARGET_IDS.append(t["name"]) @@ -143,16 +167,15 @@ def pytest_configure(config: Config) -> None: setattr(config.option, attr, value) -def pytest_generate_tests(metafunc: Metafunc): +def pytest_generate_tests(metafunc: Metafunc) -> None: """Parametrize the tests based on our inputs. 
Note that this hook is run for each test, so we do the file I/O in 'pytest_configure' and save the results. - # TODO: Provide a default target? - assert TARGETS, "No targets specified!" if "target" in metafunc.fixturenames: + assert TARGETS, "No targets specified!" metafunc.parametrize("target", TARGETS, True, TARGET_IDS) @@ -189,7 +212,7 @@ def select(item: Item, times: int, exclude: bool) -> None: included.append(item) book = get_playbook(config.getoption("--playbook")) - for c in book.get("criteria"): + for c in book.get("criteria", []): print(f"Parsing criteria {c}") for item in items: marker = item.get_closest_marker("lisa") diff --git a/pytest/playbook.py b/pytest/playbook.py index ccbf34e44a..0d3a79267c 100644 --- a/pytest/playbook.py +++ b/pytest/playbook.py @@ -1,11 +1,50 @@ -import yaml +"""Describes the YAML schema for the playbook file. -try: - from yaml import CLoader as Loader -except ImportError: - from yaml import Loader # type: ignore +This module should be imported at runtime such that 'PLATFORMS' is +defined after all 'Target' subclasses have been defined. -from schema import And, Optional, Schema, Use +PLATFORMS is a mapping of platform names (strings) to the implementing +subclass of 'Target' where each subclass defines its own 'parameters' +schema, 'deploy' and 'delete' methods, and other platform-specific +functionality. A 'Target' subclass need only be defined in a file +loaded by Pytest, so a 'conftest.py' file works just fine. No manual +registration is required, it will be discovered automatically. + +TODO: Add field annotations, friendly error reporting, automatic case +transformations, etc. + +""" +from __future__ import annotations + +import typing + +# See https://pypi.org/project/schema/ +from schema import Optional, Or, Schema # type: ignore + +from target import Target + +if typing.TYPE_CHECKING: + from typing import Mapping, Type + +# See https://github.com/python/mypy/issues/4717 for why we ignore the type. 
+PLATFORMS: Mapping[str, Type[Target]] = { + cls.__name__: cls for cls in Target.__subclasses__() # type: ignore +} + +target_schema = Schema( + { + "name": str, + "platform": Or(*[platform for platform in PLATFORMS.keys()]), + # TODO: What should we do when lacking parameters? Ideally we + # use the platform’s defaults from its own schema, but that + # means this value must be set, even if to an empty dict. + Optional("parameters", default=dict): Or( + *[cls.schema for cls in PLATFORMS.values()] + ), + } +) + +default_target = {"name": "Default", "platform": "Local"} criteria_schema = Schema( { @@ -21,34 +60,9 @@ } ) -# NOTE: We could have each platform register its own schema and -# “Or(...)” them together, so this is actually quite flexible. Again, -# so far just writing a proof-of-concept because we need to peer -# review our design. -target_schema = Schema( +schema = Schema( { - # TODO: Maybe set name to image if unset. - "name": str, - # TODO: Use ‘Or([list of registered platforms])’ - "platform": str, - # TODO: Maybe validate as URN or path etc. - Optional("image", default=None): str, - Optional("sku", default=None): str, + Optional("targets", default=[default_target]): [target_schema], + Optional("criteria", default=list): [criteria_schema], } ) - -default_target = {"name": "Default", "platform": "Local"} - -schema = Schema( - And( - # NOTE: This is “magic” that automatically loads and validates - # YAML input. See https://pypi.org/project/schema/ and - # https://pyyaml.org/wiki/PyYAMLDocumentation for - # documentation. - Use(lambda x: yaml.load(x, Loader=Loader)), - { - Optional("targets", default=[default_target]): [target_schema], - Optional("criteria", default=list): [criteria_schema], - }, - ) -) diff --git a/pytest/playbook.yaml b/pytest/playbook.yaml index 1ab2a7a16a..a291dda079 100644 --- a/pytest/playbook.yaml +++ b/pytest/playbook.yaml @@ -1,15 +1,33 @@ # NOTE: This is a suggested playbook example. See the schema. 
targets: + - name: Test + platform: Local + - name: Ubuntu platform: Azure - image: UbuntuLTS - sku: Standard_DS1_v2 + parameters: + image: UbuntuLTS + sku: Standard_DS1_v2 + - name: Debian platform: Azure - image: credativ:Debian:9:9.0.201706190 + parameters: + image: credativ:Debian:9:9.0.201706190 + - name: GitHub platform: Azure - image: github:github-enterprise:github-enterprise:latest + parameters: + image: github:github-enterprise:github-enterprise:latest + + - name: Citrix + platform: Azure + parameters: + image: citrix:netscalervpx-130:netscalerbyol:latest + + - name: AudioCodes + platform: Azure + parameters: + image: audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest criteria: - name: smoke diff --git a/pytest/selftests/__init__.py b/pytest/selftests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pytest/selftests/conftest.py b/pytest/selftests/conftest.py new file mode 100644 index 0000000000..cc7fd59999 --- /dev/null +++ b/pytest/selftests/conftest.py @@ -0,0 +1,17 @@ +from schema import Schema # type: ignore + +from target import Target + + +class Custom(Target): + schema: Schema = Schema(None) + # @property + # @classmethod + # def schema(cls) -> Schema: + # return Schema() + + def deploy(self) -> str: + return "localhost" + + def delete(self) -> None: + pass diff --git a/pytest/target.py b/pytest/target.py index e41da654b9..a9f1e382a2 100644 --- a/pytest/target.py +++ b/pytest/target.py @@ -61,6 +61,16 @@ def __init__( self.host, config=FabricConfig(overrides=config), inline_ssh_env=True ) + # TODO: Use an abstract class property to ensure this is defined. + schema: Schema = Schema(None) + + # @property + # @classmethod + # @abstractmethod + # def schema(cls) -> Schema: + # """Must return the parameters schema for setup.""" + # ... 
+ @abstractmethod def deploy(self) -> str: """Must deploy the target resources and return hostname.""" @@ -104,3 +114,13 @@ def cat(self, path: str) -> str: with BytesIO() as buf: self.get(path, buf) return buf.getvalue().decode("utf-8").strip() + + +class Local(Target): + schema: Schema = Schema(None) + + def deploy(self) -> str: + return "localhost" + + def delete(self) -> None: + pass diff --git a/pytest/targets.yaml b/pytest/targets.yaml deleted file mode 100644 index 58c3d04797..0000000000 --- a/pytest/targets.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# TODO: We need to actually think about the schema here. -- target: - image: "citrix:netscalervpx-130:netscalerbyol:latest" - -- target: - image: "audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest" - -- target: - image: "credativ:Debian:9:9.0.201706190" - -- target: - image: "github:github-enterprise:github-enterprise:latest" From 59e3e0fd194fcced83002b146f852401f810c672 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 10 Nov 2020 15:43:13 -0800 Subject: [PATCH 79/84] Move smoke test targets to smoke.yaml --- Makefile | 2 +- pytest/playbook.yaml | 23 ----------------------- pytest/smoke.yaml | 23 +++++++++++++++++++++++ 3 files changed, 24 insertions(+), 24 deletions(-) create mode 100644 pytest/smoke.yaml diff --git a/Makefile b/Makefile index a74e12f15a..d335e3bb06 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ yaml: # Run the smoke test demo. 
smoke: - cd pytest && poetry run pytest --demo -k smoke + cd pytest && poetry run pytest --demo -n 4 --playbook=smoke.yaml # Print current Python virtualenv venv: diff --git a/pytest/playbook.yaml b/pytest/playbook.yaml index a291dda079..df9c3178b2 100644 --- a/pytest/playbook.yaml +++ b/pytest/playbook.yaml @@ -8,26 +8,3 @@ targets: parameters: image: UbuntuLTS sku: Standard_DS1_v2 - - - name: Debian - platform: Azure - parameters: - image: credativ:Debian:9:9.0.201706190 - - - name: GitHub - platform: Azure - parameters: - image: github:github-enterprise:github-enterprise:latest - - - name: Citrix - platform: Azure - parameters: - image: citrix:netscalervpx-130:netscalerbyol:latest - - - name: AudioCodes - platform: Azure - parameters: - image: audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest - -criteria: - - name: smoke diff --git a/pytest/smoke.yaml b/pytest/smoke.yaml new file mode 100644 index 0000000000..ea1a15ab60 --- /dev/null +++ b/pytest/smoke.yaml @@ -0,0 +1,23 @@ +targets: + - name: Debian + platform: Azure + parameters: + image: credativ:Debian:9:9.0.201706190 + + - name: GitHub + platform: Azure + parameters: + image: github:github-enterprise:github-enterprise:latest + + - name: Citrix + platform: Azure + parameters: + image: citrix:netscalervpx-130:netscalerbyol:latest + + - name: AudioCodes + platform: Azure + parameters: + image: audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest + +criteria: + - name: smoke From 4082e0639231e39ba9391b03043dcfe2f3d6e3ab Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 11 Nov 2020 12:46:28 -0800 Subject: [PATCH 80/84] =?UTF-8?q?Move=20playbooks=20and=20fix=20self=20tes?= =?UTF-8?q?ts=20to=20use=20=E2=80=9CCustom=E2=80=9D=20platform?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of “Azure” which of course doesn’t work in CI. 
--- Makefile | 6 +++--- pytest/conftest.py | 9 ++++++++- pytest/playbook.yaml | 10 ---------- pytest/{ => playbooks}/criteria.yaml | 0 pytest/{ => playbooks}/smoke.yaml | 0 pytest/playbooks/test.yaml | 5 +++++ pytest/selftests/setup_plan/test_plan_A.py | 2 +- pytest/selftests/setup_plan/test_plan_B.py | 2 +- pytest/selftests/setup_plan/test_plan_C.py | 2 +- 9 files changed, 19 insertions(+), 17 deletions(-) delete mode 100644 pytest/playbook.yaml rename pytest/{ => playbooks}/criteria.yaml (100%) rename pytest/{ => playbooks}/smoke.yaml (100%) create mode 100644 pytest/playbooks/test.yaml diff --git a/Makefile b/Makefile index d335e3bb06..d75bd1fbe5 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ run: setup # Run local tests test: setup - cd pytest && poetry run pytest --keep-vms --playbook=playbook.yaml --setup-show selftests/ + cd pytest && poetry run pytest --verbose --playbook=playbooks/test.yaml --setup-show selftests/ # Run semantic analysis check: setup @@ -22,11 +22,11 @@ clean: # Demonstrate test selection via YAML playbook. yaml: - cd pytest && poetry run pytest --collect-only --playbook=criteria.yaml + cd pytest && poetry run pytest --collect-only --playbook=playbooks/criteria.yaml # Run the smoke test demo. smoke: - cd pytest && poetry run pytest --demo -n 4 --playbook=smoke.yaml + cd pytest && poetry run pytest --demo -n 4 --playbook=playbooks/smoke.yaml # Print current Python virtualenv venv: diff --git a/pytest/conftest.py b/pytest/conftest.py index f5cd7f3bb8..6c8ca9c30b 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -92,6 +92,7 @@ def pytest_addoption(parser: Parser) -> None: """Pytest hook for adding arbitrary CLI options. 
https://docs.pytest.org/en/latest/example/simple.html + https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_addoption """ parser.addoption("--keep-vms", action="store_true", help="Keeps deployed VMs.") @@ -112,6 +113,8 @@ def get_playbook(path: Optional[Path]) -> Dict[str, Any]: those defined in arbitrary 'conftest.py' files) are defined. """ + # TODO: Move to 'playbook.py' and setup 'PLATFORMS' when called so + # that the import can take place at any time. import playbook book = dict() @@ -137,7 +140,9 @@ def pytest_configure(config: Config) -> None: Determines the targets based on the playbook and sets default configurations based user mode. - configurations based user mode.""" + https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_configure + + """ book = get_playbook(config.getoption("--playbook")) for t in book.get("targets", []): TARGETS.append(t) @@ -173,6 +178,8 @@ def pytest_generate_tests(metafunc: Metafunc) -> None: Note that this hook is run for each test, so we do the file I/O in 'pytest_configure' and save the results. + https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_generate_tests + """ if "target" in metafunc.fixturenames: assert TARGETS, "No targets specified!" diff --git a/pytest/playbook.yaml b/pytest/playbook.yaml deleted file mode 100644 index df9c3178b2..0000000000 --- a/pytest/playbook.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# NOTE: This is a suggested playbook example. See the schema. 
-targets: - - name: Test - platform: Local - - - name: Ubuntu - platform: Azure - parameters: - image: UbuntuLTS - sku: Standard_DS1_v2 diff --git a/pytest/criteria.yaml b/pytest/playbooks/criteria.yaml similarity index 100% rename from pytest/criteria.yaml rename to pytest/playbooks/criteria.yaml diff --git a/pytest/smoke.yaml b/pytest/playbooks/smoke.yaml similarity index 100% rename from pytest/smoke.yaml rename to pytest/playbooks/smoke.yaml diff --git a/pytest/playbooks/test.yaml b/pytest/playbooks/test.yaml new file mode 100644 index 0000000000..0495002ab0 --- /dev/null +++ b/pytest/playbooks/test.yaml @@ -0,0 +1,5 @@ +targets: + - name: Local Tests + platform: Local + - name: Setup Plan + platform: Custom diff --git a/pytest/selftests/setup_plan/test_plan_A.py b/pytest/selftests/setup_plan/test_plan_A.py index 54f2e2543f..5a4e049dd6 100644 --- a/pytest/selftests/setup_plan/test_plan_A.py +++ b/pytest/selftests/setup_plan/test_plan_A.py @@ -4,7 +4,7 @@ from target import Target LISA = functools.partial( - lisa.LISA, platform="Azure", category="Functional", area="self-test", priority=1 + lisa.LISA, platform="Custom", category="Functional", area="self-test", priority=1 ) diff --git a/pytest/selftests/setup_plan/test_plan_B.py b/pytest/selftests/setup_plan/test_plan_B.py index 90b214e58c..0d89896300 100644 --- a/pytest/selftests/setup_plan/test_plan_B.py +++ b/pytest/selftests/setup_plan/test_plan_B.py @@ -4,7 +4,7 @@ from target import Target LISA = functools.partial( - lisa.LISA, platform="Azure", category="Functional", area="self-test", priority=1 + lisa.LISA, platform="Custom", category="Functional", area="self-test", priority=1 ) diff --git a/pytest/selftests/setup_plan/test_plan_C.py b/pytest/selftests/setup_plan/test_plan_C.py index 8aff95e2fb..efc579fe10 100644 --- a/pytest/selftests/setup_plan/test_plan_C.py +++ b/pytest/selftests/setup_plan/test_plan_C.py @@ -4,7 +4,7 @@ from target import Target LISA = functools.partial( - lisa.LISA, platform="Azure", 
category="Functional", area="self-test", priority=1 + lisa.LISA, platform="Custom", category="Functional", area="self-test", priority=1 ) From 7c839544cb8a374baf7d4ce1d3e94d449c2a0c53 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 11 Nov 2020 19:58:59 -0800 Subject: [PATCH 81/84] Small cleanups found during update of design document --- pytest/conftest.py | 17 +++++++++++------ pytest/lisa.py | 5 +++-- pytest/playbooks/criteria.yaml | 10 +++------- pytest/target.py | 19 +++++++++---------- pytest/testsuites/test_lis.py | 1 + 5 files changed, 27 insertions(+), 25 deletions(-) diff --git a/pytest/conftest.py b/pytest/conftest.py index 6c8ca9c30b..2dfe991bd2 100644 --- a/pytest/conftest.py +++ b/pytest/conftest.py @@ -68,15 +68,16 @@ def target(pool: List[Target], request: SubRequest) -> Iterator[Target]: marker = request.node.get_closest_marker("lisa") features = set(marker.kwargs["features"]) + # TODO: If `t` is not already in use, deallocate the previous + # target, and ensure the tests have been sorted (and so grouped) + # by their requirements. for t in pool: # TODO: Implement full feature comparison, etc. and not just # proof-of-concept string set comparison. - if all( - [ - isinstance(t, platform), - t.parameters == parameters, - t.features >= features, - ] + if ( + isinstance(t, platform) + and t.parameters == parameters + and t.features >= features ): yield t break @@ -194,6 +195,10 @@ def pytest_collection_modifyitems( https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_collection_modifyitems """ + # TODO: The ‘Item’ object has a ‘user_properties’ attribute which + # is a list of tuples and could be used to hold the validated + # marker data, simplifying later usage. + # Validate all LISA marks. 
for item in items: try: diff --git a/pytest/lisa.py b/pytest/lisa.py index b80346f9a8..bc3b16506e 100644 --- a/pytest/lisa.py +++ b/pytest/lisa.py @@ -35,15 +35,16 @@ "area": str, "priority": Or(0, 1, 2, 3), Optional("features", default=list): [str], + Optional("tags", default=list): [str], Optional(object): object, }, ignore_extra_keys=True, ) -def validate(mark: Optional[Mark]) -> None: +def validate(mark: typing.Optional[Mark]) -> None: """Validate each test's LISA parameters.""" if not mark: return assert not mark.args, "LISA marker cannot have positional arguments!" - mark.kwargs.update(lisa_schema.validate(mark.kwargs)) + mark.kwargs.update(lisa_schema.validate(mark.kwargs)) # type: ignore diff --git a/pytest/playbooks/criteria.yaml b/pytest/playbooks/criteria.yaml index cec1b91c3f..3e86bc538d 100644 --- a/pytest/playbooks/criteria.yaml +++ b/pytest/playbooks/criteria.yaml @@ -1,14 +1,10 @@ # NOTE: This is an adjusted proof-of-concept ask from Chi. criteria: - # select all p0 cases - # for example, selected three cases: a,b,c + # Select all Priority 0 tests. - priority: 0 - # run smoke_test cases twice, to prove a distro stable enough - # after this rule, the picked test cases is like a,b,b + # Run tests with 'smoke' in the name twice. - name: smoke times: 2 - # drop all cases of xdp, - # because it's not ready on a tested distro. 
- # for example, droped c, so now is: a,b + # Exclude all tests in Area "xdp" - area: xdp exclude: true diff --git a/pytest/target.py b/pytest/target.py index a9f1e382a2..2608039cd5 100644 --- a/pytest/target.py +++ b/pytest/target.py @@ -6,10 +6,8 @@ from io import BytesIO from uuid import uuid4 -from fabric import Config as FabricConfig # type: ignore -from fabric import Connection -from invoke import Config as InvokeConfig # type: ignore -from invoke import Context +import fabric # type: ignore +import invoke # type: ignore from invoke.runners import Result # type: ignore from schema import Schema # type: ignore from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore @@ -28,7 +26,7 @@ class Target(ABC): features: Set[str] name: str host: str - connection: Connection + conn: fabric.Connection def __init__( self, @@ -57,8 +55,8 @@ def __init__( # Set PATH since it’s not a login shell. "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" } - self.connection = Connection( - self.host, config=FabricConfig(overrides=config), inline_ssh_env=True + self.connection = fabric.Connection( + self.host, config=fabric.Config(overrides=config), inline_ssh_env=True ) # TODO: Use an abstract class property to ensure this is defined. @@ -82,15 +80,16 @@ def delete(self) -> None: ... # A class attribute because it’s defined. - local_context = Context(config=InvokeConfig(overrides=lisa.config)) + local_context = invoke.Context(config=invoke.Config(overrides=lisa.config)) @classmethod def local(cls, *args: Any, **kwargs: Any) -> Result: """This patches Fabric's 'local()' function to ignore SSH environment.""" return Target.local_context.run(*args, **kwargs) - # TODO: Generate these automatically. There’s some weird bug with - # inheriting from ‘Connection’ that causes infinite recursion. + # TODO: Refactor this. We don’t want to inherit from `Connection` + # because that’s overly complicated. 
Honestly we probably just + # want users to call `target.conn.run()` etc. def run(self, *args: Any, **kwargs: Any) -> Result: return self.connection.run(*args, **kwargs) diff --git a/pytest/testsuites/test_lis.py b/pytest/testsuites/test_lis.py index 86921f4fa1..a35e6fe94e 100644 --- a/pytest/testsuites/test_lis.py +++ b/pytest/testsuites/test_lis.py @@ -11,6 +11,7 @@ @LISA(platform="Azure", category="Functional", priority=0, area="LIS_DEPLOY") def test_lis_driver_version(target: Azure) -> None: + """Checks that the installed drivers have the correct version.""" # TODO: Include “utils.sh” automatically? Or something... for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: target.put(LINUX_SCRIPTS / f) From eeb382387835a9fa46464f66168785199c0c2f12 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Wed, 11 Nov 2020 20:00:17 -0800 Subject: [PATCH 82/84] Write version 0.2 of design document --- pytest/DESIGN.md | 874 ++++++++++++++++++++++++++--------------------- 1 file changed, 484 insertions(+), 390 deletions(-) diff --git a/pytest/DESIGN.md b/pytest/DESIGN.md index 57acb4848b..50ebb9a4c2 100644 --- a/pytest/DESIGN.md +++ b/pytest/DESIGN.md @@ -7,7 +7,7 @@ evaluating the feasibility of leveraging Please see [PR #1065](https://github.com/LIS/LISAv2/pull/1065) for a working, proof-of-concept prototype. -Authored by Andrew Schwartzmeyer (he/him), version 0.1.0. +Authored by Andrew Schwartzmeyer (he/him), version 0.2.0. ## Why Pytest? @@ -15,10 +15,11 @@ Pytest is an [incredibly popular](https://docs.pytest.org/en/stable/talks.html) MIT licensed open source Python testing framework. It has a thriving community and plugin framework, with over 750 [plugins](https://plugincompat.herokuapp.com/). Instead of writing (and -therefore maintaining) yet another test framework, we would do less with more by +therefore maintaining) yet another test framework, we would do more with less by reusing Pytest and existing plugins. 
This will allow us to focus on our unique problems: organizing and understanding our tests, deploying necessary resources -(such as Azure or Hyper-V virtual machines), and analyzing our results. +(such as Azure, Hyper-V, or bare metal machines, collectively known as +“targets”), and analyzing our results. In fact, most of Pytest itself is implemented via [built-in plugins](https://docs.pytest.org/en/stable/plugins.html), providing us with many @@ -43,23 +44,315 @@ needs very well: * Modular setup/teardown via fixtures * Incredibly customizable (as detailed above) -So all the logic for describing, discovering, running, skipping based on -requirements, and reporting results of the tests is already written and -maintained by the greater open source community, leaving us to focus on our hard -and specific problem: creating an abstraction to launch the necessary nodes in -our environments. Using Pytest would also allow us the space to abstract other +So all the logic for describing, discovering, running, skipping and reporting +results of the tests, as well as enabling and importing users’ plugins is +already written and maintained by the open source community. This leaves us to +focus on our hard and specific problems: creating an abstraction to launch the +necessary targets, organizing and publishing our tests, and reporting test +results upstream. Using Pytest would also allow us the space to abstract other commonalities in our specific tests. In this way, LISAv3 could solve the difficulties we have at hand without creating yet another test framework. -## High-Level Design Decisions +Finally, by leveraging such a popular framework and reducing the amount of code +we need to maintain, we drastically increase our chances of receiving pull +requests instead of bug reports from users. 
This is important because despite +our best efforts it is practically guaranteed that as adoption of LISAv3 +increases, users will want changes to be made, and we need to empower them to do +so themselves. + +## What are we maintaining? + +The current proof-of-concept implementation uses the top-level `conftest.py` +file to define our “plugin” functionality. This works, but it is not ideal. I +believe that we will want to publish two open source Pytest plugins as packages +on [PyPI](https://pypi.org/), the Python Package Index: `pytest-target` and +`pytest-lisa`. We will also maintain our set of public “LISA” tests, but these +should simply install and use our plugins. + +The `pytest-target` plugin should encapsulate all our logic for _how_ and _when_ +to deploy targets (local or cloud virtual machines, or bare metal machines, and +all the associated resources), run tests on the specified targets, and delete +the targets. This includes specifying which features and resources each test +needs and each given target provides (such as number of cores, amount of RAM, +and other hardware like a GPU etc.), how to deploy and delete each target based +on its platform, and parameterization of the `target` fixture based on CLI or +YAML file input. In fact, some tests (like networking) will require multiple +targets at once. This plugin will need to manage resources intelligently, being +able to optimize for both time and cost, and make it easy for tests to request +and use various resources. + +The `pytest-lisa` plugin should encapsulate all our logic for how to organize +and select tests, as well as our opinions on displaying test results. This +includes the user modes, test metadata and inventory, test selection based on +criteria against that metadata, required and pre-configured upstream plugins, +and result notifiers. It will similarly support both CLI and YAML file input. 
+ +We should strive to keep these plugins from depending on each other in order to +keep their scope well-defined. In the “LISA” repository of tests we will depend +on the two plugins and maintain additional fixtures for our tests’ unique +requirements. Similarly, we and others may have private test repositories which +build upon the above by defining new platform support and internal service +integrations. + +## pytest-target + +### How are targets provided and accessed? + +First we need to define “target” as an instance of a system-under-test. That is, +given some environment requirements, such an Azure image (URN) and size (SKU), a +target would be a virtual machine deployed by `pytest-target` with SSH access +provided to the requesting test. A target could optionally be pre-deployed and +simply connected. Some tests may request multiple targets as well. -### What are the User Modes? +Pytest uses [fixtures](https://docs.pytest.org/en/stable/fixture.html), which +are the primary way of setting up test requirements. They replace less flexible +alternatives like setup/teardown functions. It is through fixtures that we +implement remote target setup/teardown. Our `target` fixture returns a `Target` +instance, which currently provides: + +* Remote shell access via SSH +* Data including hostname / IP address +* Cross-platform ping functionality with exponential back-off +* Uploading of local files to arbitrary remote destinations +* Downloading of remote file contents into local string variable +* Asynchronous remote command execution with promises + +The `Azure(Target)` subclass additionally provides: + +* Automatic provisioning of an Azure VM given URN and SKU +* Allowing ICMP ping via Azure firewall rules +* Azure platform forced reboot by API +* Downloading boot diagnostics (serial console log) from platform + +The prototype demonstrates how easy it is to quickly implement these features. +As we need more features, they can be readily added and shared among tests. 
+ +The `Target` class leverages [Fabric](https://www.fabfile.org/) which is a +popular high-level Python library for executing shell commands on remote systems +over SSH. Underneath the covers Fabric uses +[paramiko](https://docs.paramiko.org/en/stable/), the most popular low-level +Python SSH library. Fabric does the heavy lifting of safely connecting and +disconnecting from the node, executing the shell command (synchronously or +asynchronously), reporting the exit status, gathering the stdout and stderr, +providing stdin (or interactive auto-responses, similar to `expect`), uploading +and downloading files, and much more. In fact, these APIs are all available and +implemented for the local machine by the underlying +[Inovke](https://www.pyinvoke.org/) library, which is essentially a Python +`subprocess` wrapper with “a powerful and clean feature set.” + +Other test specific requirements, such as installing software and daemons, +downloading files from remote storage, or checking the state of our Bash test +scripts, would similarly be implemented by methods on the `Target` class or via +additional fixtures and thus shared among tests. + +### How do we interact with Azure? + +For Azure, we currently use the [Azure CLI](https://aka.ms/azureclidocs) to +deploy a virtual machine. For Hyper-V (and other virtualization platforms), we +would like to use [libvirt](https://libvirt.org/python.html), and for embedded +environments we are evaluating +[labgrid](https://github.com/labgrid-project/labgrid). + +If possible, we do not want to use the [Azure Python +APIs](https://aka.ms/azsdk/python/all) directly because they are more +complicated (and less documented) than the [Azure +CLI](https://aka.ms/azureclidocs). With Invoke (as discussed above), `az` +becomes incredibly easy to work with. The Azure CLI lead developer states that +they have [feature parity](https://stackoverflow.com/a/50005660/1028665) and +that the CLI is more straightforward to use. 
Considering our ease-of-maintenance +requirement, this seems the apt choice. If it later becomes necessary to use the +Python APIs directly, that is, of course, still doable. + +### What’s the `Target` class? + +In version 0.1 of this design document we detailed a planned refactor of what +was then called the `Node` class. This has since been executed with just a few +modifications (one being the rename to `Target`, as `Node` was found to be an +overloaded term in the context of data centers). This class and its subclasses +are decoupled from Pytest, and are used via fixtures. It looks like this: + +```python +from abc import ABC, abstractmethod +from schema import Schema +import fabric + +class Target(ABC): + parameters: Mapping[str, str] + features: Set[str] + name: str + host: str + conn: fabric.Connection # Provides run, sudo, get, put etc. + + def __init__(...): + ... + self.host = self.deploy() + self.conn = fabric.Connection(self.host) + + @classmethod + @property + @abstractmethod + def schema(cls) -> Schema: + """Must return the parameters schema for setup.""" + ... + + @abstractmethod + def deploy(self) -> str: + """Must deploy the target resources and return hostname.""" + ... + + @abstractmethod + def delete(self) -> None: + """Must delete the target resources.""" + ... + + @classmethod + def local(...) -> Result: + """Runs a local shell command.""" + ... +``` + +#### How are platforms implemented? + +Platform support is implemented by subclassing `Target` and defining the +`schema` property, `deploy` method, `delete` method, and any platform-specific +methods. Using the `__subclasses__` attribute of `Target` the available +platforms and their parameter schemata are automatically gathered from users’ +own `conftest.py` files and other plugins. This enables the `target` fixture to +dynamically instantiate a target from the gathered requirements and parameters. + +#### How are requirements examined? 
+ +The `features` attribute is currently a set of strings and (combined with the +parameters dictionary) was used to demonstrate how we can test if an existing +target instance (representing a deployed machine) met a test’s requirements. It +should be updated with a `Requirements` class that represents all physical +attributes of the target, and a `requires` Pytest mark should be added which +takes instances of this class. Two `Requirements` should be comparable to +determine if one set meets (or exceeds) the other set. + +#### How do we share common tasks? + +Common tasks for targets like rebooting and pinging should be implemented on the +`Target` class, and platform-specific tasks on the respective subclass. + +Methods available from `Connection` include `run()` and `sudo()` which are used +to easily run arbitrary commands, and `get()` and `put()` to download and upload +arbitrary files. + +The `cat()` method wraps `get()` and returns the file as data in a string. This +makes test code like this possible: + +```python +assert target.conn.cat("state.txt") == "TestCompleted" +``` -Because Pytest is infinitely customizable, we want to provide a few sets of +A `reboot()` method should be added that first tries to use `sudo("reboot", +timeout=5)` (with a short timeout to avoid a hung SSH session). It should retry +with an exponential back-off to see if the machine has rebooted by checking +either `uptime` or the existence of a file created before the reboot. This is to +avoid having to `sleep()` and just guess the amount of time it takes to reboot. + +A `restart()` method should “power cycle” the machine using the platform’s API, +and thus is in abstract method. + +Other tools and shared logic should be implemented as necessary. A major area of +concern is the automatic and package-manager agnostic installation of necessary +tools, much of which has been implemented previously and can be integrated. + +### How are targets requested and managed? 
+ +We implement a pair of Pytest fixtures to provide targets. The first is the +`pool` fixture, which looks like: + +```python +@pytest.fixture(scope="session") +def pool(request: SubRequest) -> Iterator[List[Target]]: + """This fixture tracks all deployed target resources.""" + targets: List[Target] = [] + yield targets + for t in targets: + t.delete() +``` + +The `pool` fixture is setup once at the beginning of the test session, at which +point the `targets` list is then provided as input to every instance of the +`target` fixture. While currently a list, to support optimal scheduling we will +likely want to use a priority queue, where the priority of a target represents +its cost (whether in terms of time or money), allowing us to provide either the +fastest or the cheapest target to each request. Targets not in use will be +deallocated, and all targets will be automatically deleted after the tests are +finished (unless the user requested otherwise, in which case they’ll be cached). + +Note that cross-session [caching](https://docs.pytest.org/en/stable/cache.html) +is provided by Pytest, and very easy to work with. An early prototype +implemented a `--keep-vms` flag successfully, and this will be implemented again +with the updated design. + +The second is the `target` fixture, which looks like: + +```python +@pytest.fixture +def target(pool: List[Target], request: SubRequest) -> Iterator[Target]: + """This fixture provides a connected target for each test.""" + platform: Type[Target] = playbook.PLATFORMS[request.param["platform"]] + parameters: Dict[str, Any] = request.param["parameters"] + marker = request.node.get_closest_marker("lisa") + features = set(marker.kwargs["features"]) + + # TODO: If `t` is not already in use, deallocate the previous target. 
+ for t in pool: + if isinstance(t, platform) and t.parameters == parameters and t.features >= features: + yield t + break + else: + t = platform(parameters, features) + pool.append(t) + yield t + t.connection.close() +``` + +This is obviously still an early implementation, but it is viable. By using the +[pytest_collection_modifyitems][] hook to sort (and so group) the tests by their +requirements, the tests would efficiently reuse targets. This fixture is +indirectly parameterized during setup with the [pytest_generate_tests][] hook. +Test and fixture [parameterization][] is a huge feature of Pytest. When we +parameterize the `target` fixture for multiple targets (e.g. “Ubuntu” and +“Debian”), Pytest automatically creates a set of tests for each target. So +`test_smoke` turns into `test_smoke[Ubuntu]` and `test_smoke[Debian]`. This +allows us to run a collection of tests against multiple targets with ease. These +targets are defined in a YAML file and validated against the parameters +collected from the previously described platform subclasses. + +### How are tests executed in parallel? + +While our original list of goals stated that we want to run tests “in parallel” +we were not specific about what was meant, and the topic of parallelism and +concurrency is understandably complex. We certainly don’t mean running two tests +at once on the same target, as this would undoubtedly lead to flaky tests. + +Assuming that we care about a set of tests passing on a particular image and +size combination, but not necessarily on a particular deployed instance, then we +can run tests concurrently by deploying multiple “identical” targets and +splitting the tests across them. The tests would still run in isolation on each +target. This sounds hard, but actually it’s practically free with Pytest via +[pytest-xdist][]. + +The default `pytest-xdist` implementation simply takes the list of tests and +runs them in a round-robin fashion with the desired number of executors. 
We’ve +talked at length about being able to schedule groups of tests to run in +particular executors and using particular targets. While there are many paths +open to us, this plugin actually provides a hook, `pytest_xdist_make_scheduler` +that exists specifically to “implement custom tests distribution logic.” + +## pytest-lisa + +### What are the user modes? + +Because Pytest is incredibly customizable, we want to provide a few sets of reasonable default configurations for some common scenarios. We will add a flag -like `--mode=[dev,debug,ci,demo]` to change the default options and output of -Pytest. Doing so is readily supported by Pytest via the `pytest_addoption` and -`pytest_configure` hooks. We call these the provided “user modes.” +like `--lisa-mode=[dev,debug,ci,demo]` to change the default options and output +of Pytest. Doing so is readily supported by Pytest via the [pytest_addoption][] +and [pytest_configure][] hooks. We call these the provided “user modes.” * The dev(eloper) mode is intended for use by test developers while writing a new test. It is verbose, caches the deployed VMs between runs, and generates a @@ -78,7 +371,7 @@ Pytest. Doing so is readily supported by Pytest via the `pytest_addoption` and * The demo mode will show the “executive summary” (a lot like CI, but finely tuned for demos). For example, what `make smoke` currently shows. -### How Are Tests Described? +### How are tests described? 
The built-in [pytest-mark](https://docs.pytest.org/en/stable/mark.html) plugin already provides functionality for adding metadata to tests, where we @@ -86,7 +379,7 @@ specifically want: * Platform: used to skip tests inapplicable to the current system-under-test * Category: our high-level test organization -* Area: feature being tested (could default to module name) +* Area: feature being tested * Priority: self-explanatory * Tags: optional additional metadata for test organization @@ -98,35 +391,75 @@ It looks like this: ```python import pytest -@pytest.mark.lisa( - platform="Azure", category="Functional", area="LIS_DEPLOY", priority=0, tags=["lis"] -) -def test_lis_driver_version(node: Node) -> None: +@pytest.mark.lisa(platform="Azure", category="Functional", priority=0, area="LIS_DEPLOY") +def test_lis_driver_version(target: Azure) -> None: """Checks that the installed drivers have the correct version.""" ... ``` This is a functional example, which takes zero implementation. With this simple -decorator, all test collection hooks can introspect the metadata, enforce +decorator, all test [collection hooks][] can introspect the metadata, enforce required parameters and set defaults, select tests based on arbitrary criteria, and list test coverage statistics. Note that Pytest leverages Python’s docstrings for built-in documentation (and -can even run tests discovered in such strings, like doctest). Being just Python -code, this decorator need not be `@pytest.mark.lisa(...)` but can trivially be -provided as simply `@lisa(...)`. +can even run tests discovered in such strings, like doctest). Hence we do not +have a separate field for the test’s documentation. + +Being just Python code, this decorator need not be `@pytest.mark.lisa(...)` but +can trivially be provided as simply `@LISA(...)`. In fact, we provide this in +`lisa.py` with: + +```python +LISA = pytest.mark.lisa + +@LISA(...) +def test_something(...) 
+``` + +Currently we validate the parameters given to this mark during test collection, +by using the following code, which leverages the [schema][] library: + +```python +from schema import Optional, Or, Schema + +lisa_schema = Schema( + { + "platform": str, + "category": Or("Functional", "Performance", "Stress", "Community", "Longhaul"), + "area": str, + "priority": Or(0, 1, 2, 3), + Optional("tags", default=list): [str], + }, +) + +def validate(mark: Mark) -> None: + """Validate each test's LISA parameters.""" + assert not mark.args, "LISA marker cannot have positional arguments!" + mark.kwargs.update(lisa_schema.validate(mark.kwargs)) +``` + +In the future we could change `LISA` to be a function with these keyword +arguments so that IDE auto-completion is enabled. However, this is not mandatory +to move forward, and parameter validation is enabled succinctly with the above. This mark also does need to be repeated for each test, as marks can be scoped to a module, and so one line could describe defaults for every test in a file, with -individual tests overriding parameters as needed. We may also introduce marks -such as `@pytest.mark.slow` to allow for easier test selection. +individual tests overriding parameters as needed. -We even have a prototype +In the current implementation, we also take a `features: List[str]` argument +that is used to prove the concept deploying (or reusing) a target based on the +test’s required and the target’s available sets of features. However, as we move +forward we should define a separate `requires` mark that takes well-defined +classes describing the minimal required resources for a test. This will be part +of the refactor into the two Pytest plugins mentioned above. + +Furthermore, we have a prototype [generator](https://github.com/LIS/LISAv2/tree/pytest/generator) which parses LISAv2 XML test descriptions and generates stubs with this mark filled in correctly. -### How Are Tests Selected? +### How are tests selected? 
Pytest already allows a user to specify which exact tests to run: @@ -135,28 +468,86 @@ Pytest already allows a user to specify which exact tests to run: * Specifying a mark expression on the CLI (e.g. `-m functional and not slow`) We can also implement any other mechanism via the -`pytest_collection_modifyitems` hook. There’s already a -[proof-of-concept](https://github.com/LIS/LISAv2/blob/ab01c33f1f1e1ffac7100f6a69beda07192f05bb/pytest/conftest.py#L49) -which uses selection criteria read from a YAML file: +[pytest_collection_modifyitems][] hook. The proof-of-concept supports gathering +selection criteria from a YAML file: ```yaml -# Select all Priority 0 tests -- criteria: - priority: 0 -# Exclude all tests in Area "xdp" -- criteria: - area: xdp - select_action: forceExclude -# Run test with name `test_smoke` twice -- criteria: - name: test_smoke - times: 2 +criteria: + # Select all Priority 0 tests. + - priority: 0 + # Run tests with 'smoke' in the name twice. + - name: smoke + times: 2 + # Exclude all tests in Area "xdp" + - area: xdp + exclude: true +``` + +This criteria is validated against the following [schema][]: + +```python +from schema import Schema, Optional + +criteria_schema = Schema( + { + # TODO: Validate that these strings are valid regular + # expressions if we change our matching logic. 
+ Optional("name", default=None): str, + Optional("area", default=None): str, + Optional("category", default=None): str, + Optional("priority", default=None): int, + Optional("tags", default=list): [str], + Optional("times", default=1): int, + Optional("exclude", default=False): bool, + } +) +``` + +The test collection is then modified using the Pytest hook, +[pytest_collection_modifyitems][]: + +```python +def pytest_collection_modifyitems( + session: Session, config: Config, items: List[Item] +) -> None: + included: List[Item] = [] + excluded: List[Item] = [] + + def select(item: Item, times: int, exclude: bool) -> None: + if exclude: + excluded.append(item) + else: + for _ in range(times - included.count(item)): + included.append(item) + + for c in criteria: # Where `criteria` is from the schema. + for item in items: + marker = item.get_closest_marker("lisa") + if not marker: + # Not all tests will have the LISA marker, such as + # static analysis tests. + continue + i = marker.kwargs + if any( + [ + c["name"] and c["name"] in item.name, + c["area"] and c["area"].casefold() == i["area"].casefold(), + c["category"] + and c["category"].casefold() == i["category"].casefold(), + c["priority"] and c["priority"] == i["priority"], + c["tags"] and set(c["tags"]) <= set(i["tags"]), + ] + ): + select(item, c["times"], c["exclude"]) + items[:] = [i for i in included if i not in excluded] ``` -However, before we settle on the basic schema understood by the -proof-of-concept, we should write and _review_ a full schema. +Because this is simply a Python list, we can also sort the tests according to +our needs, such as by priority. If the `python-targets` plugin has already +sorted by requirements, that’s just fine, Python’s `sorted()` built-in is +guaranteed to be stable (meaning we can sort in multiple passes). -### How Are Results Reported? +### How are results reported? Parsing the results of a large test suite can be difficult. 
Fortunately, because Pytest is a testing framework, there already exists support for generating @@ -173,104 +564,15 @@ community plugin [pytest-azurepipelines](https://pypi.org/project/pytest-azurepipelines/) which enhances the standard JUnit report for ADO. -### How Are Nodes Provided and Accessed? - -First we need to define “node” as an instance of a system-under-test. That is, -given some environment requirements, such an Azure image (URN) and image (SKU), -a node would be a virtual machine deployed by Pytest with SSH access provided to -the tests. A node could optionally be deployed outside Pytest. - -Pytest uses [fixtures](https://docs.pytest.org/en/stable/fixture.html), which -are the primary way of setting up test requirements. They replace less flexible -alternatives like setup/teardown functions. It is through fixtures that we -implement remote node setup/teardown. Our node fixture currently provides: - -* Automatic provisioning of an Azure VM given URN and SKU -* Remote shell access via SSH -* Data including hostname / IP address for local tools -* Cross-platform ping functionality with exponential back-off -* Allowing ICMP ping via Azure firewall rules -* Platform API reboot -* Uploading of local files to arbitrary remote destinations -* Downloading of remote file contents into local string variable -* Downloading boot diagnostics (serial console log) from platform -* Asynchronous remote command execution with promises +However, we also have internal requirements to report test results throughout +the test life cycle to a database to be consumed by other tools. In this sense, +LISAv3 (the composition of our published plugins, tests, and fixtures) is simply +a producer. Our repository’s `conftest.py` can implement the necessary logic +using Pytest’s ample [test running hooks][]. In particular, the hook +[pytest_runtest_makereport][] is called for each of the setup, call and teardown +phases of a test. As such it can used for precisely this purpose. 
-The prototype demonstrates how easy it is to quickly implement these features. -As we need more features, they can be readily added and shared among tests. - -Our abstraction leverages [Fabric](https://www.fabfile.org/) which is a popular -high-level Python library for executing shell commands on remote systems over -SSH. Underneath the covers it uses -[paramiko](https://docs.paramiko.org/en/stable/), the most popular low-level -Python SSH library. Fabric does the heavy lifting of safely connecting and -disconnecting from the node, executing the shell command (synchronously or -asynchronously), reporting the exit status, gathering the stdout and stderr, -providing stdin (or interactive auto-responses, similar to `expect`), uploading -and downloading files, and much more. In fact, these APIs are all available and -implemented for the local machine by the underlying -[Inovke](https://www.pyinvoke.org/) library, which is essentially a Python -`subprocess` wrapper with “a powerful and clean feature set.” - -Other test specific requirements, such as installing software and daemons, -downloading files from remote storage, or checking the state of our Bash test -scripts, would similarly be implemented by methods on the `Node` class or via -additional fixtures and thus shared among tests. - -For Azure, we use the [Azure CLI](https://aka.ms/azureclidocs) to deploy a -virtual machine. For Hyper-V (and other virtualization platforms), we would like -to use [libvirt](https://libvirt.org/python.html), and for embedded -environments we are evaluating -[labgrid](https://github.com/labgrid-project/labgrid). - -Tests do not need to explicitly call for a node to be provided, and we do not -need to write much code to setup this resource-provider logic. 
We simply define -a `Node` class and a Pytest fixture which returns one: - -```python -@pytest.fixture(scope="session") -def node(request: FixtureRequest) -> Iterator[Node]: - """Return the current node for any test which requests it.""" - with Node() as n: - yield n - -@pytest.mark.lisa(...) -def test_uptime(node: Node) -> None - """Automatically has access to the current node because of the argument.""" - # Runs `uname` via SSH and asserts it's Linux. - assert node.run("uname").stdout.strip() == "Linux" -``` - -When created, the `Node` instance either uses a cached node or deploys a new one -based on the given parameters (which can be provided at runtime). When the scope -of the fixture is exited (in this example, the test session), the `Node` -instance deletes its deployed resource unless requested not to by the user, -which is currently controlled by the `--keep-vms` flag. - -To provide the parameters to the node fixture, the prototype currently -implements a simple `@pytest.mark.deploy(...)` mark which takes `vm_image`, -`vm_size`, etc., and it’s applied to each function. This worked for the demo, -and proved the concept; however, we will want to provide a mechanism for -specifying lists of environments and their required resources to the tests at -runtime. This will likely be a YAML file that is parsed at initialization and -used to parameterize the node fixture itself, causing all the tests to be -executed for each environment. For more details, see the section “Where Does -Parameterization Happen?” - -See the Detailed Design Decisions below for what the `Node` class looks like. - -#### Interaction with Azure - -We do not use the [Azure Python APIs](https://aka.ms/azsdk/python/all) directly -because they are more complicated (and less documented) than the [Azure -CLI](https://aka.ms/azureclidocs). With Invoke (as discussed above), `az` -becomes incredibly easy to work with. 
The Azure CLI lead developer states that -they have [feature parity](https://stackoverflow.com/a/50005660/1028665) and -that the CLI is more straightforward to use. Considering our ease-of-maintenance -requirement, this seems the apt choice. If it later becomes necessary to use the -Python APIs directly, that is, of course, still allowed by our design. - -### How Are Tests Timed Out? +### How are tests timed out? The [pytest-timeout](https://pypi.org/project/pytest-timeout/) plugin provides integrated timeouts via `@pytest.mark.timeout()`, a configuration @@ -278,7 +580,7 @@ file option, environment variable, and CLI flag. The Fabric library provides timeouts in both the configuration and per-command usage. These are already used to satisfaction in the prototype. -### How Are Tests Organized? +### How are tests organized? That is, what does a folder of tests map to: a platform, feature, or owner? @@ -308,7 +610,7 @@ test dictates if the tests below it should be skipped. If it passes, it implies the tests underneath it would pass, and so skips them; but if it fails, the next test below it runs and so on until a passing layer is found. -### How Will We Port LISAv2 Tests? +### How will we port LISAv2 tests? Given the above, we still must decide if we want to put the engineering effort into porting _every_ LISAv2 test. However, the prototype started by porting the @@ -327,31 +629,10 @@ This work needs to be done regardless of the approach we take with our framework (leveraging Pytest or writing our own), and it is not inconsequential work. It needs to be thoroughly planned and executed, and is certainly a ways off. -### What Do Parallel Tests Mean? - -While our original list of goals stated that we want to run tests “in parallel” -we were not specific about what was meant, and the topic of parallelism and -concurrency is understandably complex. We certainly don’t mean running two tests -at once on the same node, as this would undoubtedly lead to flaky tests. 
- -Assuming that we care about a set of tests passing on a particular image and -size combination, but not necessarily on a particular deployed instance, then we -can run tests concurrently by deploying multiple “identical” nodes and splitting -the tests across them. The tests would still run in isolation on each node. This -sounds hard, but actually it’s practically free with Pytest if the node fixture -is session scoped and we use -[pytest-xdist](https://pypi.org/project/pytest-xdist/) as described below. - -It’s also unlikely that we want to write our tests using the Async I/O pattern, -because we do not want tests to accidentally conflict with each other. While -[pytest-asyncio](https://pypi.org/project/pytest-asyncio/) exists, our -concurrency model is probably as described above: split the tests among multiple -identical nodes. - -### How Are Tests and Functions Retried? +### How are tests and functions retried? -Testing remote instances is inherently flaky, so we take a two-pronged approach -to dealing with the flakiness. +Testing remote targets is inherently flaky, so we take a two-pronged approach to +dealing with the flakiness. The [pytest-rerunfailures](https://pypi.org/project/pytest-rerunfailures/) plugin will be used to easily mark a test itself as flaky. It has the nice @@ -392,191 +673,29 @@ We can additionally list a test twice when modifying the items collection, as implemented in the criteria proof-of-concept. However, given the above abilities, this may not be desired. -### Where Does Parameterization Happen? - -Do we parameterize -[tests](https://docs.pytest.org/en/stable/parametrize.html#parametrizemark) or -[fixtures](https://docs.pytest.org/en/stable/fixture.html#fixture-parametrize)? - -This all comes down to how we want to use LISA. If we want to put a single -system under test at a time, and run all possible tests against it, then it -would make sense to parameterize the node fixture across the set of images to -test. 
I believe this to likely be the case. - -A parameterized node fixture would be session-scoped. This would enable us to -take advantage of [pytest-xdist](https://pypi.org/project/pytest-xdist/) for -running the tests concurrently against multiple nodes, where each forked runner -has its own node. Note that the cache key for deployed nodes will need to -include an identifier to separate the parallel runs, but this is available. - -This approach would let us list a number of images and sizes (or a matrix -combination of them) and then run all requested tests against each of those. -However, it means that tests will need to be intelligent enough to [skip or -xfail](https://docs.pytest.org/en/stable/skipping.html) on systems where they do -not apply. This can be done in test code to start with. As commonalities are -realized they can be refactored into simple, reusable feature checks. - -Finally, while the base (and most common) case of tests which require one node -becomes trivially solved, we still have to deal with the edge cases of tests -which use two or three nodes. Determining the best course of action here -requires investigating how and when those tests are run, and if the node pair or -triple all use the same image and size. An easy solution would be to have a test -which requires a second or third node to simply deploy them through a -function-scoped fixture, and tear them down at the end. This may be costly in -terms of time if there are many of these tests and they run frequently, but for -long “performance” tests it would be an adequate option. Alternatively, we could -have a node pool that the session-scoped node fixture uses, where each node is -locked while in use. While this would take more engineering effort, it means we -could use the nodes for running tests concurrently, and “borrow” a runner when a -test needs another. - -Other ideas are welcome, but what we don’t want to do is change the environment -a user is expecting their tests to run in. 
I do not think that we should use a -“least common denominator” approach that collects feature requests and deploys -nodes which match those features, as the user will lose control over their -environment. We still want to enumerate features so tests can check if they’re -applicable, but the user’s environment request should be respected. - -Alternatively, parameterizing tests means that each test (or module, or class, -as the fixture could no longer be session-scoped) specifies in some way (whether -in code or read at runtime from a file) what image/size combinations it should -run against. This generally eliminates having to check if it should skip, but -means that running the test suite will put multiple systems under test at once, -the results of which may be difficult to interpret. While this is a viable -route, it means maintaining a comprehensive list of which environments each -tests use, and I think feature-checking is more scalable. - -This is an open question which we need to settle as the two methods can -technically be combined, but we will want to be careful if we do this. - -Regardless of approach, we will want to write and _review_ a simple YAML schema -for specifying the system-under-test targets. As described above, the prototype -currently reads this information from a mark, but if we move forward with the -suggestion above, the scope of the node fixture will change to session and it -will become parameterized. Those parameters would be set at runtime by reading a -given YAML file. - -### When Do We Export a Plugin? - -The current prototype is simply using Pytest. All the implementation is in the files -`conftest.py` and `node_plugin.py`, the former of which is Pytest’s default -“user plugin” file. We likely want to create a proper `pytest-lisa` package -which provides our marks, fixtures, command-line parameters, user modes, and -hook modifications for reading YAML files. 
- -This requires more research as doing so is obviously not necessary but is nice. - -## Detailed Design Decisions - -This section contains truly technical specifications of our current plans to -bring the prototype to production. - -### Planned `Node` Class Refactor - -#### Basic Shape - -`Node` should still subclass `fabric.Connection`. It should be a partially -abstract class with platform-specific subclasses (Azure, libvirt, an embedded -device, etc.). However, the initializer and context manager methods _should not_ -need to be reimplemented by a platform subclass. Most added methods like -`ping()` and `reboot()` should also be shared. This is where static type -checking will help. - -An `Environment` class will be a collection of nodes in a group, for tests which -require multiple nodes. It is important that `Node` is self-contained and does -not require an `Environment` instance because the base case of most tests is to -use a `Node`. - -#### Caching - -A `Node` should be able to be cached. If `--keep-vms` is given to Pytest, it -should not delete the deployed VM resource and should instead cache its data so -that a subsequent invocation can connect directly to it. A `Node` should also be -able to connect directly to a system deployed outside Pytest, reusing the cache -hydration logic. The `init()` and `__exit__()` methods will handle checking and -updating the cache so that this logic is shared. - -Note that cross-session [caching](https://docs.pytest.org/en/stable/cache.html) -is provided by Pytest, and very easy to work with. The existing prototype -already implements `--keep-vms`. - -#### Initializing - -The `init()` method does the following: - -* Takes an optional group ID (provided by Environment for instance so that it’s - easy to create/deploy multiple nodes into one group) to generate its name and - deduce its group. - -* Checks the cache for the node’s key. 
- -* On a cache miss, calls `deploy()` and saves the returned host to the field - inherited from `Connection` and the rest of the platform-specific information - to a `data` dictionary field. Caches the data dictionary for the node’s key. - -* On a cache hit, saves the cached host and data to the instance. - -* Calls `super()` to setup `Connection` with our default Fabric configuration. - -#### Deploy and Delete - -* The `deploy()` and `delete()` methods are abstract and implemented by - platform-specific node classes to actually deploy the VM. For Azure, note that - `deploy()` will check if the resource group exists, and if not, creates it. - For `delete()` it will check if it is the last VM in the group, and if so - deletes the group too. Again this is to keep `Environment` from being a - requirement. - -* The group ID is `pytest-{uuid4()}` (maybe with `pytest` being replaced by a - user- or run-specific short identifier). The ID should be returned by a static - method so that when an `Environment` creates a collection of nodes, it can - simply use the static method to generate a shared group ID. - -* The context manager’s `__exit__()` method calls `super()` to disconnect and - potentially `delete()` the VM. If it’s to be deleted, the key/value pair is - also removed from the cache. - -* Because of how Python’s context managers work, we may not need to reimplement - `__enter__()` but will want to check its inherited implementation. - -#### Common Tasks - -Common tasks for systems under tests like rebooting and pinging should be -implemented on the `Node` class. - -* Methods inherited from `Connection` include `run()`, `sudo()` and `local()` - which are used to easily run arbitrary commands, and `get()` and `put()` to - download and upload arbitrary files. - -* The `cat()` method (already implemented in the prototype) wraps `get()` and - returns the file as data in a string. 
This makes test code like this possible: - - ```python - assert node.cat("state.txt") == "TestCompleted" - ``` - -* Reboot should first try to use `self.sudo("reboot", timeout=5)` (with a short - timeout to avoid a hung SSH session). It should retry with an exponential - back-off to see if the machine has rebooted by checking either `uptime` or the - existence of a file created before the reboot. This is to avoid having to - `sleep()` and just guess the amount of time it takes to reboot. +## What Else? -* Restart should “power cycle” the machine using the platform’s API, and thus is - in abstract method. It should optionally be able to redeploy the node too, - which can be used by tests which require a completely fresh node. +There’s still a lot more to think about and design. A non-exhaustive list of +future topics (some touched on above): -* Note that the `local()` method is already overridden to patch Fabric so as to - ignore the provided SSH environment. This demonstrates that we can easily - provide necessary changes to users while still leveraging the library. For - instance, we may want an alternative to `run()` which, instead of taking a - string, takes a list of arguments and quotes them correctly so as to deal with - difficult shell quoting edge cases. +* Tests inventory (generating statistics from metadata) +* ARM template support (with Azure CLI) +* Servicing Azure CLI (how stable is their API?) +* libvirt driver support (gives us Hyper-V and more) +* Duration reporting (built-in) +* Self-documentation (via Pydoc) +* Environment class design +* Feature requests (NICs in particular) +* Selection and targets YAML schema +* Secret management +* External results reporting (database and emails) +* Embedded systems / bare metal support +* Managing Python `logging` records +* Managing shell command stdout/stderr -* One new method we’ve already identified is `copy_scripts()` which will copy a - list of scripts to the node and mark them executable. 
It could even be a - context manager which deletes the scripts when exited. +## What alternatives were tried? -## Alternatives Considered +These are notes from things tried that did not work out, and why. ### Writing Another Framework @@ -592,17 +711,16 @@ already caused this mess in the first place. I think the work of prototyping said new framework was valuable, as it provided insight into the eventual technical design of LISAv3. -### Using Remote Capabilities of pytest-xdist +### Using Remote Capabilities of `pytest-xdist` -With the [pytest-xdist plugin](https://github.com/pytest-dev/pytest-xdist) there -already exists support for running a folder of tests on an arbitrary remote host -via SSH. +With the [pytest-xdist][] plugin there already exists support for running a +folder of tests on an arbitrary remote host via SSH. The LISA tests could be written as Python code suitable for running on the target test system, which means direct access to the system in the test code itself (subprocesses are still available, without having to use SSH within the test, but would become far less necessary), something that is not possible with -any current prototype. Where the pytest-xdist plugin copies the package of code +any current prototype. Where the `pytest-xdist` plugin copies the package of code to the target node and runs it, the pytest-lisa plugin could instantiate that node (boot the necessary image on a remote machine or launch a new Hyper-V or Azure VM, etc.) for the tests. @@ -611,8 +729,8 @@ However, this use of pytest-dist requires full Python support on the target machines, and drastically changes how developers write tests. Furthermore, it would not support running local commands against the remote node (like ping) or running the test across a reboot of the node. Thus we do not want to use this -functionality of pytest-xdist. That said, pytest-xdist will still be useful for -running tests concurrently, as described above. +functionality of `pytest-xdist`. 
That said, `pytest-xdist` will still be useful +for running tests concurrently, as described above. ### Using Paramiko Instead of Fabric @@ -668,37 +786,13 @@ However, the data returned by Paramiko is in bytes, which in Python 3 are not equivalent to strings, hence the existing implementation which uses `BytesIO` and decodes the bytes to a string. -### Writing a Class of Individual Test Methods - -An option I explored to make an “executive summary” of the smoke test was to use -a class where each functionality was tested as individual function (meaning they -could fail independently without failing the whole smoke test), accompanied by a -class-scoped node fixture. This had its advantages, however, it was difficult to -parameterize and also overly verbose. We should instead keep each test as Pytest -intends: as a function. This allows the fixtures to be written in a simpler -manner (not rely on caching between functions) and allows -[parameterization](https://docs.pytest.org/en/stable/parametrize.html) using the -built-in decorator `@pytest.mark.parametrize`. - -However, this decision may be reconsidered if we session-scope and parameterize -the `Node` fixture, in which case these issues are resolved. - -## What Else? - -There’s still a lot more to think about and design. A non-exhaustive list of -future topics (some touched on above): - -* Tests inventory (generating statistics from metadata) -* ARM template support (with Azure CLI) -* Servicing Azure CLI (how stable is their API?) 
-* libvirt driver support (gives us Hyper-V and more) -* Duration reporting (built-in) -* Self-documentation (via Pydoc) -* Environment class design -* Feature requests (NICs in particular) -* Selection and targets YAML schema -* Secret management -* External results reporting (database and emails) -* Embedded systems / bare metal support -* Managing Python `logging` records -* Managing shell command stdout/stderr +[pytest-xdist]: https://github.com/pytest-dev/pytest-xdist +[collection hooks]: https://docs.pytest.org/en/latest/reference.html#collection-hooks +[parameterization]: https://docs.pytest.org/en/stable/parametrize.html +[pytest_addoption]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_addoption +[pytest_collection_modifyitems]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_collection_modifyitems +[pytest_configure]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_configure +[pytest_generate_tests]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_generate_tests +[pytest_runtest_makereport]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_runtest_makereport +[schema]: https://pypi.org/project/schema/ +[test running hooks]: https://docs.pytest.org/en/latest/reference.html#test-running-runtest-hooks From ad0e3bc0050764678f9fd9f1b1ae66acce5e9038 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 17 Nov 2020 16:10:59 -0800 Subject: [PATCH 83/84] Write version 0.3 of design document --- pytest/DESIGN.md | 273 +++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 228 insertions(+), 45 deletions(-) diff --git a/pytest/DESIGN.md b/pytest/DESIGN.md index 50ebb9a4c2..31647e5dc2 100644 --- a/pytest/DESIGN.md +++ b/pytest/DESIGN.md @@ -7,7 +7,7 @@ evaluating the feasibility of leveraging Please see [PR #1065](https://github.com/LIS/LISAv2/pull/1065) for a working, proof-of-concept prototype. 
-Authored by Andrew Schwartzmeyer (he/him), version 0.2.0. +Authored by Andrew Schwartzmeyer (he/him), version 0.3.0. ## Why Pytest? @@ -40,8 +40,8 @@ needs very well: * Automatic test discovery, no boiler-plate test code * Useful information when a test fails (assertions are introspected) -* Test and fixture parameterization -* Modular setup/teardown via fixtures +* Test and fixture [parameterization][] +* Modular setup/teardown via [fixtures][] * Incredibly customizable (as detailed above) So all the logic for describing, discovering, running, skipping and reporting @@ -89,10 +89,18 @@ and result notifiers. It will similarly support both CLI and YAML file input. We should strive to keep these plugins from depending on each other in order to keep their scope well-defined. In the “LISA” repository of tests we will depend -on the two plugins and maintain additional fixtures for our tests’ unique +on the two plugins and maintain additional [fixtures][] for our tests’ unique requirements. Similarly, we and others may have private test repositories which build upon the above by defining new platform support and internal service -integrations. +integrations. The built-in plugin discovery of Pytest (via `conftest.py` files) +enables us to satisfy one of our requirements to “support plugins to orchestrate +the test environment.” + +Finally, a third smaller utility plugin, `pytest-schema` may be written in order +to share the common functionality of registering component schemata (e.g. +platform and target parameters from `pytest-target` and selection criteria from +`pytest-lisa`). This is somewhat of an implementation detail, but would be a +third and lower-level library we can publish. ## pytest-target @@ -104,11 +112,11 @@ target would be a virtual machine deployed by `pytest-target` with SSH access provided to the requesting test. A target could optionally be pre-deployed and simply connected. Some tests may request multiple targets as well. 
-Pytest uses [fixtures](https://docs.pytest.org/en/stable/fixture.html), which -are the primary way of setting up test requirements. They replace less flexible -alternatives like setup/teardown functions. It is through fixtures that we -implement remote target setup/teardown. Our `target` fixture returns a `Target` -instance, which currently provides: +Pytest uses [fixtures][], which are the primary way of setting up test +requirements. They replace less flexible alternatives like setup/teardown +functions. It is through fixtures that we implement remote target +setup/teardown. Our `target` fixture returns a `Target` instance, which +currently provides: * Remote shell access via SSH * Data including hostname / IP address @@ -149,8 +157,8 @@ additional fixtures and thus shared among tests. For Azure, we currently use the [Azure CLI](https://aka.ms/azureclidocs) to deploy a virtual machine. For Hyper-V (and other virtualization platforms), we -would like to use [libvirt](https://libvirt.org/python.html), and for embedded -environments we are evaluating +would like to use [libvirt](https://libvirt.org/python.html), and for embedded / +bare metal environments we are evaluating [labgrid](https://github.com/labgrid-project/labgrid). If possible, we do not want to use the [Azure Python @@ -160,8 +168,17 @@ CLI](https://aka.ms/azureclidocs). With Invoke (as discussed above), `az` becomes incredibly easy to work with. The Azure CLI lead developer states that they have [feature parity](https://stackoverflow.com/a/50005660/1028665) and that the CLI is more straightforward to use. Considering our ease-of-maintenance -requirement, this seems the apt choice. If it later becomes necessary to use the -Python APIs directly, that is, of course, still doable. +requirement, this seems the apt choice, especially since the Azure CLI supports +deploying resources with [ARM +templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/deploy-cli). 
+If it later becomes necessary to use the Python APIs directly, that is, of +course, still doable (and we can reuse existing code doing it). + +On the topic of “servicing” the Azure CLI, its developers state that “at command +level, packages only upgrading the PATCH version guarantee backward +compatibility.” The tool is also intended to be used in scripts, so servicing +would amount to documenting the tested version and having the Azure class check +that it’s compatible before using it (or warning and then trying its best). ### What’s the `Target` class? @@ -220,6 +237,51 @@ platforms and their parameter schemata are automatically gathered from users’ own `conftest.py` files and other plugins. This enables the `target` fixture to dynamically instantiate a target from the gathered requirements and parameters. +For example, the `Azure(Target)` class defines its required parameters using the +[schema][] library like this: + +```python +from schema import Optional, Schema +from target import Target + +class Azure(Target): + ... + schema: Schema = Schema( + { + # TODO: Maybe validate as URN or path etc. + "image": str, + Optional("sku", default="Standard_DS1_v2"): str, + Optional("location", default="eastus2"): str, + Optional("networking", default=""): str, + } + ) +``` + +In the YAML playbook, a set of Azure targets can then be defined like this: + +```yaml +targets: + - name: Debian + platform: Azure + parameters: + image: credativ:Debian:9:9.0.201706190 + location: westus2 + + - name: Ubuntu + platform: Azure + parameters: + image: UbuntuLTS + sku: Standard_DS3_v2 +``` + +These targets are then used to parameterize the `target` fixture in the +[pytest_generate_tests][] hook (see below for more details). + +This demonstrated how we can have platforms define their own schema and register +that schema automatically. 
A pending update to this is to have two schemata per +`Target` subclass: target-level and platform-level (the former is what’s +demonstrated above, the latter would be common settings, such as subscription). + #### How are requirements examined? The `features` attribute is currently a set of strings and (combined with the @@ -228,7 +290,8 @@ target instance (representing a deployed machine) met a test’s requirements. I should be updated with a `Requirements` class that represents all physical attributes of the target, and a `requires` Pytest mark should be added which takes instances of this class. Two `Requirements` should be comparable to -determine if one set meets (or exceeds) the other set. +determine if one set meets (or exceeds) the other set. Existing code that does +this can be reused here. #### How do we share common tasks? @@ -257,7 +320,7 @@ and thus is in abstract method. Other tools and shared logic should be implemented as necessary. A major area of concern is the automatic and package-manager agnostic installation of necessary -tools, much of which has been implemented previously and can be integrated. +tools, much of which has been implemented previously and can be reused. ### How are targets requested and managed? @@ -323,6 +386,44 @@ allows us to run a collection of tests against multiple targets with ease. These targets are defined in a YAML file and validated against the parameters collected from the previously described platform subclasses. +The entire implementation looks like so: + +```python +TARGETS: List[Dict[str, Any]] = [] +TARGET_IDS: List[str] = [] + +def pytest_configure(config: Config) -> None: + book = get_playbook(config.getoption("--playbook")) + for t in book.get("targets", []): + TARGETS.append(t) + TARGET_IDS.append(t["name"]) + +def pytest_generate_tests(metafunc: Metafunc) -> None: + if "target" in metafunc.fixturenames: + assert TARGETS, "No targets specified!" 
+        metafunc.parametrize("target", TARGETS, True, TARGET_IDS)
+```
+
+The function `get_playbook()` only imports the [PyYAML][] library, opens the
+playbook file `f` within a context manager, and returns
+`playbook.schema.validate(yaml.load(f))`. This is leveraging Pytest’s existing
+parameterization technology to achieve one of our “test entrance” goals of
+requesting environments with a YAML playbook, and one of our “test parameter
+validation” goals of validating platforms before executing tests so that we can
+fail fast if a target has insufficient information to be set up. Parsing the
+same parameters from a CLI can also be implemented.
+
+Finally, once the `target` fixture has returned a working and sanity-checked
+environment to the requesting test, the test is capable of examining any and all
+attributes of the `Target` and quickly marking itself as skipped, expected to
+fail, or failed before executing the body of the test. Our static type checking
+enables developers to ensure that the platform they requested supports all
+methods and fields they use by annotating the test’s `target` parameter with the
+expected platform type (or types). Ensuring the effectiveness of this type
+checking will require us to carefully update our platform implementations, and
+not rely on arbitrary objects of data. (For example, add an `internal_address`
+field to `Azure`, don’t just look up `data["internal_address"]`.)
+
 ### How are tests executed in parallel?
 
 While our original list of goals stated that we want to run tests “in parallel”
@@ -344,6 +445,11 @@ particular executors and using particular targets. While there are many paths
 open to us, this plugin actually provides a hook, `pytest_xdist_make_scheduler`
 that exists specifically to “implement custom tests distribution logic.”
+
+Figuring out the requirements of our test scheduler and designing the best
+algorithm will require further discussion and design review.
For the purposes of +moving forward, we are not blocked, as the eventual implementation can be +dropped in-place with minimal effort. + ## pytest-lisa ### What are the user modes? @@ -352,7 +458,10 @@ Because Pytest is incredibly customizable, we want to provide a few sets of reasonable default configurations for some common scenarios. We will add a flag like `--lisa-mode=[dev,debug,ci,demo]` to change the default options and output of Pytest. Doing so is readily supported by Pytest via the [pytest_addoption][] -and [pytest_configure][] hooks. We call these the provided “user modes.” +and [pytest_configure][] hooks. We call these the provided “user modes.” Note +that by “output” we mean not just logging (because that implies the Python +`logger` module, which Pytest allows full control over) but also commands’ +stdout and stderr as well as Pytest-provided information. * The dev(eloper) mode is intended for use by test developers while writing a new test. It is verbose, caches the deployed VMs between runs, and generates a @@ -400,11 +509,15 @@ def test_lis_driver_version(target: Azure) -> None: This is a functional example, which takes zero implementation. With this simple decorator, all test [collection hooks][] can introspect the metadata, enforce required parameters and set defaults, select tests based on arbitrary criteria, -and list test coverage statistics. +and list test coverage statistics (test inventory). Designing and implementing +the test inventory algorithm is still under development, but it’s tractable. Note that Pytest leverages Python’s docstrings for built-in documentation (and can even run tests discovered in such strings, like doctest). Hence we do not -have a separate field for the test’s documentation. +have a separate field for the test’s documentation. 
As long as we continue to +follow the practice of using docstrings for our modules, classes, and functions, +we can automatically use [pydoc](https://docs.python.org/3/library/pydoc.html) +to generate full documentation for each plugin and test. Being just Python code, this decorator need not be `@pytest.mark.lisa(...)` but can trivially be provided as simply `@LISA(...)`. In fact, we provide this in @@ -441,7 +554,8 @@ def validate(mark: Mark) -> None: In the future we could change `LISA` to be a function with these keyword arguments so that IDE auto-completion is enabled. However, this is not mandatory -to move forward, and parameter validation is enabled succinctly with the above. +to move forward, and parameter validation is enabled succinctly with the above, +which satisfies one of our “test parameter validation” requirements. This mark also does need to be repeated for each test, as marks can be scoped to a module, and so one line could describe defaults for every test in a file, with @@ -452,7 +566,10 @@ that is used to prove the concept deploying (or reusing) a target based on the test’s required and the target’s available sets of features. However, as we move forward we should define a separate `requires` mark that takes well-defined classes describing the minimal required resources for a test. This will be part -of the refactor into the two Pytest plugins mentioned above. +of the refactor into the two Pytest plugins mentioned above. Coupled with the +test’s requested `target` fixture being parameterized (see discussion in +`pytest-target`) this demonstrates at least one way we can satisfy our “test run +planner/scheduler” requirement. Furthermore, we have a prototype [generator](https://github.com/LIS/LISAv2/tree/pytest/generator) which parses @@ -542,6 +659,10 @@ def pytest_collection_modifyitems( items[:] = [i for i in included if i not in excluded] ``` +Together, the CLI support and YAML playbook satisfy one of our “test entrance” +requirements. 
We can also generate our own binary called `lisa` which simply +delegates to Pytest, if we really want to do so. + Because this is simply a Python list, we can also sort the tests according to our needs, such as by priority. If the `python-targets` plugin has already sorted by requirements, that’s just fine, Python’s `sorted()` built-in is @@ -564,13 +685,34 @@ community plugin [pytest-azurepipelines](https://pypi.org/project/pytest-azurepipelines/) which enhances the standard JUnit report for ADO. +One of our requirements is to support the lookup of previous tests’ execution +metrics, such as recorded performance metrics and duration, so that performance +tests can check regressions. This is the perfect example of carrying a small +fixture which provides access to our internal database and is dynamically added +to our tests when run internally, and the tests can lookup and record whatever +they need through the fixture. + However, we also have internal requirements to report test results throughout -the test life cycle to a database to be consumed by other tools. In this sense, -LISAv3 (the composition of our published plugins, tests, and fixtures) is simply -a producer. Our repository’s `conftest.py` can implement the necessary logic -using Pytest’s ample [test running hooks][]. In particular, the hook -[pytest_runtest_makereport][] is called for each of the setup, call and teardown -phases of a test. As such it can used for precisely this purpose. +the test life cycle to a database (the “result manager” and “progress tracker”) +to be consumed by other tools. In this sense, LISAv3 (the composition of our +published plugins, tests, and fixtures) is simply a producer, and the consumers +can parse the test results, send emails, archive the collected logs, update a +GUI display of test progress, etc. Our repository’s `conftest.py` can implement +the necessary logic using Pytest’s ample [test running hooks][]. 
In particular,
+the hook [pytest_runtest_makereport][] is called for each of the setup, call and
+teardown phases of a test. As such it can be used for precisely this purpose.
+
+### How are setup, run, and cleanup handled?
+
+Pytest strives to require minimal boiler-plate code. Thus the classic
+“xunit-style” of defining a class with setup and teardown functions in addition
+to test functions is not recommended (nor necessary). Generally Pytest expects
+[fixtures][] to be used for dependency injection (which is what setup/teardown
+functions usually do). For users that really want the classic style, it is
+nonetheless fully
+[supported](https://docs.pytest.org/en/stable/xunit_setup.html) and documented
+(and can be applied at the module, class, and method scopes). Thus our “test
+runner” requirement is satisfied.
 
 ### How are tests timed out?
 
@@ -578,7 +720,9 @@ The [pytest-timeout](https://pypi.org/project/pytest-timeout/) plugin
 provides integrated timeouts via `@pytest.mark.timeout()`, a configuration file
 option, environment variable, and CLI flag. The Fabric library provides timeouts
 in both the configuration and per-command usage. These are already used
-to satisfaction in the prototype.
+to satisfaction in the prototype. Additionally, Pytest has built-in support for
+measuring the duration of each fixture’s setup and teardown and each test (it’s
+simply the `--durations` and `--durations-min` flags).
 
 ### How are tests organized?
 
@@ -645,10 +789,10 @@ def test_something_flaky(...):
     ...
 ```
 
-> Note that there is an open
-> [bug](https://github.com/pytest-dev/pytest-rerunfailures/issues/51) in this
-> plugin which can cause issues with fixtures using scopes other than “function”
-> but it can be worked around.
+Note that there is an open
+[bug](https://github.com/pytest-dev/pytest-rerunfailures/issues/51) in this
+plugin which can cause issues with fixtures using scopes other than “function”
+but it can be worked around. 
The [Tenacity](https://tenacity.readthedocs.io/en/latest/) library should be
 used to retry flaky functions that are not tests, such as downloading boot
@@ -673,25 +817,62 @@ We can additionally list a test twice when modifying the items collection, as
 implemented in the criteria proof-of-concept. However, given the above
 abilities, this may not be desired.
 
+## What does the “flow” of Pytest look like?
+
+This is best described in Pythonic pseudo-code, where the context manager
+encapsulates each scope and the for loop encapsulates processing:
+
+```python
+pool_fixture: a session-scoped context manager
+target_fixture: a function-scoped context manager
+items: a collection of tests
+targets: a collection of targets
+criteria: a collection of test selection criteria
+
+def pytest_addoption(parser):
+    """Add CLI options etc."""
+    parser.addoption("--playbook", type=Path)
+
+pytest_addoption(parser) # Pytest fills in parser.
+
+def pytest_configure(config):
+    """Setup the run's configuration."""
+    targets = playbook.get_targets()
+    criteria = playbook.get_criteria()
+
+pytest_configure(config) # Pytest fills in config.
+
+# pytest_generate_tests(metafunc) does this:
+for test_metafunc in metafuncs:
+    for target in targets:
+        # items is tests * targets in size
+        items.append(test_metafunc[target])
+
+# pytest_collection_modifyitems(session, config, items) does this:
+for test in items:
+    validate(test)
+    include_or_exclude(test, criteria)
+
+# finally, each executor/session does this:
+session_items = items.split() # based on scheduler algorithm
+with pool_fixture as pool:
+    # the fixture has setup a pool to track the deployed targets
+    for test_function in session_items:
+        with target_fixture as target:
+            # the fixture has found or deployed an appropriate target
+            test_function(target)
+```
+
 ## What Else?
 
 There’s still a lot more to think about and design. 
A non-exhaustive list of future topics (some touched on above): * Tests inventory (generating statistics from metadata) -* ARM template support (with Azure CLI) -* Servicing Azure CLI (how stable is their API?) -* libvirt driver support (gives us Hyper-V and more) -* Duration reporting (built-in) -* Self-documentation (via Pydoc) -* Environment class design -* Feature requests (NICs in particular) -* Selection and targets YAML schema +* Environment / multiple targets class design +* Feature/requirement requests (NICs in particular) +* Custom test scheduler algorithm * Secret management -* External results reporting (database and emails) -* Embedded systems / bare metal support -* Managing Python `logging` records -* Managing shell command stdout/stderr ## What alternatives were tried? @@ -786,9 +967,11 @@ However, the data returned by Paramiko is in bytes, which in Python 3 are not equivalent to strings, hence the existing implementation which uses `BytesIO` and decodes the bytes to a string. 
-[pytest-xdist]: https://github.com/pytest-dev/pytest-xdist +[PyYAML]: https://pyyaml.org/wiki/PyYAMLDocumentation [collection hooks]: https://docs.pytest.org/en/latest/reference.html#collection-hooks +[fixtures]: https://docs.pytest.org/en/stable/fixture.html [parameterization]: https://docs.pytest.org/en/stable/parametrize.html +[pytest-xdist]: https://github.com/pytest-dev/pytest-xdist [pytest_addoption]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_addoption [pytest_collection_modifyitems]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_collection_modifyitems [pytest_configure]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_configure From e33288c08b46916bfc62f8383da058eee13107c5 Mon Sep 17 00:00:00 2001 From: Andrew Schwartzmeyer Date: Tue, 17 Nov 2020 17:04:20 -0800 Subject: [PATCH 84/84] Fix GitHub CI workflow The old-style was recently deprecated: https://github.blog/changelog/2020-10-01-github-actions-deprecating-set-env-and-add-path-commands/ --- .github/workflows/ci-workflow.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-workflow.yaml b/.github/workflows/ci-workflow.yaml index b37a79c046..8d9527d3e9 100644 --- a/.github/workflows/ci-workflow.yaml +++ b/.github/workflows/ci-workflow.yaml @@ -25,13 +25,13 @@ jobs: if: runner.os == 'Linux' run: | curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python - --preview --version 1.1.0b4 - echo "::add-path::$HOME/.poetry/bin" + echo "$HOME/.poetry/bin" >> $GITHUB_PATH - name: Install Poetry for Windows if: runner.os == 'Windows' run: | (Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py -UseBasicParsing).Content | python - --preview --version 1.1.0b4 - echo "::add-path::$env:USERPROFILE\.poetry\bin" + echo "$env:USERPROFILE\.poetry\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - name: 
Install Python dependencies run: make setup