diff --git a/.github/.editorconfig b/.github/.editorconfig new file mode 100644 index 0000000000..15e6a1f149 --- /dev/null +++ b/.github/.editorconfig @@ -0,0 +1,2 @@ +# Ignore parent project’s config +root = true diff --git a/.github/workflows/ci-workflow.yaml b/.github/workflows/ci-workflow.yaml new file mode 100644 index 0000000000..8d9527d3e9 --- /dev/null +++ b/.github/workflows/ci-workflow.yaml @@ -0,0 +1,43 @@ +name: CI Workflow for LISAv3 via Pytest + +on: + pull_request: + branches: + - pytest/main + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-20.04, windows-2019] + fail-fast: false + steps: + - name: Checkout repository to $GITHUB_WORKSPACE + uses: actions/checkout@v2 + + - name: Setup bootstrap Python + uses: actions/setup-python@v2 + with: + python-version: "3.8" + + - name: Install Poetry for Linux + if: runner.os == 'Linux' + run: | + curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python - --preview --version 1.1.0b4 + echo "$HOME/.poetry/bin" >> $GITHUB_PATH + + - name: Install Poetry for Windows + if: runner.os == 'Windows' + run: | + (Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py -UseBasicParsing).Content | python - --preview --version 1.1.0b4 + echo "$env:USERPROFILE\.poetry\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + + - name: Install Python dependencies + run: make setup + + - name: Run self tests + run: make test + + - name: Run semantic analysis + run: make check diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..d75bd1fbe5 --- /dev/null +++ b/Makefile @@ -0,0 +1,33 @@ +all: setup test run + +# Install Python packages +setup: + cd pytest && poetry install --no-ansi --remove-untracked + +# Run Pytest +run: setup + cd pytest && poetry run pytest + +# Run local tests +test: setup + cd pytest && poetry run pytest --verbose --playbook=playbooks/test.yaml --setup-show 
selftests/ + +# Run semantic analysis +check: setup + cd pytest && poetry run pytest --check + +# Clear cache and show when each fixture would be setup and torn down. +clean: + cd pytest && poetry run pytest --cache-clear --setup-plan + +# Demonstrate test selection via YAML playbook. +yaml: + cd pytest && poetry run pytest --collect-only --playbook=playbooks/criteria.yaml + +# Run the smoke test demo. +smoke: + cd pytest && poetry run pytest --demo -n 4 --playbook=playbooks/smoke.yaml + +# Print current Python virtualenv +venv: + cd pytest && poetry env list --no-ansi --full-path diff --git a/pytest/.editorconfig b/pytest/.editorconfig new file mode 100644 index 0000000000..15e6a1f149 --- /dev/null +++ b/pytest/.editorconfig @@ -0,0 +1,2 @@ +# Ignore parent project’s config +root = true diff --git a/pytest/.flake8 b/pytest/.flake8 new file mode 100644 index 0000000000..f855799a35 --- /dev/null +++ b/pytest/.flake8 @@ -0,0 +1,5 @@ +[flake8] +max-line-length = 88 +select = B,BLK,C90,E,F,I,W +max-complexity = 15 +extend-ignore = E203 diff --git a/pytest/.gitignore b/pytest/.gitignore new file mode 100644 index 0000000000..e1711c78f3 --- /dev/null +++ b/pytest/.gitignore @@ -0,0 +1,4 @@ +# Pytest report files +/*.xml +/*.html +/assets diff --git a/pytest/CODE_OF_CONDUCT.md b/pytest/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..f9ba8cf65f --- /dev/null +++ b/pytest/CODE_OF_CONDUCT.md @@ -0,0 +1,9 @@ +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
+ +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns diff --git a/pytest/CONTRIBUTING.md b/pytest/CONTRIBUTING.md new file mode 100644 index 0000000000..f74461187d --- /dev/null +++ b/pytest/CONTRIBUTING.md @@ -0,0 +1,269 @@ +# Contributing Guidelines + +This document describes the existing developer tooling we have in place (and what to +expect of it), as well as our design and development philosophy. + +## Naming Conventions + +Naming conventions are not automatically enforced, so please read the [naming +conventions](https://www.python.org/dev/peps/pep-0008/#naming-conventions) +section of PEP 8, which describes what each of the different styles means. A +short summary of the most important parts: + +* Modules (and hence files) should have short, all-lowercase names. +* Class (and exception) names should normally use the `CapWords` convention + (also known as `CamelCase`). +* Function and variable names should be lowercase, with words separated by + underscores as necessary to improve readability (also known as `snake_case`). +* To avoid collisions with the standard library, an underscore can be appended, + such as `id_`. +* Always use `self` for the first argument to instance methods. +* Always use `cls` for the first argument to class methods. +* Use one leading underscore only for non-public methods and instance variables, + such as `_data`. Do not activate name mangling with `__` unless necessary. +* If there is a pair of `get_x` and `set_x` methods, they should instead be a + proper property, which is easy to do with the built-in `@property` decorator. +* Constants should be `CAPITALIZED_SNAKE_CASE`. 
+* When importing a function, try to avoid renaming it with `import as` because + it introduces cognitive overhead to track yet another name. +* When deriving another module’s class (such as `unittest.TestCase`), reuse the + class name to avoid confusion, such as `LisaTestCase`, instead of introducing + a different connotation like `TestSuite`. + +When in doubt, adhere to existing conventions, or check the style guide. + +## Automated Tooling + +If you have ran pytest-lisa already, then you have installed and used the `poetry` +tool. [Poetry][] is a [PEP 518][] compliant and cross-platform build system +which handles our Python dependencies and environment. + +This project’s dependencies are found in the [`pyproject.toml`](pyproject.toml) +file. This is similar to but more powerful than the familiar `requirements.txt`. +With [PEP 518][] and [PEP 621][]. + +[Poetry]: https://python-poetry.org/docs/ +[PEP 518]: https://www.python.org/dev/peps/pep-0518/ +[PEP 621]: https://www.python.org/dev/peps/pep-0621/ + +### Metadata + +The first section, `tool.poetry`, defines the project’s metadata (name, version, +description, authors, and license) which will be embedded in the final built +package. + +The chosen version follows [Semantic Versioning][], with the [Python specific +pre-release versioning suffix][pre-release] ‘.dev1’. Since this is “pytest-lisa” it +seemed appropriate to set our version to ‘3.0.0.dev1’, that is, “the first +development release of pytest-lisa.” + +[Semantic Versioning]: https://semver.org/ +[pre-release]: https://packaging.python.org/guides/distributing-packages-using-setuptools/#choosing-a-versioning-scheme + +### Package Dependencies + +The next section, `tool.poetry.dependencies`, is where `poetry add +` records our required packages. + +Poetry automatically creates and manages [isolated +environments](https://python-poetry.org/docs/managing-environments/). 
+ +From the documentation: + +> Poetry will first check if it’s currently running inside a virtual +> environment. If it is, it will use it directly without creating a new one. But +> if it’s not, it will use one that it has already created or create a brand new +> one for you. + +On Linux, your initial run of `poetry install` will cause Poetry to +automatically setup a new [virtualenv][] using [pyenv][]. If you are developing +on Windows, you will want to setup your own, perhaps using [Conda][]. + +[virtualenv]: https://docs.python-guide.org/dev/virtualenvs/ +[pyenv]: https://github.com/pyenv/pyenv +[Conda]: https://docs.conda.io/en/latest/ + +* python: We pinned Python to version 3.8 so everyone uses the same version. + +### Developer Dependencies + +Similar to the previous section, `tool.poetry.dev-dependencies` is where `poetry +add --dev ` records our _developer_ packages. These are not +necessary for LISAv3 to execute, but are used by developers to automatically +adhere to our coding standards. + +* [Black](https://github.com/psf/black), the opinionated code formatter which + settles all debates as to how our Python files should be formatted. It follows + [PEP 8](https://www.python.org/dev/peps/pep-0008/), the official Python style + guide, and where ambiguous makes the decision for us. + +* [Flake8](https://flake8.pycqa.org/en/latest/) (and integrations), the semantic + analyzer, used to coordinate most of the other tools. + +* [isort](https://timothycrosley.github.io/isort/), the `import` sorter, which + automatically splits imports into the expected, alphabetized sections. + +* [mypy](http://mypy-lang.org/), the static type checker, which coupled with + type annotations allows us to avoid the pitfalls of Python being a dynamically + typed language. + +* [python-language-server](https://github.com/palantir/python-language-server) + (and integrations), the de facto LSP server. 
While Microsoft is developing + their own LSP servers, they do not integrate with the existing ecosystem of + tools, and their latest tool, Pyright, simply does not support + `pyproject.toml`. Since pyls is used far more widely, and supports every + editor, we use it. + +* [rope](https://github.com/python-rope/rope), to provide completions and + renaming support to pyls. + +With these packages installed and a correctly setup editor (see the readme and +feel free to reach out to us), your code should automatically follow all the +standards which we could automate. + +The final sections, `tool.black`, `tool.isort`, `build-system`, and the +`.flake8` file (Flake8 does not yet support `pyproject.toml`) configure the +tools per their recommendations. + +## Type Annotations + +We are using [mypy][] to enforce static type checking of our Python code. This +may surprise you as Python is not a statically typed language. While dynamic +typing can be useful, for a complex tool such as LISA it is more likely to +introduce bugs that are found only at runtime (which the user experiences as a +crash). For more information on why we (and others) do this, see [Dropbox’s +journey to type checking 4 million lines of Python][dropbox]. [PEP 484][] and +[PEP 526][] (among others) introduced and defined [type hints][] for the Python +language. You can probably figuring out the syntax based on the surrounding +code, but you can also see this [Intro to Using Python Type Hints][intro] and +mypy’s [cheat sheet][]. 
+ +[mypy]: http://mypy-lang.org/ +[dropbox]: https://dropbox.tech/application/our-journey-to-type-checking-4-million-lines-of-python +[PEP 484]: https://www.python.org/dev/peps/pep-0484/ +[PEP 526]: https://www.python.org/dev/peps/pep-0526/ +[type hints]: https://docs.python.org/3/library/typing.html +[intro]: https://kishstats.com/python/2019/01/07/python-type-hinting.html +[cheat sheet]: https://mypy.readthedocs.io/en/latest/cheat_sheet_py3.html + +## Runbook schema + +Some plugins like Platform need follow this section to extend runbook schema. Runbook is the configurations of LISA runs. Every LISA run need a runbook. + +The runbook uses [dataclass](https://docs.python.org/3/library/dataclasses.html) to define, [dataclass-json](https://github.com/lidatong/dataclasses-json/) to deserialize, and [marshmallow](https://marshmallow.readthedocs.io/en/3.0/api_reference.html) to validate the schema. + +See more examples in [schema.py](lisa/schema.py), if you need to extend runbook schema. + +## Committing Guidelines + +A best practice when using [Git](https://git-scm.com/book/en/v2) is to create a +series of independent and well-documented commits. Each commit should “do one +thing” and do it correctly. If a mistake is made (you need to fix a bug or +adjust formatting), you should amend it (or use an [interactive +rebase](https://thoughtbot.com/blog/git-interactive-rebase-squash-amend-rewriting-history) +to edit it). If you’re using Emacs, the [Magit](https://magit.vc/) package makes +all of this easy. Some of the reasons for making each commit polished is that it +aids immensely in future debugging. It lets us use tools like [`git +bisect`](https://git-scm.com/docs/git-bisect) to automatically find bugs, and +understand why prior code was written. Although some of it has gone out of date, +see this otherwise great essay on [Git best +practices](http://sethrobertson.github.io/GitBestPractices/). 
For how Git works, +read [Git from the Bottom +Up](https://jwiegley.github.io/git-from-the-bottom-up/). + +For writing your commit messages, see this modification of [Tim Pope’s +example](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html): + +> Capitalized, short (72 chars or less) summary +> +> More detailed explanatory text, if necessary. Wrap it to about 72 +> characters or so. In some contexts, the first line is treated as the +> subject of an email and the rest of the text as the body. The blank line +> separating the summary from the body is critical (unless you omit the +> body entirely); tools like rebase can get confused if you run the two +> together. +> +> Write your commit message in the imperative: “Fix bug” and not “Fixed +> bug” or “Fixes bug.” This convention matches up with commit messages +> generated by commands like git merge and git revert. +> +> Further paragraphs come after blank lines. +> +> * Bullet points are okay, too +> +> * Typically a hyphen or asterisk is used for the bullet, followed by a +> single space, with blank lines in between, but conventions vary here +> +> * Use a hanging indent + +You should also feel free to use Markdown in the commit messages, as our project +is hosted on GitHub which renders it (and Markdown is human readable). + +## Design Patterns + +The most important goal we are attempting to accomplish with LISAv3 is for it to +be “simple, clean, and with a low maintenance cost.” + +We should use caution when using Object Oriented Design, because when it is used +without critical analysis, it creates unmaintainable code. A great talk on this +subject is [Stop Writing Classes](https://www.youtube.com/watch?v=o9pEzgHorH0), +by Jack Diederich. As he says, “classes are great but they are also overused.” + +This [Python Design Patterns](https://python-patterns.guide/) is a fantastic +collection of material for writing maintainable Python code. 
It specifically +details many of the common “Object Oriented” patterns from the Gang of Four book +(which, in fact, were patterns geared toward languages like C++, and no longer +apply to modern languages like Python), what lessons can be learned from them, +and how to apply them (or their modern alternatives) today. It also serves as an +easy-to-read guide to the Gang of Four book itself, as its principles still +serve us well today. + +Every time a developer chooses to use a design pattern, that person needs to +reason through and document why it was chosen, and what alternatives were +considered. We will recreate the problems with LISAv2 unless we take our time to +carefully create a well-designed and maintainable framework. + +Several popular patterns that actually _do not_ work well in Python are: + +* [The Abstract Factory Pattern](https://python-patterns.guide/gang-of-four/abstract-factory/) +* [The Factory Method Pattern](https://python-patterns.guide/gang-of-four/factory-method/) +* [The Prototype Pattern](https://python-patterns.guide/gang-of-four/prototype/) +* [The Singleton Pattern](https://python-patterns.guide/gang-of-four/singleton/) + +Conversely, patterns that are a natural fit to Python include: + +* [The Composite Pattern](https://python-patterns.guide/gang-of-four/composite/) +* [The Iterator Pattern](https://python-patterns.guide/gang-of-four/iterator/) + (caution: it is actually better to implement these with `yield`!) + +Finally, a high-level guide to all things Python is [The Hitchhiker’s Guide to +Python](https://docs.python-guide.org/). It covers just about everything in the +Python world. If you make it through even some of these guides, you will be well +on your way to being a “Pythonista” (a Python developer) writing “Pythonic” +(canonically correct Python) code left and right. 
+ +### Async IO + +With Python 3.4, the Async IO pattern found in languages such as C# and Go is +available through the keywords `async` and `await`, along with the Python module +`asyncio`. Please read [Async IO in Python: A Complete +Walkthrough](https://realpython.com/async-io-python/) to understand at a high +level how asynchronous programming works. As of Python 3.7, One major “gotcha” +is that `asyncio.run(...)` should be used [exactly once in +`main`](https://docs.python.org/3/library/asyncio-task.html), it starts the +event loop. Everything else should be a coroutine or task which the event loop +schedules. + +## Future Sections + +Just a collection of reminders for the author to expand on later. + +* [unittest](https://docs.python.org/3/library/unittest.html) +* [doctest](https://docs.python.org/3/library/doctest.html) +* [subprocess](https://pymotw.com/3/subprocess/index.html) +* [GitHub Actions](https://github.com/LIS/LISAv2/actions) +* [ShellCheck](https://www.shellcheck.net/) +* [Governance](https://opensource.guide/leadership-and-governance/) +* [Maintenance Cost](https://web.archive.org/web/20120313070806/http://users.jyu.fi/~koskinen/smcosts.htm) +* Parallelism and multi-plexing +* Versioned inputs and outputs diff --git a/pytest/DESIGN.md b/pytest/DESIGN.md new file mode 100644 index 0000000000..31647e5dc2 --- /dev/null +++ b/pytest/DESIGN.md @@ -0,0 +1,981 @@ +# LISAv3 Technical Specification Document + +This document outlines the technical specifications for LISAv3. We are +evaluating the feasibility of leveraging +[Pytest](https://docs.pytest.org/en/stable/) as our test runner. + +Please see [PR #1065](https://github.com/LIS/LISAv2/pull/1065) for a working, +proof-of-concept prototype. + +Authored by Andrew Schwartzmeyer (he/him), version 0.3.0. + +## Why Pytest? + +Pytest is an [incredibly popular](https://docs.pytest.org/en/stable/talks.html) +MIT licensed open source Python testing framework. 
It has a thriving community +and plugin framework, with over 750 +[plugins](https://plugincompat.herokuapp.com/). Instead of writing (and +therefore maintaining) yet another test framework, we would do more with less by +reusing Pytest and existing plugins. This will allow us to focus on our unique +problems: organizing and understanding our tests, deploying necessary resources +(such as Azure, Hyper-V, or bare metal machines, collectively known as +“targets”), and analyzing our results. + +In fact, most of Pytest itself is implemented via [built-in +plugins](https://docs.pytest.org/en/stable/plugins.html), providing us with many +useful and well-documented examples. Furthermore, when others were confronted +with a problem similar to our own they also chose to use Pytest. +[Labgrid](https://github.com/labgrid-project/labgrid) is an open source embedded +board control library that delegated the testing framework logic to Pytest in +their [design](https://labgrid.readthedocs.io/en/latest/design_decisions.html), +and [U-Boot](https://github.com/u-boot/u-boot), an embedded board boot loader, +similarly leveraged Pytest in their +[tests](https://github.com/u-boot/u-boot/tree/master/test/py). KernelCI and +Avocado were also evaluated by the Labgrid developers at an [Embedded Linux +Conference](https://youtu.be/S0EJJM5bVUY) and both ruled out for reasons similar +to our own before they settled on Pytest. + +The [fundamental features](https://youtu.be/CMuSn9cofbI) of Pytest match our +needs very well: + +* Automatic test discovery, no boiler-plate test code +* Useful information when a test fails (assertions are introspected) +* Test and fixture [parameterization][] +* Modular setup/teardown via [fixtures][] +* Incredibly customizable (as detailed above) + +So all the logic for describing, discovering, running, skipping and reporting +results of the tests, as well as enabling and importing users’ plugins is +already written and maintained by the open source community. 
This leaves us to +focus on our hard and specific problems: creating an abstraction to launch the +necessary targets, organizing and publishing our tests, and reporting test +results upstream. Using Pytest would also allow us the space to abstract other +commonalities in our specific tests. In this way, LISAv3 could solve the +difficulties we have at hand without creating yet another test framework. + +Finally, by leveraging such a popular framework and reducing the amount of code +we need to maintain, we drastically increase our chances of receiving pull +requests instead of bug reports from users. This is important because despite +our best efforts it is practically guaranteed that as adoption of LISAv3 +increases, users will want changes to be made, and we need to empower them to do +so themselves. + +## What are we maintaining? + +The current proof-of-concept implementation uses the top-level `conftest.py` +file to define our “plugin” functionality. This works, but it is not ideal. I +believe that we will want to publish two open source Pytest plugins as packages +on [PyPI](https://pypi.org/), the Python Package Index: `pytest-target` and +`pytest-lisa`. We will also maintain our set of public “LISA” tests, but these +should simply install and use our plugins. + +The `pytest-target` plugin should encapsulate all our logic for _how_ and _when_ +to deploy targets (local or cloud virtual machines, or bare metal machines, and +all the associated resources), run tests on the specified targets, and delete +the targets. This includes specifying which features and resources each test +needs and each given target provides (such as number of cores, amount of RAM, +and other hardware like a GPU etc.), how to deploy and delete each target based +on its platform, and parameterization of the `target` fixture based on CLI or +YAML file input. In fact, some tests (like networking) will require multiple +targets at once. 
This plugin will need to manage resources intelligently, being +able to optimize for both time and cost, and make it easy for tests to request +and use various resources. + +The `pytest-lisa` plugin should encapsulate all our logic for how to organize +and select tests, as well as our opinions on displaying test results. This +includes the user modes, test metadata and inventory, test selection based on +criteria against that metadata, required and pre-configured upstream plugins, +and result notifiers. It will similarly support both CLI and YAML file input. + +We should strive to keep these plugins from depending on each other in order to +keep their scope well-defined. In the “LISA” repository of tests we will depend +on the two plugins and maintain additional [fixtures][] for our tests’ unique +requirements. Similarly, we and others may have private test repositories which +build upon the above by defining new platform support and internal service +integrations. The built-in plugin discovery of Pytest (via `conftest.py` files) +enables us to satisfy one of our requirements to “support plugins to orchestrate +the test environment.” + +Finally, a third smaller utility plugin, `pytest-schema` may be written in order +to share the common functionality of registering component schemata (e.g. +platform and target parameters from `pytest-target` and selection criteria from +`pytest-lisa`). This is somewhat of an implementation detail, but would be a +third and lower-level library we can publish. + +## pytest-target + +### How are targets provided and accessed? + +First we need to define “target” as an instance of a system-under-test. That is, +given some environment requirements, such an Azure image (URN) and size (SKU), a +target would be a virtual machine deployed by `pytest-target` with SSH access +provided to the requesting test. A target could optionally be pre-deployed and +simply connected. Some tests may request multiple targets as well. 
+ +Pytest uses [fixtures][], which are the primary way of setting up test +requirements. They replace less flexible alternatives like setup/teardown +functions. It is through fixtures that we implement remote target +setup/teardown. Our `target` fixture returns a `Target` instance, which +currently provides: + +* Remote shell access via SSH +* Data including hostname / IP address +* Cross-platform ping functionality with exponential back-off +* Uploading of local files to arbitrary remote destinations +* Downloading of remote file contents into local string variable +* Asynchronous remote command execution with promises + +The `Azure(Target)` subclass additionally provides: + +* Automatic provisioning of an Azure VM given URN and SKU +* Allowing ICMP ping via Azure firewall rules +* Azure platform forced reboot by API +* Downloading boot diagnostics (serial console log) from platform + +The prototype demonstrates how easy it is to quickly implement these features. +As we need more features, they can be readily added and shared among tests. + +The `Target` class leverages [Fabric](https://www.fabfile.org/) which is a +popular high-level Python library for executing shell commands on remote systems +over SSH. Underneath the covers Fabric uses +[paramiko](https://docs.paramiko.org/en/stable/), the most popular low-level +Python SSH library. Fabric does the heavy lifting of safely connecting and +disconnecting from the node, executing the shell command (synchronously or +asynchronously), reporting the exit status, gathering the stdout and stderr, +providing stdin (or interactive auto-responses, similar to `expect`), uploading +and downloading files, and much more. 
In fact, these APIs are all available and +implemented for the local machine by the underlying +[Inovke](https://www.pyinvoke.org/) library, which is essentially a Python +`subprocess` wrapper with “a powerful and clean feature set.” + +Other test specific requirements, such as installing software and daemons, +downloading files from remote storage, or checking the state of our Bash test +scripts, would similarly be implemented by methods on the `Target` class or via +additional fixtures and thus shared among tests. + +### How do we interact with Azure? + +For Azure, we currently use the [Azure CLI](https://aka.ms/azureclidocs) to +deploy a virtual machine. For Hyper-V (and other virtualization platforms), we +would like to use [libvirt](https://libvirt.org/python.html), and for embedded / +bare metal environments we are evaluating +[labgrid](https://github.com/labgrid-project/labgrid). + +If possible, we do not want to use the [Azure Python +APIs](https://aka.ms/azsdk/python/all) directly because they are more +complicated (and less documented) than the [Azure +CLI](https://aka.ms/azureclidocs). With Invoke (as discussed above), `az` +becomes incredibly easy to work with. The Azure CLI lead developer states that +they have [feature parity](https://stackoverflow.com/a/50005660/1028665) and +that the CLI is more straightforward to use. Considering our ease-of-maintenance +requirement, this seems the apt choice, especially since the Azure CLI supports +deploying resources with [ARM +templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/deploy-cli). +If it later becomes necessary to use the Python APIs directly, that is, of +course, still doable (and we can reuse existing code doing it). 
+ +On the topic of “servicing” the Azure CLI, its developers state that “at command +level, packages only upgrading the PATCH version guarantee backward +compatibility.” The tool is also intended to be used in scripts, so servicing +would amount to documenting the tested version and having the Azure class check +that it’s compatible before using it (or warning and then trying its best). + +### What’s the `Target` class? + +In version 0.1 of this design document we detailed a planned refactor of what +was then called the `Node` class. This has since been executed with just a few +modifications (one being the rename to `Target`, as `Node` was found to be an +overloaded term in the context of data centers). This class and its subclasses +are decoupled from Pytest, and are used via fixtures. It looks like this: + +```python +from abc import ABC, abstractmethod +from schema import Schema +import fabric + +class Target(ABC): + parameters: Mapping[str, str] + features: Set[str] + name: str + host: str + conn: fabric.Connection # Provides run, sudo, get, put etc. + + def __init__(...): + ... + self.host = self.deploy() + self.conn = fabric.Connection(self.host) + + @classmethod + @property + @abstractmethod + def schema(cls) -> Schema: + """Must return the parameters schema for setup.""" + ... + + @abstractmethod + def deploy(self) -> str: + """Must deploy the target resources and return hostname.""" + ... + + @abstractmethod + def delete(self) -> None: + """Must delete the target resources.""" + ... + + @classmethod + def local(...) -> Result: + """Runs a local shell command.""" + ... +``` + +#### How are platforms implemented? + +Platform support is implemented by subclassing `Target` and defining the +`schema` property, `deploy` method, `delete` method, and any platform-specific +methods. 
Using the `__subclasses__` attribute of `Target` the available +platforms and their parameter schemata are automatically gathered from users’ +own `conftest.py` files and other plugins. This enables the `target` fixture to +dynamically instantiate a target from the gathered requirements and parameters. + +For example, the `Azure(Target)` class defines its required parameters using the +[schema][] library like this: + +```python +from schema import Optional, Schema +from target import Target + +class Azure(Target): + ... + schema: Schema = Schema( + { + # TODO: Maybe validate as URN or path etc. + "image": str, + Optional("sku", default="Standard_DS1_v2"): str, + Optional("location", default="eastus2"): str, + Optional("networking", default=""): str, + } + ) +``` + +In the YAML playbook, a set of Azure targets can then be defined like this: + +```yaml +targets: + - name: Debian + platform: Azure + parameters: + image: credativ:Debian:9:9.0.201706190 + location: westus2 + + - name: Ubuntu + platform: Azure + parameters: + image: UbuntuLTS + sku: Standard_DS3_v2 +``` + +These targets are then used to parameterize the `target` fixture in the +[pytest_generate_tests][] hook (see below for more details). + +This demonstrated how we can have platforms define their own schema and register +that schema automatically. A pending update to this is to have two schemata per +`Target` subclass: target-level and platform-level (the former is what’s +demonstrated above, the latter would be common settings, such as subscription). + +#### How are requirements examined? + +The `features` attribute is currently a set of strings and (combined with the +parameters dictionary) was used to demonstrate how we can test if an existing +target instance (representing a deployed machine) met a test’s requirements. 
It +should be updated with a `Requirements` class that represents all physical +attributes of the target, and a `requires` Pytest mark should be added which +takes instances of this class. Two `Requirements` should be comparable to +determine if one set meets (or exceeds) the other set. Existing code that does +this can be reused here. + +#### How do we share common tasks? + +Common tasks for targets like rebooting and pinging should be implemented on the +`Target` class, and platform-specific tasks on the respective subclass. + +Methods available from `Connection` include `run()` and `sudo()` which are used +to easily run arbitrary commands, and `get()` and `put()` to download and upload +arbitrary files. + +The `cat()` method wraps `get()` and returns the file as data in a string. This +makes test code like this possible: + +```python +assert target.conn.cat("state.txt") == "TestCompleted" +``` + +A `reboot()` method should be added that first tries to use `sudo("reboot", +timeout=5)` (with a short timeout to avoid a hung SSH session). It should retry +with an exponential back-off to see if the machine has rebooted by checking +either `uptime` or the existence of a file created before the reboot. This is to +avoid having to `sleep()` and just guess the amount of time it takes to reboot. + +A `restart()` method should “power cycle” the machine using the platform’s API, +and thus is in abstract method. + +Other tools and shared logic should be implemented as necessary. A major area of +concern is the automatic and package-manager agnostic installation of necessary +tools, much of which has been implemented previously and can be reused. + +### How are targets requested and managed? + +We implement a pair of Pytest fixtures to provide targets. 
The first is the +`pool` fixture, which looks like: + +```python +@pytest.fixture(scope="session") +def pool(request: SubRequest) -> Iterator[List[Target]]: + """This fixture tracks all deployed target resources.""" + targets: List[Target] = [] + yield targets + for t in targets: + t.delete() +``` + +The `pool` fixture is setup once at the beginning of the test session, at which +point the `targets` list is then provided as input to every instance of the +`target` fixture. While currently a list, to support optimal scheduling we will +likely want to use a priority queue, where the priority of a target represents +its cost (whether in terms of time or money), allowing us to provide either the +fastest or the cheapest target to each request. Targets not in use will be +deallocated, and all targets will be automatically deleted after the tests are +finished (unless the user requested otherwise, in which case they’ll be cached). + +Note that cross-session [caching](https://docs.pytest.org/en/stable/cache.html) +is provided by Pytest, and very easy to work with. An early prototype +implemented a `--keep-vms` flag successfully, and this will be implemented again +with the updated design. + +The second is the `target` fixture, which looks like: + +```python +@pytest.fixture +def target(pool: List[Target], request: SubRequest) -> Iterator[Target]: + """This fixture provides a connected target for each test.""" + platform: Type[Target] = playbook.PLATFORMS[request.param["platform"]] + parameters: Dict[str, Any] = request.param["parameters"] + marker = request.node.get_closest_marker("lisa") + features = set(marker.kwargs["features"]) + + # TODO: If `t` is not already in use, deallocate the previous target. 
+ for t in pool: + if isinstance(t, platform) and t.parameters == parameters and t.features >= features: + yield t + break + else: + t = platform(parameters, features) + pool.append(t) + yield t + t.connection.close() +``` + +This is obviously still an early implementation, but it is viable. By using the +[pytest_collection_modifyitems][] hook to sort (and so group) the tests by their +requirements, the tests would efficiently reuse targets. This fixture is +indirectly parameterized during setup with the [pytest_generate_tests][] hook. +Test and fixture [parameterization][] is a huge feature of Pytest. When we +parameterize the `target` fixture for multiple targets (e.g. “Ubuntu” and +“Debian”), Pytest automatically creates a set of tests for each target. So +`test_smoke` turns into `test_smoke[Ubuntu]` and `test_smoke[Debian]`. This +allows us to run a collection of tests against multiple targets with ease. These +targets are defined in a YAML file and validated against the parameters +collected from the previously described platform subclasses. + +The entire implementation looks like so: + +```python +TARGETS: List[Dict[str, Any]] = [] +TARGET_IDS: List[str] = [] + +def pytest_configure(config: Config) -> None: + book = get_playbook(config.getoption("--playbook")) + for t in book.get("targets", []): + TARGETS.append(t) + TARGET_IDS.append(t["name"]) + +def pytest_generate_tests(metafunc: Metafunc) -> None: + if "target" in metafunc.fixturenames: + assert TARGETS, "No targets specified!" + metafunc.parametrize("target", TARGETS, True, TARGET_IDS) +``` + +The function `get_playbook()` only imports the [PyYAML][] library, opens the +playbook file `f` within a context manager, and returns +`playbook.schema.validate(yaml.load(f))`. 
This is leveraging Pytest’s existing +parameterization technology to achieve one of our “test entrance” goals of +requesting environments with a YAML playbook, and one of our “test parameter +validation” goals of validating platforms before executing tests so that we can +fail fast if a target has insufficient information to be setup. Parsing the same +parameters from a CLI can also be implemented. + +Finally, once the `target` fixture has returned a working and sanity-checked +environment to the requesting test, the test is capable of examining any and all +attributes of the `Target` and quickly marking itself as skipped, expected to +fail, or failed before executing the body of the test. Our static type checking +enables developers to ensure that the platform they requested supports all +methods and fields they use by annotating the test’s `target` parameter with the +expected platform type (or types). Ensuring the effectiveness of this type +checking will require us to carefully update our platform implementations, and +not rely on arbitrary objects of data. (For example, add an `internal_address` +field to `Azure`, don’t just look up `data["internal_address"]`.) + +### How are tests executed in parallel? + +While our original list of goals stated that we want to run tests “in parallel” +we were not specific about what was meant, and the topic of parallelism and +concurrency is understandably complex. We certainly don’t mean running two tests +at once on the same target, as this would undoubtedly lead to flaky tests. + +Assuming that we care about a set of tests passing on a particular image and +size combination, but not necessarily on a particular deployed instance, then we +can run tests concurrently by deploying multiple “identical” targets and +splitting the tests across them. The tests would still run in isolation on each +target. This sounds hard, but actually it’s practically free with Pytest via +[pytest-xdist][]. 
+ +The default `pytest-xdist` implementation simply takes the list of tests and +runs them in a round-robin fashion with the desired number of executors. We’ve +talked at length about being able to schedule groups of tests to run in +particular executors and using particular targets. While there are many paths +open to us, this plugin actually provides a hook, `pytest_xdist_make_scheduler` +that exists specifically to “implement custom tests distribution logic.” + +Figuring out the requirements of our test scheduler and designing the best +algorithm will require further discussion and design review. For the purposes of +moving forward, we are not blocked, as the eventual implementation can be +dropped in-place with minimal effort. + +## pytest-lisa + +### What are the user modes? + +Because Pytest is incredibly customizable, we want to provide a few sets of +reasonable default configurations for some common scenarios. We will add a flag +like `--lisa-mode=[dev,debug,ci,demo]` to change the default options and output +of Pytest. Doing so is readily supported by Pytest via the [pytest_addoption][] +and [pytest_configure][] hooks. We call these the provided “user modes.” Note +that by “output” we mean not just logging (because that implies the Python +`logger` module, which Pytest allows full control over) but also commands’ +stdout and stderr as well as Pytest-provided information. + +* The dev(eloper) mode is intended for use by test developers while writing a + new test. It is verbose, caches the deployed VMs between runs, and generates a + digestible [HTML](https://pypi.org/project/pytest-html/) report. + +* The debug mode is like dev mode but with all possible information shown, and + will open the Python debugger automatically on failures (which is provided by + Pytest with the `--pdb` flag). 
+ +* The CI mode will be fairly quiet on the console, showing all test results, but + putting the full info output into the generated report file (HTML for sharing + with humans and + [JUnit](https://docs.pytest.org/en/stable/_modules/_pytest/junitxml.html) for + the associated CI environment, which presents as native test results). + +* The demo mode will show the “executive summary” (a lot like CI, but finely + tuned for demos). For example, what `make smoke` currently shows. + +### How are tests described? + +The built-in [pytest-mark](https://docs.pytest.org/en/stable/mark.html) plugin +already provides functionality for adding metadata to tests, where we +specifically want: + +* Platform: used to skip tests inapplicable to the current system-under-test +* Category: our high-level test organization +* Area: feature being tested +* Priority: self-explanatory +* Tags: optional additional metadata for test organization + +We simply reuse this with minimal logic to enforce our required metadata, with +sane defaults (perhaps setting the area to the name of the module), and to list +statistics about our test coverage. This is already included in the prototype. +It looks like this: + +```python +import pytest + +@pytest.mark.lisa(platform="Azure", category="Functional", priority=0, area="LIS_DEPLOY") +def test_lis_driver_version(target: Azure) -> None: + """Checks that the installed drivers have the correct version.""" + ... +``` + +This is a functional example, which takes zero implementation. With this simple +decorator, all test [collection hooks][] can introspect the metadata, enforce +required parameters and set defaults, select tests based on arbitrary criteria, +and list test coverage statistics (test inventory). Designing and implementing +the test inventory algorithm is still under development, but it’s tractable. + +Note that Pytest leverages Python’s docstrings for built-in documentation (and +can even run tests discovered in such strings, like doctest). 
Hence we do not +have a separate field for the test’s documentation. As long as we continue to +follow the practice of using docstrings for our modules, classes, and functions, +we can automatically use [pydoc](https://docs.python.org/3/library/pydoc.html) +to generate full documentation for each plugin and test. + +Being just Python code, this decorator need not be `@pytest.mark.lisa(...)` but +can trivially be provided as simply `@LISA(...)`. In fact, we provide this in +`lisa.py` with: + +```python +LISA = pytest.mark.lisa + +@LISA(...) +def test_something(...) +``` + +Currently we validate the parameters given to this mark during test collection, +by using the following code, which leverages the [schema][] library: + +```python +from schema import Optional, Or, Schema + +lisa_schema = Schema( + { + "platform": str, + "category": Or("Functional", "Performance", "Stress", "Community", "Longhaul"), + "area": str, + "priority": Or(0, 1, 2, 3), + Optional("tags", default=list): [str], + }, +) + +def validate(mark: Mark) -> None: + """Validate each test's LISA parameters.""" + assert not mark.args, "LISA marker cannot have positional arguments!" + mark.kwargs.update(lisa_schema.validate(mark.kwargs)) +``` + +In the future we could change `LISA` to be a function with these keyword +arguments so that IDE auto-completion is enabled. However, this is not mandatory +to move forward, and parameter validation is enabled succinctly with the above, +which satisfies one of our “test parameter validation” requirements. + +This mark also does not need to be repeated for each test, as marks can be scoped to +a module, and so one line could describe defaults for every test in a file, with +individual tests overriding parameters as needed. + +In the current implementation, we also take a `features: List[str]` argument +that is used to prove the concept of deploying (or reusing) a target based on the +test’s required and the target’s available sets of features. 
However, as we move +forward we should define a separate `requires` mark that takes well-defined +classes describing the minimal required resources for a test. This will be part +of the refactor into the two Pytest plugins mentioned above. Coupled with the +test’s requested `target` fixture being parameterized (see discussion in +`pytest-target`) this demonstrates at least one way we can satisfy our “test run +planner/scheduler” requirement. + +Furthermore, we have a prototype +[generator](https://github.com/LIS/LISAv2/tree/pytest/generator) which parses +LISAv2 XML test descriptions and generates stubs with this mark filled in +correctly. + +### How are tests selected? + +Pytest already allows a user to specify which exact tests to run: + +* Listing folders on the CLI (see below on where tests should live) +* Specifying a name expression on the CLI (e.g. `-k smoke and xdp`) +* Specifying a mark expression on the CLI (e.g. `-m functional and not slow`) + +We can also implement any other mechanism via the +[pytest_collection_modifyitems][] hook. The proof-of-concept supports gathering +selection criteria from a YAML file: + +```yaml +criteria: + # Select all Priority 0 tests. + - priority: 0 + # Run tests with 'smoke' in the name twice. + - name: smoke + times: 2 + # Exclude all tests in Area "xdp" + - area: xdp + exclude: true +``` + +This criteria is validated against the following [schema][]: + +```python +from schema import Schema, Optional + +criteria_schema = Schema( + { + # TODO: Validate that these strings are valid regular + # expressions if we change our matching logic. 
+ Optional("name", default=None): str, + Optional("area", default=None): str, + Optional("category", default=None): str, + Optional("priority", default=None): int, + Optional("tags", default=list): [str], + Optional("times", default=1): int, + Optional("exclude", default=False): bool, + } +) +``` + +The test collection is then modified using the Pytest hook, +[pytest_collection_modifyitems][]: + +```python +def pytest_collection_modifyitems( + session: Session, config: Config, items: List[Item] +) -> None: + included: List[Item] = [] + excluded: List[Item] = [] + + def select(item: Item, times: int, exclude: bool) -> None: + if exclude: + excluded.append(item) + else: + for _ in range(times - included.count(item)): + included.append(item) + + for c in criteria: # Where `criteria` is from the schema. + for item in items: + marker = item.get_closest_marker("lisa") + if not marker: + # Not all tests will have the LISA marker, such as + # static analysis tests. + continue + i = marker.kwargs + if any( + [ + c["name"] and c["name"] in item.name, + c["area"] and c["area"].casefold() == i["area"].casefold(), + c["category"] + and c["category"].casefold() == i["category"].casefold(), + c["priority"] and c["priority"] == i["priority"], + c["tags"] and set(c["tags"]) <= set(i["tags"]), + ] + ): + select(item, c["times"], c["exclude"]) + items[:] = [i for i in included if i not in excluded] +``` + +Together, the CLI support and YAML playbook satisfy one of our “test entrance” +requirements. We can also generate our own binary called `lisa` which simply +delegates to Pytest, if we really want to do so. + +Because this is simply a Python list, we can also sort the tests according to +our needs, such as by priority. If the `python-targets` plugin has already +sorted by requirements, that’s just fine, Python’s `sorted()` built-in is +guaranteed to be stable (meaning we can sort in multiple passes). + +### How are results reported? 
+ +Parsing the results of a large test suite can be difficult. Fortunately, because +Pytest is a testing framework, there already exists support for generating +excellent reports. For developers, the +[HTML](https://pypi.org/project/pytest-html/) report is easy to read: it is +self-contained, holds all the results and logs, and each test can be expanded +and collapsed. Tests which were rerun are recorded separately. For CI pipelines, +Pytest has integrated +[JUnit](https://docs.pytest.org/en/stable/_modules/_pytest/junitxml.html) XML +test report support. This is the standard method of reporting results to CI +servers like Jenkins and are natively parsed into the CI system’s built-in test +display page. Finally, Azure DevOps pipelines are even supported with a +community plugin +[pytest-azurepipelines](https://pypi.org/project/pytest-azurepipelines/) which +enhances the standard JUnit report for ADO. + +One of our requirements is to support the lookup of previous tests’ execution +metrics, such as recorded performance metrics and duration, so that performance +tests can check regressions. This is the perfect example of carrying a small +fixture which provides access to our internal database and is dynamically added +to our tests when run internally, and the tests can lookup and record whatever +they need through the fixture. + +However, we also have internal requirements to report test results throughout +the test life cycle to a database (the “result manager” and “progress tracker”) +to be consumed by other tools. In this sense, LISAv3 (the composition of our +published plugins, tests, and fixtures) is simply a producer, and the consumers +can parse the test results, send emails, archive the collected logs, update a +GUI display of test progress, etc. Our repository’s `conftest.py` can implement +the necessary logic using Pytest’s ample [test running hooks][]. 
In particular, +the hook [pytest_runtest_makereport][] is called for each of the setup, call and +teardown phases of a test. As such it can be used for precisely this purpose. + +### How is setup, run, and cleanup handled? + +Pytest strives to require minimal boiler-plate code. Thus the classic +“xunit-style” of defining a class with setup and teardown functions in addition +to test functions is not recommended (nor necessary). Generally Pytest expects +[fixtures][] to be used for dependency injection (which is what setup/teardown +functions usually do). For users that really want the classic style, it is +nonetheless fully +[supported](https://docs.pytest.org/en/stable/xunit_setup.html) and documented +(and can be applied at the module, class, and method scopes). Thus our “test +runner” requirement is satisfied. + +### How are tests timed out? + +The [pytest-timeout](https://pypi.org/project/pytest-timeout/) plugin provides +integrated timeouts via `@pytest.mark.timeout()`, a configuration +file option, environment variable, and CLI flag. The Fabric library provides +timeouts in both the configuration and per-command usage. These are already used +to satisfaction in the prototype. Additionally, Pytest has built-in support for +measuring the duration of each fixture’s setup and teardown and each test (it’s +simply the `--durations` and `--durations-min` flags). + +### How are tests organized? + +That is, what does a folder of tests map to: a platform, feature, or owner? + +In my opinion it is likely to be both. Tests which are common to a platform and +written by our team are probably best placed in a folder like `tests/azure` +whereas tests for a particular scenario which limits their image and SKU +applicability should be in a folder like `tests/acc`. It’s going to depend on +how often the tests are run together. 
+ +Because Pytest can run tests and `conftest.py` files from arbitrary folders, +maintaining sets of tests and plugins separately from the base LISA repository +is easy. Custom repositories with new tests, plugins, fixtures, +platform-specific support, etc. can simply be cloned anywhere, and provided on +the command-line to Pytest. + +Test authors should keep tests which share requirements and are otherwise +similar to a single module (Python file). Not only is this well-organized, but +because marks can be applied at the module level, setting all the tests to be +skipped or expected to fail (with the built-in `skip` and `xfail` Pytest marks) +becomes even easier. + +An open question is if we really want to bring every test from LISAv2 directly +over, or if we should carefully analyze our tests to craft a new set of +high-level scenarios. An interesting result of reorganizing and rewriting the +tests would be the ability to have test layers, where the result of a high-level +test dictates if the tests below it should be skipped. If it passes, it implies +the tests underneath it would pass, and so skips them; but if it fails, the next +test below it runs and so on until a passing layer is found. + +### How will we port LISAv2 tests? + +Given the above, we still must decide if we want to put the engineering effort +into porting _every_ LISAv2 test. However, the prototype started by porting the +`LIS-DRIVER-VERSION-CHECK` test, proving that tests which exclusively use Bash +scripts are trivially portable. Unfortunately, most tests use an associated +PowerShell script which is tightly coupled to the LISAv2 framework. + +We believe that it is _possible_ to port these tests without untoward +modifications. 
We would need to write a mock library that implements (or stubs +where appropriate) LISAv2 framework functionality such as +`Provision-VMsForLisa`, `Copy-RemoteFiles`, `Run-LinuxCmd`, etc., and provides +both the expected “global” objects and the test function parameters `AllVmData` +and `CurrentTestData`. + +This work needs to be done regardless of the approach we take with our framework +(leveraging Pytest or writing our own), and it is not inconsequential work. It +needs to be thoroughly planned and executed, and is certainly a ways off. + +### How are tests and functions retried? + +Testing remote targets is inherently flaky, so we take a two-pronged approach to +dealing with the flakiness. + +The [pytest-rerunfailures](https://pypi.org/project/pytest-rerunfailures/) +plugin will be used to easily mark a test itself as flaky. It has the nice +feature of recording each rerun in the produced report. It looks like this: + +```python +@pytest.mark.flaky(reruns=5) +def test_something_flaky(...): + """This fails most of the time.""" + ... +``` + +Note that there is an open +[bug](https://github.com/pytest-dev/pytest-rerunfailures/issues/51) in this +plugin which can cause issues with fixtures using scopes other than “function” +but it can be worked around. + +The [Tenacity](https://tenacity.readthedocs.io/en/latest/) library should be +used to retry flaky functions that are not tests, such as downloading boot +diagnostics or pinging a node. As the modern Python retry library it has +easy-to-use decorators to retry functions (and context managers to use within +functions), as well as excellent wait and timeout support. It looks like this: + +```python +from tenacity import retry, stop_after_attempt, wait_exponential + +class Node: + ... 
+ @retry(reraise=True, wait=wait_exponential(), stop=stop_after_attempt(3)) + def ping(self, **kwargs): + """Ping the node from the local system in a cross-platform manner.""" + flag = "-c 1" if platform.system() == "Linux" else "-n 1" + return self.local(f"ping {flag} {self.host}", **kwargs) + ... +``` + +We can additionally list a test twice when modifying the items collection, as +implemented in the criteria proof-of-concept. However, given the above +abilities, this may not be desired. + +## What does the “flow” of Pytest look-like? + +This is best described in Pythonic pseudo-code, where the context manager +encapsulates each scope and the for loop encapsulates processing: + +```python +pool_fixture: a session-scoped context manager +target_fixture: a function-scoped context manager +items: a collection of tests +targets: a collection of targets +criteria: a collection of test selection criteria + +def pytest_addoption(parser): + """Add CLI options etc.""" + parser.addoption("--playbook", type=Path) + +pytest_addoption(parser) # Pytest fills in parser. + +def pytest_configure(config): + """Setup the run's configuration.""" + targets = playbook.get_targets() + criteria = playbook.get_criteria() + +pytest_configure(config) # Pytest fills in config. + +# pytest_generate_tests(metafunc) does this: +for test_metafunc in metafuncs: + for target in targets: + # items is tests * targets in size + items.append(test_metafunc[target]) + +# pytest_collection_modifyitems(session, config, items) does this: +for test in items: + validate(test) + include_or_exclude(test, criteria) + +# finally, each executor/session does this: +session_items = items.split() # based on scheduler algorithm +with pool_fixture as pool: + # the fixture has setup a pool to track the deployed targets + for test_function in session_items: + with target_fixture as target: + # the fixture has found or deployed an appropriate target + test_function(target) +``` + +## What Else? 
+ +There’s still a lot more to think about and design. A non-exhaustive list of +future topics (some touched on above): + +* Tests inventory (generating statistics from metadata) +* Environment / multiple targets class design +* Feature/requirement requests (NICs in particular) +* Custom test scheduler algorithm +* Secret management + +## What alternatives were tried? + +These are notes from things tried that did not work out, and why. + +### Writing Another Framework + +I believe the above set of technical specifications clearly describes how we can +leverage Pytest for our needs. Furthermore, the existing prototype proves this +is a viable option. Therefore I do not think we should consider writing and +maintaining a _new_ Python testing framework. We should avoid falling for “not +invented here” syndrome. The alternative prototype which does implement a new +framework required over five thousand lines of code, the Pytest-based prototype +used less than two hundred, or less than three percent. We do not want to take +on the maintenance cost of yet another framework, the maintenance cost of LISAv2 +already caused this mess in the first place. I think the work of prototyping +said new framework was valuable, as it provided insight into the eventual +technical design of LISAv3. + +### Using Remote Capabilities of `pytest-xdist` + +With the [pytest-xdist][] plugin there already exists support for running a +folder of tests on an arbitrary remote host via SSH. + +The LISA tests could be written as Python code suitable for running on the +target test system, which means direct access to the system in the test code +itself (subprocesses are still available, without having to use SSH within the +test, but would become far less necessary), something that is not possible with +any current prototype. 
Where the `pytest-xdist` plugin copies the package of code +to the target node and runs it, the pytest-lisa plugin could instantiate that +node (boot the necessary image on a remote machine or launch a new Hyper-V or +Azure VM, etc.) for the tests. + +However, this use of `pytest-xdist` requires full Python support on the target +machines, and drastically changes how developers write tests. Furthermore, it +would not support running local commands against the remote node (like ping) or +running the test across a reboot of the node. Thus we do not want to use this +functionality of `pytest-xdist`. That said, `pytest-xdist` will still be useful +for running tests concurrently, as described above. + +### Using Paramiko Instead of Fabric + +The Paramiko library is less complex (smaller library footprint) than Fabric, as +the latter wraps the former, but it is a bit more difficult to use, and doesn’t +support reading existing SSH config files, nor does it support “ProxyJump” which +we use heavily. Fabric instead provides a clean high-level interface for +existing shell commands, handling all the connection abstractions for us. + +Using Paramiko looked like this: + +```python +from pathlib import Path +from typing import List + +from paramiko import SSHClient + +import pytest + +@pytest.fixture +def node() -> SSHClient: + with SSHClient() as client: + client.load_system_host_keys() + client.connect(hostname="...") + yield client + + +def test_lis_version(node: SSHClient) -> None: + with node.open_sftp() as sftp: + for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: + sftp.put(LINUX_SCRIPTS / f, f) + _, stdout, stderr = node.exec_command("./LIS-VERSION-CHECK.sh") + sftp.get("state.txt", "state.txt") + with Path("state.txt").open() as f: + assert f.readline() == "TestCompleted" +``` + +It is more verbose than necessary when compared to Fabric. 
+ +### StringIO + +For `Node.cat()` it would seem we could use `StringIO` like so: + +```python +from io import StringIO + +with StringIO() as result: + node.get("state.txt", result) + assert result.getvalue().strip() == "TestCompleted" +``` + +However, the data returned by Paramiko is in bytes, which in Python 3 are not +equivalent to strings, hence the existing implementation which uses `BytesIO` +and decodes the bytes to a string. + +[PyYAML]: https://pyyaml.org/wiki/PyYAMLDocumentation +[collection hooks]: https://docs.pytest.org/en/latest/reference.html#collection-hooks +[fixtures]: https://docs.pytest.org/en/stable/fixture.html +[parameterization]: https://docs.pytest.org/en/stable/parametrize.html +[pytest-xdist]: https://github.com/pytest-dev/pytest-xdist +[pytest_addoption]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_addoption +[pytest_collection_modifyitems]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_collection_modifyitems +[pytest_configure]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_configure +[pytest_generate_tests]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_generate_tests +[pytest_runtest_makereport]: https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_runtest_makereport +[schema]: https://pypi.org/project/schema/ +[test running hooks]: https://docs.pytest.org/en/latest/reference.html#test-running-runtest-hooks diff --git a/pytest/README.md b/pytest/README.md new file mode 100644 index 0000000000..c564d256cf --- /dev/null +++ b/pytest/README.md @@ -0,0 +1,31 @@ +# LISAv3 via pytest-lisa + +Basic instructions for testing the prototype: + +```bash +# Install Poetry, make sure `poetry` is in your `PATH` +curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python + +# Install Azure CLI, make sure `az` is in your `PATH` +curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + +# 
Login and set subscription +az login +az account set -s + +# Clone LISAv2 with the Pytest prototype +git clone -b pytest/main https://github.com/LIS/LISAv2.git +cd LISAv2 + +# Install Python packages +make setup + +# Run some local demos +make test +make yaml + +# Run a demo which deployes Azure resources +make smoke +``` + +See the [design document](DESIGN.md) for details. diff --git a/pytest/azure.py b/pytest/azure.py new file mode 100644 index 0000000000..aadb2b5eda --- /dev/null +++ b/pytest/azure.py @@ -0,0 +1,162 @@ +from __future__ import annotations + +import json +import logging +import typing + +from invoke.runners import Result # type: ignore +from schema import Optional, Schema # type: ignore +from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore + +from target import Target + +if typing.TYPE_CHECKING: + from typing import Any + + +class Azure(Target): + """Implements Azure-specific target methods.""" + + # Custom instance attribute(s). + internal_address: str + + # @property + # @classmethod + # def schema(cls) -> Schema: + # return + + schema: Schema = Schema( + { + # TODO: Maybe validate as URN or path etc. + "image": str, + Optional("sku", default="Standard_DS1_v2"): str, + Optional("location", default="eastus2"): str, + Optional("networking", default=""): str, + } + ) + + # A class attribute because it’s defined. + az_ok = False + + @classmethod + def check_az_cli(cls) -> None: + """Assert that the `az` CLI is installed and logged in.""" + if cls.az_ok: # Shortcut if we already checked. + return + # E.g. on Ubuntu: `curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash` + assert cls.local("az --version", warn=True), "Please install the `az` CLI!" + # TODO: Login with service principal (az login) and set + # default subscription (az account set -s) using secrets. + account: Result = cls.local("az account show") + assert account.ok, "Please `az login`!" 
+ sub = json.loads(account.stdout) + assert sub["isDefault"], "Please `az account set -s `!" + logging.info( + f"Using account '{sub['user']['name']}' with subscription '{sub['name']}'" + ) + cls.az_ok = True + + def create_boot_storage(self, location: str) -> str: + """Create a separate resource group and storage account for boot diagnostics.""" + account = "pytestbootdiag" + # This command always exits with 0 but returns a string. + if self.local("az group exists -n pytest-lisa").stdout.strip() == "false": + self.local(f"az group create -n pytest-lisa --location {location}") + if not self.local( + f"az storage account show -g pytest-lisa -n {account}", warn=True + ): + self.local(f"az storage account create -g pytest-lisa -n {account}") + return account + + def allow_ping(self) -> None: + """Create NSG rules to enable ICMP ping. + + ICMP ping is disallowed by the Azure load balancer by default, but + there’s strong debate about if this is necessary, and our tests + like to check if the host is up using ping, so we create inbound + and outbound rules in the VM's network security group to allow it. + + """ + try: + for d in ["Inbound", "Outbound"]: + self.local( + f"az network nsg rule create " + f"--name allow{d}ICMP --resource-group {self.name}-rg " + f"--nsg-name {self.name}NSG --priority 100 " + f"--access Allow --direction '{d}' --protocol Icmp " + "--source-port-ranges '*' --destination-port-ranges '*'" + ) + except Exception as e: + logging.warning(f"Failed to create ICMP allow rules in NSG due to '{e}'") + + def deploy(self) -> str: + """Given deployment info, deploy a new VM.""" + image = self.parameters["image"] + sku = self.parameters["sku"] + location = self.parameters["location"] + networking = self.parameters["networking"] + + Azure.check_az_cli() + + logging.info( + f"""Deploying VM... 
+        # TODO: Support setting up to two NICs.
I’ve filed a bug: + # https://github.com/Azure/azure-cli/issues/15590 + return self.local( + f"az vm boot-diagnostics get-boot-log -n {self.name} -g {self.name}-rg", + **kwargs, + ) + + def platform_restart(self) -> Result: + """TODO: Should this '--force' and redeploy?""" + return self.local(f"az vm restart -n {self.name} -g {self.name}-rg") diff --git a/pytest/conftest.py b/pytest/conftest.py new file mode 100644 index 0000000000..2dfe991bd2 --- /dev/null +++ b/pytest/conftest.py @@ -0,0 +1,253 @@ +"""This file sets up custom plugins. + +https://docs.pytest.org/en/stable/writing_plugins.html + +""" +from __future__ import annotations + +import typing +from pathlib import Path + +from schema import SchemaMissingKeyError # type: ignore + +import azure # noqa +import lisa +import pytest +from target import Target + +if typing.TYPE_CHECKING: + from typing import Any, Dict, Iterator, List, Optional, Type + + from _pytest.config import Config + from _pytest.config.argparsing import Parser + from _pytest.fixtures import SubRequest + from _pytest.python import Metafunc + + from pytest import Item, Session + + +@pytest.fixture(scope="session") +def pool(request: SubRequest) -> Iterator[List[Target]]: + """This fixture tracks all deployed target resources.""" + targets: List[Target] = [] + yield targets + for t in targets: + print(f"Created target: {t.features} / {t.parameters}") + if not request.config.getoption("keep_vms"): + t.delete() + + +@pytest.fixture +def target(pool: List[Target], request: SubRequest) -> Iterator[Target]: + """This fixture provides a connected target for each test. + + It is parametrized indirectly in 'pytest_generate_tests'. + + In this fixture we can check if any existing target matches all + the requirements. If so, we can re-use that target, and if not, we + can deallocate the currently running target and allocate a new + one. When all tests are finished, the pool fixture above will + delete all created VMs. 
Coupled with performing discrete + optimization in the test collection phase and ordering the tests + such that the test(s) with the lowest common denominator + requirements are executed first, we have the two-layer scheduling + as asked. + + However, this feels like putting the cart before the horse to me. + It would be much simpler in terms of design, implementation, and + usage that features are specified upfront when the targets are + specified. Then all this goes away, and tests are skipped when the + feature is missing, which also leaves users in full control of + their environments. + + """ + import playbook + + platform: Type[Target] = playbook.PLATFORMS[request.param["platform"]] + parameters: Dict[str, Any] = request.param["parameters"] + marker = request.node.get_closest_marker("lisa") + features = set(marker.kwargs["features"]) + + # TODO: If `t` is not already in use, deallocate the previous + # target, and ensure the tests have been sorted (and so grouped) + # by their requirements. + for t in pool: + # TODO: Implement full feature comparison, etc. and not just + # proof-of-concept string set comparison. + if ( + isinstance(t, platform) + and t.parameters == parameters + and t.features >= features + ): + yield t + break + else: + # TODO: Reimplement caching. + t = platform(parameters, features) + pool.append(t) + yield t + t.connection.close() + + +def pytest_addoption(parser: Parser) -> None: + """Pytest hook for adding arbitrary CLI options. 
+    configurations based on user mode.
+ if config.getoption("--check"): + options.update( + { + "flake8": True, + "mypy": True, + "markexpr": "flake8 or mypy", + "reportchars": "fE", + } + ) + if config.getoption("--demo"): + options.update( + { + "html": "demo.html", + "no_header": True, + "showcapture": "log", + "tb": "line", + } + ) + for attr, value in options.items(): + setattr(config.option, attr, value) + + +def pytest_generate_tests(metafunc: Metafunc) -> None: + """Parametrize the tests based on our inputs. + + Note that this hook is run for each test, so we do the file I/O in + 'pytest_configure' and save the results. + + https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_generate_tests + + """ + if "target" in metafunc.fixturenames: + assert TARGETS, "No targets specified!" + metafunc.parametrize("target", TARGETS, True, TARGET_IDS) + + +def pytest_collection_modifyitems( + session: Session, config: Config, items: List[Item] +) -> None: + """Pytest hook for modifying the selected items (tests). + + https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_collection_modifyitems + + """ + # TODO: The ‘Item’ object has a ‘user_properties’ attribute which + # is a list of tuples and could be used to hold the validated + # marker data, simplifying later usage. + + # Validate all LISA marks. + for item in items: + try: + lisa.validate(item.get_closest_marker("lisa")) + except SchemaMissingKeyError as e: + print(f"Test {item.name} failed LISA validation {e}!") + items[:] = [] + return + + # Optionally select tests based on a playbook. + included: List[Item] = [] + excluded: List[Item] = [] + + # TODO: Remove logging. 
+ def select(item: Item, times: int, exclude: bool) -> None: + """Includes or excludes the item as appropriate.""" + if exclude: + print(f" Excluding {item}") + excluded.append(item) + else: + print(f" Including {item} {times} times") + for _ in range(times - included.count(item)): + included.append(item) + + book = get_playbook(config.getoption("--playbook")) + for c in book.get("criteria", []): + print(f"Parsing criteria {c}") + for item in items: + marker = item.get_closest_marker("lisa") + if not marker: + # Not all tests will have the LISA marker, such as + # static analysis tests. + continue + i = marker.kwargs + if any( + [ + c["name"] and c["name"] in item.name, + c["area"] and c["area"].casefold() == i["area"].casefold(), + c["category"] + and c["category"].casefold() == i["category"].casefold(), + c["priority"] and c["priority"] == i["priority"], + c["tags"] and set(c["tags"]) <= set(i["tags"]), + ] + ): + select(item, c["times"], c["exclude"]) + if not included: + included = items + items[:] = [i for i in included if i not in excluded] + + +def pytest_html_report_title(report): # type: ignore + report.title = "LISAv3 (Using Pytest) Results" diff --git a/pytest/lisa.py b/pytest/lisa.py new file mode 100644 index 0000000000..bc3b16506e --- /dev/null +++ b/pytest/lisa.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +import typing +from pathlib import Path + +from schema import Optional, Or, Schema # type: ignore + +import pytest + +if typing.TYPE_CHECKING: + from _pytest.mark.structures import Mark + +LISA = pytest.mark.lisa +LINUX_SCRIPTS = Path("../Testscripts/Linux") + +# Setup a sane configuration for local and remote commands. Note that +# the defaults between Fabric and Invoke are different, so we use +# their Config classes explicitly. +config = { + "run": { + # Show each command as its run. + "echo": True, + # Disable stdin forwarding. 
+ "in_stream": False, + # Don’t let remote commands take longer than five minutes + # (unless later overridden). This is to prevent hangs. + "command_timeout": 1200, + } +} + +lisa_schema = Schema( + { + "platform": str, + "category": Or("Functional", "Performance", "Stress", "Community", "Longhaul"), + "area": str, + "priority": Or(0, 1, 2, 3), + Optional("features", default=list): [str], + Optional("tags", default=list): [str], + Optional(object): object, + }, + ignore_extra_keys=True, +) + + +def validate(mark: typing.Optional[Mark]) -> None: + """Validate each test's LISA parameters.""" + if not mark: + return + assert not mark.args, "LISA marker cannot have positional arguments!" + mark.kwargs.update(lisa_schema.validate(mark.kwargs)) # type: ignore diff --git a/pytest/mypy.ini b/pytest/mypy.ini new file mode 100644 index 0000000000..6b513808fb --- /dev/null +++ b/pytest/mypy.ini @@ -0,0 +1,20 @@ +[mypy] +namespace_packages = True +pretty = True + +warn_unused_configs = True +disallow_any_generics = True +disallow_subclassing_any = False +disallow_untyped_calls = True +disallow_untyped_defs = True +disallow_incomplete_defs = True +check_untyped_defs = True +disallow_untyped_decorators = False +no_implicit_optional = True +warn_redundant_casts = True +warn_unused_ignores = True +warn_return_any = True +no_implicit_reexport = True +strict_equality = True + +warn_unreachable = True diff --git a/pytest/playbook.py b/pytest/playbook.py new file mode 100644 index 0000000000..0d3a79267c --- /dev/null +++ b/pytest/playbook.py @@ -0,0 +1,68 @@ +"""Describes the YAML schema for the playbook file. + +This module should be imported at runtime such that 'PLATFORMS' is +defined after all 'Target' subclasses have been defined. + +PLATFORMS is a mapping of platform names (strings) to the implementing +subclass of 'Target' where each subclass defines its own 'parameters' +schema, 'deploy' and 'delete' methods, and other platform-specific +functionality. 
loaded by Pytest, so a 'conftest.py' file works just fine. No manual
+ Optional("name", default=None): str, + Optional("area", default=None): str, + Optional("category", default=None): str, + Optional("priority", default=None): int, + Optional("tags", default=list): [str], + Optional("times", default=1): int, + Optional("exclude", default=False): bool, + } +) + +schema = Schema( + { + Optional("targets", default=[default_target]): [target_schema], + Optional("criteria", default=list): [criteria_schema], + } +) diff --git a/pytest/playbooks/criteria.yaml b/pytest/playbooks/criteria.yaml new file mode 100644 index 0000000000..3e86bc538d --- /dev/null +++ b/pytest/playbooks/criteria.yaml @@ -0,0 +1,10 @@ +# NOTE: This is an adjusted proof-of-concept ask from Chi. +criteria: + # Select all Priority 0 tests. + - priority: 0 + # Run tests with 'smoke' in the name twice. + - name: smoke + times: 2 + # Exclude all tests in Area "xdp" + - area: xdp + exclude: true diff --git a/pytest/playbooks/smoke.yaml b/pytest/playbooks/smoke.yaml new file mode 100644 index 0000000000..ea1a15ab60 --- /dev/null +++ b/pytest/playbooks/smoke.yaml @@ -0,0 +1,23 @@ +targets: + - name: Debian + platform: Azure + parameters: + image: credativ:Debian:9:9.0.201706190 + + - name: GitHub + platform: Azure + parameters: + image: github:github-enterprise:github-enterprise:latest + + - name: Citrix + platform: Azure + parameters: + image: citrix:netscalervpx-130:netscalerbyol:latest + + - name: AudioCodes + platform: Azure + parameters: + image: audiocodes:mediantsessionbordercontroller:mediantvirtualsbcazure:latest + +criteria: + - name: smoke diff --git a/pytest/playbooks/test.yaml b/pytest/playbooks/test.yaml new file mode 100644 index 0000000000..0495002ab0 --- /dev/null +++ b/pytest/playbooks/test.yaml @@ -0,0 +1,5 @@ +targets: + - name: Local Tests + platform: Local + - name: Setup Plan + platform: Custom diff --git a/pytest/poetry.lock b/pytest/poetry.lock new file mode 100644 index 0000000000..ea01a1d5f2 --- /dev/null +++ b/pytest/poetry.lock @@ -0,0 +1,1129 @@ 
+[[package]] +name = "apipkg" +version = "1.5" +description = "apipkg: namespace control and lazy-import mechanism" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "atomicwrites" +version = "1.4.0" +description = "Atomic file writes." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "attrs" +version = "20.2.0" +description = "Classes Without Boilerplate" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.extras] +dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "sphinx", "sphinx-rtd-theme", "pre-commit"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface"] +tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six"] + +[[package]] +name = "bcrypt" +version = "3.2.0" +description = "Modern password hashing for your software and your servers" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +cffi = ">=1.1" +six = ">=1.4.1" + +[package.extras] +tests = ["pytest (>=3.2.1,<3.3.0 || >3.3.0)"] +typecheck = ["mypy"] + +[[package]] +name = "black" +version = "20.8b1" +description = "The uncompromising code formatter." 
+category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +appdirs = "*" +click = ">=7.1.2" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.6,<1" +regex = ">=2020.1.8" +toml = ">=0.10.1" +typed-ast = ">=1.4.0" +typing-extensions = ">=3.7.4" + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] + +[[package]] +name = "cffi" +version = "1.14.3" +description = "Foreign Function Interface for Python calling C code." +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "click" +version = "7.1.2" +description = "Composable command line interface toolkit" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "colorama" +version = "0.4.3" +description = "Cross-platform colored terminal text." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "contextlib2" +version = "0.6.0.post1" +description = "Backports and enhancements for the contextlib module" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "cryptography" +version = "3.1.1" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+category = "main" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" + +[package.dependencies] +cffi = ">=1.8,<1.11.3 || >1.11.3" +six = ">=1.4.1" + +[package.extras] +docs = ["sphinx (>=1.6.5,<1.8.0 || >1.8.0,<3.1.0 || >3.1.0,<3.1.1 || >3.1.1)", "sphinx-rtd-theme"] +docstest = ["doc8", "pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] +pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["pytest (>=3.6.0,<3.9.0 || >3.9.0,<3.9.1 || >3.9.1,<3.9.2 || >3.9.2)", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,<3.79.2 || >3.79.2)"] + +[[package]] +name = "execnet" +version = "1.7.1" +description = "execnet: rapid multi-Python deployment" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +apipkg = ">=1.4" + +[package.extras] +testing = ["pre-commit"] + +[[package]] +name = "fabric" +version = "2.5.0" +description = "High level SSH command execution" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +invoke = ">=1.3,<2.0" +paramiko = ">=2.4" + +[package.extras] +pytest = ["mock (>=2.0.0,<3.0)", "pytest (>=3.2.5,<4.0)"] +testing = ["mock (>=2.0.0,<3.0)"] + +[[package]] +name = "filelock" +version = "3.0.12" +description = "A platform independent file lock." 
+category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "flake8" +version = "3.8.4" +description = "the modular source code checker: pep8 pyflakes and co" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" + +[package.dependencies] +mccabe = ">=0.6.0,<0.7.0" +pycodestyle = ">=2.6.0a1,<2.7.0" +pyflakes = ">=2.2.0,<2.3.0" + +[[package]] +name = "flake8-black" +version = "0.2.1" +description = "flake8 plugin to call black as a code style validator" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +black = "*" +flake8 = ">=3.0.0" + +[[package]] +name = "flake8-bugbear" +version = "20.1.4" +description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle." +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +attrs = ">=19.2.0" +flake8 = ">=3.0.0" + +[[package]] +name = "flake8-isort" +version = "4.0.0" +description = "flake8 plugin that integrates isort ." +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +flake8 = ">=3.2.1,<4" +isort = ">=4.3.5,<6" +testfixtures = ">=6.8.0,<7" + +[package.extras] +test = ["pytest (>=4.0.2,<6)", "toml"] + +[[package]] +name = "iniconfig" +version = "1.0.1" +description = "iniconfig: brain-dead simple config-ini parsing" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "invoke" +version = "1.4.1" +description = "Pythonic task execution" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "isort" +version = "5.6.1" +description = "A Python utility / library to sort Python imports." 
+category = "dev" +optional = false +python-versions = ">=3.6,<4.0" + +[package.extras] +pipfile_deprecated_finder = ["pipreqs", "requirementslib"] +requirements_deprecated_finder = ["pipreqs", "pip-api"] +colors = ["colorama (>=0.4.3,<0.5.0)"] + +[[package]] +name = "jedi" +version = "0.17.2" +description = "An autocompletion tool for Python that can be used for text editors." +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +parso = ">=0.7.0,<0.8.0" + +[package.extras] +qa = ["flake8 (3.7.9)"] +testing = ["Django (<3.1)", "colorama", "docopt", "pytest (>=3.9.0,<5.0.0)"] + +[[package]] +name = "mccabe" +version = "0.6.1" +description = "McCabe checker, plugin for flake8" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "mypy" +version = "0.782" +description = "Optional static typing for Python" +category = "dev" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +mypy-extensions = ">=0.4.3,<0.5.0" +typed-ast = ">=1.4.0,<1.5.0" +typing-extensions = ">=3.7.4" + +[package.extras] +dmypy = ["psutil (>=4.0)"] + +[[package]] +name = "mypy-extensions" +version = "0.4.3" +description = "Experimental type system extensions for programs checked with the mypy typechecker." 
+category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "packaging" +version = "20.4" +description = "Core utilities for Python packages" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +pyparsing = ">=2.0.2" +six = "*" + +[[package]] +name = "paramiko" +version = "2.7.2" +description = "SSH2 protocol library" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +bcrypt = ">=3.1.3" +cryptography = ">=2.5" +pynacl = ">=1.0.1" + +[package.extras] +all = ["pyasn1 (>=0.1.7)", "pynacl (>=1.0.1)", "bcrypt (>=3.1.3)", "invoke (>=1.3)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] +ed25519 = ["pynacl (>=1.0.1)", "bcrypt (>=3.1.3)"] +gssapi = ["pyasn1 (>=0.1.7)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] +invoke = ["invoke (>=1.3)"] + +[[package]] +name = "parso" +version = "0.7.1" +description = "A Python Parser" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.extras] +testing = ["docopt", "pytest (>=3.0.7)"] + +[[package]] +name = "pathspec" +version = "0.8.0" +description = "Utility library for gitignore style pattern matching of file paths." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "pluggy" +version = "0.13.1" +description = "plugin and hook calling mechanisms for python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.extras] +dev = ["pre-commit", "tox"] + +[[package]] +name = "py" +version = "1.9.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pycodestyle" +version = "2.6.0" +description = "Python style guide checker" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pycparser" +version = "2.20" +description = "C parser in Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pyflakes" +version = "2.2.0" +description = "passive checker of Python programs" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pyls-black" +version = "0.4.6" +description = "Black plugin for the Python Language Server" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +black = ">=19.3b0" +python-language-server = "*" +toml = "*" + +[package.extras] +dev = ["isort (>=5.0)", "flake8", "pytest", "mypy"] + +[[package]] +name = "pyls-isort" +version = "0.2.0" +description = "Isort plugin for python-language-server" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +isort = "*" +python-language-server = "*" + +[[package]] +name = "pyls-mypy" +version = "0.1.8" +description = "Mypy linter for the Python Language Server" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +mypy = "*" 
+python-language-server = "*" + +[package.extras] +test = ["tox", "versioneer", "pytest", "pytest-cov", "coverage"] + +[[package]] +name = "pynacl" +version = "1.4.0" +description = "Python binding to the Networking and Cryptography (NaCl) library" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[package.dependencies] +cffi = ">=1.4.1" +six = "*" + +[package.extras] +docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] +tests = ["pytest (>=3.2.1,<3.3.0 || >3.3.0)", "hypothesis (>=3.27.0)"] + +[[package]] +name = "pyparsing" +version = "2.4.7" +description = "Python parsing module" +category = "main" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "pytest" +version = "6.1.1" +description = "pytest: simple powerful testing with Python" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} +attrs = ">=17.4.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<1.0" +py = ">=1.8.2" +toml = "*" + +[package.extras] +checkqa_mypy = ["mypy (0.780)"] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] + +[[package]] +name = "pytest-flake8" +version = "1.0.6" +description = "pytest plugin to check FLAKE8 requirements" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +flake8 = ">=3.5" +pytest = ">=3.5" + +[[package]] +name = "pytest-forked" +version = "1.3.0" +description = "run tests in isolated forked subprocesses" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +py = "*" +pytest = ">=3.10" + +[[package]] +name = "pytest-html" +version = "2.1.1" +description = "pytest plugin for generating HTML reports" +category = "main" +optional = false 
+python-versions = ">=3.6" + +[package.dependencies] +pytest = ">=5.0" +pytest-metadata = "*" + +[[package]] +name = "pytest-metadata" +version = "1.10.0" +description = "pytest plugin for test session metadata" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" + +[package.dependencies] +pytest = ">=2.9.0" + +[[package]] +name = "pytest-mypy" +version = "0.7.0" +description = "Mypy static type checker plugin for Pytest" +category = "dev" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +filelock = ">=3.0" +mypy = {version = ">=0.700", markers = "python_version >= \"3.8\""} +pytest = ">=3.5" + +[[package]] +name = "pytest-rerunfailures" +version = "9.1.1" +description = "pytest plugin to re-run tests to eliminate flaky failures" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +pytest = ">=5.0" + +[[package]] +name = "pytest-timeout" +version = "1.4.2" +description = "py.test plugin to abort hanging tests" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +pytest = ">=3.6.0" + +[[package]] +name = "pytest-xdist" +version = "2.1.0" +description = "pytest xdist plugin for distributed testing and loop-on-failing modes" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +execnet = ">=1.1" +pytest = ">=6.0.0" +pytest-forked = "*" + +[package.extras] +psutil = ["psutil (>=3.0)"] +testing = ["filelock"] + +[[package]] +name = "python-jsonrpc-server" +version = "0.4.0" +description = "JSON RPC 2.0 server library" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +ujson = ">=3.0.0" + +[package.extras] +test = ["versioneer", "pylint", "pycodestyle", "pyflakes", "pytest", "mock", "pytest-cov", "coverage"] + +[[package]] +name = "python-language-server" +version = "0.35.1" +description = "Python Language Server for the Language Server 
Protocol" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +jedi = ">=0.17.0,<0.18.0" +pluggy = "*" +python-jsonrpc-server = ">=0.4.0" + +[package.extras] +all = ["autopep8", "flake8 (>=3.8.0)", "mccabe (>=0.6.0,<0.7.0)", "pycodestyle (>=2.6.0,<2.7.0)", "pydocstyle (>=2.0.0)", "pyflakes (>=2.2.0,<2.3.0)", "pylint (>=2.5.0)", "rope (>=0.10.5)", "yapf"] +autopep8 = ["autopep8"] +flake8 = ["flake8 (>=3.8.0)"] +mccabe = ["mccabe (>=0.6.0,<0.7.0)"] +pycodestyle = ["pycodestyle (>=2.6.0,<2.7.0)"] +pydocstyle = ["pydocstyle (>=2.0.0)"] +pyflakes = ["pyflakes (>=2.2.0,<2.3.0)"] +pylint = ["pylint (>=2.5.0)"] +rope = ["rope (>0.10.5)"] +test = ["versioneer", "pylint (>=2.5.0)", "pytest", "mock", "pytest-cov", "coverage", "numpy", "pandas", "matplotlib", "flaky", "pyqt5"] +yapf = ["yapf"] + +[[package]] +name = "pyyaml" +version = "5.3.1" +description = "YAML parser and emitter for Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "regex" +version = "2020.9.27" +description = "Alternative regular expression module, to replace re." +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "rope" +version = "0.18.0" +description = "a python refactoring library..." 
+category = "dev" +optional = false +python-versions = "*" + +[package.extras] +dev = ["pytest"] + +[[package]] +name = "schema" +version = "0.7.3" +description = "Simple data validation library" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +contextlib2 = ">=0.5.5" + +[[package]] +name = "six" +version = "1.15.0" +description = "Python 2 and 3 compatibility utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "tenacity" +version = "6.2.0" +description = "Retry code until it succeeds" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +six = ">=1.9.0" + +[package.extras] +doc = ["reno", "sphinx", "tornado (>=4.5)"] + +[[package]] +name = "testfixtures" +version = "6.15.0" +description = "A collection of helpers and mock objects for unit tests and doc tests." +category = "dev" +optional = false +python-versions = "*" + +[package.extras] +build = ["setuptools-git", "wheel", "twine"] +docs = ["sphinx", "zope.component", "sybil", "twisted", "mock", "django (<2)", "django"] +test = ["pytest (>=3.6)", "pytest-cov", "pytest-django", "zope.component", "sybil", "twisted", "mock", "django (<2)", "django"] + +[[package]] +name = "toml" +version = "0.10.1" +description = "Python Library for Tom's Obvious, Minimal Language" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "typed-ast" +version = "1.4.1" +description = "a fork of Python 2 and 3 ast modules with type comment support" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "typing-extensions" +version = "3.7.4.3" +description = "Backported and Experimental Type Hints for Python 3.5+" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "ujson" +version = "4.0.1" +description = "Ultra fast JSON encoder and decoder for Python" +category = "dev" +optional = false +python-versions = 
">=3.6" + +[metadata] +lock-version = "1.1" +python-versions = "^3.8" +content-hash = "ff9d853cf9f58598aa01e465e2c673172b9e573fd7a8569bf29236348884c748" + +[metadata.files] +apipkg = [ + {file = "apipkg-1.5-py2.py3-none-any.whl", hash = "sha256:58587dd4dc3daefad0487f6d9ae32b4542b185e1c36db6993290e7c41ca2b47c"}, + {file = "apipkg-1.5.tar.gz", hash = "sha256:37228cda29411948b422fae072f57e31d3396d2ee1c9783775980ee9c9990af6"}, +] +appdirs = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] +atomicwrites = [ + {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, + {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, +] +attrs = [ + {file = "attrs-20.2.0-py2.py3-none-any.whl", hash = "sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc"}, + {file = "attrs-20.2.0.tar.gz", hash = "sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594"}, +] +bcrypt = [ + {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6"}, + {file = "bcrypt-3.2.0-cp36-abi3-manylinux1_x86_64.whl", hash = "sha256:63d4e3ff96188e5898779b6057878fecf3f11cfe6ec3b313ea09955d587ec7a7"}, + {file = "bcrypt-3.2.0-cp36-abi3-manylinux2010_x86_64.whl", hash = "sha256:cd1ea2ff3038509ea95f687256c46b79f5fc382ad0aa3664d200047546d511d1"}, + {file = "bcrypt-3.2.0-cp36-abi3-manylinux2014_aarch64.whl", hash = "sha256:cdcdcb3972027f83fe24a48b1e90ea4b584d35f1cc279d76de6fc4b13376239d"}, + {file = "bcrypt-3.2.0-cp36-abi3-win32.whl", hash = "sha256:a67fb841b35c28a59cebed05fbd3e80eea26e6d75851f0574a9273c80f3e9b55"}, + {file = "bcrypt-3.2.0-cp36-abi3-win_amd64.whl", 
hash = "sha256:81fec756feff5b6818ea7ab031205e1d323d8943d237303baca2c5f9c7846f34"}, + {file = "bcrypt-3.2.0.tar.gz", hash = "sha256:5b93c1726e50a93a033c36e5ca7fdcd29a5c7395af50a6892f5d9e7c6cfbfb29"}, +] +black = [ + {file = "black-20.8b1.tar.gz", hash = "sha256:1c02557aa099101b9d21496f8a914e9ed2222ef70336404eeeac8edba836fbea"}, +] +cffi = [ + {file = "cffi-1.14.3-2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3eeeb0405fd145e714f7633a5173318bd88d8bbfc3dd0a5751f8c4f70ae629bc"}, + {file = "cffi-1.14.3-2-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:cb763ceceae04803adcc4e2d80d611ef201c73da32d8f2722e9d0ab0c7f10768"}, + {file = "cffi-1.14.3-2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:44f60519595eaca110f248e5017363d751b12782a6f2bd6a7041cba275215f5d"}, + {file = "cffi-1.14.3-2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c53af463f4a40de78c58b8b2710ade243c81cbca641e34debf3396a9640d6ec1"}, + {file = "cffi-1.14.3-2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:33c6cdc071ba5cd6d96769c8969a0531be2d08c2628a0143a10a7dcffa9719ca"}, + {file = "cffi-1.14.3-2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c11579638288e53fc94ad60022ff1b67865363e730ee41ad5e6f0a17188b327a"}, + {file = "cffi-1.14.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:3cb3e1b9ec43256c4e0f8d2837267a70b0e1ca8c4f456685508ae6106b1f504c"}, + {file = "cffi-1.14.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f0620511387790860b249b9241c2f13c3a80e21a73e0b861a2df24e9d6f56730"}, + {file = "cffi-1.14.3-cp27-cp27m-win32.whl", hash = "sha256:005f2bfe11b6745d726dbb07ace4d53f057de66e336ff92d61b8c7e9c8f4777d"}, + {file = "cffi-1.14.3-cp27-cp27m-win_amd64.whl", hash = "sha256:2f9674623ca39c9ebe38afa3da402e9326c245f0f5ceff0623dccdac15023e05"}, + {file = "cffi-1.14.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:09e96138280241bd355cd585148dec04dbbedb4f46128f340d696eaafc82dd7b"}, + {file = "cffi-1.14.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = 
"sha256:3363e77a6176afb8823b6e06db78c46dbc4c7813b00a41300a4873b6ba63b171"}, + {file = "cffi-1.14.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0ef488305fdce2580c8b2708f22d7785ae222d9825d3094ab073e22e93dfe51f"}, + {file = "cffi-1.14.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:0b1ad452cc824665ddc682400b62c9e4f5b64736a2ba99110712fdee5f2505c4"}, + {file = "cffi-1.14.3-cp35-cp35m-win32.whl", hash = "sha256:85ba797e1de5b48aa5a8427b6ba62cf69607c18c5d4eb747604b7302f1ec382d"}, + {file = "cffi-1.14.3-cp35-cp35m-win_amd64.whl", hash = "sha256:e66399cf0fc07de4dce4f588fc25bfe84a6d1285cc544e67987d22663393926d"}, + {file = "cffi-1.14.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:15f351bed09897fbda218e4db5a3d5c06328862f6198d4fb385f3e14e19decb3"}, + {file = "cffi-1.14.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:4d7c26bfc1ea9f92084a1d75e11999e97b62d63128bcc90c3624d07813c52808"}, + {file = "cffi-1.14.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:23e5d2040367322824605bc29ae8ee9175200b92cb5483ac7d466927a9b3d537"}, + {file = "cffi-1.14.3-cp36-cp36m-win32.whl", hash = "sha256:a624fae282e81ad2e4871bdb767e2c914d0539708c0f078b5b355258293c98b0"}, + {file = "cffi-1.14.3-cp36-cp36m-win_amd64.whl", hash = "sha256:de31b5164d44ef4943db155b3e8e17929707cac1e5bd2f363e67a56e3af4af6e"}, + {file = "cffi-1.14.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:f92cdecb618e5fa4658aeb97d5eb3d2f47aa94ac6477c6daf0f306c5a3b9e6b1"}, + {file = "cffi-1.14.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:22399ff4870fb4c7ef19fff6eeb20a8bbf15571913c181c78cb361024d574579"}, + {file = "cffi-1.14.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:f4eae045e6ab2bb54ca279733fe4eb85f1effda392666308250714e01907f394"}, + {file = "cffi-1.14.3-cp37-cp37m-win32.whl", hash = "sha256:b0358e6fefc74a16f745afa366acc89f979040e0cbc4eec55ab26ad1f6a9bfbc"}, + {file = "cffi-1.14.3-cp37-cp37m-win_amd64.whl", hash = "sha256:6642f15ad963b5092d65aed022d033c77763515fdc07095208f15d3563003869"}, + {file 
= "cffi-1.14.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:2791f68edc5749024b4722500e86303a10d342527e1e3bcac47f35fbd25b764e"}, + {file = "cffi-1.14.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:529c4ed2e10437c205f38f3691a68be66c39197d01062618c55f74294a4a4828"}, + {file = "cffi-1.14.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8f0f1e499e4000c4c347a124fa6a27d37608ced4fe9f7d45070563b7c4c370c9"}, + {file = "cffi-1.14.3-cp38-cp38-win32.whl", hash = "sha256:3b8eaf915ddc0709779889c472e553f0d3e8b7bdf62dab764c8921b09bf94522"}, + {file = "cffi-1.14.3-cp38-cp38-win_amd64.whl", hash = "sha256:bbd2f4dfee1079f76943767fce837ade3087b578aeb9f69aec7857d5bf25db15"}, + {file = "cffi-1.14.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:cc75f58cdaf043fe6a7a6c04b3b5a0e694c6a9e24050967747251fb80d7bce0d"}, + {file = "cffi-1.14.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:bf39a9e19ce7298f1bd6a9758fa99707e9e5b1ebe5e90f2c3913a47bc548747c"}, + {file = "cffi-1.14.3-cp39-cp39-win32.whl", hash = "sha256:d80998ed59176e8cba74028762fbd9b9153b9afc71ea118e63bbf5d4d0f9552b"}, + {file = "cffi-1.14.3-cp39-cp39-win_amd64.whl", hash = "sha256:c150eaa3dadbb2b5339675b88d4573c1be3cb6f2c33a6c83387e10cc0bf05bd3"}, + {file = "cffi-1.14.3.tar.gz", hash = "sha256:f92f789e4f9241cd262ad7a555ca2c648a98178a953af117ef7fad46aa1d5591"}, +] +click = [ + {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"}, + {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"}, +] +colorama = [ + {file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"}, + {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"}, +] +contextlib2 = [ + {file = "contextlib2-0.6.0.post1-py2.py3-none-any.whl", hash = 
"sha256:3355078a159fbb44ee60ea80abd0d87b80b78c248643b49aa6d94673b413609b"}, + {file = "contextlib2-0.6.0.post1.tar.gz", hash = "sha256:01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e"}, +] +cryptography = [ + {file = "cryptography-3.1.1-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:65beb15e7f9c16e15934569d29fb4def74ea1469d8781f6b3507ab896d6d8719"}, + {file = "cryptography-3.1.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:983c0c3de4cb9fcba68fd3f45ed846eb86a2a8b8d8bc5bb18364c4d00b3c61fe"}, + {file = "cryptography-3.1.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:e97a3b627e3cb63c415a16245d6cef2139cca18bb1183d1b9375a1c14e83f3b3"}, + {file = "cryptography-3.1.1-cp27-cp27m-win32.whl", hash = "sha256:cb179acdd4ae1e4a5a160d80b87841b3d0e0be84af46c7bb2cd7ece57a39c4ba"}, + {file = "cryptography-3.1.1-cp27-cp27m-win_amd64.whl", hash = "sha256:b372026ebf32fe2523159f27d9f0e9f485092e43b00a5adacf732192a70ba118"}, + {file = "cryptography-3.1.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:680da076cad81cdf5ffcac50c477b6790be81768d30f9da9e01960c4b18a66db"}, + {file = "cryptography-3.1.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:5d52c72449bb02dd45a773a203196e6d4fae34e158769c896012401f33064396"}, + {file = "cryptography-3.1.1-cp35-abi3-macosx_10_10_x86_64.whl", hash = "sha256:f0e099fc4cc697450c3dd4031791559692dd941a95254cb9aeded66a7aa8b9bc"}, + {file = "cryptography-3.1.1-cp35-abi3-manylinux1_x86_64.whl", hash = "sha256:a7597ffc67987b37b12e09c029bd1dc43965f75d328076ae85721b84046e9ca7"}, + {file = "cryptography-3.1.1-cp35-abi3-manylinux2010_x86_64.whl", hash = "sha256:4549b137d8cbe3c2eadfa56c0c858b78acbeff956bd461e40000b2164d9167c6"}, + {file = "cryptography-3.1.1-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:89aceb31cd5f9fc2449fe8cf3810797ca52b65f1489002d58fe190bfb265c536"}, + {file = "cryptography-3.1.1-cp35-cp35m-win32.whl", hash = "sha256:559d622aef2a2dff98a892eef321433ba5bc55b2485220a8ca289c1ecc2bd54f"}, + {file = 
"cryptography-3.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:451cdf60be4dafb6a3b78802006a020e6cd709c22d240f94f7a0696240a17154"}, + {file = "cryptography-3.1.1-cp36-abi3-win32.whl", hash = "sha256:762bc5a0df03c51ee3f09c621e1cee64e3a079a2b5020de82f1613873d79ee70"}, + {file = "cryptography-3.1.1-cp36-abi3-win_amd64.whl", hash = "sha256:b12e715c10a13ca1bd27fbceed9adc8c5ff640f8e1f7ea76416352de703523c8"}, + {file = "cryptography-3.1.1-cp36-cp36m-win32.whl", hash = "sha256:21b47c59fcb1c36f1113f3709d37935368e34815ea1d7073862e92f810dc7499"}, + {file = "cryptography-3.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:48ee615a779ffa749d7d50c291761dc921d93d7cf203dca2db663b4f193f0e49"}, + {file = "cryptography-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:b2bded09c578d19e08bd2c5bb8fed7f103e089752c9cf7ca7ca7de522326e921"}, + {file = "cryptography-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f99317a0fa2e49917689b8cf977510addcfaaab769b3f899b9c481bbd76730c2"}, + {file = "cryptography-3.1.1-cp38-cp38-win32.whl", hash = "sha256:ab010e461bb6b444eaf7f8c813bb716be2d78ab786103f9608ffd37a4bd7d490"}, + {file = "cryptography-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:99d4984aabd4c7182050bca76176ce2dbc9fa9748afe583a7865c12954d714ba"}, + {file = "cryptography-3.1.1.tar.gz", hash = "sha256:9d9fc6a16357965d282dd4ab6531013935425d0dc4950df2e0cf2a1b1ac1017d"}, +] +execnet = [ + {file = "execnet-1.7.1-py2.py3-none-any.whl", hash = "sha256:d4efd397930c46415f62f8a31388d6be4f27a91d7550eb79bc64a756e0056547"}, + {file = "execnet-1.7.1.tar.gz", hash = "sha256:cacb9df31c9680ec5f95553976c4da484d407e85e41c83cb812aa014f0eddc50"}, +] +fabric = [ + {file = "fabric-2.5.0-py2.py3-none-any.whl", hash = "sha256:160331934ea60036604928e792fa8e9f813266b098ef5562aa82b88527740389"}, + {file = "fabric-2.5.0.tar.gz", hash = "sha256:24842d7d51556adcabd885ac3cf5e1df73fc622a1708bf3667bf5927576cdfa6"}, +] +filelock = [ + {file = "filelock-3.0.12-py3-none-any.whl", hash = 
"sha256:929b7d63ec5b7d6b71b0fa5ac14e030b3f70b75747cef1b10da9b879fef15836"}, + {file = "filelock-3.0.12.tar.gz", hash = "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59"}, +] +flake8 = [ + {file = "flake8-3.8.4-py2.py3-none-any.whl", hash = "sha256:749dbbd6bfd0cf1318af27bf97a14e28e5ff548ef8e5b1566ccfb25a11e7c839"}, + {file = "flake8-3.8.4.tar.gz", hash = "sha256:aadae8761ec651813c24be05c6f7b4680857ef6afaae4651a4eccaef97ce6c3b"}, +] +flake8-black = [ + {file = "flake8-black-0.2.1.tar.gz", hash = "sha256:f26651bc10db786c03f4093414f7c9ea982ed8a244cec323c984feeffdf4c118"}, +] +flake8-bugbear = [ + {file = "flake8-bugbear-20.1.4.tar.gz", hash = "sha256:bd02e4b009fb153fe6072c31c52aeab5b133d508095befb2ffcf3b41c4823162"}, + {file = "flake8_bugbear-20.1.4-py36.py37.py38-none-any.whl", hash = "sha256:a3ddc03ec28ba2296fc6f89444d1c946a6b76460f859795b35b77d4920a51b63"}, +] +flake8-isort = [ + {file = "flake8-isort-4.0.0.tar.gz", hash = "sha256:2b91300f4f1926b396c2c90185844eb1a3d5ec39ea6138832d119da0a208f4d9"}, + {file = "flake8_isort-4.0.0-py2.py3-none-any.whl", hash = "sha256:729cd6ef9ba3659512dee337687c05d79c78e1215fdf921ed67e5fe46cce2f3c"}, +] +iniconfig = [ + {file = "iniconfig-1.0.1-py3-none-any.whl", hash = "sha256:80cf40c597eb564e86346103f609d74efce0f6b4d4f30ec8ce9e2c26411ba437"}, + {file = "iniconfig-1.0.1.tar.gz", hash = "sha256:e5f92f89355a67de0595932a6c6c02ab4afddc6fcdc0bfc5becd0d60884d3f69"}, +] +invoke = [ + {file = "invoke-1.4.1-py2-none-any.whl", hash = "sha256:93e12876d88130c8e0d7fd6618dd5387d6b36da55ad541481dfa5e001656f134"}, + {file = "invoke-1.4.1-py3-none-any.whl", hash = "sha256:87b3ef9d72a1667e104f89b159eaf8a514dbf2f3576885b2bbdefe74c3fb2132"}, + {file = "invoke-1.4.1.tar.gz", hash = "sha256:de3f23bfe669e3db1085789fd859eb8ca8e0c5d9c20811e2407fa042e8a5e15d"}, +] +isort = [ + {file = "isort-5.6.1-py3-none-any.whl", hash = "sha256:dd3211f513f4a92ec1ec1876fc1dc3c686649c349d49523f5b5adbb0814e5960"}, + {file = "isort-5.6.1.tar.gz", hash = 
"sha256:2f510f34ae18a8d0958c53eec51ef84fd099f07c4c639676525acbcd7b5bd3ff"}, +] +jedi = [ + {file = "jedi-0.17.2-py2.py3-none-any.whl", hash = "sha256:98cc583fa0f2f8304968199b01b6b4b94f469a1f4a74c1560506ca2a211378b5"}, + {file = "jedi-0.17.2.tar.gz", hash = "sha256:86ed7d9b750603e4ba582ea8edc678657fb4007894a12bcf6f4bb97892f31d20"}, +] +mccabe = [ + {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, + {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, +] +mypy = [ + {file = "mypy-0.782-cp35-cp35m-macosx_10_6_x86_64.whl", hash = "sha256:2c6cde8aa3426c1682d35190b59b71f661237d74b053822ea3d748e2c9578a7c"}, + {file = "mypy-0.782-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9c7a9a7ceb2871ba4bac1cf7217a7dd9ccd44c27c2950edbc6dc08530f32ad4e"}, + {file = "mypy-0.782-cp35-cp35m-win_amd64.whl", hash = "sha256:c05b9e4fb1d8a41d41dec8786c94f3b95d3c5f528298d769eb8e73d293abc48d"}, + {file = "mypy-0.782-cp36-cp36m-macosx_10_6_x86_64.whl", hash = "sha256:6731603dfe0ce4352c555c6284c6db0dc935b685e9ce2e4cf220abe1e14386fd"}, + {file = "mypy-0.782-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f05644db6779387ccdb468cc47a44b4356fc2ffa9287135d05b70a98dc83b89a"}, + {file = "mypy-0.782-cp36-cp36m-win_amd64.whl", hash = "sha256:b7fbfabdbcc78c4f6fc4712544b9b0d6bf171069c6e0e3cb82440dd10ced3406"}, + {file = "mypy-0.782-cp37-cp37m-macosx_10_6_x86_64.whl", hash = "sha256:3fdda71c067d3ddfb21da4b80e2686b71e9e5c72cca65fa216d207a358827f86"}, + {file = "mypy-0.782-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:d7df6eddb6054d21ca4d3c6249cae5578cb4602951fd2b6ee2f5510ffb098707"}, + {file = "mypy-0.782-cp37-cp37m-win_amd64.whl", hash = "sha256:a4a2cbcfc4cbf45cd126f531dedda8485671545b43107ded25ce952aac6fb308"}, + {file = "mypy-0.782-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6bb93479caa6619d21d6e7160c552c1193f6952f0668cdda2f851156e85186fc"}, + {file = 
"mypy-0.782-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:81c7908b94239c4010e16642c9102bfc958ab14e36048fa77d0be3289dda76ea"}, + {file = "mypy-0.782-cp38-cp38-win_amd64.whl", hash = "sha256:5dd13ff1f2a97f94540fd37a49e5d255950ebcdf446fb597463a40d0df3fac8b"}, + {file = "mypy-0.782-py3-none-any.whl", hash = "sha256:e0b61738ab504e656d1fe4ff0c0601387a5489ca122d55390ade31f9ca0e252d"}, + {file = "mypy-0.782.tar.gz", hash = "sha256:eff7d4a85e9eea55afa34888dfeaccde99e7520b51f867ac28a48492c0b1130c"}, +] +mypy-extensions = [ + {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, + {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, +] +packaging = [ + {file = "packaging-20.4-py2.py3-none-any.whl", hash = "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"}, + {file = "packaging-20.4.tar.gz", hash = "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8"}, +] +paramiko = [ + {file = "paramiko-2.7.2-py2.py3-none-any.whl", hash = "sha256:4f3e316fef2ac628b05097a637af35685183111d4bc1b5979bd397c2ab7b5898"}, + {file = "paramiko-2.7.2.tar.gz", hash = "sha256:7f36f4ba2c0d81d219f4595e35f70d56cc94f9ac40a6acdf51d6ca210ce65035"}, +] +parso = [ + {file = "parso-0.7.1-py2.py3-none-any.whl", hash = "sha256:97218d9159b2520ff45eb78028ba8b50d2bc61dcc062a9682666f2dc4bd331ea"}, + {file = "parso-0.7.1.tar.gz", hash = "sha256:caba44724b994a8a5e086460bb212abc5a8bc46951bf4a9a1210745953622eb9"}, +] +pathspec = [ + {file = "pathspec-0.8.0-py2.py3-none-any.whl", hash = "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0"}, + {file = "pathspec-0.8.0.tar.gz", hash = "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061"}, +] +pluggy = [ + {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"}, + {file 
= "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"}, +] +py = [ + {file = "py-1.9.0-py2.py3-none-any.whl", hash = "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2"}, + {file = "py-1.9.0.tar.gz", hash = "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342"}, +] +pycodestyle = [ + {file = "pycodestyle-2.6.0-py2.py3-none-any.whl", hash = "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367"}, + {file = "pycodestyle-2.6.0.tar.gz", hash = "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"}, +] +pycparser = [ + {file = "pycparser-2.20-py2.py3-none-any.whl", hash = "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"}, + {file = "pycparser-2.20.tar.gz", hash = "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0"}, +] +pyflakes = [ + {file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"}, + {file = "pyflakes-2.2.0.tar.gz", hash = "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"}, +] +pyls-black = [ + {file = "pyls-black-0.4.6.tar.gz", hash = "sha256:33700e5ed605636ea7ba39188a1362d2f8602f7301f8f2b8544773886f965663"}, + {file = "pyls_black-0.4.6-py3-none-any.whl", hash = "sha256:8f5fb8fed503588c10435d2d48e2c3751437f1bdb8116134b05a4591c4899940"}, +] +pyls-isort = [ + {file = "pyls-isort-0.2.0.tar.gz", hash = "sha256:a6c292332746d3dc690f2a3dcdb9a01d913b9ee8444defe3cbffcddb7e3874eb"}, +] +pyls-mypy = [ + {file = "pyls-mypy-0.1.8.tar.gz", hash = "sha256:3fd83028961f0ca9eb3048b7a01cf42a9e3d46d8ea4935c1424c33da22c3eb03"}, +] +pynacl = [ + {file = "PyNaCl-1.4.0-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff"}, + {file = "PyNaCl-1.4.0-cp27-cp27m-manylinux1_x86_64.whl", hash = 
"sha256:d452a6746f0a7e11121e64625109bc4468fc3100452817001dbe018bb8b08514"}, + {file = "PyNaCl-1.4.0-cp27-cp27m-win32.whl", hash = "sha256:2fe0fc5a2480361dcaf4e6e7cea00e078fcda07ba45f811b167e3f99e8cff574"}, + {file = "PyNaCl-1.4.0-cp27-cp27m-win_amd64.whl", hash = "sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80"}, + {file = "PyNaCl-1.4.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:7757ae33dae81c300487591c68790dfb5145c7d03324000433d9a2c141f82af7"}, + {file = "PyNaCl-1.4.0-cp35-abi3-macosx_10_10_x86_64.whl", hash = "sha256:757250ddb3bff1eecd7e41e65f7f833a8405fede0194319f87899690624f2122"}, + {file = "PyNaCl-1.4.0-cp35-abi3-manylinux1_x86_64.whl", hash = "sha256:30f9b96db44e09b3304f9ea95079b1b7316b2b4f3744fe3aaecccd95d547063d"}, + {file = "PyNaCl-1.4.0-cp35-abi3-win32.whl", hash = "sha256:4e10569f8cbed81cb7526ae137049759d2a8d57726d52c1a000a3ce366779634"}, + {file = "PyNaCl-1.4.0-cp35-abi3-win_amd64.whl", hash = "sha256:c914f78da4953b33d4685e3cdc7ce63401247a21425c16a39760e282075ac4a6"}, + {file = "PyNaCl-1.4.0-cp35-cp35m-win32.whl", hash = "sha256:06cbb4d9b2c4bd3c8dc0d267416aaed79906e7b33f114ddbf0911969794b1cc4"}, + {file = "PyNaCl-1.4.0-cp35-cp35m-win_amd64.whl", hash = "sha256:511d269ee845037b95c9781aa702f90ccc36036f95d0f31373a6a79bd8242e25"}, + {file = "PyNaCl-1.4.0-cp36-cp36m-win32.whl", hash = "sha256:11335f09060af52c97137d4ac54285bcb7df0cef29014a1a4efe64ac065434c4"}, + {file = "PyNaCl-1.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:cd401ccbc2a249a47a3a1724c2918fcd04be1f7b54eb2a5a71ff915db0ac51c6"}, + {file = "PyNaCl-1.4.0-cp37-cp37m-win32.whl", hash = "sha256:8122ba5f2a2169ca5da936b2e5a511740ffb73979381b4229d9188f6dcb22f1f"}, + {file = "PyNaCl-1.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:537a7ccbea22905a0ab36ea58577b39d1fa9b1884869d173b5cf111f006f689f"}, + {file = "PyNaCl-1.4.0-cp38-cp38-win32.whl", hash = "sha256:9c4a7ea4fb81536c1b1f5cc44d54a296f96ae78c1ebd2311bd0b60be45a48d96"}, + {file = "PyNaCl-1.4.0-cp38-cp38-win_amd64.whl", 
hash = "sha256:7c6092102219f59ff29788860ccb021e80fffd953920c4a8653889c029b2d420"}, + {file = "PyNaCl-1.4.0.tar.gz", hash = "sha256:54e9a2c849c742006516ad56a88f5c74bf2ce92c9f67435187c3c5953b346505"}, +] +pyparsing = [ + {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, + {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, +] +pytest = [ + {file = "pytest-6.1.1-py3-none-any.whl", hash = "sha256:7a8190790c17d79a11f847fba0b004ee9a8122582ebff4729a082c109e81a4c9"}, + {file = "pytest-6.1.1.tar.gz", hash = "sha256:8f593023c1a0f916110285b6efd7f99db07d59546e3d8c36fc60e2ab05d3be92"}, +] +pytest-flake8 = [ + {file = "pytest-flake8-1.0.6.tar.gz", hash = "sha256:1b82bb58c88eb1db40524018d3fcfd0424575029703b4e2d8e3ee873f2b17027"}, + {file = "pytest_flake8-1.0.6-py2.py3-none-any.whl", hash = "sha256:2e91578ecd9b200066f99c1e1de0f510fbb85bcf43712d46ea29fe47607cc234"}, +] +pytest-forked = [ + {file = "pytest-forked-1.3.0.tar.gz", hash = "sha256:6aa9ac7e00ad1a539c41bec6d21011332de671e938c7637378ec9710204e37ca"}, + {file = "pytest_forked-1.3.0-py2.py3-none-any.whl", hash = "sha256:dc4147784048e70ef5d437951728825a131b81714b398d5d52f17c7c144d8815"}, +] +pytest-html = [ + {file = "pytest-html-2.1.1.tar.gz", hash = "sha256:6a4ac391e105e391208e3eb9bd294a60dd336447fd8e1acddff3a6de7f4e57c5"}, + {file = "pytest_html-2.1.1-py2.py3-none-any.whl", hash = "sha256:9e4817e8be8ddde62e8653c8934d0f296b605da3d2277a052f762c56a8b32df2"}, +] +pytest-metadata = [ + {file = "pytest-metadata-1.10.0.tar.gz", hash = "sha256:b7e6e0a45adacb17a03a97bf7a2ef60cc1f4e172bcce9732ce5e814191932315"}, + {file = "pytest_metadata-1.10.0-py2.py3-none-any.whl", hash = "sha256:fcbcc5781aee450107c620c79c57e50796b6777b82b3c504be9cbc3017201169"}, +] +pytest-mypy = [ + {file = "pytest-mypy-0.7.0.tar.gz", hash = "sha256:5a667d9a2b66bf98b3a494411f221923a6e2c3eafbe771104951aaec8985673d"}, + 
{file = "pytest_mypy-0.7.0-py3-none-any.whl", hash = "sha256:e0505ace48d2b19fe686366fce6b4a2ac0d090423736bb6aa2e39554d18974b7"}, +] +pytest-rerunfailures = [ + {file = "pytest-rerunfailures-9.1.1.tar.gz", hash = "sha256:1cb11a17fc121b3918414eb5eaf314ee325f2e693ac7cb3f6abf7560790827f2"}, + {file = "pytest_rerunfailures-9.1.1-py3-none-any.whl", hash = "sha256:2eb7d0ad651761fbe80e064b0fd415cf6730cdbc53c16a145fd84b66143e609f"}, +] +pytest-timeout = [ + {file = "pytest-timeout-1.4.2.tar.gz", hash = "sha256:20b3113cf6e4e80ce2d403b6fb56e9e1b871b510259206d40ff8d609f48bda76"}, + {file = "pytest_timeout-1.4.2-py2.py3-none-any.whl", hash = "sha256:541d7aa19b9a6b4e475c759fd6073ef43d7cdc9a92d95644c260076eb257a063"}, +] +pytest-xdist = [ + {file = "pytest-xdist-2.1.0.tar.gz", hash = "sha256:82d938f1a24186520e2d9d3a64ef7d9ac7ecdf1a0659e095d18e596b8cbd0672"}, + {file = "pytest_xdist-2.1.0-py3-none-any.whl", hash = "sha256:7c629016b3bb006b88ac68e2b31551e7becf173c76b977768848e2bbed594d90"}, +] +python-jsonrpc-server = [ + {file = "python-jsonrpc-server-0.4.0.tar.gz", hash = "sha256:62c543e541f101ec5b57dc654efc212d2c2e3ea47ff6f54b2e7dcb36ecf20595"}, + {file = "python_jsonrpc_server-0.4.0-py3-none-any.whl", hash = "sha256:e5a908ff182e620aac07db5f57887eeb0afe33993008f57dc1b85b594cea250c"}, +] +python-language-server = [ + {file = "python-language-server-0.35.1.tar.gz", hash = "sha256:6e0c9a3b2ae98e0eb22e98ed6b3c4e190a6bf9e27af53efd2396da60cd92b221"}, + {file = "python_language_server-0.35.1-py2.py3-none-any.whl", hash = "sha256:7051090259e3e81c0cdb140de8e32b8f11219808cda4427e6faf61f9ff9a3bf4"}, +] +pyyaml = [ + {file = "PyYAML-5.3.1-cp27-cp27m-win32.whl", hash = "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f"}, + {file = "PyYAML-5.3.1-cp27-cp27m-win_amd64.whl", hash = "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win32.whl", hash = 
"sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win_amd64.whl", hash = "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf"}, + {file = "PyYAML-5.3.1-cp38-cp38-win32.whl", hash = "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97"}, + {file = "PyYAML-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee"}, + {file = "PyYAML-5.3.1.tar.gz", hash = "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d"}, +] +regex = [ + {file = "regex-2020.9.27-cp27-cp27m-win32.whl", hash = "sha256:d23a18037313714fb3bb5a94434d3151ee4300bae631894b1ac08111abeaa4a3"}, + {file = "regex-2020.9.27-cp27-cp27m-win_amd64.whl", hash = "sha256:84e9407db1b2eb368b7ecc283121b5e592c9aaedbe8c78b1a2f1102eb2e21d19"}, + {file = "regex-2020.9.27-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:5f18875ac23d9aa2f060838e8b79093e8bb2313dbaaa9f54c6d8e52a5df097be"}, + {file = "regex-2020.9.27-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ae91972f8ac958039920ef6e8769277c084971a142ce2b660691793ae44aae6b"}, + {file = "regex-2020.9.27-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:9a02d0ae31d35e1ec12a4ea4d4cca990800f66a917d0fb997b20fbc13f5321fc"}, + {file = "regex-2020.9.27-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:ebbe29186a3d9b0c591e71b7393f1ae08c83cb2d8e517d2a822b8f7ec99dfd8b"}, + {file = 
"regex-2020.9.27-cp36-cp36m-win32.whl", hash = "sha256:4707f3695b34335afdfb09be3802c87fa0bc27030471dbc082f815f23688bc63"}, + {file = "regex-2020.9.27-cp36-cp36m-win_amd64.whl", hash = "sha256:9bc13e0d20b97ffb07821aa3e113f9998e84994fe4d159ffa3d3a9d1b805043b"}, + {file = "regex-2020.9.27-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:f1b3afc574a3db3b25c89161059d857bd4909a1269b0b3cb3c904677c8c4a3f7"}, + {file = "regex-2020.9.27-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5533a959a1748a5c042a6da71fe9267a908e21eded7a4f373efd23a2cbdb0ecc"}, + {file = "regex-2020.9.27-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:1fe0a41437bbd06063aa184c34804efa886bcc128222e9916310c92cd54c3b4c"}, + {file = "regex-2020.9.27-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:c570f6fa14b9c4c8a4924aaad354652366577b4f98213cf76305067144f7b100"}, + {file = "regex-2020.9.27-cp37-cp37m-win32.whl", hash = "sha256:eda4771e0ace7f67f58bc5b560e27fb20f32a148cbc993b0c3835970935c2707"}, + {file = "regex-2020.9.27-cp37-cp37m-win_amd64.whl", hash = "sha256:60b0e9e6dc45683e569ec37c55ac20c582973841927a85f2d8a7d20ee80216ab"}, + {file = "regex-2020.9.27-cp38-cp38-manylinux1_i686.whl", hash = "sha256:088afc8c63e7bd187a3c70a94b9e50ab3f17e1d3f52a32750b5b77dbe99ef5ef"}, + {file = "regex-2020.9.27-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:eaf548d117b6737df379fdd53bdde4f08870e66d7ea653e230477f071f861121"}, + {file = "regex-2020.9.27-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:41bb65f54bba392643557e617316d0d899ed5b4946dccee1cb6696152b29844b"}, + {file = "regex-2020.9.27-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:8d69cef61fa50c8133382e61fd97439de1ae623fe943578e477e76a9d9471637"}, + {file = "regex-2020.9.27-cp38-cp38-win32.whl", hash = "sha256:f2388013e68e750eaa16ccbea62d4130180c26abb1d8e5d584b9baf69672b30f"}, + {file = "regex-2020.9.27-cp38-cp38-win_amd64.whl", hash = "sha256:4318d56bccfe7d43e5addb272406ade7a2274da4b70eb15922a071c58ab0108c"}, + {file = 
"regex-2020.9.27-cp39-cp39-manylinux1_i686.whl", hash = "sha256:84cada8effefe9a9f53f9b0d2ba9b7b6f5edf8d2155f9fdbe34616e06ececf81"}, + {file = "regex-2020.9.27-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:816064fc915796ea1f26966163f6845de5af78923dfcecf6551e095f00983650"}, + {file = "regex-2020.9.27-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:5d892a4f1c999834eaa3c32bc9e8b976c5825116cde553928c4c8e7e48ebda67"}, + {file = "regex-2020.9.27-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:c9443124c67b1515e4fe0bb0aa18df640965e1030f468a2a5dc2589b26d130ad"}, + {file = "regex-2020.9.27-cp39-cp39-win32.whl", hash = "sha256:49f23ebd5ac073765ecbcf046edc10d63dcab2f4ae2bce160982cb30df0c0302"}, + {file = "regex-2020.9.27-cp39-cp39-win_amd64.whl", hash = "sha256:3d20024a70b97b4f9546696cbf2fd30bae5f42229fbddf8661261b1eaff0deb7"}, + {file = "regex-2020.9.27.tar.gz", hash = "sha256:a6f32aea4260dfe0e55dc9733ea162ea38f0ea86aa7d0f77b15beac5bf7b369d"}, +] +rope = [ + {file = "rope-0.18.0.tar.gz", hash = "sha256:786b5c38c530d4846aa68a42604f61b4e69a493390e3ca11b88df0fbfdc3ed04"}, +] +schema = [ + {file = "schema-0.7.3-py2.py3-none-any.whl", hash = "sha256:c331438b60f634cab5664ab720d3083cc444f924d55269530c36b33e3354276f"}, + {file = "schema-0.7.3.tar.gz", hash = "sha256:4cf529318cfd1e844ecbe02f41f7e5aa027463e7403666a52746f31f04f47a5e"}, +] +six = [ + {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, + {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, +] +tenacity = [ + {file = "tenacity-6.2.0-py2.py3-none-any.whl", hash = "sha256:5a5d3dcd46381abe8b4f82b5736b8726fd3160c6c7161f53f8af7f1eb9b82173"}, + {file = "tenacity-6.2.0.tar.gz", hash = "sha256:29ae90e7faf488a8628432154bb34ace1cca58244c6ea399fd33f066ac71339a"}, +] +testfixtures = [ + {file = "testfixtures-6.15.0-py2.py3-none-any.whl", hash = 
"sha256:e17f4f526fc90b0ac9bc7f8ca62b7dec17d9faf3d721f56bda4f0fd94d02f85a"}, + {file = "testfixtures-6.15.0.tar.gz", hash = "sha256:409f77cfbdad822d12a8ce5c4aa8fb4d0bb38073f4a5444fede3702716a2cec2"}, +] +toml = [ + {file = "toml-0.10.1-py2.py3-none-any.whl", hash = "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88"}, + {file = "toml-0.10.1.tar.gz", hash = "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f"}, +] +typed-ast = [ + {file = "typed_ast-1.4.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3"}, + {file = "typed_ast-1.4.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb"}, + {file = "typed_ast-1.4.1-cp35-cp35m-win32.whl", hash = "sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919"}, + {file = "typed_ast-1.4.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01"}, + {file = "typed_ast-1.4.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75"}, + {file = "typed_ast-1.4.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652"}, + {file = "typed_ast-1.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7"}, + {file = "typed_ast-1.4.1-cp36-cp36m-win32.whl", hash = "sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1"}, + {file = "typed_ast-1.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa"}, + {file = "typed_ast-1.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614"}, + {file = "typed_ast-1.4.1-cp37-cp37m-manylinux1_i686.whl", hash = 
"sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41"}, + {file = "typed_ast-1.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b"}, + {file = "typed_ast-1.4.1-cp37-cp37m-win32.whl", hash = "sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe"}, + {file = "typed_ast-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355"}, + {file = "typed_ast-1.4.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6"}, + {file = "typed_ast-1.4.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907"}, + {file = "typed_ast-1.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d"}, + {file = "typed_ast-1.4.1-cp38-cp38-win32.whl", hash = "sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c"}, + {file = "typed_ast-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4"}, + {file = "typed_ast-1.4.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34"}, + {file = "typed_ast-1.4.1.tar.gz", hash = "sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b"}, +] +typing-extensions = [ + {file = "typing_extensions-3.7.4.3-py2-none-any.whl", hash = "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f"}, + {file = "typing_extensions-3.7.4.3-py3-none-any.whl", hash = "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918"}, + {file = "typing_extensions-3.7.4.3.tar.gz", hash = "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c"}, +] +ujson = [ + {file = "ujson-4.0.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = 
"sha256:5fe1536465b1c86e32a47113abd3178001b7c2dcd61f95f336fe2febf4661e74"}, + {file = "ujson-4.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0f412c3f59b1ab0f40018235224ca0cf29232d0201ff5085618565a8a9c810ed"}, + {file = "ujson-4.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:4f12b0b4e235b35d49f15227b0a827e614c52dda903c58a8f5523936c233dfc7"}, + {file = "ujson-4.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:7a1545ac2476db4cc1f0f236603ccbb50991fc1bba480cda1bc06348cc2a2bf0"}, + {file = "ujson-4.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:078808c385036cba73cad96f498310c61e9b5ae5ac9ea01e7c3996ece544b556"}, + {file = "ujson-4.0.1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:4fe8c6112b732cba5a722f7cbe22f18d405f6f44415794a5b46473a477635233"}, + {file = "ujson-4.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:71703a269f074ff65b9d7746662e4b3e76a4af443e532218af1e8ce15d9b1e7b"}, + {file = "ujson-4.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:b87379a3f8046d6d111762d81f3384bf38ab24b1535c841fe867a4a097d84523"}, + {file = "ujson-4.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:a79bca47eafb31c74b38e68623bc9b2bb930cb48fab1af31c8f2cb68cf473421"}, + {file = "ujson-4.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:e7ab24942b2d57920d75b817b8eead293026db003247e26f99506bdad86c61b4"}, + {file = "ujson-4.0.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:51480048373cf97a6b97fcd70c3586ca0a31f27e22ab680fb14c1f22bedbf743"}, + {file = "ujson-4.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c604024bd853b5df6be7d933e934da8dd139e6159564db7c55b92a9937678093"}, + {file = "ujson-4.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:568bb3e7f035006147af4ce3a9ced7d126c92e1a8607c7b2266007b1c1162c53"}, + {file = "ujson-4.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:bd4c77aee3ffb920e2dbc21a9e0c7945a400557ce671cfd57dbd569f5ebc619d"}, + {file = "ujson-4.0.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:c354c1617b0a4378b6279d0cd511b769500cf3fa7c42e8e004cbbbb6b4c2a875"}, + {file = "ujson-4.0.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:a5200a68f1dcf3ce275e1cefbcfa3914b70c2b5e2f71c2e31556aa1f7244c845"}, + {file = "ujson-4.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:a618af22407baeadb3f046f81e7a5ee5e9f8b0b716d2b564f92276a54d26a823"}, + {file = "ujson-4.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:0a2e1b211714eb1ec0772a013ec9967f8f95f21c84e8f46382e9f8a32ae781fe"}, + {file = "ujson-4.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:2b2d9264ac76aeb11f590f7a1ccff0689ba1313adacbb6d38d3b15f21a392897"}, + {file = "ujson-4.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f8a60928737a9a47e692fcd661ef2b5d75ba22c7c930025bd95e338f2a6e15bc"}, + {file = "ujson-4.0.1.tar.gz", hash = "sha256:26cf6241b36ff5ce4539ae687b6b02673109c5e3efc96148806a7873eaa229d3"}, +] diff --git a/pytest/pyproject.toml b/pytest/pyproject.toml new file mode 100644 index 0000000000..47db86c319 --- /dev/null +++ b/pytest/pyproject.toml @@ -0,0 +1,51 @@ +[tool.poetry] +name = "pytest-lisa" +version = "0.1.0" +description = "LISA plugin for pytest" +authors = ["Andrew Schwartzmeyer "] +license = "MIT License" + +[tool.poetry.dependencies] +python = "^3.8" +pytest = "^6.1.1" +filelock = "^3.0.12" +fabric = "^2.5.0" +pytest-timeout = "^1.4.2" +pytest-html = "^2.1.1" +tenacity = "^6.2.0" +pytest-rerunfailures = "^9.1.1" +pytest-xdist = "^2.1.0" +PyYAML = "^5.3.1" +schema = "^0.7.3" + +[tool.poetry.dev-dependencies] +black = "^20.8b1" +flake8 = "^3.8.4" +flake8-black = "^0.2.1" +flake8-bugbear = "^20.1.4" +flake8-isort = "^4.0.0" +isort = "^5.6.1" +mypy = "^0.782" +python-language-server = "^0.35.1" +pyls-black = "^0.4.6" +pyls-isort = "^0.2.0" +pyls-mypy = "^0.1.8" +rope = "^0.18.0" +pytest-flake8 = "^1.0.6" +pytest-mypy = "^0.7.0" + +[tool.black] +line-length = 88 +target-version = ['py38'] + +[tool.isort] +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 
+use_parentheses = true +ensure_newline_before_comments = true +line_length = 88 + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/pytest/pytest.ini b/pytest/pytest.ini new file mode 100644 index 0000000000..c80e7c884d --- /dev/null +++ b/pytest/pytest.ini @@ -0,0 +1,20 @@ +[pytest] +addopts = + --strict-markers + --self-contained-html + --capture=tee-sys + --tb=short + -rA +markers = + lisa +log_cli = true +log_cli_level = WARNING +log_cli_format = %(asctime)s %(levelname)s %(message)s +log_cli_date_format = %Y-%m-%d %H:%M:%S +render_collapsed = true +junit_logging = all +timeout = 1200 +filterwarnings = + error + ignore:unclosed:ResourceWarning + ignore:the imp module is deprecated in favour of importlib:DeprecationWarning diff --git a/pytest/selftests/__init__.py b/pytest/selftests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pytest/selftests/conftest.py b/pytest/selftests/conftest.py new file mode 100644 index 0000000000..cc7fd59999 --- /dev/null +++ b/pytest/selftests/conftest.py @@ -0,0 +1,17 @@ +from schema import Schema # type: ignore + +from target import Target + + +class Custom(Target): + schema: Schema = Schema(None) + # @property + # @classmethod + # def schema(cls) -> Schema: + # return Schema() + + def deploy(self) -> str: + return "localhost" + + def delete(self) -> None: + pass diff --git a/pytest/selftests/setup_plan/__init__.py b/pytest/selftests/setup_plan/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pytest/selftests/setup_plan/test_plan_A.py b/pytest/selftests/setup_plan/test_plan_A.py new file mode 100644 index 0000000000..5a4e049dd6 --- /dev/null +++ b/pytest/selftests/setup_plan/test_plan_A.py @@ -0,0 +1,23 @@ +import functools + +import lisa +from target import Target + +LISA = functools.partial( + lisa.LISA, platform="Custom", category="Functional", area="self-test", priority=1 +) + + +@LISA(features=["xdp"]) +def 
test_xdp_a(target: Target) -> None: + pass + + +@LISA(features=["gpu"]) +def test_gpu_a(target: Target) -> None: + pass + + +@LISA(features=["rdma"]) +def test_rdma_a(target: Target) -> None: + pass diff --git a/pytest/selftests/setup_plan/test_plan_B.py b/pytest/selftests/setup_plan/test_plan_B.py new file mode 100644 index 0000000000..0d89896300 --- /dev/null +++ b/pytest/selftests/setup_plan/test_plan_B.py @@ -0,0 +1,23 @@ +import functools + +import lisa +from target import Target + +LISA = functools.partial( + lisa.LISA, platform="Custom", category="Functional", area="self-test", priority=1 +) + + +@LISA(features=["xdp"]) +def test_xdp_b(target: Target) -> None: + pass + + +@LISA(features=["gpu"]) +def test_gpu_b(target: Target) -> None: + pass + + +@LISA(features=["rdma"]) +def test_rdma_b(target: Target) -> None: + pass diff --git a/pytest/selftests/setup_plan/test_plan_C.py b/pytest/selftests/setup_plan/test_plan_C.py new file mode 100644 index 0000000000..efc579fe10 --- /dev/null +++ b/pytest/selftests/setup_plan/test_plan_C.py @@ -0,0 +1,23 @@ +import functools + +import lisa +from target import Target + +LISA = functools.partial( + lisa.LISA, platform="Custom", category="Functional", area="self-test", priority=1 +) + + +@LISA(features=["xdp"]) +def test_xdp_c(target: Target) -> None: + pass + + +@LISA(features=["gpu"]) +def test_gpu_c(target: Target) -> None: + pass + + +@LISA(features=["rdma"]) +def test_rdma_c(target: Target) -> None: + pass diff --git a/pytest/selftests/test_basic.py b/pytest/selftests/test_basic.py new file mode 100644 index 0000000000..5b3012d8a4 --- /dev/null +++ b/pytest/selftests/test_basic.py @@ -0,0 +1,9 @@ +"""These tests are meant to run in a CI environment.""" +from lisa import LISA +from target import Target + + +@LISA(platform="Local", category="Functional", area="self-test", priority=1) +def test_basic(target: Target) -> None: + """Basic test which creates a Node connection to 'localhost'.""" + target.local("echo Hello 
World") diff --git a/pytest/target.py b/pytest/target.py new file mode 100644 index 0000000000..2608039cd5 --- /dev/null +++ b/pytest/target.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +import platform +import typing +from abc import ABC, abstractmethod +from io import BytesIO +from uuid import uuid4 + +import fabric # type: ignore +import invoke # type: ignore +from invoke.runners import Result # type: ignore +from schema import Schema # type: ignore +from tenacity import retry, stop_after_attempt, wait_exponential # type: ignore + +import lisa + +if typing.TYPE_CHECKING: + from typing import Any, Mapping, Set + + +class Target(ABC): + """Extends 'fabric.Connection' with our own utilities.""" + + # Typed instance attributes, not class attributes. + parameters: Mapping[str, str] + features: Set[str] + name: str + host: str + conn: fabric.Connection + + def __init__( + self, + parameters: Mapping[str, str], + features: Set[str], + name: str = f"pytest-{uuid4()}", + ): + """If not given a name, generates one uniquely. + + Name is a unique identifier for the group of associated + resources. Features is a list of requirements such as sriov, + rdma, gpu, xdp. + + """ + # TODO: Do we need to re-validate the parameters here? + self.parameters = parameters + self.features = features + self.name = name + + # TODO: Review this thoroughly as currently it depends on + # parameters which is side-effecty. + self.host = self.deploy() + + config = lisa.config.copy() + config["run"]["env"] = { # type: ignore + # Set PATH since it’s not a login shell. + "PATH": "/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin" + } + self.connection = fabric.Connection( + self.host, config=fabric.Config(overrides=config), inline_ssh_env=True + ) + + # TODO: Use an abstract class property to ensure this is defined. 
+ schema: Schema = Schema(None) + + # @property + # @classmethod + # @abstractmethod + # def schema(cls) -> Schema: + # """Must return the parameters schema for setup.""" + # ... + + @abstractmethod + def deploy(self) -> str: + """Must deploy the target resources and return hostname.""" + ... + + @abstractmethod + def delete(self) -> None: + """Must delete the target resources.""" + ... + + # A class attribute because it’s defined. + local_context = invoke.Context(config=invoke.Config(overrides=lisa.config)) + + @classmethod + def local(cls, *args: Any, **kwargs: Any) -> Result: + """This patches Fabric's 'local()' function to ignore SSH environment.""" + return Target.local_context.run(*args, **kwargs) + + # TODO: Refactor this. We don’t want to inherit from `Connection` + # because that’s overly complicated. Honestly we probably just + # want users to call `target.conn.run()` etc. + def run(self, *args: Any, **kwargs: Any) -> Result: + return self.connection.run(*args, **kwargs) + + def sudo(self, *args: Any, **kwargs: Any) -> Result: + return self.connection.sudo(*args, **kwargs) + + def get(self, *args: Any, **kwargs: Any) -> Result: + return self.connection.get(*args, **kwargs) + + def put(self, *args: Any, **kwargs: Any) -> Result: + return self.connection.put(*args, **kwargs) + + @retry(reraise=True, wait=wait_exponential(), stop=stop_after_attempt(3)) + def ping(self, **kwargs: Any) -> Result: + """Ping the node from the local system in a cross-platform manner.""" + flag = "-c 1" if platform.system() == "Linux" else "-n 1" + return self.local(f"ping {flag} {self.host}", **kwargs) + + def cat(self, path: str) -> str: + """Gets the value of a remote file without a temporary file.""" + with BytesIO() as buf: + self.get(path, buf) + return buf.getvalue().decode("utf-8").strip() + + +class Local(Target): + schema: Schema = Schema(None) + + def deploy(self) -> str: + return "localhost" + + def delete(self) -> None: + pass diff --git 
a/pytest/testsuites/test_lis.py b/pytest/testsuites/test_lis.py new file mode 100644 index 0000000000..a35e6fe94e --- /dev/null +++ b/pytest/testsuites/test_lis.py @@ -0,0 +1,21 @@ +"""Runs 'LIS-Tests.xml' using Pytest.""" +from __future__ import annotations + +import typing + +if typing.TYPE_CHECKING: + from azure import Azure + +from lisa import LINUX_SCRIPTS, LISA + + +@LISA(platform="Azure", category="Functional", priority=0, area="LIS_DEPLOY") +def test_lis_driver_version(target: Azure) -> None: + """Checks that the installed drivers have the correct version.""" + # TODO: Include “utils.sh” automatically? Or something... + for f in ["utils.sh", "LIS-VERSION-CHECK.sh"]: + target.put(LINUX_SCRIPTS / f) + target.run(f"chmod +x {f}") + target.sudo("yum install -y bc") + target.run("./LIS-VERSION-CHECK.sh") + assert target.cat("state.txt") == "TestCompleted" diff --git a/pytest/testsuites/test_smoke.py b/pytest/testsuites/test_smoke.py new file mode 100644 index 0000000000..cfc911707f --- /dev/null +++ b/pytest/testsuites/test_smoke.py @@ -0,0 +1,91 @@ +"""Runs a 'smoke' test for an Azure Linux VM deployment.""" +import logging +import socket +import time + +from invoke.runners import CommandTimedOut, Result, UnexpectedExit # type: ignore +from paramiko import SSHException # type: ignore + +from azure import Azure +from lisa import LISA + + +@LISA( + platform="Azure", + category="Functional", + area="deploy", + priority=0, + sku="Standard_DS2_v2", +) +def test_smoke(target: Azure) -> None: + """Check that a VM can be deployed and is responsive. + + 1. Deploy the VM (via 'node' fixture) and log it. + 2. Ping the VM. + 3. Connect to the VM via SSH. + 4. Attempt to reboot via SSH, otherwise use the platform. + 5. Fetch the serial console logs. + + For commands where we expect a possible non-zero exit code, we + pass 'warn=True' to prevent it from throwing 'UnexpectedExit' and + we instead check its result at the end. + + SSH failures DO NOT fail this test. 
+ + """ + logging.info("Pinging before reboot...") + ping1 = Result() + try: + ping1 = target.ping() + except UnexpectedExit: + logging.warning(f"Pinging {target.host} before reboot failed") + + ssh_errors = (TimeoutError, CommandTimedOut, SSHException, socket.error) + + try: + logging.info("SSHing before reboot...") + target.connection.open() + except ssh_errors as e: + logging.warning(f"SSH before reboot failed: '{e}'") + + reboot_exit = 0 + try: + logging.info("Rebooting...") + # If this succeeds, we should expect the exit code to be -1 + reboot_exit = target.sudo("reboot", timeout=5).exited + except ssh_errors as e: + logging.warning(f"SSH failed, using platform to reboot: '{e}'") + target.platform_restart() + except UnexpectedExit: + # TODO: How do we differentiate reboot working and the SSH + # connection disconnecting for other reasons? + if reboot_exit != -1: + logging.warning("While SSH worked, 'reboot' command failed") + + logging.info("Sleeping for 10 seconds after reboot...") + time.sleep(10) + + logging.info("Pinging after reboot...") + ping2 = Result() + try: + ping2 = target.ping() + except UnexpectedExit: + logging.warning(f"Pinging {target.host} after reboot failed") + + try: + logging.info("SSHing after reboot...") + target.connection.open() + except ssh_errors as e: + logging.warning(f"SSH after reboot failed: '{e}'") + + logging.info("Retrieving boot diagnostics...") + try: + target.get_boot_diagnostics() + except UnexpectedExit: + logging.warning("Retrieving boot diagnostics failed.") + else: + logging.info("See full report for boot diagnostics.") + + # NOTE: The test criteria is to fail only if ping fails. 
+ assert ping1.ok + assert ping2.ok diff --git a/pytest/testsuites/test_xdp.py b/pytest/testsuites/test_xdp.py new file mode 100644 index 0000000000..b526db1f0f --- /dev/null +++ b/pytest/testsuites/test_xdp.py @@ -0,0 +1,36 @@ +"""Runs 'FunctionalTests-XDP.xml' using Pytest.""" + + +import pytest +from azure import Azure +from lisa import LINUX_SCRIPTS, LISA + + +@LISA( + platform="Azure", + category="Functional", + area="XDP", + tags=["xdp", "network", "hv_netvsc", "sriov"], + priority=0, +) +# TODO: This example is pending an update. +# setup="OneVM2NIC", +# networking="SRIOV", +# vm_image="Canonical:0001-com-ubuntu-server-focal:20_04-lts:latest", +# vm_size="Standard_DS4_v2", +@pytest.mark.skip(reason="Not Finished") +def test_verify_xdp_compliance(target: Azure) -> None: + for f in [ + "utils.sh", + "XDPDumpSetup.sh", + "XDPUtils.sh", + "enable_passwordless_root.sh", + "enable_root.sh", + ]: + target.put(LINUX_SCRIPTS / f) + target.run(f"chmod +x {f}") + target.run("./enable_root.sh") + target.run("./enable_passwordless_root.sh") + synth_interface = target.run("source XDPUtils.sh ; get_extra_synth_nic").stdout + target.run(f"./XDPDumpSetup.sh {target.internal_address} {synth_interface}") + assert target.cat("state.txt") == "TestCompleted"