From e44843121be48de068db9bdd6d2b34a5a507abed Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Tue, 27 Nov 2018 16:10:28 -0800 Subject: [PATCH 01/49] [WIP] subprocess support Adds `trio.subprocess.Process` and `trio.subprocess.run`, async wrappers for the stdlib subprocess module. No communicate() yet due to discussion on #783. Note that this did not wind up using the code in linux_waitpid, instead running waitid() [which supports not consuming the status of the process] in a thread, in order to interoperate better with subprocess.Popen code. Still TODO: [ ] Windows support (need pipe implementations and testing) [ ] documentation writeup, newsfragment [ ] consider whether this module layout is the one we want (other option would be to move _subprocess/unix_pipes.py to just _unix_pipes.py, _subprocess/highlevel.py to _subprocess.py, and get rid of _subprocess/linux_waitpid.py) [ ] expose other subprocess functions such as call, check_call, output (simple wrappers around run()) [ ] expose a basic communicate()? --- .../subprocess-notes.txt | 0 trio/__init__.py | 1 + trio/_core/_windows_cffi.py | 6 + trio/_subprocess/__init__.py | 1 + trio/_subprocess/highlevel.py | 459 ++++++++++++++++++ trio/subprocess.py | 6 + trio/tests/subprocess/test_process.py | 247 ++++++++++ 7 files changed, 720 insertions(+) rename trio/_subprocess.py => notes-to-self/subprocess-notes.txt (100%) create mode 100644 trio/_subprocess/highlevel.py create mode 100644 trio/subprocess.py create mode 100644 trio/tests/subprocess/test_process.py diff --git a/trio/_subprocess.py b/notes-to-self/subprocess-notes.txt similarity index 100% rename from trio/_subprocess.py rename to notes-to-self/subprocess-notes.txt diff --git a/trio/__init__.py b/trio/__init__.py index e9b6f54448..7539ee9495 100644 --- a/trio/__init__.py +++ b/trio/__init__.py @@ -67,6 +67,7 @@ from . import socket from . import abc from . import ssl +from . import subprocess # Not imported by default: testing if False: from . 
import testing diff --git a/trio/_core/_windows_cffi.py b/trio/_core/_windows_cffi.py index 16dd9a232b..db294618bb 100644 --- a/trio/_core/_windows_cffi.py +++ b/trio/_core/_windows_cffi.py @@ -110,6 +110,12 @@ DWORD dwMilliseconds ); +HANDLE OpenProcess( + DWORD dwDesiredAccess, + BOOL bInheritHandle, + DWORD dwProcessId +); + """ # cribbed from pywincffi diff --git a/trio/_subprocess/__init__.py b/trio/_subprocess/__init__.py index e69de29bb2..9bb1c60671 100644 --- a/trio/_subprocess/__init__.py +++ b/trio/_subprocess/__init__.py @@ -0,0 +1 @@ +from .highlevel import Process, run diff --git a/trio/_subprocess/highlevel.py b/trio/_subprocess/highlevel.py new file mode 100644 index 0000000000..7189181896 --- /dev/null +++ b/trio/_subprocess/highlevel.py @@ -0,0 +1,459 @@ +import math +import os +import subprocess +import select + +from .. import _core, BrokenResourceError +from .._sync import CapacityLimiter, Lock +from .._threads import run_sync_in_worker_thread + +__all__ = ["Process", "run"] + +# OS-specific hooks: + + +def create_pipe_to_child_stdin(): + """Create a new pipe suitable for sending data from this + process to the standard input of a child we're about to spawn. + + Returns: + A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is + something suitable for constructing a PipeSendStream around + and ``subprocess_end`` is something suitable for passing as + the ``stdin`` argument of :class:`subprocess.Popen`. + """ + raise NotImplementedError # pragma: no cover + + +def create_pipe_from_child_output(): + """Create a new pipe suitable for receiving data into this + process from the standard output or error stream of a child + we're about to spawn. + + Returns: + A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is + something suitable for constructing a PipeReceiveStream around + and ``subprocess_end`` is something suitable for passing as + the ``stdout`` or ``stderr`` argument of :class:`subprocess.Popen`. 
+ """ + raise NotImplementedError # pragma: no cover + + +async def wait_reapable(pid): + """Block until a subprocess.Popen.wait() for the given PID would + complete immediately, without consuming the process's exit + code. Only one call for the same process may be active + simultaneously; this is not verified. + """ + raise NotImplementedError # pragma: no cover + + +if os.name == "posix": + from .unix_pipes import PipeSendStream, PipeReceiveStream + + def create_pipe_to_child_stdin(): + rfd, wfd = os.pipe() + return wfd, rfd + + def create_pipe_from_child_output(): + rfd, wfd = os.pipe() + return rfd, wfd + + if hasattr(_core, "wait_kevent"): + + async def wait_reapable(pid): + kqueue = _core.current_kqueue() + event = select.kevent( + pid, + filter=select.KQ_FILTER_PROC, + flags=select.KQ_EV_ADD | select.KQ_EV_ONESHOT, + fflags=select.KQ_NOTE_EXIT + ) + + try: + kqueue.control([event], 0) + except ProcessLookupError: + # This can happen if the process has already exited. + # Frustratingly, it does _not_ synchronize with calls + # to wait() and friends -- it's possible for kevent to + # return ESRCH but waitpid(..., WNOHANG) still returns + # nothing. And OS X doesn't support waitid() so we + # can't fall back to the Linux-style approach. So + # we'll just suppress the error, and not wait. + # Process.wait() calls us in a loop so the worst case + # is we busy-wait for a few iterations. 
+ return + + def abort(_): + event.flags = select.KQ_EV_DELETE + kqueue.control([event], 0) + return _core.Abort.SUCCEEDED + + await _core.wait_kevent(pid, select.KQ_FILTER_PROC, abort) + + elif hasattr(os, "waitid"): + wait_limiter = CapacityLimiter(math.inf) + + async def wait_reapable(pid): + await run_sync_in_worker_thread( + os.waitid, + os.P_PID, + pid, + os.WEXITED | os.WNOWAIT, + cancellable=True, + limiter=wait_limiter + ) + + else: # pragma: no cover + raise NotImplementedError( + "subprocess reaping on UNIX requires select.kqueue or os.waitid" + ) + +elif os.name == "nt": + import msvcrt + + # TODO: implement the pipes + class PipeSendStream: + def __init__(self, handle): + raise NotImplementedError + + PipeReceiveStream = PipeSendStream + + # This isn't exported or documented, but it's also not + # underscore-prefixed, and seems kosher to use. The asyncio docs + # for 3.5 included an example that imported socketpair from + # windows_utils (before socket.socketpair existed on Windows), and + # when asyncio.windows_utils.socketpair was removed in 3.7, the + # removal was mentioned in the release notes. 
+ from asyncio.windows_utils import pipe as windows_pipe + + def create_pipe_to_child_stdin(): + # for stdin, we want the write end (our end) to use overlapped I/O + rh, wh = windows_pipe(overlapped=(False, True)) + return wh, msvcrt.open_osfhandle(rh, os.O_RDONLY) + + def create_pipe_from_child_output(): + # for stdout/err, it's the read end that's overlapped + rh, wh = windows_pipe(overlapped=(True, False)) + return rh, msvcrt.open_osfhandle(wh, 0) + + async def wait_reapable(pid): + from .._wait_for_object import WaitForSingleObject + from .._core._windows_cffi import kernel32, raise_winerror + SYNCHRONIZE = 0x00100000 # dwDesiredAccess value + handle = kernel32.OpenProcess(SYNCHRONIZE, False, pid) + if not handle: + await _core.checkpoint() + raise_winerror() + try: + await WaitForSingleObject(handle) + finally: + kernel32.CloseHandle(handle) + +else: # pragma: no cover + raise NotImplementedError("unsupported os.name {!r}".format(os.name)) + + +def wrap_process_stream(child_fd, given_value): + """Perform any wrapping necessary to be able to use async operations + to interact with a subprocess stream. + + Args: + child_fd (0, 1, or 2): The file descriptor in the child that this + stream will be used to communicate with. + given_value (int or None): Anything accepted by the ``stdin``, + ``stdout``, or ``stderr`` kwargs to :class:`subprocess.Popen`. + For example, this could be ``None``, a file descriptor, + :data:`subprocess.PIPE`, :data:`subprocess.STDOUT`, or + :data:`subprocess.DEVNULL`. + + Returns: + A pair ``(trio_stream, subprocess_value)`` where ``trio_stream`` + is a :class:`trio.abc.SendStream` (for stdin) or + :class:`trio.abc.ReceiveStream` (for stdout/stderr) that can be + used to communicate with the child process, and + ``subprocess_value`` is the value that should be passed as the + ``stdin``, ``stdout``, or ``stderr`` argument of + :class:`subprocess.Popen` in order to set up the child end + appropriately. 
If ``given_value`` was not :data:`subprocess.PIPE`, + ``trio_stream`` will be ``None`` and ``subprocess_value`` will + equal ``given_value``. + """ + + if given_value == subprocess.PIPE: + maker, stream_cls = { + 0: (create_pipe_to_child_stdin, PipeSendStream), + 1: (create_pipe_from_child_output, PipeReceiveStream), + 2: (create_pipe_from_child_output, PipeReceiveStream), + }[child_fd] + + trio_end, subprocess_end = maker() + return stream_cls(trio_end), subprocess_end + + return None, given_value + + +class Process: + """Like :class:`subprocess.Popen`, but async. + + :class:`Process` has a public API identical to that of + :class:`subprocess.Popen`, except for the following differences: + + * All constructor arguments except the command to execute + must be passed as keyword arguments. + + * Text I/O is not supported: you may not use the constructor + arguments ``universal_newlines``, ``encoding``, or ``errors``. + + * :attr:`stdin` is a :class:`~trio.abc.SendStream` and + :attr:`stdout` and :attr:`stderr` are :class:`~trio.abc.ReceiveStream`s, + rather than file objects. The constructor argument ``bufsize`` is + not supported since there would be no file object to pass it to. + + * :meth:`wait` is an async function that does not take a ``timeout`` + argument; combine it with :func:`~trio.fail_after` if you want a timeout. + + * :meth:`~subprocess.Popen.communicate` does not exist due to the confusing + cancellation behavior exhibited by the stdlib version. Use :func:`run` + instead, or interact with :attr:`stdin` / :attr:`stdout` / :attr:`stderr` + directly. + + * Behavior when used as a context manager is different, as described + below. + + :class:`Process` can be used as an async context manager. It does + not block on entry; on exit it closes any pipe to the subprocess's + standard input, then blocks until the process exits. If the + blocking exit is cancelled, it kills the process and still waits + for termination before exiting the context. 
This is useful for + scoping the lifetime of a simple subprocess that doesn't spawn any + children of its own. (For subprocesses that do in turn spawn their + own subprocesses, there is not currently any way to clean up the + whole tree; moreover, using the :class:`Process` context manager + in such cases is likely to be counterproductive as killing the + top-level subprocess leaves it no chance to do any cleanup of its + children that might be desired.) + + """ + + universal_newlines = False + encoding = None + errors = None + + def __init__(self, args, *, stdin=None, stdout=None, stderr=None, **kwds): + if any( + kwds.get(key) + for key in ('universal_newlines', 'encoding', 'errors') + ): + raise NotImplementedError( + "trio.Process does not support text I/O yet" + ) + if kwds.get('bufsize', -1) != -1: + raise ValueError("bufsize does not make sense for trio subprocess") + + self.stdin, stdin = wrap_process_stream(0, stdin) + self.stdout, stdout = wrap_process_stream(1, stdout) + if stderr == subprocess.STDOUT: + # If we created a pipe for stdout, pass the same pipe for + # stderr. If stdout was some non-pipe thing (DEVNULL or a + # given FD), pass the same thing. If stdout was passed as + # None, keep stderr as STDOUT to allow subprocess to dup + # our stdout. Regardless of which of these is applicable, + # don't create a new trio stream for stderr -- if stdout + # is piped, stderr will be intermixed on the stdout stream. + if stdout is not None: + stderr = stdout + self.stderr = None + else: + self.stderr, stderr = wrap_process_stream(2, stderr) + + try: + self._proc = subprocess.Popen( + args, stdin=stdin, stdout=stdout, stderr=stderr, **kwds + ) + finally: + # Close the parent's handle for each child side of a pipe; + # we want the child to have the only copy, so that when + # it exits we can read EOF on our side. 
+ if self.stdin is not None: + os.close(stdin) + if self.stdout is not None: + os.close(stdout) + if self.stderr is not None: + os.close(stderr) + + self.args = self._proc.args + self.pid = self._proc.pid + # Lock to ensure at most one task is blocked in + # wait_reapable() for this pid at a time + self._wait_lock = Lock() + + @property + def returncode(self): + """The exit status of the process (an integer), or ``None`` if it has + not exited. + + Negative values indicate termination due to a signal (on UNIX only). + Like :attr:`subprocess.Popen.returncode`, this is not updated outside + of a call to :meth:`wait` or :meth:`poll`. + """ + return self._proc.returncode + + async def __aenter__(self): + return self + + async def __aexit__(self, *exc): + if self.stdin is not None: + with _core.open_cancel_scope(shield=True): + await self.stdin.aclose() + try: + await self.wait() + finally: + if self.returncode is None: + self.kill() + with _core.open_cancel_scope(shield=True): + await self.wait() + + async def wait(self): + """Block until the process exits. + + Returns: + The exit status of the process (a nonnegative integer, with + zero usually indicating success). On UNIX systems, a process + that exits due to a signal will have its exit status reported + as the negative of that signal number, e.g., -11 for ``SIGSEGV``. + """ + while True: + async with self._wait_lock: + if self.poll() is not None: + await _core.checkpoint() + return self.returncode + try: + await wait_reapable(self.pid) + except OSError: + # If the child is dead, suppress the exception we + # encountered waiting for it to die; maybe SIGCHLD + # is ignored so all waits fail with ECHILD, for example. 
+ if self.poll() is not None: + return self.returncode + + # wait(2) and friends are documented as raising the errors: + # - ECHILD (always converted into a returncode of 0 by + # stdlib subprocess module) + # - EINTR (always converted into a retry by Python) + # - EINVAL (can't get unless we pass invalid flags) + # So it shouldn't actually be possible to get a failure + # here that's not swallowed by Popen.poll(). But if we + # somehow get one, we'll propagate it, I guess... + raise # pragma: no cover + + def poll(self): + """Forwards to :meth:`subprocess.Popen.poll`.""" + return self._proc.poll() + + def send_signal(self, sig): + """Forwards to :meth:`subprocess.Popen.send_signal`.""" + self._proc.send_signal(sig) + + def terminate(self): + """Forwards to :meth:`subprocess.Popen.terminate`.""" + self._proc.terminate() + + def kill(self): + """Forwards to :meth:`subprocess.Popen.kill`.""" + self._proc.kill() + + +async def run( + *popenargs, input=None, timeout=None, deadline=None, check=False, **kwargs +): + """Like :func:`subprocess.run`, but async. + + Unlike most Trio adaptations of standard library functions, this + one keeps the ``timeout`` parameter, so that it can provide you + with the process's partial output if it is killed due to a + timeout. It also adds ``deadline`` as an option if you prefer to + express your timeout absolutely. + + Returns: + A :class:`subprocess.CompletedProcess` instance describing the + return code and outputs. 
+ + Raises: + subprocess.TimeoutExpired: if the process is killed due to timeout + expiry + subprocess.CalledProcessError: if check=True is passed and the process + exits with a nonzero exit status + OSError: if an error is encountered starting or communicating with + the process + + """ + if input is not None: + if 'stdin' in kwargs: + raise ValueError('stdin and input arguments may not both be used') + kwargs['stdin'] = subprocess.PIPE + + if timeout is not None and deadline is not None: + raise ValueError('timeout and deadline arguments may not both be used') + + stdout_chunks = [] + stderr_chunks = [] + + async with Process(*popenargs, **kwargs) as proc: + + async def feed_input(): + if input: + try: + await proc.stdin.send_all(input) + except BrokenResourceError: + pass + except OSError as e: # pragma: no cover + # According to the stdlib subprocess module, EINVAL can + # occur on Windows if the child closes its end of the + # pipe, and must be ignored. + if e.errno != errno.EINVAL: + raise + await proc.stdin.aclose() + + async def read_output(stream, chunks): + while True: + chunk = await stream.receive_some(32768) + if not chunk: + break + chunks.append(chunk) + + async with _core.open_nursery() as nursery: + if proc.stdin is not None: + nursery.start_soon(feed_input) + if proc.stdout is not None: + nursery.start_soon(read_output, proc.stdout, stdout_chunks) + if proc.stderr is not None: + nursery.start_soon(read_output, proc.stderr, stderr_chunks) + + with _core.open_cancel_scope() as wait_scope: + if timeout is not None: + wait_scope.deadline = _core.current_time() + timeout + if deadline is not None: + wait_scope.deadline = deadline + timeout = deadline - _core.current_time() + await proc.wait() + + if wait_scope.cancelled_caught: + proc.kill() + nursery.cancel_scope.cancel() + + stdout = b"".join(stdout_chunks) if proc.stdout is not None else None + stderr = b"".join(stderr_chunks) if proc.stderr is not None else None + + if wait_scope.cancelled_caught: 
+ raise subprocess.TimeoutExpired( + proc.args, timeout, output=stdout, stderr=stderr + ) + if check and proc.returncode: + raise subprocess.CalledProcessError( + proc.returncode, proc.args, output=stdout, stderr=stderr + ) + + return subprocess.CompletedProcess( + proc.args, proc.returncode, stdout, stderr + ) diff --git a/trio/subprocess.py b/trio/subprocess.py new file mode 100644 index 0000000000..b37a9f43c5 --- /dev/null +++ b/trio/subprocess.py @@ -0,0 +1,6 @@ +from ._subprocess import Process, run +from subprocess import ( + PIPE, STDOUT, DEVNULL, CalledProcessError, SubprocessError, TimeoutExpired, + CompletedProcess +) +Popen = Process diff --git a/trio/tests/subprocess/test_process.py b/trio/tests/subprocess/test_process.py new file mode 100644 index 0000000000..6d3f8db087 --- /dev/null +++ b/trio/tests/subprocess/test_process.py @@ -0,0 +1,247 @@ +import math +import os +import random +import signal +import pytest + +from ... import _core, move_on_after, sleep, BrokenResourceError, subprocess +from ...testing import wait_all_tasks_blocked +from ..._core.tests.tutil import slow + +posix = os.name == "posix" + +pytestmark = pytest.mark.skipif(not posix, reason="no windows support yet") + + +async def test_basic(): + async with subprocess.Process(["true"]) as proc: + assert proc.returncode is None + assert proc.returncode == 0 + + +async def test_kill_when_context_cancelled(): + with _core.open_cancel_scope() as scope: + async with subprocess.Process(["sleep", "10"]) as proc: + while True: + await sleep(0.01) + assert proc.poll() is None + + # Don't set the deadline until we've done at least + # one poll, to avoid coverage flapping if starting + # the process takes a long time + if scope.deadline == +math.inf: + scope.deadline = _core.current_time() + 0.2 + + assert scope.cancelled_caught + assert proc.returncode == -signal.SIGKILL + + +COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR = [ + "python", "-c", "import sys\n" + "data = sys.stdin.read()\n" + 
"sys.stdout.write(data)\n" + "sys.stderr.write(data[::-1])" +] + + +async def test_pipes(): + async with subprocess.Process( + COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) as proc: + msg = b"the quick brown fox jumps over the lazy dog" + + async def feed_input(): + await proc.stdin.send_all(msg) + await proc.stdin.aclose() + + async def check_output(stream, expected): + seen = bytearray() + while True: + chunk = await stream.receive_some(4096) + if not chunk: + break + seen.extend(chunk) + assert seen == expected + + async with _core.open_nursery() as nursery: + # fail quickly if something is broken + nursery.cancel_scope.deadline = _core.current_time() + 3.0 + nursery.start_soon(feed_input) + nursery.start_soon(check_output, proc.stdout, msg) + nursery.start_soon(check_output, proc.stderr, msg[::-1]) + + assert not nursery.cancel_scope.cancelled_caught + assert 0 == await proc.wait() + + +async def test_run(): + data = bytes(random.randint(0, 255) for _ in range(2**18)) + + result = await subprocess.run(["cat"], input=data, stdout=subprocess.PIPE) + assert result.args == ["cat"] + assert result.returncode == 0 + assert result.stdout == data + assert result.stderr is None + + result = await subprocess.run( + ["cat"], stdin=subprocess.PIPE, stdout=subprocess.PIPE + ) + assert result.args == ["cat"] + assert result.returncode == 0 + assert result.stdout == b"" + assert result.stderr is None + + result = await subprocess.run( + COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, + input=data, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + assert result.args == COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR + assert result.returncode == 0 + assert result.stdout == data + assert result.stderr == data[::-1] + + with pytest.raises(ValueError): + # can't use both input and stdin + await subprocess.run( + ["cat"], input=b"la di dah", stdin=subprocess.PIPE + ) + + with 
pytest.raises(ValueError): + # can't use both timeout and deadline + await subprocess.run( + ["true"], timeout=1, deadline=_core.current_time() + ) + + +async def test_run_timeout(): + data = b"1" * 65536 + b"2" * 65536 + b"3" * 65536 + child_script = """ +import sys, time +sys.stdout.write(sys.stdin.read(32768)) +time.sleep(10) +sys.stdout.write(sys.stdin.read()) +""" + + with pytest.raises(subprocess.TimeoutExpired) as excinfo: + await subprocess.run( + ["python", "-c", child_script], + input=data, + stdout=subprocess.PIPE, + timeout=0.5, + ) + assert excinfo.value.cmd == ["python", "-c", child_script] + assert excinfo.value.timeout == 0.5 + assert excinfo.value.stdout == data[:32768] + assert excinfo.value.stderr is None + + +async def test_run_check(): + with pytest.raises(subprocess.CalledProcessError) as excinfo: + await subprocess.run( + "echo test >&2; false", + shell=True, + stderr=subprocess.PIPE, + check=True, + ) + assert excinfo.value.cmd == "echo test >&2; false" + assert excinfo.value.returncode == 1 + assert excinfo.value.stderr == b"test\n" + assert excinfo.value.stdout is None + + +async def test_run_with_broken_pipe(): + result = await subprocess.run( + ["python", "-c", "import sys; sys.stdin.close()"], + input=b"x" * 131072, + stdout=subprocess.PIPE, + ) + assert result.returncode == 0 + assert result.stdout == b"" + assert result.stderr is None + + +async def test_stderr_stdout(): + async with subprocess.Process( + COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) as proc: + assert proc.stdout is not None + assert proc.stderr is None + + result = await subprocess.run( + COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, + input=b"1234", + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + assert result.returncode == 0 + assert result.stdout == b"43211234" + assert result.stderr is None + + try: + r, w = os.pipe() + + async with subprocess.Process( + 
COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, + stdin=subprocess.PIPE, + stdout=w, + stderr=subprocess.STDOUT, + ) as proc: + os.close(w) + assert proc.stdout is None + assert proc.stderr is None + await proc.stdin.send_all(b"1234") + await proc.stdin.aclose() + assert await proc.wait() == 0 + assert os.read(r, 4096) == b"43211234" + assert os.read(r, 4096) == b"" + finally: + os.close(r) + + +async def test_errors(): + with pytest.raises(NotImplementedError): + subprocess.Process(["ls"], universal_newlines=True) + with pytest.raises(ValueError): + subprocess.Process(["ls"], bufsize=4096) + + +async def test_signals(): + async def test_one_signal(send_it, signum): + with move_on_after(1.0) as scope: + async with subprocess.Process(["sleep", "3600"]) as proc: + send_it(proc) + assert not scope.cancelled_caught + assert proc.returncode == -signum + + await test_one_signal(subprocess.Process.kill, signal.SIGKILL) + await test_one_signal(subprocess.Process.terminate, signal.SIGTERM) + await test_one_signal( + lambda proc: proc.send_signal(signal.SIGINT), signal.SIGINT + ) + + +async def test_wait_reapable_fails(): + old_sigchld = signal.signal(signal.SIGCHLD, signal.SIG_IGN) + try: + # With SIGCHLD disabled, the wait() syscall will wait for the + # process to exit but then fail with ECHILD. Process.wait() + # has logic that suppresses wait() errors if the process has + # in fact exited; this test tries to exercise that logic. + async with subprocess.Process(["sleep", "3600"]) as proc: + async with _core.open_nursery() as nursery: + nursery.start_soon(proc.wait) + await wait_all_tasks_blocked() + proc.kill() + nursery.cancel_scope.deadline = _core.current_time() + 1.0 + assert not nursery.cancel_scope.cancelled_caught + assert proc.returncode == 0 # exit status unknowable, so... 
+ finally: + signal.signal(signal.SIGCHLD, old_sigchld) From 7a8f9180186f80d5bad1d7fdbf57640d6a04a3c2 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Tue, 27 Nov 2018 16:37:17 -0800 Subject: [PATCH 02/49] flake8 doesn't like redefinitions so move the OS-specific method docstrings into comments --- trio/_subprocess/highlevel.py | 64 +++++++++++++++-------------------- 1 file changed, 27 insertions(+), 37 deletions(-) diff --git a/trio/_subprocess/highlevel.py b/trio/_subprocess/highlevel.py index 7189181896..c0d67693d8 100644 --- a/trio/_subprocess/highlevel.py +++ b/trio/_subprocess/highlevel.py @@ -10,43 +10,33 @@ __all__ = ["Process", "run"] # OS-specific hooks: - - -def create_pipe_to_child_stdin(): - """Create a new pipe suitable for sending data from this - process to the standard input of a child we're about to spawn. - - Returns: - A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is - something suitable for constructing a PipeSendStream around - and ``subprocess_end`` is something suitable for passing as - the ``stdin`` argument of :class:`subprocess.Popen`. - """ - raise NotImplementedError # pragma: no cover - - -def create_pipe_from_child_output(): - """Create a new pipe suitable for receiving data into this - process from the standard output or error stream of a child - we're about to spawn. - - Returns: - A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is - something suitable for constructing a PipeReceiveStream around - and ``subprocess_end`` is something suitable for passing as - the ``stdout`` or ``stderr`` argument of :class:`subprocess.Popen`. - """ - raise NotImplementedError # pragma: no cover - - -async def wait_reapable(pid): - """Block until a subprocess.Popen.wait() for the given PID would - complete immediately, without consuming the process's exit - code. Only one call for the same process may be active - simultaneously; this is not verified. 
- """ - raise NotImplementedError # pragma: no cover - +# +# create_pipe_to_child_stdin() -> (int, int): +# Create a new pipe suitable for sending data from this +# process to the standard input of a child we're about to spawn. +# +# Returns: +# A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is +# something suitable for constructing a PipeSendStream around +# and ``subprocess_end`` is something suitable for passing as +# the ``stdin`` argument of :class:`subprocess.Popen`. +# +# create_pipe_from_child_output() -> (int, int): +# Create a new pipe suitable for receiving data into this +# process from the standard output or error stream of a child +# we're about to spawn. +# +# Returns: +# A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is +# something suitable for constructing a PipeReceiveStream around +# and ``subprocess_end`` is something suitable for passing as +# the ``stdout`` or ``stderr`` argument of :class:`subprocess.Popen`. +# +# wait_reapable(pid: int) -> None: +# Block until a subprocess.Popen.wait() for the given PID would +# complete immediately, without consuming the process's exit +# code. Only one call for the same process may be active +# simultaneously; this is not verified. if os.name == "posix": from .unix_pipes import PipeSendStream, PipeReceiveStream From 581554b7a2707372f50cc2ff44ec0174faa2f626 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Tue, 27 Nov 2018 16:40:07 -0800 Subject: [PATCH 03/49] don't assume executing 'python' invokes python 2 --- trio/tests/subprocess/test_process.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/trio/tests/subprocess/test_process.py b/trio/tests/subprocess/test_process.py index 6d3f8db087..f59098fb7f 100644 --- a/trio/tests/subprocess/test_process.py +++ b/trio/tests/subprocess/test_process.py @@ -2,6 +2,7 @@ import os import random import signal +import sys import pytest from ... 
import _core, move_on_after, sleep, BrokenResourceError, subprocess @@ -37,10 +38,10 @@ async def test_kill_when_context_cancelled(): COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR = [ - "python", "-c", "import sys\n" - "data = sys.stdin.read()\n" - "sys.stdout.write(data)\n" - "sys.stderr.write(data[::-1])" + sys.executable, "-c", "import sys\n" + "data = sys.stdin.buffer.read()\n" + "sys.stdout.buffer.write(data)\n" + "sys.stderr.buffer.write(data[::-1])" ] @@ -122,19 +123,19 @@ async def test_run_timeout(): data = b"1" * 65536 + b"2" * 65536 + b"3" * 65536 child_script = """ import sys, time -sys.stdout.write(sys.stdin.read(32768)) +sys.stdout.buffer.write(sys.stdin.buffer.read(32768)) time.sleep(10) -sys.stdout.write(sys.stdin.read()) +sys.stdout.buffer.write(sys.stdin.buffer.read()) """ with pytest.raises(subprocess.TimeoutExpired) as excinfo: await subprocess.run( - ["python", "-c", child_script], + [sys.executable, "-c", child_script], input=data, stdout=subprocess.PIPE, timeout=0.5, ) - assert excinfo.value.cmd == ["python", "-c", child_script] + assert excinfo.value.cmd == [sys.executable, "-c", child_script] assert excinfo.value.timeout == 0.5 assert excinfo.value.stdout == data[:32768] assert excinfo.value.stderr is None @@ -156,7 +157,7 @@ async def test_run_check(): async def test_run_with_broken_pipe(): result = await subprocess.run( - ["python", "-c", "import sys; sys.stdin.close()"], + [sys.executable, "-c", "import sys; sys.stdin.close()"], input=b"x" * 131072, stdout=subprocess.PIPE, ) @@ -182,7 +183,7 @@ async def test_stderr_stdout(): stderr=subprocess.STDOUT, ) assert result.returncode == 0 - assert result.stdout == b"43211234" + assert result.stdout == b"12344321" assert result.stderr is None try: @@ -200,7 +201,7 @@ async def test_stderr_stdout(): await proc.stdin.send_all(b"1234") await proc.stdin.aclose() assert await proc.wait() == 0 - assert os.read(r, 4096) == b"43211234" + assert os.read(r, 4096) == b"12344321" assert os.read(r, 4096) == 
b"" finally: os.close(r) From 6d2bc090424d06112fec026fb3aa3cacc13ef265 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Tue, 27 Nov 2018 17:34:48 -0800 Subject: [PATCH 04/49] fix pypy issues --- trio/_subprocess/highlevel.py | 71 +++++++++++++++++++++++++++-------- 1 file changed, 56 insertions(+), 15 deletions(-) diff --git a/trio/_subprocess/highlevel.py b/trio/_subprocess/highlevel.py index c0d67693d8..a218ead62a 100644 --- a/trio/_subprocess/highlevel.py +++ b/trio/_subprocess/highlevel.py @@ -53,15 +53,22 @@ def create_pipe_from_child_output(): async def wait_reapable(pid): kqueue = _core.current_kqueue() - event = select.kevent( + try: + from select import KQ_NOTE_EXIT + except ImportError: + # pypy doesn't define KQ_NOTE_EXIT + KQ_NOTE_EXIT = 0x80000000 + make_event = lambda flags: select.kevent( pid, filter=select.KQ_FILTER_PROC, - flags=select.KQ_EV_ADD | select.KQ_EV_ONESHOT, - fflags=select.KQ_NOTE_EXIT + flags=flags, + fflags=KQ_NOTE_EXIT ) try: - kqueue.control([event], 0) + kqueue.control( + [make_event(select.KQ_EV_ADD | select.KQ_EV_ONESHOT)], 0 + ) except ProcessLookupError: # This can happen if the process has already exited. 
# Frustratingly, it does _not_ synchronize with calls @@ -75,30 +82,64 @@ async def wait_reapable(pid): return def abort(_): - event.flags = select.KQ_EV_DELETE - kqueue.control([event], 0) + kqueue.control([make_event(select.KQ_EV_DELETE)], 0) return _core.Abort.SUCCEEDED await _core.wait_kevent(pid, select.KQ_FILTER_PROC, abort) - elif hasattr(os, "waitid"): + else: wait_limiter = CapacityLimiter(math.inf) + try: + from os import waitid + + def sync_wait_reapable(pid): + waitid(os.P_PID, pid, os.WEXITED | os.WNOWAIT) + except ImportError: + # pypy doesn't define os.waitid so we need to pull it + # out ourselves using cffi + import cffi + waitid_ffi = cffi.FFI() + waitid_ffi.cdef( + """ +typedef struct siginfo_s { + int si_signo; + int si_errno; + int si_code; + int si_pid; + int si_uid; + int si_status; + int pad[26]; +} siginfo_t; +int waitid(int idtype, int id, siginfo_t* result, int options); +""" + ) + waitid = waitid_ffi.dlopen(None).waitid + + def sync_wait_reapable(pid): + P_PID = 1 + WEXITED = 0x00000004 + if sys.platform == 'darwin': + # waitid() is not exposed on Python on Darwin but does + # work through CFFI; note that we typically won't get + # here since Darwin also defines kqueue + WNOWAIT = 0x00000020 + else: + WNOWAIT = 0x01000000 + result = waitid_ffi.new("siginfo_t *") + rc = waitid(P_PID, pid, result, WEXITED | WNOWAIT) + if rc < 0: + errno = waitid_ffi.errno + raise OSError(errno, os.strerror(errno)) + async def wait_reapable(pid): await run_sync_in_worker_thread( - os.waitid, - os.P_PID, + sync_wait_reapable, pid, - os.WEXITED | os.WNOWAIT, cancellable=True, limiter=wait_limiter ) - else: # pragma: no cover - raise NotImplementedError( - "subprocess reaping on UNIX requires select.kqueue or os.waitid" - ) - elif os.name == "nt": import msvcrt From 188548727b2ed46af07c799cfb006fd58274c57d Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Tue, 27 Nov 2018 17:47:03 -0800 Subject: [PATCH 05/49] fix pypy harder --- trio/_subprocess/highlevel.py 
| 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/trio/_subprocess/highlevel.py b/trio/_subprocess/highlevel.py index a218ead62a..5de90401dd 100644 --- a/trio/_subprocess/highlevel.py +++ b/trio/_subprocess/highlevel.py @@ -1,7 +1,8 @@ import math import os -import subprocess import select +import subprocess +import sys from .. import _core, BrokenResourceError from .._sync import CapacityLimiter, Lock From 3807c64dcb028a616f894706183eb6484a22f215 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Wed, 28 Nov 2018 14:10:27 -0800 Subject: [PATCH 06/49] add links to pypy issues --- trio/_subprocess/highlevel.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/trio/_subprocess/highlevel.py b/trio/_subprocess/highlevel.py index 5de90401dd..35c69c243a 100644 --- a/trio/_subprocess/highlevel.py +++ b/trio/_subprocess/highlevel.py @@ -57,7 +57,9 @@ async def wait_reapable(pid): try: from select import KQ_NOTE_EXIT except ImportError: - # pypy doesn't define KQ_NOTE_EXIT + # pypy doesn't define KQ_NOTE_EXIT: + # https://bitbucket.org/pypy/pypy/issues/2921/ + # I verified this value against both Darwin and FreeBSD KQ_NOTE_EXIT = 0x80000000 make_event = lambda flags: select.kevent( pid, @@ -97,8 +99,8 @@ def abort(_): def sync_wait_reapable(pid): waitid(os.P_PID, pid, os.WEXITED | os.WNOWAIT) except ImportError: - # pypy doesn't define os.waitid so we need to pull it - # out ourselves using cffi + # pypy doesn't define os.waitid so we need to pull it out ourselves + # using cffi: https://bitbucket.org/pypy/pypy/issues/2922/ import cffi waitid_ffi = cffi.FFI() waitid_ffi.cdef( From 8d5f5a10b8304c94cbf6df09e1506bf99f4f0991 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Wed, 28 Nov 2018 14:36:18 -0800 Subject: [PATCH 07/49] add call(), check_call(), check_output(), fix some coverage gaps --- trio/_subprocess/__init__.py | 2 +- trio/_subprocess/highlevel.py | 45 +++++++++++++++++++- trio/subprocess.py | 2 +- 
trio/tests/subprocess/test_process.py | 60 ++++++++++++++++++++++----- 4 files changed, 94 insertions(+), 15 deletions(-) diff --git a/trio/_subprocess/__init__.py b/trio/_subprocess/__init__.py index 9bb1c60671..216b95bd3e 100644 --- a/trio/_subprocess/__init__.py +++ b/trio/_subprocess/__init__.py @@ -1 +1 @@ -from .highlevel import Process, run +from .highlevel import Process, run, call, check_call, check_output diff --git a/trio/_subprocess/highlevel.py b/trio/_subprocess/highlevel.py index 35c69c243a..b6aa290129 100644 --- a/trio/_subprocess/highlevel.py +++ b/trio/_subprocess/highlevel.py @@ -122,7 +122,7 @@ def sync_wait_reapable(pid): def sync_wait_reapable(pid): P_PID = 1 WEXITED = 0x00000004 - if sys.platform == 'darwin': + if sys.platform == 'darwin': # pragma: no cover # waitid() is not exposed on Python on Darwin but does # work through CFFI; note that we typically won't get # here since Darwin also defines kqueue @@ -407,7 +407,9 @@ async def run( one keeps the ``timeout`` parameter, so that it can provide you with the process's partial output if it is killed due to a timeout. It also adds ``deadline`` as an option if you prefer to - express your timeout absolutely. + express your timeout absolutely. If you don't care about preserving + partial output on a timeout, you can of course also nest run() + inside a normal Trio cancel scope. 
Returns: A :class:`subprocess.CompletedProcess` instance describing the @@ -491,3 +493,42 @@ async def read_output(stream, chunks): return subprocess.CompletedProcess( proc.args, proc.returncode, stdout, stderr ) + + +async def call(*popenargs, **kwargs): + """Like :func:`subprocess.call`, but async.""" + async with Process(*popenargs, **kwargs) as proc: + return await proc.wait() + + +async def check_call(*popenargs, **kwargs): + """Like :func:`subprocess.check_call`, but async.""" + async with Process(*popenargs, **kwargs) as proc: + retcode = await proc.wait() + if retcode: + raise subprocess.CalledProcessError(retcode, proc.args) + return 0 + + +async def check_output(*popenargs, timeout=None, deadline=None, **kwargs): + """Like :func:`subprocess.check_output`, but async. + + Like :func:`run`, this takes an optional ``timeout`` or ``deadline`` + argument; if the timeout expires or deadline passes, the child process + will be killed and its partial output wrapped up in a + :exc:`subprocess.TimeoutExpired` exception. You can also nest + :func:`check_output` in a normal Trio cancel scope to impose + a timeout without capturing partial output. 
+ """ + if 'stdout' in kwargs: + raise ValueError("stdout argument not allowed, it will be overridden.") + + result = await run( + *popenargs, + stdout=subprocess.PIPE, + timeout=timeout, + deadline=deadline, + check=True, + **kwargs + ) + return result.stdout diff --git a/trio/subprocess.py b/trio/subprocess.py index b37a9f43c5..86b3d9c5e9 100644 --- a/trio/subprocess.py +++ b/trio/subprocess.py @@ -1,4 +1,4 @@ -from ._subprocess import Process, run +from ._subprocess import Process, run, call, check_call, check_output from subprocess import ( PIPE, STDOUT, DEVNULL, CalledProcessError, SubprocessError, TimeoutExpired, CompletedProcess diff --git a/trio/tests/subprocess/test_process.py b/trio/tests/subprocess/test_process.py index f59098fb7f..6e0965d1c2 100644 --- a/trio/tests/subprocess/test_process.py +++ b/trio/tests/subprocess/test_process.py @@ -128,17 +128,46 @@ async def test_run_timeout(): sys.stdout.buffer.write(sys.stdin.buffer.read()) """ - with pytest.raises(subprocess.TimeoutExpired) as excinfo: - await subprocess.run( - [sys.executable, "-c", child_script], - input=data, - stdout=subprocess.PIPE, - timeout=0.5, - ) - assert excinfo.value.cmd == [sys.executable, "-c", child_script] - assert excinfo.value.timeout == 0.5 - assert excinfo.value.stdout == data[:32768] - assert excinfo.value.stderr is None + for make_timeout_arg in ( + lambda: {"timeout": 0.25}, + lambda: {"deadline": _core.current_time() + 0.25} + ): + with pytest.raises(subprocess.TimeoutExpired) as excinfo: + await subprocess.run( + [sys.executable, "-c", child_script], + input=data, + stdout=subprocess.PIPE, + **make_timeout_arg() + ) + assert excinfo.value.cmd == [sys.executable, "-c", child_script] + if "timeout" in make_timeout_arg(): + assert excinfo.value.timeout == 0.25 + else: + assert 0.2 < excinfo.value.timeout < 0.3 + assert excinfo.value.stdout == data[:32768] + assert excinfo.value.stderr is None + + +async def test_call_and_check(): + assert await subprocess.call(["true"]) 
== 0 + assert await subprocess.call(["false"]) == 1 + + assert await subprocess.check_call(["true"]) == 0 + with pytest.raises(subprocess.CalledProcessError) as excinfo: + await subprocess.check_call(["false"]) + assert excinfo.value.returncode == 1 + assert excinfo.value.cmd == ["false"] + assert excinfo.value.output is None + + assert await subprocess.check_output(["echo", "hi"]) == b"hi\n" + with pytest.raises(subprocess.CalledProcessError) as excinfo: + await subprocess.check_output("echo lo; false", shell=True) + assert excinfo.value.returncode == 1 + assert excinfo.value.cmd == "echo lo; false" + assert excinfo.value.output == b"lo\n" + + with pytest.raises(ValueError): + await subprocess.check_output(["true"], stdout=subprocess.DEVNULL) async def test_run_check(): @@ -186,6 +215,15 @@ async def test_stderr_stdout(): assert result.stdout == b"12344321" assert result.stderr is None + # this one hits the branch where stderr=STDOUT but stdout + # is not redirected + result = await subprocess.run( + ["cat"], input=b"", stderr=subprocess.STDOUT + ) + assert result.returncode == 0 + assert result.stdout is None + assert result.stderr is None + try: r, w = os.pipe() From 701655ba0ae97ea244968d480a9953587b46b94d Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Wed, 28 Nov 2018 14:46:23 -0800 Subject: [PATCH 08/49] add a comment about the dangerous-looking siginfo_t layout assumption --- trio/_subprocess/highlevel.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/trio/_subprocess/highlevel.py b/trio/_subprocess/highlevel.py index b6aa290129..de96501984 100644 --- a/trio/_subprocess/highlevel.py +++ b/trio/_subprocess/highlevel.py @@ -103,6 +103,11 @@ def sync_wait_reapable(pid): # using cffi: https://bitbucket.org/pypy/pypy/issues/2922/ import cffi waitid_ffi = cffi.FFI() + # Believe it or not, siginfo_t starts with fields in the + # same layout on both Linux and Darwin. 
The Linux structure + # is bigger so that's what we use to size `pad`; while + # there are a few extra fields in there, most of it is + # true padding which would not be written by the syscall. waitid_ffi.cdef( """ typedef struct siginfo_s { From 51d6a5014e66e712589bf5c4251043037f9a0833 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Wed, 28 Nov 2018 14:47:28 -0800 Subject: [PATCH 09/49] yapfify --- trio/tests/subprocess/test_process.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/trio/tests/subprocess/test_process.py b/trio/tests/subprocess/test_process.py index 6e0965d1c2..683426386f 100644 --- a/trio/tests/subprocess/test_process.py +++ b/trio/tests/subprocess/test_process.py @@ -217,9 +217,7 @@ async def test_stderr_stdout(): # this one hits the branch where stderr=STDOUT but stdout # is not redirected - result = await subprocess.run( - ["cat"], input=b"", stderr=subprocess.STDOUT - ) + result = await subprocess.run(["cat"], input=b"", stderr=subprocess.STDOUT) assert result.returncode == 0 assert result.stdout is None assert result.stderr is None From 756e55b6ee2a76c47f5f2db68281b964bec7c8e9 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Wed, 12 Dec 2018 19:50:26 -0500 Subject: [PATCH 10/49] Explode _subprocess package into its constituent parts - _subprocess/highlevel.py becomes _subprocess.py - _subprocess/unix_pipes.py becomes _unix_pipes.py - _subprocess/linux_waitpid.py removed (obsoleted by use of `waitid`) --- .../highlevel.py => _subprocess.py} | 10 ++-- trio/_subprocess/__init__.py | 1 - trio/_subprocess/linux_waitpid.py | 47 ------------------- .../unix_pipes.py => _unix_pipes.py} | 8 ++-- trio/tests/subprocess/__init__.py | 0 trio/tests/subprocess/test_waitpid_linux.py | 39 --------------- .../test_process.py => test_subprocess.py} | 5 +- .../tests/{subprocess => }/test_unix_pipes.py | 10 ++-- 8 files changed, 15 insertions(+), 105 deletions(-) rename trio/{_subprocess/highlevel.py => _subprocess.py} (98%) delete mode 
100644 trio/_subprocess/__init__.py delete mode 100644 trio/_subprocess/linux_waitpid.py rename trio/{_subprocess/unix_pipes.py => _unix_pipes.py} (94%) delete mode 100644 trio/tests/subprocess/__init__.py delete mode 100644 trio/tests/subprocess/test_waitpid_linux.py rename trio/tests/{subprocess/test_process.py => test_subprocess.py} (98%) rename trio/tests/{subprocess => }/test_unix_pipes.py (93%) diff --git a/trio/_subprocess/highlevel.py b/trio/_subprocess.py similarity index 98% rename from trio/_subprocess/highlevel.py rename to trio/_subprocess.py index de96501984..e7e91c8f36 100644 --- a/trio/_subprocess/highlevel.py +++ b/trio/_subprocess.py @@ -4,9 +4,9 @@ import subprocess import sys -from .. import _core, BrokenResourceError -from .._sync import CapacityLimiter, Lock -from .._threads import run_sync_in_worker_thread +from . import _core +from ._sync import CapacityLimiter, Lock +from ._threads import run_sync_in_worker_thread __all__ = ["Process", "run"] @@ -40,7 +40,7 @@ # simultaneously; this is not verified. 
if os.name == "posix": - from .unix_pipes import PipeSendStream, PipeReceiveStream + from ._unix_pipes import PipeSendStream, PipeReceiveStream def create_pipe_to_child_stdin(): rfd, wfd = os.pipe() @@ -446,7 +446,7 @@ async def feed_input(): if input: try: await proc.stdin.send_all(input) - except BrokenResourceError: + except _core.BrokenResourceError: pass except OSError as e: # pragma: no cover # According to the stdlib subprocess module, EINVAL can diff --git a/trio/_subprocess/__init__.py b/trio/_subprocess/__init__.py deleted file mode 100644 index 216b95bd3e..0000000000 --- a/trio/_subprocess/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .highlevel import Process, run, call, check_call, check_output diff --git a/trio/_subprocess/linux_waitpid.py b/trio/_subprocess/linux_waitpid.py deleted file mode 100644 index b9319865ec..0000000000 --- a/trio/_subprocess/linux_waitpid.py +++ /dev/null @@ -1,47 +0,0 @@ -import attr -import functools -import math -import os -import outcome -from typing import Any - -from .. import _core -from .._sync import CapacityLimiter, Event -from .._threads import run_sync_in_worker_thread - - -@attr.s -class WaitpidState: - pid = attr.ib() - event = attr.ib(default=attr.Factory(Event)) - outcome = attr.ib(default=None) - - -waitpid_limiter = CapacityLimiter(math.inf) - - -# adapted from -# https://github.com/python-trio/trio/issues/4#issuecomment-398967572 -async def _task(state: WaitpidState) -> None: - """The waitpid thread runner task. 
This must be spawned as a system - task.""" - partial = functools.partial( - os.waitpid, # function - state.pid, # pid - 0 # no options - ) - - tresult = await run_sync_in_worker_thread( - outcome.capture, partial, cancellable=True, limiter=waitpid_limiter - ) - state.outcome = tresult - state.event.set() - - -async def waitpid(pid: int) -> Any: - """Waits for a child process with the specified PID to finish running.""" - waiter = WaitpidState(pid=pid) - _core.spawn_system_task(_task, waiter) - - await waiter.event.wait() - return waiter.outcome.unwrap() diff --git a/trio/_subprocess/unix_pipes.py b/trio/_unix_pipes.py similarity index 94% rename from trio/_subprocess/unix_pipes.py rename to trio/_unix_pipes.py index 7d7c94891e..629d66da3a 100644 --- a/trio/_subprocess/unix_pipes.py +++ b/trio/_unix_pipes.py @@ -2,8 +2,8 @@ import os from typing import Tuple -from .. import _core, BrokenResourceError -from .._abc import SendStream, ReceiveStream +from . import _core +from ._abc import SendStream, ReceiveStream __all__ = ["PipeSendStream", "PipeReceiveStream", "make_pipe"] @@ -65,7 +65,7 @@ async def send_all(self, data: bytes): total_sent += os.write(self._pipe, remaining) except BrokenPipeError as e: await _core.checkpoint() - raise BrokenResourceError from e + raise _core.BrokenResourceError from e except BlockingIOError: await self.wait_send_all_might_not_block() @@ -82,7 +82,7 @@ async def wait_send_all_might_not_block(self) -> None: # also doesn't checkpoint so we have to do that # ourselves here too await _core.checkpoint() - raise BrokenResourceError from e + raise _core.BrokenResourceError from e class PipeReceiveStream(_PipeMixin, ReceiveStream): diff --git a/trio/tests/subprocess/__init__.py b/trio/tests/subprocess/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trio/tests/subprocess/test_waitpid_linux.py b/trio/tests/subprocess/test_waitpid_linux.py deleted file mode 100644 index 53a37c61a1..0000000000 --- 
a/trio/tests/subprocess/test_waitpid_linux.py +++ /dev/null @@ -1,39 +0,0 @@ -import sys - -import os -import pytest -import signal - -from ... import _core -from ..._subprocess.linux_waitpid import waitpid - -pytestmark = pytest.mark.skipif( - sys.platform != "linux", reason="linux waitpid only works on linux" -) - - -async def test_waitpid(): - pid = os.spawnvp(os.P_NOWAIT, "/bin/false", ("false",)) - result = await waitpid(pid) - # exit code is a 16-bit int: (code, signal) - assert result[0] == pid - assert os.WIFEXITED(result[1]) and os.WEXITSTATUS(result[1]) == 1 - - pid2 = os.spawnvp(os.P_NOWAIT, "/bin/true", ("true",)) - result = await waitpid(pid2) - assert result[0] == pid2 - assert os.WIFEXITED(result[1]) and os.WEXITSTATUS(result[1]) == 0 - - pid3 = os.spawnvp(os.P_NOWAIT, "/bin/sleep", ("/bin/sleep", "5")) - os.kill(pid3, signal.SIGKILL) - result = await waitpid(pid3) - assert result[0] == pid3 - status = os.WTERMSIG(result[1]) - assert os.WIFSIGNALED(result[1]) and status == 9 - - -async def test_waitpid_no_process(): - with pytest.raises(ChildProcessError): - # this PID does exist, but it's ourselves - # which doesn't work - await waitpid(os.getpid()) diff --git a/trio/tests/subprocess/test_process.py b/trio/tests/test_subprocess.py similarity index 98% rename from trio/tests/subprocess/test_process.py rename to trio/tests/test_subprocess.py index 683426386f..3f2e2af724 100644 --- a/trio/tests/subprocess/test_process.py +++ b/trio/tests/test_subprocess.py @@ -5,9 +5,8 @@ import sys import pytest -from ... import _core, move_on_after, sleep, BrokenResourceError, subprocess -from ...testing import wait_all_tasks_blocked -from ..._core.tests.tutil import slow +from .. 
import _core, move_on_after, sleep, subprocess +from ..testing import wait_all_tasks_blocked posix = os.name == "posix" diff --git a/trio/tests/subprocess/test_unix_pipes.py b/trio/tests/test_unix_pipes.py similarity index 93% rename from trio/tests/subprocess/test_unix_pipes.py rename to trio/tests/test_unix_pipes.py index b8cc2d496a..48afe4cbd7 100644 --- a/trio/tests/subprocess/test_unix_pipes.py +++ b/trio/tests/test_unix_pipes.py @@ -4,9 +4,9 @@ import os import pytest -from trio._core.tests.tutil import gc_collect_harder -from ... import _core -from ...testing import (wait_all_tasks_blocked, check_one_way_stream) +from .._core.tests.tutil import gc_collect_harder +from .. import _core +from ..testing import wait_all_tasks_blocked, check_one_way_stream posix = os.name == "posix" @@ -15,9 +15,7 @@ ) if posix: - from ..._subprocess.unix_pipes import ( - PipeSendStream, PipeReceiveStream, make_pipe - ) + from .._unix_pipes import PipeSendStream, PipeReceiveStream, make_pipe async def test_send_pipe(): From 3720fe714fe630b17767c937e9491a1db22e734d Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Wed, 12 Dec 2018 22:21:46 -0500 Subject: [PATCH 11/49] split off wait_reapable into a separate module --- trio/_platform/__init__.py | 34 +++++++++ trio/_platform/kqueue.py | 40 ++++++++++ trio/_platform/waitid.py | 106 ++++++++++++++++++++++++++ trio/_platform/windows.py | 14 ++++ trio/_subprocess.py | 147 ++----------------------------------- 5 files changed, 199 insertions(+), 142 deletions(-) create mode 100644 trio/_platform/__init__.py create mode 100644 trio/_platform/kqueue.py create mode 100644 trio/_platform/waitid.py create mode 100644 trio/_platform/windows.py diff --git a/trio/_platform/__init__.py b/trio/_platform/__init__.py new file mode 100644 index 0000000000..35ca558fba --- /dev/null +++ b/trio/_platform/__init__.py @@ -0,0 +1,34 @@ +# Platform-specific logic, currently mostly for subprocesses. + +import os +import sys + +from .. 
import _core + + +# Fallback versions of the functions provided -- implementations +# per OS are imported atop these at the bottom of the module. + +async def wait_child_exiting(pid: int) -> None: + """Block until the child process with PID ``pid`` is exiting. + + When this function returns, it indicates that a call to + :meth:`os.waitpid` or similar is likely to immediately succeed in + returning ``pid``'s exit status, or to immediately fail because + ``pid`` doesn't exist. The actual exit status is not consumed; + you need to do that yourself. (On some platforms, a wait with + :const:`os.WNOHANG` might not succeed right away, but a blocking + wait won't block for a noticeable amount of time.) + """ + raise NotImplementedError from wait_child_exiting._error + + +try: + if os.name == "nt": + from .windows import wait_child_exiting + elif hasattr(_core, "wait_kevent"): + from .kqueue import wait_child_exiting + else: + from .waitid import wait_child_exiting +except ImportError as ex: + wait_child_exiting._error = ex diff --git a/trio/_platform/kqueue.py b/trio/_platform/kqueue.py new file mode 100644 index 0000000000..997422b59c --- /dev/null +++ b/trio/_platform/kqueue.py @@ -0,0 +1,40 @@ +import select +from .. import _core + +async def wait_child_exiting(pid): + kqueue = _core.current_kqueue() + try: + from select import KQ_NOTE_EXIT + except ImportError: + # pypy doesn't define KQ_NOTE_EXIT: + # https://bitbucket.org/pypy/pypy/issues/2921/ + # I verified this value against both Darwin and FreeBSD + KQ_NOTE_EXIT = 0x80000000 + + make_event = lambda flags: select.kevent( + pid, + filter=select.KQ_FILTER_PROC, + flags=flags, + fflags=KQ_NOTE_EXIT + ) + + try: + kqueue.control( + [make_event(select.KQ_EV_ADD | select.KQ_EV_ONESHOT)], 0 + ) + except ProcessLookupError: + # This can happen if the process has already exited. 
+ # Frustratingly, it does _not_ synchronize with calls + # to wait() and friends -- it's possible for kevent to + # return ESRCH but waitpid(..., WNOHANG) still returns + # nothing. And OS X doesn't support waitid() so we + # can't fall back to the Linux-style approach. So + # we'll just suppress the error, and let the caller + # assume their wait won't block for long. + return + + def abort(_): + kqueue.control([make_event(select.KQ_EV_DELETE)], 0) + return _core.Abort.SUCCEEDED + + await _core.wait_kevent(pid, select.KQ_FILTER_PROC, abort) diff --git a/trio/_platform/waitid.py b/trio/_platform/waitid.py new file mode 100644 index 0000000000..867b87e126 --- /dev/null +++ b/trio/_platform/waitid.py @@ -0,0 +1,106 @@ +import math +import os + +from .. import _core +from .._sync import CapacityLimiter, Event +from .._threads import run_sync_in_worker_thread + +try: + from os import waitid + + def sync_wait_reapable(pid): + waitid(os.P_PID, pid, os.WEXITED | os.WNOWAIT) + +except ImportError: + # pypy doesn't define os.waitid so we need to pull it out ourselves + # using cffi: https://bitbucket.org/pypy/pypy/issues/2922/ + import cffi + waitid_ffi = cffi.FFI() + + # Believe it or not, siginfo_t starts with fields in the + # same layout on both Linux and Darwin. The Linux structure + # is bigger so that's what we use to size `pad`; while + # there are a few extra fields in there, most of it is + # true padding which would not be written by the syscall. 
+ waitid_ffi.cdef( + """ +typedef struct siginfo_s { + int si_signo; + int si_errno; + int si_code; + int si_pid; + int si_uid; + int si_status; + int pad[26]; +} siginfo_t; +int waitid(int idtype, int id, siginfo_t* result, int options); +""" + ) + waitid = waitid_ffi.dlopen(None).waitid + + def sync_wait_reapable(pid): + P_PID = 1 + WEXITED = 0x00000004 + if sys.platform == 'darwin': # pragma: no cover + # waitid() is not exposed on Python on Darwin but does + # work through CFFI; note that we typically won't get + # here since Darwin also defines kqueue + WNOWAIT = 0x00000020 + else: + WNOWAIT = 0x01000000 + result = waitid_ffi.new("siginfo_t *") + while True: + if waitid(P_PID, pid, result, WEXITED | WNOWAIT) < 0: + got_errno = waitid_ffi.errno + if got_errno == errno.EINTR: + continue + raise OSError(got_errno, os.strerror(got_errno)) + + +# adapted from +# https://github.com/python-trio/trio/issues/4#issuecomment-398967572 + +waitid_limiter = CapacityLimiter(math.inf) + +# RunVar mapping pid => Event to set when it completes +waitid_thread_results = _core.RunVar("waitid_thread_results") + +async def _waitid_system_task(pid: int) -> None: + """Spawn a thread that waits for ``pid`` to exit, then wake any tasks + that were waiting on it. + """ + # cancellable=True: if this task is cancelled, then we abandon the + # thread to keep running waitpid in the background. Since this is + # always run as a system task, this will only happen if the whole + # call to trio.run is shutting down, in which case the waitid_threads + # dictionary is too -- so, nothing to clean up. + + try: + await run_sync_in_worker_thread( + sync_wait_reapable, + pid, + cancellable=True, + limiter=waitid_limiter + ) + except Exception: + # If waitid fails, waitpid will fail too, so it still makes + # sense to wake up the callers of wait_process_exiting(). The + # most likely reason for this error in practice is a child + # exiting when wait() is not possible because SIGCHLD is + # ignored. 
+ pass + finally: + waitid_thread_results.get().pop(pid).set() + + +async def wait_child_exiting(pid: int) -> None: + try: + return await waitid_thread_results.get()[pid].wait() + except LookupError: + waitid_thread_results.set({}) + except KeyError: + pass + finished = Event() + waitid_thread_results.get()[pid] = finished + _core.spawn_system_task(_waitid_system_task, pid) + await finished.wait() diff --git a/trio/_platform/windows.py b/trio/_platform/windows.py new file mode 100644 index 0000000000..4624f53ca7 --- /dev/null +++ b/trio/_platform/windows.py @@ -0,0 +1,14 @@ +import _core +from ._core._windows_cffi import kernel32, raise_winerror +from ._wait_for_object import WaitForSingleObject + +async def wait_child_exiting(pid): + SYNCHRONIZE = 0x00100000 # dwDesiredAccess value + handle = kernel32.OpenProcess(SYNCHRONIZE, False, pid) + if not handle: + await _core.checkpoint() + raise_winerror() + try: + await WaitForSingleObject(handle) + finally: + kernel32.CloseHandle(handle) diff --git a/trio/_subprocess.py b/trio/_subprocess.py index e7e91c8f36..1706a123b2 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -7,6 +7,7 @@ from . import _core from ._sync import CapacityLimiter, Lock from ._threads import run_sync_in_worker_thread +from ._platform import wait_child_exiting __all__ = ["Process", "run"] @@ -32,12 +33,6 @@ # something suitable for constructing a PipeReceiveStream around # and ``subprocess_end`` is something suitable for passing as # the ``stdout`` or ``stderr`` argument of :class:`subprocess.Popen`. -# -# wait_reapable(pid: int) -> None: -# Block until a subprocess.Popen.wait() for the given PID would -# complete immediately, without consuming the process's exit -# code. Only one call for the same process may be active -# simultaneously; this is not verified. 
if os.name == "posix": from ._unix_pipes import PipeSendStream, PipeReceiveStream @@ -50,104 +45,6 @@ def create_pipe_from_child_output(): rfd, wfd = os.pipe() return rfd, wfd - if hasattr(_core, "wait_kevent"): - - async def wait_reapable(pid): - kqueue = _core.current_kqueue() - try: - from select import KQ_NOTE_EXIT - except ImportError: - # pypy doesn't define KQ_NOTE_EXIT: - # https://bitbucket.org/pypy/pypy/issues/2921/ - # I verified this value against both Darwin and FreeBSD - KQ_NOTE_EXIT = 0x80000000 - make_event = lambda flags: select.kevent( - pid, - filter=select.KQ_FILTER_PROC, - flags=flags, - fflags=KQ_NOTE_EXIT - ) - - try: - kqueue.control( - [make_event(select.KQ_EV_ADD | select.KQ_EV_ONESHOT)], 0 - ) - except ProcessLookupError: - # This can happen if the process has already exited. - # Frustratingly, it does _not_ synchronize with calls - # to wait() and friends -- it's possible for kevent to - # return ESRCH but waitpid(..., WNOHANG) still returns - # nothing. And OS X doesn't support waitid() so we - # can't fall back to the Linux-style approach. So - # we'll just suppress the error, and not wait. - # Process.wait() calls us in a loop so the worst case - # is we busy-wait for a few iterations. - return - - def abort(_): - kqueue.control([make_event(select.KQ_EV_DELETE)], 0) - return _core.Abort.SUCCEEDED - - await _core.wait_kevent(pid, select.KQ_FILTER_PROC, abort) - - else: - wait_limiter = CapacityLimiter(math.inf) - - try: - from os import waitid - - def sync_wait_reapable(pid): - waitid(os.P_PID, pid, os.WEXITED | os.WNOWAIT) - except ImportError: - # pypy doesn't define os.waitid so we need to pull it out ourselves - # using cffi: https://bitbucket.org/pypy/pypy/issues/2922/ - import cffi - waitid_ffi = cffi.FFI() - # Believe it or not, siginfo_t starts with fields in the - # same layout on both Linux and Darwin. 
The Linux structure - # is bigger so that's what we use to size `pad`; while - # there are a few extra fields in there, most of it is - # true padding which would not be written by the syscall. - waitid_ffi.cdef( - """ -typedef struct siginfo_s { - int si_signo; - int si_errno; - int si_code; - int si_pid; - int si_uid; - int si_status; - int pad[26]; -} siginfo_t; -int waitid(int idtype, int id, siginfo_t* result, int options); -""" - ) - waitid = waitid_ffi.dlopen(None).waitid - - def sync_wait_reapable(pid): - P_PID = 1 - WEXITED = 0x00000004 - if sys.platform == 'darwin': # pragma: no cover - # waitid() is not exposed on Python on Darwin but does - # work through CFFI; note that we typically won't get - # here since Darwin also defines kqueue - WNOWAIT = 0x00000020 - else: - WNOWAIT = 0x01000000 - result = waitid_ffi.new("siginfo_t *") - rc = waitid(P_PID, pid, result, WEXITED | WNOWAIT) - if rc < 0: - errno = waitid_ffi.errno - raise OSError(errno, os.strerror(errno)) - - async def wait_reapable(pid): - await run_sync_in_worker_thread( - sync_wait_reapable, - pid, - cancellable=True, - limiter=wait_limiter - ) - elif os.name == "nt": import msvcrt @@ -176,19 +73,6 @@ def create_pipe_from_child_output(): rh, wh = windows_pipe(overlapped=(True, False)) return rh, msvcrt.open_osfhandle(wh, 0) - async def wait_reapable(pid): - from .._wait_for_object import WaitForSingleObject - from .._core._windows_cffi import kernel32, raise_winerror - SYNCHRONIZE = 0x00100000 # dwDesiredAccess value - handle = kernel32.OpenProcess(SYNCHRONIZE, False, pid) - if not handle: - await _core.checkpoint() - raise_winerror() - try: - await WaitForSingleObject(handle) - finally: - kernel32.CloseHandle(handle) - else: # pragma: no cover raise NotImplementedError("unsupported os.name {!r}".format(os.name)) @@ -323,9 +207,6 @@ def __init__(self, args, *, stdin=None, stdout=None, stderr=None, **kwds): self.args = self._proc.args self.pid = self._proc.pid - # Lock to ensure at most one task 
is blocked in - # wait_reapable() for this pid at a time - self._wait_lock = Lock() @property def returncode(self): @@ -363,28 +244,10 @@ async def wait(self): as the negative of that signal number, e.g., -11 for ``SIGSEGV``. """ while True: - async with self._wait_lock: - if self.poll() is not None: - await _core.checkpoint() - return self.returncode - try: - await wait_reapable(self.pid) - except OSError: - # If the child is dead, suppress the exception we - # encountered waiting for it to die; maybe SIGCHLD - # is ignored so all waits fail with ECHILD, for example. - if self.poll() is not None: - return self.returncode - - # wait(2) and friends are documented as raising the errors: - # - ECHILD (always converted into a returncode of 0 by - # stdlib subprocess module) - # - EINTR (always converted into a retry by Python) - # - EINVAL (can't get unless we pass invalid flags) - # So it shouldn't actually be possible to get a failure - # here that's not swallowed by Popen.poll(). But if we - # somehow get one, we'll propagate it, I guess... - raise # pragma: no cover + if self.poll() is not None: + await _core.checkpoint() + return self.returncode + await wait_child_exiting(self.pid) def poll(self): """Forwards to :meth:`subprocess.Popen.poll`.""" From 6be26eb5620e4690cbf21ed2edce02e6bd643e08 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Wed, 12 Dec 2018 22:22:15 -0500 Subject: [PATCH 12/49] use AsyncResource, make test less silly --- trio/_subprocess.py | 35 +++++++++++++++-------------------- trio/tests/test_subprocess.py | 18 ++++++------------ 2 files changed, 21 insertions(+), 32 deletions(-) diff --git a/trio/_subprocess.py b/trio/_subprocess.py index 1706a123b2..ec9c0f2534 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -5,6 +5,7 @@ import sys from . 
import _core +from ._abc import AsyncResource from ._sync import CapacityLimiter, Lock from ._threads import run_sync_in_worker_thread from ._platform import wait_child_exiting @@ -116,7 +117,7 @@ def wrap_process_stream(child_fd, given_value): return None, given_value -class Process: +class Process(AsyncResource): """Like :class:`subprocess.Popen`, but async. :class:`Process` has a public API identical to that of @@ -141,21 +142,18 @@ class Process: instead, or interact with :attr:`stdin` / :attr:`stdout` / :attr:`stderr` directly. - * Behavior when used as a context manager is different, as described - below. - - :class:`Process` can be used as an async context manager. It does - not block on entry; on exit it closes any pipe to the subprocess's - standard input, then blocks until the process exits. If the - blocking exit is cancelled, it kills the process and still waits - for termination before exiting the context. This is useful for - scoping the lifetime of a simple subprocess that doesn't spawn any - children of its own. (For subprocesses that do in turn spawn their - own subprocesses, there is not currently any way to clean up the - whole tree; moreover, using the :class:`Process` context manager - in such cases is likely to be counterproductive as killing the - top-level subprocess leaves it no chance to do any cleanup of its - children that might be desired.) + * :meth:`aclose` (and thus also ``__aexit__``) behave like the + standard :class:`Popen` context manager exit (close pipes to the + process, then wait for it to exit), but add additional behavior + if cancelled: kill the process and wait for it to finish + terminating. This is useful for scoping the lifetime of a + simple subprocess that doesn't spawn any children of its + own. 
(For subprocesses that do in turn spawn their own + subprocesses, there is not currently any way to clean up the + whole tree; moreover, using the :class:`Process` context manager + in such cases is likely to be counterproductive as killing the + top-level subprocess leaves it no chance to do any cleanup of + its children that might be desired.) """ @@ -219,10 +217,7 @@ def returncode(self): """ return self._proc.returncode - async def __aenter__(self): - return self - - async def __aexit__(self, *exc): + async def aclose(self): if self.stdin is not None: with _core.open_cancel_scope(shield=True): await self.stdin.aclose() diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index 3f2e2af724..6244ed074e 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -5,7 +5,7 @@ import sys import pytest -from .. import _core, move_on_after, sleep, subprocess +from .. import _core, move_on_after, sleep_forever, subprocess from ..testing import wait_all_tasks_blocked posix = os.name == "posix" @@ -20,18 +20,12 @@ async def test_basic(): async def test_kill_when_context_cancelled(): - with _core.open_cancel_scope() as scope: + with move_on_after(0) as scope: async with subprocess.Process(["sleep", "10"]) as proc: - while True: - await sleep(0.01) - assert proc.poll() is None - - # Don't set the deadline until we've done at least - # one poll, to avoid coverage flapping if starting - # the process takes a long time - if scope.deadline == +math.inf: - scope.deadline = _core.current_time() + 0.2 - + assert proc.poll() is None + # Process context entry is synchronous, so this is the + # only checkpoint: + await trio.sleep_forever() assert scope.cancelled_caught assert proc.returncode == -signal.SIGKILL From 77ec5ee69b90a985dab94610e2e6746bfa933dbc Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Wed, 12 Dec 2018 22:27:09 -0500 Subject: [PATCH 13/49] close stdout/stderr when closing child --- trio/_subprocess.py | 14 ++++++++++++-- 
1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/trio/_subprocess.py b/trio/_subprocess.py index ec9c0f2534..fb3c16d38c 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -218,9 +218,19 @@ def returncode(self): return self._proc.returncode async def aclose(self): - if self.stdin is not None: - with _core.open_cancel_scope(shield=True): + """Close any pipes we have to the process (both input and output) + and wait for it to exit. + + If cancelled, kills the process and waits for it to finish + exiting before propagating the cancellation. + """ + with _core.open_cancel_scope(shield=True): + if self.stdin is not None: await self.stdin.aclose() + if self.stdout is not None: + await self.stdout.aclose() + if self.stderr is not None: + await self.stderr.aclose() try: await self.wait() finally: From f2db9719c2b204671969a78c43a49a469d164868 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Fri, 14 Dec 2018 22:35:28 -0500 Subject: [PATCH 14/49] Rename _platform to _subprocess_platform, move more stuff there, misc other cleanups --- trio/_platform/__init__.py | 34 ---- trio/_platform/windows.py | 14 -- trio/_subprocess.py | 173 ++---------------- trio/_subprocess_platform/__init__.py | 103 +++++++++++ .../kqueue.py | 10 +- .../waitid.py | 62 ++++--- trio/_subprocess_platform/windows.py | 7 + trio/subprocess.py | 2 +- trio/tests/test_subprocess.py | 27 +-- 9 files changed, 176 insertions(+), 256 deletions(-) delete mode 100644 trio/_platform/__init__.py delete mode 100644 trio/_platform/windows.py create mode 100644 trio/_subprocess_platform/__init__.py rename trio/{_platform => _subprocess_platform}/kqueue.py (84%) rename trio/{_platform => _subprocess_platform}/waitid.py (62%) create mode 100644 trio/_subprocess_platform/windows.py diff --git a/trio/_platform/__init__.py b/trio/_platform/__init__.py deleted file mode 100644 index 35ca558fba..0000000000 --- a/trio/_platform/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Platform-specific logic, 
currently mostly for subprocesses. - -import os -import sys - -from .. import _core - - -# Fallback versions of the functions provided -- implementations -# per OS are imported atop these at the bottom of the module. - -async def wait_child_exiting(pid: int) -> None: - """Block until the child process with PID ``pid`` is exiting. - - When this function returns, it indicates that a call to - :meth:`os.waitpid` or similar is likely to immediately succeed in - returning ``pid``'s exit status, or to immediately fail because - ``pid`` doesn't exist. The actual exit status is not consumed; - you need to do that yourself. (On some platforms, a wait with - :const:`os.WNOHANG` might not succeed right away, but a blocking - wait won't block for a noticeable amount of time.) - """ - raise NotImplementedError from wait_child_exiting._error - - -try: - if os.name == "nt": - from .windows import wait_child_exiting - elif hasattr(_core, "wait_kevent"): - from .kqueue import wait_child_exiting - else: - from .waitid import wait_child_exiting -except ImportError as ex: - wait_child_exiting._error = ex diff --git a/trio/_platform/windows.py b/trio/_platform/windows.py deleted file mode 100644 index 4624f53ca7..0000000000 --- a/trio/_platform/windows.py +++ /dev/null @@ -1,14 +0,0 @@ -import _core -from ._core._windows_cffi import kernel32, raise_winerror -from ._wait_for_object import WaitForSingleObject - -async def wait_child_exiting(pid): - SYNCHRONIZE = 0x00100000 # dwDesiredAccess value - handle = kernel32.OpenProcess(SYNCHRONIZE, False, pid) - if not handle: - await _core.checkpoint() - raise_winerror() - try: - await WaitForSingleObject(handle) - finally: - kernel32.CloseHandle(handle) diff --git a/trio/_subprocess.py b/trio/_subprocess.py index fb3c16d38c..1a51c096f9 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -8,114 +8,12 @@ from ._abc import AsyncResource from ._sync import CapacityLimiter, Lock from ._threads import run_sync_in_worker_thread -from 
._platform import wait_child_exiting +from ._subprocess_platform import ( + wait_child_exiting, create_pipe_to_child_stdin, create_pipe_from_child_output +) __all__ = ["Process", "run"] -# OS-specific hooks: -# -# create_pipe_to_child_stdin() -> (int, int): -# Create a new pipe suitable for sending data from this -# process to the standard input of a child we're about to spawn. -# -# Returns: -# A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is -# something suitable for constructing a PipeSendStream around -# and ``subprocess_end`` is something suitable for passing as -# the ``stdin`` argument of :class:`subprocess.Popen`. -# -# create_pipe_from_child_output() -> (int, int): -# Create a new pipe suitable for receiving data into this -# process from the standard output or error stream of a child -# we're about to spawn. -# -# Returns: -# A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is -# something suitable for constructing a PipeReceiveStream around -# and ``subprocess_end`` is something suitable for passing as -# the ``stdout`` or ``stderr`` argument of :class:`subprocess.Popen`. - -if os.name == "posix": - from ._unix_pipes import PipeSendStream, PipeReceiveStream - - def create_pipe_to_child_stdin(): - rfd, wfd = os.pipe() - return wfd, rfd - - def create_pipe_from_child_output(): - rfd, wfd = os.pipe() - return rfd, wfd - -elif os.name == "nt": - import msvcrt - - # TODO: implement the pipes - class PipeSendStream: - def __init__(self, handle): - raise NotImplementedError - - PipeReceiveStream = PipeSendStream - - # This isn't exported or documented, but it's also not - # underscore-prefixed, and seems kosher to use. The asyncio docs - # for 3.5 included an example that imported socketpair from - # windows_utils (before socket.socketpair existed on Windows), and - # when asyncio.windows_utils.socketpair was removed in 3.7, the - # removal was mentioned in the release notes. 
- from asyncio.windows_utils import pipe as windows_pipe - - def create_pipe_to_child_stdin(): - # for stdin, we want the write end (our end) to use overlapped I/O - rh, wh = windows_pipe(overlapped=(False, True)) - return wh, msvcrt.open_osfhandle(rh, os.O_RDONLY) - - def create_pipe_from_child_output(): - # for stdout/err, it's the read end that's overlapped - rh, wh = windows_pipe(overlapped=(True, False)) - return rh, msvcrt.open_osfhandle(wh, 0) - -else: # pragma: no cover - raise NotImplementedError("unsupported os.name {!r}".format(os.name)) - - -def wrap_process_stream(child_fd, given_value): - """Perform any wrapping necessary to be able to use async operations - to interact with a subprocess stream. - - Args: - child_fd (0, 1, or 2): The file descriptor in the child that this - stream will be used to communicate with. - given_value (int or None): Anything accepted by the ``stdin``, - ``stdout``, or ``stderr`` kwargs to :class:`subprocess.Popen`. - For example, this could be ``None``, a file descriptor, - :data:`subprocess.PIPE`, :data:`subprocess.STDOUT`, or - :data:`subprocess.DEVNULL`. - - Returns: - A pair ``(trio_stream, subprocess_value)`` where ``trio_stream`` - is a :class:`trio.abc.SendStream` (for stdin) or - :class:`trio.abc.ReceiveStream` (for stdout/stderr) that can be - used to communicate with the child process, and - ``subprocess_value`` is the value that should be passed as the - ``stdin``, ``stdout``, or ``stderr`` argument of - :class:`subprocess.Popen` in order to set up the child end - appropriately. If ``given_value`` was not :data:`subprocess.PIPE`, - ``trio_stream`` will be ``None`` and ``subprocess_value`` will - equal ``given_value``. 
- """ - - if given_value == subprocess.PIPE: - maker, stream_cls = { - 0: (create_pipe_to_child_stdin, PipeSendStream), - 1: (create_pipe_from_child_output, PipeReceiveStream), - 2: (create_pipe_from_child_output, PipeReceiveStream), - }[child_fd] - - trio_end, subprocess_end = maker() - return stream_cls(trio_end), subprocess_end - - return None, given_value - class Process(AsyncResource): """Like :class:`subprocess.Popen`, but async. @@ -172,8 +70,14 @@ def __init__(self, args, *, stdin=None, stdout=None, stderr=None, **kwds): if kwds.get('bufsize', -1) != -1: raise ValueError("bufsize does not make sense for trio subprocess") - self.stdin, stdin = wrap_process_stream(0, stdin) - self.stdout, stdout = wrap_process_stream(1, stdout) + self.stdin = None + self.stdout = None + self.stderr = None + + if stdin == subprocess.PIPE: + self.stdin, stdin = create_pipe_to_child_stdin() + if stdout == subprocess.PIPE: + self.stdout, stdout = create_pipe_from_child_output() if stderr == subprocess.STDOUT: # If we created a pipe for stdout, pass the same pipe for # stderr. If stdout was some non-pipe thing (DEVNULL or a @@ -184,9 +88,8 @@ def __init__(self, args, *, stdin=None, stdout=None, stderr=None, **kwds): # is piped, stderr will be intermixed on the stdout stream. if stdout is not None: stderr = stdout - self.stderr = None - else: - self.stderr, stderr = wrap_process_stream(2, stderr) + elif stderr == subprocess.PIPE: + self.stderr, stderr = create_pipe_from_child_output() try: self._proc = subprocess.Popen( @@ -248,11 +151,12 @@ async def wait(self): that exits due to a signal will have its exit status reported as the negative of that signal number, e.g., -11 for ``SIGSEGV``. 
""" - while True: - if self.poll() is not None: - await _core.checkpoint() - return self.returncode - await wait_child_exiting(self.pid) + if self.poll() is None: + await wait_child_exiting(self._proc) + self._proc.wait() + else: + await _core.checkpoint() + return self.returncode def poll(self): """Forwards to :meth:`subprocess.Popen.poll`.""" @@ -366,42 +270,3 @@ async def read_output(stream, chunks): return subprocess.CompletedProcess( proc.args, proc.returncode, stdout, stderr ) - - -async def call(*popenargs, **kwargs): - """Like :func:`subprocess.call`, but async.""" - async with Process(*popenargs, **kwargs) as proc: - return await proc.wait() - - -async def check_call(*popenargs, **kwargs): - """Like :func:`subprocess.check_call`, but async.""" - async with Process(*popenargs, **kwargs) as proc: - retcode = await proc.wait() - if retcode: - raise subprocess.CalledProcessError(retcode, proc.args) - return 0 - - -async def check_output(*popenargs, timeout=None, deadline=None, **kwargs): - """Like :func:`subprocess.check_output`, but async. - - Like :func:`run`, this takes an optional ``timeout`` or ``deadline`` - argument; if the timeout expires or deadline passes, the child process - will be killed and its partial output wrapped up in a - :exc:`subprocess.TimeoutExpired` exception. You can also nest - :func:`check_output` in a normal Trio cancel scope to impose - a timeout without capturing partial output. - """ - if 'stdout' in kwargs: - raise ValueError("stdout argument not allowed, it will be overridden.") - - result = await run( - *popenargs, - stdout=subprocess.PIPE, - timeout=timeout, - deadline=deadline, - check=True, - **kwargs - ) - return result.stdout diff --git a/trio/_subprocess_platform/__init__.py b/trio/_subprocess_platform/__init__.py new file mode 100644 index 0000000000..cca33d69cb --- /dev/null +++ b/trio/_subprocess_platform/__init__.py @@ -0,0 +1,103 @@ +# Platform-specific subprocess bits'n'pieces. 
+ +import os +import subprocess +import sys +from typing import Tuple + +from .. import _core +from .._abc import SendStream, ReceiveStream + + +# Fallback versions of the functions provided -- implementations +# per OS are imported atop these at the bottom of the module. + +async def wait_child_exiting(process: subprocess.Popen) -> None: + """Block until the child process managed by ``process`` is exiting. + + When this function returns, it indicates that a call to + :meth:`subprocess.Popen.wait` will immediately be able to + return the process's exit status. The actual exit status is not + consumed by this call, since :class:`~subprocess.Popen` wants + to be able to do that. + """ + raise NotImplementedError from wait_child_exiting._error + + +async def create_pipe_to_child_stdin() -> Tuple[SendStream, int]: + """Create a new pipe suitable for sending data from this + process to the standard input of a child we're about to spawn. + + Returns: + A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is a + :class:`~trio.abc.SendStream` and ``subprocess_end`` is + something suitable for passing as the ``stdin`` argument of + :class:`subprocess.Popen`. + """ + raise NotImplementedError from create_pipe_to_child_stdin._error + + +async def create_pipe_from_child_output() -> Tuple[ReceiveStream, int]: + """Create a new pipe suitable for receiving data into this + process from the standard output or error stream of a child + we're about to spawn. + + Returns: + A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is a + :class:`~trio.abc.ReceiveStream` and ``subprocess_end`` is + something suitable for passing as the ``stdin`` argument of + :class:`subprocess.Popen`. 
+ """ + raise NotImplementedError from create_pipe_to_child_stdin._error + + +try: + if os.name == "nt": + from .windows import wait_child_exiting + elif hasattr(_core, "wait_kevent"): + from .kqueue import wait_child_exiting + else: + from .waitid import wait_child_exiting +except ImportError as ex: + wait_child_exiting._error = ex + +try: + if os.name == "posix": + from .._unix_pipes import PipeSendStream, PipeReceiveStream + + def create_pipe_to_child_stdin(): + rfd, wfd = os.pipe() + return PipeSendStream(wfd), rfd + + def create_pipe_from_child_output(): + rfd, wfd = os.pipe() + return PipeReceiveStream(rfd), wfd + + elif os.name == "nt": + from .._windows_pipes import PipeSendStream, PipeReceiveStream + + # This isn't exported or documented, but it's also not + # underscore-prefixed, and seems kosher to use. The asyncio docs + # for 3.5 included an example that imported socketpair from + # windows_utils (before socket.socketpair existed on Windows), and + # when asyncio.windows_utils.socketpair was removed in 3.7, the + # removal was mentioned in the release notes. 
+ from asyncio.windows_utils import pipe as windows_pipe + import msvcrt + + def create_pipe_to_child_stdin(): + # for stdin, we want the write end (our end) to use overlapped I/O + rh, wh = windows_pipe(overlapped=(False, True)) + return PipeSendStream(wh), msvcrt.open_osfhandle(rh, os.O_RDONLY) + + def create_pipe_from_child_output(): + # for stdout/err, it's the read end that's overlapped + rh, wh = windows_pipe(overlapped=(True, False)) + return PipeReceiveStream(rh), msvcrt.open_osfhandle(wh, 0) + + else: + raise ImportError("pipes not implemented on this platform") + +except ImportError as ex: + create_pipe_to_child_stdin._error = ex + create_pipe_from_child_output._error = ex diff --git a/trio/_platform/kqueue.py b/trio/_subprocess_platform/kqueue.py similarity index 84% rename from trio/_platform/kqueue.py rename to trio/_subprocess_platform/kqueue.py index 997422b59c..7db53e8786 100644 --- a/trio/_platform/kqueue.py +++ b/trio/_subprocess_platform/kqueue.py @@ -1,7 +1,11 @@ import select +import subprocess from .. 
import _core -async def wait_child_exiting(pid): +async def wait_child_exiting(process: subprocess.Popen) -> None: + if process.returncode is not None: + return + kqueue = _core.current_kqueue() try: from select import KQ_NOTE_EXIT @@ -12,7 +16,7 @@ async def wait_child_exiting(pid): KQ_NOTE_EXIT = 0x80000000 make_event = lambda flags: select.kevent( - pid, + process.pid, filter=select.KQ_FILTER_PROC, flags=flags, fflags=KQ_NOTE_EXIT @@ -37,4 +41,4 @@ def abort(_): kqueue.control([make_event(select.KQ_EV_DELETE)], 0) return _core.Abort.SUCCEEDED - await _core.wait_kevent(pid, select.KQ_FILTER_PROC, abort) + await _core.wait_kevent(process.pid, select.KQ_FILTER_PROC, abort) diff --git a/trio/_platform/waitid.py b/trio/_subprocess_platform/waitid.py similarity index 62% rename from trio/_platform/waitid.py rename to trio/_subprocess_platform/waitid.py index 867b87e126..e9a360ec53 100644 --- a/trio/_platform/waitid.py +++ b/trio/_subprocess_platform/waitid.py @@ -1,5 +1,8 @@ +import errno import math import os +import subprocess +import sys from .. 
import _core from .._sync import CapacityLimiter, Event @@ -49,12 +52,11 @@ def sync_wait_reapable(pid): else: WNOWAIT = 0x01000000 result = waitid_ffi.new("siginfo_t *") - while True: - if waitid(P_PID, pid, result, WEXITED | WNOWAIT) < 0: - got_errno = waitid_ffi.errno - if got_errno == errno.EINTR: - continue - raise OSError(got_errno, os.strerror(got_errno)) + while waitid(P_PID, pid, result, WEXITED | WNOWAIT) < 0: + got_errno = waitid_ffi.errno + if got_errno == errno.EINTR: + continue + raise OSError(got_errno, os.strerror(got_errno)) # adapted from @@ -62,27 +64,23 @@ def sync_wait_reapable(pid): waitid_limiter = CapacityLimiter(math.inf) -# RunVar mapping pid => Event to set when it completes -waitid_thread_results = _core.RunVar("waitid_thread_results") - -async def _waitid_system_task(pid: int) -> None: +async def _waitid_system_task(pid: int, event: Event) -> None: """Spawn a thread that waits for ``pid`` to exit, then wake any tasks that were waiting on it. """ # cancellable=True: if this task is cancelled, then we abandon the # thread to keep running waitpid in the background. Since this is # always run as a system task, this will only happen if the whole - # call to trio.run is shutting down, in which case the waitid_threads - # dictionary is too -- so, nothing to clean up. + # call to trio.run is shutting down. try: await run_sync_in_worker_thread( sync_wait_reapable, pid, cancellable=True, - limiter=waitid_limiter + limiter=waitid_limiter, ) - except Exception: + except OSError: # If waitid fails, waitpid will fail too, so it still makes # sense to wake up the callers of wait_process_exiting(). The # most likely reason for this error in practice is a child @@ -90,17 +88,31 @@ async def _waitid_system_task(pid: int) -> None: # ignored. 
pass finally: - waitid_thread_results.get().pop(pid).set() + event.set() + + +async def wait_child_exiting(process: subprocess.Popen) -> None: + # Logic of this function: + # - The first time we get called, we create an Event and start + # an instance of _waitid_system_task that will set the Event + # when waitid() completes. If that Event is set before + # we get cancelled, we're good. + # - Otherwise, a following call after the cancellation must + # reuse the Event created during the first call, lest we + # create an arbitrary number of threads waiting on the same + # process. + if process.returncode is not None: + return -async def wait_child_exiting(pid: int) -> None: + ATTR = "@trio_wait_event" # an unlikely attribute name, to be sure try: - return await waitid_thread_results.get()[pid].wait() - except LookupError: - waitid_thread_results.set({}) - except KeyError: - pass - finished = Event() - waitid_thread_results.get()[pid] = finished - _core.spawn_system_task(_waitid_system_task, pid) - await finished.wait() + event = getattr(process, ATTR) + except AttributeError: + event = Event() + setattr(process, ATTR, event) + _core.spawn_system_task(_waitid_system_task, process.pid, event) + + await event.wait() + # If not cancelled, there's no need to keep the event around anymore: + delattr(process, ATTR) diff --git a/trio/_subprocess_platform/windows.py b/trio/_subprocess_platform/windows.py new file mode 100644 index 0000000000..883df84ce0 --- /dev/null +++ b/trio/_subprocess_platform/windows.py @@ -0,0 +1,7 @@ +import subprocess +from ._wait_for_object import WaitForSingleObject + +async def wait_child_exiting(process: subprocess.Popen) -> None: + if process.returncode is not None: + return + await WaitForSingleObject(process._handle) diff --git a/trio/subprocess.py b/trio/subprocess.py index 86b3d9c5e9..b37a9f43c5 100644 --- a/trio/subprocess.py +++ b/trio/subprocess.py @@ -1,4 +1,4 @@ -from ._subprocess import Process, run, call, check_call, check_output 
+from ._subprocess import Process, run from subprocess import ( PIPE, STDOUT, DEVNULL, CalledProcessError, SubprocessError, TimeoutExpired, CompletedProcess diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index 6244ed074e..25be8f801b 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -141,28 +141,6 @@ async def test_run_timeout(): assert excinfo.value.stderr is None -async def test_call_and_check(): - assert await subprocess.call(["true"]) == 0 - assert await subprocess.call(["false"]) == 1 - - assert await subprocess.check_call(["true"]) == 0 - with pytest.raises(subprocess.CalledProcessError) as excinfo: - await subprocess.check_call(["false"]) - assert excinfo.value.returncode == 1 - assert excinfo.value.cmd == ["false"] - assert excinfo.value.output is None - - assert await subprocess.check_output(["echo", "hi"]) == b"hi\n" - with pytest.raises(subprocess.CalledProcessError) as excinfo: - await subprocess.check_output("echo lo; false", shell=True) - assert excinfo.value.returncode == 1 - assert excinfo.value.cmd == "echo lo; false" - assert excinfo.value.output == b"lo\n" - - with pytest.raises(ValueError): - await subprocess.check_output(["true"], stdout=subprocess.DEVNULL) - - async def test_run_check(): with pytest.raises(subprocess.CalledProcessError) as excinfo: await subprocess.run( @@ -262,9 +240,8 @@ async def test_wait_reapable_fails(): old_sigchld = signal.signal(signal.SIGCHLD, signal.SIG_IGN) try: # With SIGCHLD disabled, the wait() syscall will wait for the - # process to exit but then fail with ECHILD. Process.wait() - # has logic that suppresses wait() errors if the process has - # in fact exited; this test tries to exercise that logic. + # process to exit but then fail with ECHILD. Make sure we + # support this case as the stdlib subprocess module does. 
async with subprocess.Process(["sleep", "3600"]) as proc: async with _core.open_nursery() as nursery: nursery.start_soon(proc.wait) From ce78d8d3539f577566e2f3222e2b2776b10d1352 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Fri, 14 Dec 2018 22:44:05 -0500 Subject: [PATCH 15/49] yapfify --- trio/_subprocess.py | 3 ++- trio/_subprocess_platform/__init__.py | 1 - trio/_subprocess_platform/kqueue.py | 1 + trio/_subprocess_platform/waitid.py | 1 + trio/_subprocess_platform/windows.py | 1 + 5 files changed, 5 insertions(+), 2 deletions(-) diff --git a/trio/_subprocess.py b/trio/_subprocess.py index 1a51c096f9..593e90270d 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -9,7 +9,8 @@ from ._sync import CapacityLimiter, Lock from ._threads import run_sync_in_worker_thread from ._subprocess_platform import ( - wait_child_exiting, create_pipe_to_child_stdin, create_pipe_from_child_output + wait_child_exiting, create_pipe_to_child_stdin, + create_pipe_from_child_output ) __all__ = ["Process", "run"] diff --git a/trio/_subprocess_platform/__init__.py b/trio/_subprocess_platform/__init__.py index cca33d69cb..dea762ef15 100644 --- a/trio/_subprocess_platform/__init__.py +++ b/trio/_subprocess_platform/__init__.py @@ -11,7 +11,6 @@ # Fallback versions of the functions provided -- implementations # per OS are imported atop these at the bottom of the module. - async def wait_child_exiting(process: subprocess.Popen) -> None: """Block until the child process managed by ``process`` is exiting. diff --git a/trio/_subprocess_platform/kqueue.py b/trio/_subprocess_platform/kqueue.py index 7db53e8786..ffd9e76d28 100644 --- a/trio/_subprocess_platform/kqueue.py +++ b/trio/_subprocess_platform/kqueue.py @@ -2,6 +2,7 @@ import subprocess from .. 
import _core + async def wait_child_exiting(process: subprocess.Popen) -> None: if process.returncode is not None: return diff --git a/trio/_subprocess_platform/waitid.py b/trio/_subprocess_platform/waitid.py index e9a360ec53..068577db45 100644 --- a/trio/_subprocess_platform/waitid.py +++ b/trio/_subprocess_platform/waitid.py @@ -64,6 +64,7 @@ def sync_wait_reapable(pid): waitid_limiter = CapacityLimiter(math.inf) + async def _waitid_system_task(pid: int, event: Event) -> None: """Spawn a thread that waits for ``pid`` to exit, then wake any tasks that were waiting on it. diff --git a/trio/_subprocess_platform/windows.py b/trio/_subprocess_platform/windows.py index 883df84ce0..29d6a7aa36 100644 --- a/trio/_subprocess_platform/windows.py +++ b/trio/_subprocess_platform/windows.py @@ -1,6 +1,7 @@ import subprocess from ._wait_for_object import WaitForSingleObject + async def wait_child_exiting(process: subprocess.Popen) -> None: if process.returncode is not None: return From a1d70ca43c1c63c49021f7f6f8bac330edcc8676 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Fri, 14 Dec 2018 22:46:39 -0500 Subject: [PATCH 16/49] less aggressive timeout in the hope of reducing test flakiness --- trio/tests/test_subprocess.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index 25be8f801b..aa30654a53 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -6,6 +6,7 @@ import pytest from .. 
import _core, move_on_after, sleep_forever, subprocess +from .._core.tests.tutil import slow from ..testing import wait_all_tasks_blocked posix = os.name == "posix" @@ -112,6 +113,7 @@ async def test_run(): ) +@slow async def test_run_timeout(): data = b"1" * 65536 + b"2" * 65536 + b"3" * 65536 child_script = """ @@ -122,8 +124,8 @@ async def test_run_timeout(): """ for make_timeout_arg in ( - lambda: {"timeout": 0.25}, - lambda: {"deadline": _core.current_time() + 0.25} + lambda: {"timeout": 1.0}, + lambda: {"deadline": _core.current_time() + 1.0} ): with pytest.raises(subprocess.TimeoutExpired) as excinfo: await subprocess.run( @@ -134,9 +136,9 @@ async def test_run_timeout(): ) assert excinfo.value.cmd == [sys.executable, "-c", child_script] if "timeout" in make_timeout_arg(): - assert excinfo.value.timeout == 0.25 + assert excinfo.value.timeout == 1.0 else: - assert 0.2 < excinfo.value.timeout < 0.3 + assert 0.9 < excinfo.value.timeout < 1.1 assert excinfo.value.stdout == data[:32768] assert excinfo.value.stderr is None From 0ed693b0e5a3a0877db9a9eeb802059954b740fa Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Fri, 14 Dec 2018 23:01:27 -0500 Subject: [PATCH 17/49] placate flake8 --- trio/_subprocess_platform/__init__.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/trio/_subprocess_platform/__init__.py b/trio/_subprocess_platform/__init__.py index dea762ef15..174048bb16 100644 --- a/trio/_subprocess_platform/__init__.py +++ b/trio/_subprocess_platform/__init__.py @@ -52,11 +52,11 @@ async def create_pipe_from_child_output() -> Tuple[ReceiveStream, int]: try: if os.name == "nt": - from .windows import wait_child_exiting + from .windows import wait_child_exiting # noqa: F811 elif hasattr(_core, "wait_kevent"): - from .kqueue import wait_child_exiting + from .kqueue import wait_child_exiting # noqa: F811 else: - from .waitid import wait_child_exiting + from .waitid import wait_child_exiting # noqa: F811 except ImportError as 
ex: wait_child_exiting._error = ex @@ -64,11 +64,11 @@ async def create_pipe_from_child_output() -> Tuple[ReceiveStream, int]: if os.name == "posix": from .._unix_pipes import PipeSendStream, PipeReceiveStream - def create_pipe_to_child_stdin(): + def create_pipe_to_child_stdin(): # noqa: F811 rfd, wfd = os.pipe() return PipeSendStream(wfd), rfd - def create_pipe_from_child_output(): + def create_pipe_from_child_output(): # noqa: F811 rfd, wfd = os.pipe() return PipeReceiveStream(rfd), wfd @@ -84,12 +84,12 @@ def create_pipe_from_child_output(): from asyncio.windows_utils import pipe as windows_pipe import msvcrt - def create_pipe_to_child_stdin(): + def create_pipe_to_child_stdin(): # noqa: F811 # for stdin, we want the write end (our end) to use overlapped I/O rh, wh = windows_pipe(overlapped=(False, True)) return PipeSendStream(wh), msvcrt.open_osfhandle(rh, os.O_RDONLY) - def create_pipe_from_child_output(): + def create_pipe_from_child_output(): # noqa: F811 # for stdout/err, it's the read end that's overlapped rh, wh = windows_pipe(overlapped=(True, False)) return PipeReceiveStream(rh), msvcrt.open_osfhandle(wh, 0) From ccefeac219c47e0c92c3144f9c660e309d7361d0 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 15 Dec 2018 03:33:16 -0500 Subject: [PATCH 18/49] win32 support --- trio/_core/_io_windows.py | 106 ++++++++++++---- trio/_core/_windows_cffi.py | 28 ++++- trio/_subprocess_platform/windows.py | 4 +- trio/hazmat.py | 3 + .../{test_unix_pipes.py => test_pipes.py} | 46 ++++--- trio/tests/test_subprocess.py | 115 +++++++++++------- 6 files changed, 213 insertions(+), 89 deletions(-) rename trio/tests/{test_unix_pipes.py => test_pipes.py} (73%) diff --git a/trio/_core/_io_windows.py b/trio/_core/_io_windows.py index 44d298c469..f5424420e5 100644 --- a/trio/_core/_io_windows.py +++ b/trio/_core/_io_windows.py @@ -19,6 +19,7 @@ from ._windows_cffi import ( ffi, kernel32, + ntdll, INVALID_HANDLE_VALUE, raise_winerror, ErrorCodes, @@ -289,13 +290,14 @@ def 
current_iocp(self): @_public def register_with_iocp(self, handle): - handle = _handle(obj) + handle = _handle(handle) # https://msdn.microsoft.com/en-us/library/windows/desktop/aa363862(v=vs.85).aspx + # INVALID_PARAMETER seems to mean "already registered" _check(kernel32.CreateIoCompletionPort(handle, self._iocp, 0, 0)) @_public async def wait_overlapped(self, handle, lpOverlapped): - handle = _handle(obj) + handle = _handle(handle) if isinstance(lpOverlapped, int): lpOverlapped = ffi.cast("LPOVERLAPPED", lpOverlapped) if lpOverlapped in self._overlapped_waiters: @@ -313,16 +315,33 @@ def abort(raise_cancel_): # possible -- the docs are pretty unclear. nonlocal raise_cancel raise_cancel = raise_cancel_ - _check(kernel32.CancelIoEx(handle, lpOverlapped)) + try: + _check(kernel32.CancelIoEx(handle, lpOverlapped)) + except OSError as exc: + if exc.winerror == ErrorCodes.ERROR_NOT_FOUND: + # Too late to cancel. Presumably the completion will + # be coming in soon. (Unfortunately, if you make an + # overlapped request through this IOManager on a + # handle not registered with our IOCP, you wind up + # hanging here when you try to cancel it.) + pass + else: + raise TrioInternalError( + "CancelIoEx failed with unexpected error" + ) from exc return _core.Abort.FAILED await _core.wait_task_rescheduled(abort) if lpOverlapped.Internal != 0: - if lpOverlapped.Internal == ErrorCodes.ERROR_OPERATION_ABORTED: + # the lpOverlapped reports the error as an NT status code, + # which we must convert back to a Win32 error code before + # it will produce the right sorts of exceptions + code = ntdll.RtlNtStatusToDosError(lpOverlapped.Internal) + if code == ErrorCodes.ERROR_OPERATION_ABORTED: assert raise_cancel is not None raise_cancel() else: - raise_winerror(lpOverlapped.Internal) + raise_winerror(code) @_public @contextmanager @@ -372,21 +391,62 @@ def notify_socket_close(self, sock): ) _core.reschedule(task, outcome.Error(exc)) - # This has cffi-isms in it and is untested... 
but it demonstrates the - # logic we'll want when we start actually using overlapped I/O. - # - # @_public - # async def perform_overlapped(self, handle, submit_fn): - # # submit_fn(lpOverlapped) submits some I/O - # # it may raise an OSError with ERROR_IO_PENDING - # await _core.checkpoint_if_cancelled() - # self.register_with_iocp(handle) - # lpOverlapped = ffi.new("LPOVERLAPPED") - # try: - # submit_fn(lpOverlapped) - # except OSError as exc: - # if exc.winerror != Error.ERROR_IO_PENDING: - # await _core.cancel_shielded_checkpoint() - # raise - # await self.wait_overlapped(handle, lpOverlapped) - # return lpOverlapped + @_public + async def perform_overlapped(self, handle, submit_fn): + # submit_fn(lpOverlapped) submits some I/O + # it may raise an OSError with ERROR_IO_PENDING + # the handle must already be registered using + # register_with_iocp(handle) + await _core.checkpoint_if_cancelled() + lpOverlapped = ffi.new("LPOVERLAPPED") + try: + submit_fn(lpOverlapped) + except OSError as exc: + if exc.winerror != ErrorCodes.ERROR_IO_PENDING: + await _core.cancel_shielded_checkpoint() + raise + await self.wait_overlapped(handle, lpOverlapped) + return lpOverlapped + + @_public + async def write_overlapped(self, handle, data, file_offset=0): + def submit_write(lpOverlapped): + # yes, these are the real documented names + offset_fields = lpOverlapped.DUMMYUNIONNAME.DUMMYSTRUCTNAME + offset_fields.Offset = file_offset & 0xffffffff + offset_fields.OffsetHigh = file_offset >> 32 + _check( + kernel32.WriteFile( + _handle(handle), + ffi.cast("LPCVOID", ffi.from_buffer(data)), + len(data), + ffi.NULL, + lpOverlapped, + ) + ) + lpOverlapped = await self.perform_overlapped(handle, submit_write) + # this is "number of bytes transferred" + return lpOverlapped.InternalHigh + + @_public + async def readinto_overlapped(self, handle, buffer, file_offset=0): + if memoryview(buffer).readonly: + raise TypeError( + "you must pass a mutable buffer such as a bytearray, not bytes" + ) + 
+ def submit_read(lpOverlapped): + offset_fields = lpOverlapped.DUMMYUNIONNAME.DUMMYSTRUCTNAME + offset_fields.Offset = file_offset & 0xffffffff + offset_fields.OffsetHigh = file_offset >> 32 + _check( + kernel32.ReadFile( + _handle(handle), + ffi.cast("LPVOID", ffi.from_buffer(buffer)), + len(buffer), + ffi.NULL, + lpOverlapped, + ) + ) + lpOverlapped = await self.perform_overlapped(handle, submit_read) + return lpOverlapped.InternalHigh diff --git a/trio/_core/_windows_cffi.py b/trio/_core/_windows_cffi.py index db294618bb..38e9c79e30 100644 --- a/trio/_core/_windows_cffi.py +++ b/trio/_core/_windows_cffi.py @@ -11,8 +11,11 @@ typedef PVOID HANDLE; typedef unsigned long DWORD; typedef unsigned long ULONG; +typedef unsigned int NTSTATUS; typedef unsigned long u_long; typedef ULONG *PULONG; +typedef const void *LPCVOID; +typedef void *LPVOID; typedef uintptr_t ULONG_PTR; typedef uintptr_t UINT_PTR; @@ -78,6 +81,22 @@ _In_opt_ LPOVERLAPPED lpOverlapped ); +BOOL WriteFile( + HANDLE hFile, + LPCVOID lpBuffer, + DWORD nNumberOfBytesToWrite, + LPDWORD lpNumberOfBytesWritten, + LPOVERLAPPED lpOverlapped +); + +BOOL ReadFile( + HANDLE hFile, + LPVOID lpBuffer, + DWORD nNumberOfBytesToRead, + LPDWORD lpNumberOfBytesRead, + LPOVERLAPPED lpOverlapped +); + BOOL WINAPI SetConsoleCtrlHandler( _In_opt_ void* HandlerRoutine, _In_ BOOL Add @@ -110,10 +129,8 @@ DWORD dwMilliseconds ); -HANDLE OpenProcess( - DWORD dwDesiredAccess, - BOOL bInheritHandle, - DWORD dwProcessId +ULONG RtlNtStatusToDosError( + NTSTATUS Status ); """ @@ -136,6 +153,7 @@ ffi.cdef(LIB) kernel32 = ffi.dlopen("kernel32.dll") +ntdll = ffi.dlopen("ntdll.dll") INVALID_HANDLE_VALUE = ffi.cast("HANDLE", -1) @@ -173,3 +191,5 @@ class ErrorCodes(enum.IntEnum): ERROR_OPERATION_ABORTED = 995 ERROR_ABANDONED_WAIT_0 = 735 ERROR_INVALID_HANDLE = 6 + ERROR_INVALID_PARMETER = 87 + ERROR_NOT_FOUND = 1168 diff --git a/trio/_subprocess_platform/windows.py b/trio/_subprocess_platform/windows.py index 29d6a7aa36..6b6118a44c 
100644 --- a/trio/_subprocess_platform/windows.py +++ b/trio/_subprocess_platform/windows.py @@ -1,8 +1,8 @@ import subprocess -from ._wait_for_object import WaitForSingleObject +from .._wait_for_object import WaitForSingleObject async def wait_child_exiting(process: subprocess.Popen) -> None: if process.returncode is not None: return - await WaitForSingleObject(process._handle) + await WaitForSingleObject(int(process._handle)) diff --git a/trio/hazmat.py b/trio/hazmat.py index d39e51d715..9a5ce34075 100644 --- a/trio/hazmat.py +++ b/trio/hazmat.py @@ -51,6 +51,9 @@ "register_with_iocp", "wait_overlapped", "monitor_completion_key", + "perform_overlapped", + "write_overlapped", + "readinto_overlapped", ] from . import _core diff --git a/trio/tests/test_unix_pipes.py b/trio/tests/test_pipes.py similarity index 73% rename from trio/tests/test_unix_pipes.py rename to trio/tests/test_pipes.py index 48afe4cbd7..a86b48aa95 100644 --- a/trio/tests/test_unix_pipes.py +++ b/trio/tests/test_pipes.py @@ -5,23 +5,25 @@ import pytest from .._core.tests.tutil import gc_collect_harder -from .. import _core +from .. 
import _core, move_on_after from ..testing import wait_all_tasks_blocked, check_one_way_stream -posix = os.name == "posix" - -pytestmark = pytest.mark.skipif( - not posix, reason="pipes are only supported on posix" -) - -if posix: +posix = False +if os.name == "posix": from .._unix_pipes import PipeSendStream, PipeReceiveStream, make_pipe + posix = True +elif os.name == "nt": + from .._windows_pipes import PipeSendStream, PipeReceiveStream, make_pipe +else: + pytestmark = pytest.mark.skip("pipes not supported on this OS") +@pytest.mark.skipif(not posix, reason="uses posix file descriptors") async def test_send_pipe(): r, w = os.pipe() send = PipeSendStream(w) - assert send.fileno() == w + if posix: + assert send.fileno() == w await send.send_all(b"123") assert (os.read(r, 8)) == b"123" @@ -30,6 +32,7 @@ async def test_send_pipe(): send._closed = True +@pytest.mark.skipif(not posix, reason="uses posix file descriptors") async def test_receive_pipe(): r, w = os.pipe() recv = PipeReceiveStream(r) @@ -80,10 +83,13 @@ async def test_pipe_errors(): with pytest.raises(TypeError): PipeReceiveStream(None) - with pytest.raises(ValueError): - await PipeReceiveStream(0).receive_some(0) + if posix: + # can only construct with invalid fd on posix + with pytest.raises(ValueError): + await PipeReceiveStream(0).receive_some(0) +@pytest.mark.skipif(not posix, reason="uses posix file descriptors") async def test_del(): w, r = await make_pipe() f1, f2 = w.fileno(), r.fileno() @@ -107,13 +113,14 @@ async def test_async_with(): assert w._closed assert r._closed - with pytest.raises(OSError) as excinfo: - os.close(w.fileno()) - assert excinfo.value.errno == errno.EBADF + if posix: + with pytest.raises(OSError) as excinfo: + os.close(w.fileno()) + assert excinfo.value.errno == errno.EBADF - with pytest.raises(OSError) as excinfo: - os.close(r.fileno()) - assert excinfo.value.errno == errno.EBADF + with pytest.raises(OSError) as excinfo: + os.close(r.fileno()) + assert excinfo.value.errno 
== errno.EBADF async def make_clogged_pipe(): @@ -139,8 +146,9 @@ async def make_clogged_pipe(): os.write(s.fileno(), b"x" * buf_size * 2) except BlockingIOError: pass - return s, r async def test_pipe_fully(): - await check_one_way_stream(make_pipe, make_clogged_pipe) + # passing make_clogged_pipe tests wait_send_all_might_not_block, and we + # can't implement that on Windows + await check_one_way_stream(make_pipe, make_clogged_pipe if posix else None) diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index aa30654a53..1f1297227c 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -10,25 +10,49 @@ from ..testing import wait_all_tasks_blocked posix = os.name == "posix" +if posix: + from signal import SIGKILL, SIGTERM, SIGINT +else: + SIGKILL, SIGTERM, SIGINT = None, None, None + + +def cmd_switch(posix_option, windows_pycode): + return posix_option if posix else [ + sys.executable, "-c", "import sys; " + windows_pycode + ] + +EXIT_TRUE = cmd_switch(["true"], "sys.exit(0)") +EXIT_FALSE = cmd_switch(["false"], "sys.exit(1)") +CAT = cmd_switch(["cat"], "sys.stdout.buffer.write(sys.stdin.buffer.read())") +def SLEEP(seconds): + return cmd_switch( + ["sleep", str(seconds)], + "import time; time.sleep({})".format(seconds) + ) + -pytestmark = pytest.mark.skipif(not posix, reason="no windows support yet") +def got_signal(proc, sig): + if posix: + return proc.returncode == -sig + else: + return proc.returncode != 0 async def test_basic(): - async with subprocess.Process(["true"]) as proc: + async with subprocess.Process(EXIT_TRUE) as proc: assert proc.returncode is None assert proc.returncode == 0 async def test_kill_when_context_cancelled(): with move_on_after(0) as scope: - async with subprocess.Process(["sleep", "10"]) as proc: + async with subprocess.Process(SLEEP(10)) as proc: assert proc.poll() is None # Process context entry is synchronous, so this is the # only checkpoint: await trio.sleep_forever() assert 
scope.cancelled_caught - assert proc.returncode == -signal.SIGKILL + assert got_signal(proc, SIGKILL) COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR = [ @@ -75,16 +99,16 @@ async def check_output(stream, expected): async def test_run(): data = bytes(random.randint(0, 255) for _ in range(2**18)) - result = await subprocess.run(["cat"], input=data, stdout=subprocess.PIPE) - assert result.args == ["cat"] + result = await subprocess.run(CAT, input=data, stdout=subprocess.PIPE) + assert result.args == CAT assert result.returncode == 0 assert result.stdout == data assert result.stderr is None result = await subprocess.run( - ["cat"], stdin=subprocess.PIPE, stdout=subprocess.PIPE + CAT, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) - assert result.args == ["cat"] + assert result.args == CAT assert result.returncode == 0 assert result.stdout == b"" assert result.stderr is None @@ -103,13 +127,13 @@ async def test_run(): with pytest.raises(ValueError): # can't use both input and stdin await subprocess.run( - ["cat"], input=b"la di dah", stdin=subprocess.PIPE + CAT, input=b"la di dah", stdin=subprocess.PIPE ) with pytest.raises(ValueError): # can't use both timeout and deadline await subprocess.run( - ["true"], timeout=1, deadline=_core.current_time() + EXIT_TRUE, timeout=1, deadline=_core.current_time() ) @@ -144,14 +168,19 @@ async def test_run_timeout(): async def test_run_check(): + cmd = cmd_switch( + "echo test >&2; false", + "sys.stderr.buffer.write(b'test\\n'); sys.exit(1)", + ) + with pytest.raises(subprocess.CalledProcessError) as excinfo: await subprocess.run( - "echo test >&2; false", - shell=True, + cmd, + shell=posix, stderr=subprocess.PIPE, check=True, ) - assert excinfo.value.cmd == "echo test >&2; false" + assert excinfo.value.cmd == cmd assert excinfo.value.returncode == 1 assert excinfo.value.stderr == b"test\n" assert excinfo.value.stdout is None @@ -190,30 +219,31 @@ async def test_stderr_stdout(): # this one hits the branch where stderr=STDOUT but stdout 
# is not redirected - result = await subprocess.run(["cat"], input=b"", stderr=subprocess.STDOUT) + result = await subprocess.run(CAT, input=b"", stderr=subprocess.STDOUT) assert result.returncode == 0 assert result.stdout is None assert result.stderr is None - try: - r, w = os.pipe() - - async with subprocess.Process( - COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, - stdin=subprocess.PIPE, - stdout=w, - stderr=subprocess.STDOUT, - ) as proc: - os.close(w) - assert proc.stdout is None - assert proc.stderr is None - await proc.stdin.send_all(b"1234") - await proc.stdin.aclose() - assert await proc.wait() == 0 - assert os.read(r, 4096) == b"12344321" - assert os.read(r, 4096) == b"" - finally: - os.close(r) + if posix: + try: + r, w = os.pipe() + + async with subprocess.Process( + COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, + stdin=subprocess.PIPE, + stdout=w, + stderr=subprocess.STDOUT, + ) as proc: + os.close(w) + assert proc.stdout is None + assert proc.stderr is None + await proc.stdin.send_all(b"1234") + await proc.stdin.aclose() + assert await proc.wait() == 0 + assert os.read(r, 4096) == b"12344321" + assert os.read(r, 4096) == b"" + finally: + os.close(r) async def test_errors(): @@ -226,25 +256,28 @@ async def test_errors(): async def test_signals(): async def test_one_signal(send_it, signum): with move_on_after(1.0) as scope: - async with subprocess.Process(["sleep", "3600"]) as proc: + async with subprocess.Process(SLEEP(3600)) as proc: send_it(proc) assert not scope.cancelled_caught - assert proc.returncode == -signum + if posix: + assert proc.returncode == -signum + else: + assert proc.returncode != 0 - await test_one_signal(subprocess.Process.kill, signal.SIGKILL) - await test_one_signal(subprocess.Process.terminate, signal.SIGTERM) - await test_one_signal( - lambda proc: proc.send_signal(signal.SIGINT), signal.SIGINT - ) + await test_one_signal(subprocess.Process.kill, SIGKILL) + await test_one_signal(subprocess.Process.terminate, SIGTERM) + if posix: 
+ await test_one_signal(lambda proc: proc.send_signal(SIGINT), SIGINT) +@pytest.mark.skipif(not posix, reason="POSIX specific") async def test_wait_reapable_fails(): old_sigchld = signal.signal(signal.SIGCHLD, signal.SIG_IGN) try: # With SIGCHLD disabled, the wait() syscall will wait for the # process to exit but then fail with ECHILD. Make sure we # support this case as the stdlib subprocess module does. - async with subprocess.Process(["sleep", "3600"]) as proc: + async with subprocess.Process(SLEEP(3600)) as proc: async with _core.open_nursery() as nursery: nursery.start_soon(proc.wait) await wait_all_tasks_blocked() From ab43b2b9f309b014575d0ac950fcc339520434f7 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 15 Dec 2018 03:35:09 -0500 Subject: [PATCH 19/49] merge with new layout of hazmat --- trio/hazmat.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/trio/hazmat.py b/trio/hazmat.py index eaab2ea3dd..da63f62cd8 100644 --- a/trio/hazmat.py +++ b/trio/hazmat.py @@ -47,7 +47,8 @@ try: from ._core import ( current_iocp, register_with_iocp, wait_overlapped, - monitor_completion_key + monitor_completion_key, perform_overlapped, + readinto_overlapped, write_overlapped, ) except ImportError: pass From b21f308e02a0f7e9677225603ec1ffc0156400eb Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 15 Dec 2018 03:38:29 -0500 Subject: [PATCH 20/49] yapfify --- trio/_core/_io_windows.py | 2 ++ trio/hazmat.py | 10 +++++++--- trio/tests/test_subprocess.py | 10 +++++----- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/trio/_core/_io_windows.py b/trio/_core/_io_windows.py index f5424420e5..e162b328a9 100644 --- a/trio/_core/_io_windows.py +++ b/trio/_core/_io_windows.py @@ -424,6 +424,7 @@ def submit_write(lpOverlapped): lpOverlapped, ) ) + lpOverlapped = await self.perform_overlapped(handle, submit_write) # this is "number of bytes transferred" return lpOverlapped.InternalHigh @@ -448,5 +449,6 @@ def submit_read(lpOverlapped): 
lpOverlapped, ) ) + lpOverlapped = await self.perform_overlapped(handle, submit_read) return lpOverlapped.InternalHigh diff --git a/trio/hazmat.py b/trio/hazmat.py index da63f62cd8..014386e132 100644 --- a/trio/hazmat.py +++ b/trio/hazmat.py @@ -46,9 +46,13 @@ # Windows symbols try: from ._core import ( - current_iocp, register_with_iocp, wait_overlapped, - monitor_completion_key, perform_overlapped, - readinto_overlapped, write_overlapped, + current_iocp, + register_with_iocp, + wait_overlapped, + monitor_completion_key, + perform_overlapped, + readinto_overlapped, + write_overlapped, ) except ImportError: pass diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index 1f1297227c..d6d2822079 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -21,13 +21,15 @@ def cmd_switch(posix_option, windows_pycode): sys.executable, "-c", "import sys; " + windows_pycode ] + EXIT_TRUE = cmd_switch(["true"], "sys.exit(0)") EXIT_FALSE = cmd_switch(["false"], "sys.exit(1)") CAT = cmd_switch(["cat"], "sys.stdout.buffer.write(sys.stdin.buffer.read())") + + def SLEEP(seconds): return cmd_switch( - ["sleep", str(seconds)], - "import time; time.sleep({})".format(seconds) + ["sleep", str(seconds)], "import time; time.sleep({})".format(seconds) ) @@ -126,9 +128,7 @@ async def test_run(): with pytest.raises(ValueError): # can't use both input and stdin - await subprocess.run( - CAT, input=b"la di dah", stdin=subprocess.PIPE - ) + await subprocess.run(CAT, input=b"la di dah", stdin=subprocess.PIPE) with pytest.raises(ValueError): # can't use both timeout and deadline From 94b8416de82db468c25beb5e66fc6dc826bcc58a Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 15 Dec 2018 03:40:07 -0500 Subject: [PATCH 21/49] fix accidental change to make_clogged_pipe --- trio/tests/test_pipes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/trio/tests/test_pipes.py b/trio/tests/test_pipes.py index a86b48aa95..3b91d0e54c 100644 --- 
a/trio/tests/test_pipes.py +++ b/trio/tests/test_pipes.py @@ -146,6 +146,7 @@ async def make_clogged_pipe(): os.write(s.fileno(), b"x" * buf_size * 2) except BlockingIOError: pass + return s, r async def test_pipe_fully(): From e35f11c12346692ec15af6ed1ca81819de8881ff Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 15 Dec 2018 03:47:03 -0500 Subject: [PATCH 22/49] add missing file --- trio/_windows_pipes.py | 123 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) create mode 100644 trio/_windows_pipes.py diff --git a/trio/_windows_pipes.py b/trio/_windows_pipes.py new file mode 100644 index 0000000000..6d357ee5d4 --- /dev/null +++ b/trio/_windows_pipes.py @@ -0,0 +1,123 @@ +import os +from typing import Tuple + +from . import _core +from ._abc import SendStream, ReceiveStream +from ._util import ConflictDetector +from ._core._windows_cffi import _handle, raise_winerror, kernel32, ffi + +__all__ = ["PipeSendStream", "PipeReceiveStream", "make_pipe"] + + +class _PipeMixin: + def __init__(self, pipe_handle: int) -> None: + self._pipe = _handle(pipe_handle) + _core.register_with_iocp(self._pipe) + self._closed = False + self._conflict_detector = ConflictDetector( + "another task is currently {}ing data on this pipe".format( + type(self).__name__[4:-5].lower() # "send" or "receive" + ) + ) + + def _close(self): + if self._closed: + return + + self._closed = True + if not kernel32.CloseHandle(self._pipe): + raise_winerror() + + async def aclose(self): + self._close() + await _core.checkpoint() + + def __del__(self): + self._close() + + +class PipeSendStream(_PipeMixin, SendStream): + """Represents a send stream over a Windows named pipe that has been + opened in OVERLAPPED mode. 
+ """ + + async def send_all(self, data: bytes): + # adapted from the SocketStream code + async with self._conflict_detector: + if self._closed: + raise _core.ClosedResourceError("this pipe is already closed") + + if not data: + return + + # adapted from the SocketStream code + length = len(data) + with memoryview(data) as view: + total_sent = 0 + while total_sent < length: + # Only send 64K at a time. IOCP buffers can sometimes + # get pinned in kernel memory and we don't want to use + # an arbitrary amount. + with view[total_sent:total_sent + 65536] as chunk: + try: + total_sent += await _core.write_overlapped( + self._pipe, chunk + ) + except BrokenPipeError as ex: + # BrokenPipeError seems to be used to deliver + # "your side of the pipe got closed by someone + # else" too + if self._closed: + raise _core.ClosedResourceError( + "another task closed this pipe" + ) from None + else: + raise _core.BrokenResourceError from None + + async def wait_send_all_might_not_block(self) -> None: + async with self._conflict_detector: + if self._closed: + raise _core.ClosedResourceError("This pipe is already closed") + + # not implemented yet, and probably not needed + pass + + +class PipeReceiveStream(_PipeMixin, ReceiveStream): + """Represents a receive stream over an os.pipe object.""" + + async def receive_some(self, max_bytes: int) -> bytes: + async with self._conflict_detector: + if self._closed: + raise _core.ClosedResourceError("this pipe is already closed") + + if not isinstance(max_bytes, int): + raise TypeError("max_bytes must be integer >= 1") + + if max_bytes < 1: + raise ValueError("max_bytes must be integer >= 1") + + buffer = bytearray(max_bytes) + try: + size = await _core.readinto_overlapped(self._pipe, buffer) + except BrokenPipeError: + if self._closed: + raise _core.ClosedResourceError( + "another task closed this pipe" + ) from None + + # Windows raises BrokenPipeError on one end of a pipe + # whenever the other end closes, regardless of direction. 
+ # Convert this to the Unix behavior of returning EOF to the + # reader when the writer closes. + return b"" + else: + del buffer[size:] + return buffer + + +async def make_pipe() -> Tuple[PipeSendStream, PipeReceiveStream]: + """Makes a new pair of pipes.""" + from asyncio.windows_utils import pipe + (r, w) = pipe() + return PipeSendStream(w), PipeReceiveStream(r) From fc6e60afcd0d26a0f9172024ab71a164a8dedf3c Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 15 Dec 2018 15:01:24 -0500 Subject: [PATCH 23/49] fix unused variable warning --- trio/_windows_pipes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trio/_windows_pipes.py b/trio/_windows_pipes.py index 6d357ee5d4..4d5cadfe79 100644 --- a/trio/_windows_pipes.py +++ b/trio/_windows_pipes.py @@ -63,7 +63,7 @@ async def send_all(self, data: bytes): total_sent += await _core.write_overlapped( self._pipe, chunk ) - except BrokenPipeError as ex: + except BrokenPipeError: # BrokenPipeError seems to be used to deliver # "your side of the pipe got closed by someone # else" too From 869e42bcc5758c8553760279760f8a75682b90db Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 15 Dec 2018 17:20:39 -0500 Subject: [PATCH 24/49] attempt to fix remaining coverage issues --- .travis.yml | 3 ++ ci/travis.sh | 4 +-- trio/_core/_io_windows.py | 16 +++++++--- trio/_core/_windows_cffi.py | 11 +++++++ trio/_core/tests/test_windows.py | 42 +++++++++++++++++++++++++ trio/_subprocess_platform/__init__.py | 15 ++++++--- trio/_subprocess_platform/kqueue.py | 3 -- trio/_subprocess_platform/waitid.py | 3 -- trio/_subprocess_platform/windows.py | 2 -- trio/_windows_pipes.py | 10 +----- trio/tests/test_pipes.py | 34 +++++++++++++++----- trio/tests/test_subprocess.py | 45 +++++++++++++++++++++++++-- 12 files changed, 150 insertions(+), 38 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1a193f6e18..263928320b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,6 +34,9 @@ matrix: - os: osx language: 
generic env: MACPYTHON=3.7.1 + - os: osx + language: generic + env: PYPY_NIGHTLY_BRANCH=py3.6 script: - ci/travis.sh diff --git a/ci/travis.sh b/ci/travis.sh index e125407d21..0886f200f7 100755 --- a/ci/travis.sh +++ b/ci/travis.sh @@ -4,7 +4,7 @@ set -ex git rev-parse HEAD -if [ "$TRAVIS_OS_NAME" = "osx" ]; then +if [ "$TRAVIS_OS_NAME" = "osx" -a "$PYPY_NIGHTLY_BRANCH" = "" ]; then curl -Lo macpython.pkg https://www.python.org/ftp/python/${MACPYTHON}/python-${MACPYTHON}-macosx10.6.pkg sudo installer -pkg macpython.pkg -target / ls /Library/Frameworks/Python.framework/Versions/*/bin/ @@ -17,7 +17,7 @@ if [ "$TRAVIS_OS_NAME" = "osx" ]; then fi if [ "$PYPY_NIGHTLY_BRANCH" != "" ]; then - curl -fLo pypy.tar.bz2 http://buildbot.pypy.org/nightly/${PYPY_NIGHTLY_BRANCH}/pypy-c-jit-latest-linux64.tar.bz2 + curl -fLo pypy.tar.bz2 http://buildbot.pypy.org/nightly/${PYPY_NIGHTLY_BRANCH}/pypy-c-jit-latest-${TRAVIS_OS_NAME}64.tar.bz2 if [ ! -s pypy.tar.bz2 ]; then # We know: # - curl succeeded (200 response code; -f means "exit with error if diff --git a/trio/_core/_io_windows.py b/trio/_core/_io_windows.py index e162b328a9..a20a337a1a 100644 --- a/trio/_core/_io_windows.py +++ b/trio/_core/_io_windows.py @@ -292,7 +292,8 @@ def current_iocp(self): def register_with_iocp(self, handle): handle = _handle(handle) # https://msdn.microsoft.com/en-us/library/windows/desktop/aa363862(v=vs.85).aspx - # INVALID_PARAMETER seems to mean "already registered" + # INVALID_PARAMETER seems to be used for both "can't register + # because not opened in OVERLAPPED mode" and "already registered" _check(kernel32.CreateIoCompletionPort(handle, self._iocp, 0, 0)) @_public @@ -317,7 +318,7 @@ def abort(raise_cancel_): raise_cancel = raise_cancel_ try: _check(kernel32.CancelIoEx(handle, lpOverlapped)) - except OSError as exc: + except OSError as exc: # pragma: no cover if exc.winerror == ErrorCodes.ERROR_NOT_FOUND: # Too late to cancel. Presumably the completion will # be coming in soon. 
(Unfortunately, if you make an @@ -338,8 +339,15 @@ def abort(raise_cancel_): # it will produce the right sorts of exceptions code = ntdll.RtlNtStatusToDosError(lpOverlapped.Internal) if code == ErrorCodes.ERROR_OPERATION_ABORTED: - assert raise_cancel is not None - raise_cancel() + if raise_cancel is not None: + raise_cancel() + else: + # We didn't request this cancellation, so assume + # it happened due to the underlying handle being + # closed before the operation could complete. + raise _core.ClosedResourceError( + "another task closed this resource" + ) else: raise_winerror(code) diff --git a/trio/_core/_windows_cffi.py b/trio/_core/_windows_cffi.py index 38e9c79e30..5c59c2cafe 100644 --- a/trio/_core/_windows_cffi.py +++ b/trio/_core/_windows_cffi.py @@ -16,6 +16,7 @@ typedef ULONG *PULONG; typedef const void *LPCVOID; typedef void *LPVOID; +typedef const wchar_t *LPCWSTR; typedef uintptr_t ULONG_PTR; typedef uintptr_t UINT_PTR; @@ -56,6 +57,16 @@ _In_ DWORD NumberOfConcurrentThreads ); +HANDLE CreateFileW( + LPCWSTR lpFileName, + DWORD dwDesiredAccess, + DWORD dwShareMode, + LPSECURITY_ATTRIBUTES lpSecurityAttributes, + DWORD dwCreationDisposition, + DWORD dwFlagsAndAttributes, + HANDLE hTemplateFile +); + BOOL WINAPI CloseHandle( _In_ HANDLE hObject ); diff --git a/trio/_core/tests/test_windows.py b/trio/_core/tests/test_windows.py index 18436c7704..04fd3b5226 100644 --- a/trio/_core/tests/test_windows.py +++ b/trio/_core/tests/test_windows.py @@ -1,4 +1,5 @@ import os +import tempfile import pytest @@ -45,5 +46,46 @@ async def post(key): print("end loop") +async def test_readinto_overlapped(): + data = b"1" * 1024 + b"2" * 1024 + b"3" * 1024 + b"4" * 1024 + buffer = bytearray(len(data)) + + with tempfile.TemporaryDirectory() as tdir: + tfile = os.path.join(tdir, "numbers.txt") + with open(tfile, "wb") as fp: + fp.write(data) + fp.flush() + + rawname = tfile.encode("utf-16le") + handle = kernel32.CreateFileW( + ffi.cast("LPCWSTR", ffi.from_buffer(rawname)), 
+ 0x80000000, # GENERIC_READ + 0, # no sharing + ffi.NULL, # no security attributes + 3, # OPEN_EXISTING + 0x40000000, # FILE_FLAG_OVERLAPPED + ffi.NULL, # no template file + ) + _core.register_with_iocp(handle) + + async def read_region(start, end): + await _core.readinto_overlapped( + handle, + memoryview(buffer)[start:end], start + ) + + try: + async with _core.open_nursery() as nursery: + for start in range(0, 4096, 512): + nursery.start_soon(read_region, start, start + 512) + + assert buffer == data + + with pytest.raises(TypeError): + await _core.readinto_overlapped(handle, b"immutable") + finally: + kernel32.CloseHandle(handle) + + # XX test setting the iomanager._iocp to something weird to make sure that the # IOCP thread can send exceptions back to the main thread diff --git a/trio/_subprocess_platform/__init__.py b/trio/_subprocess_platform/__init__.py index 174048bb16..2fbeb96870 100644 --- a/trio/_subprocess_platform/__init__.py +++ b/trio/_subprocess_platform/__init__.py @@ -14,13 +14,16 @@ async def wait_child_exiting(process: subprocess.Popen) -> None: """Block until the child process managed by ``process`` is exiting. + It is invalid to call this function if the process has already + been waited on; that is, ``process.returncode`` must be None. + When this function returns, it indicates that a call to :meth:`subprocess.Popen.wait` will immediately be able to return the process's exit status. The actual exit status is not consumed by this call, since :class:`~subprocess.Popen` wants - to be able to do that. + to be able to do that itself. """ - raise NotImplementedError from wait_child_exiting._error + raise NotImplementedError from wait_child_exiting._error # pragma: no cover async def create_pipe_to_child_stdin() -> Tuple[SendStream, int]: @@ -33,6 +36,7 @@ async def create_pipe_to_child_stdin() -> Tuple[SendStream, int]: something suitable for passing as the ``stdin`` argument of :class:`subprocess.Popen`. 
""" + # pragma: no cover raise NotImplementedError from create_pipe_to_child_stdin._error @@ -47,6 +51,7 @@ async def create_pipe_from_child_output() -> Tuple[ReceiveStream, int]: something suitable for passing as the ``stdin`` argument of :class:`subprocess.Popen`. """ + # pragma: no cover raise NotImplementedError from create_pipe_to_child_stdin._error @@ -57,7 +62,7 @@ async def create_pipe_from_child_output() -> Tuple[ReceiveStream, int]: from .kqueue import wait_child_exiting # noqa: F811 else: from .waitid import wait_child_exiting # noqa: F811 -except ImportError as ex: +except ImportError as ex: # pragma: no cover wait_child_exiting._error = ex try: @@ -94,9 +99,9 @@ def create_pipe_from_child_output(): # noqa: F811 rh, wh = windows_pipe(overlapped=(True, False)) return PipeReceiveStream(rh), msvcrt.open_osfhandle(wh, 0) - else: + else: # pragma: no cover raise ImportError("pipes not implemented on this platform") -except ImportError as ex: +except ImportError as ex: # pragma: no cover create_pipe_to_child_stdin._error = ex create_pipe_from_child_output._error = ex diff --git a/trio/_subprocess_platform/kqueue.py b/trio/_subprocess_platform/kqueue.py index ffd9e76d28..95625ec285 100644 --- a/trio/_subprocess_platform/kqueue.py +++ b/trio/_subprocess_platform/kqueue.py @@ -4,9 +4,6 @@ async def wait_child_exiting(process: subprocess.Popen) -> None: - if process.returncode is not None: - return - kqueue = _core.current_kqueue() try: from select import KQ_NOTE_EXIT diff --git a/trio/_subprocess_platform/waitid.py b/trio/_subprocess_platform/waitid.py index 068577db45..615568a065 100644 --- a/trio/_subprocess_platform/waitid.py +++ b/trio/_subprocess_platform/waitid.py @@ -103,9 +103,6 @@ async def wait_child_exiting(process: subprocess.Popen) -> None: # create an arbitrary number of threads waiting on the same # process. 
- if process.returncode is not None: - return - ATTR = "@trio_wait_event" # an unlikely attribute name, to be sure try: event = getattr(process, ATTR) diff --git a/trio/_subprocess_platform/windows.py b/trio/_subprocess_platform/windows.py index 6b6118a44c..b9b4e85d2b 100644 --- a/trio/_subprocess_platform/windows.py +++ b/trio/_subprocess_platform/windows.py @@ -3,6 +3,4 @@ async def wait_child_exiting(process: subprocess.Popen) -> None: - if process.returncode is not None: - return await WaitForSingleObject(int(process._handle)) diff --git a/trio/_windows_pipes.py b/trio/_windows_pipes.py index 4d5cadfe79..3c32fc9c20 100644 --- a/trio/_windows_pipes.py +++ b/trio/_windows_pipes.py @@ -64,15 +64,7 @@ async def send_all(self, data: bytes): self._pipe, chunk ) except BrokenPipeError: - # BrokenPipeError seems to be used to deliver - # "your side of the pipe got closed by someone - # else" too - if self._closed: - raise _core.ClosedResourceError( - "another task closed this pipe" - ) from None - else: - raise _core.BrokenResourceError from None + raise _core.BrokenResourceError from None async def wait_send_all_might_not_block(self) -> None: async with self._conflict_detector: diff --git a/trio/tests/test_pipes.py b/trio/tests/test_pipes.py index 3b91d0e54c..cbd6596447 100644 --- a/trio/tests/test_pipes.py +++ b/trio/tests/test_pipes.py @@ -8,22 +8,18 @@ from .. 
import _core, move_on_after from ..testing import wait_all_tasks_blocked, check_one_way_stream -posix = False -if os.name == "posix": +posix = os.name == "posix" +if posix: from .._unix_pipes import PipeSendStream, PipeReceiveStream, make_pipe - posix = True -elif os.name == "nt": - from .._windows_pipes import PipeSendStream, PipeReceiveStream, make_pipe else: - pytestmark = pytest.mark.skip("pipes not supported on this OS") + from .._windows_pipes import PipeSendStream, PipeReceiveStream, make_pipe @pytest.mark.skipif(not posix, reason="uses posix file descriptors") async def test_send_pipe(): r, w = os.pipe() send = PipeSendStream(w) - if posix: - assert send.fileno() == w + assert send.fileno() == w await send.send_all(b"123") assert (os.read(r, 8)) == b"123" @@ -122,6 +118,28 @@ async def test_async_with(): os.close(r.fileno()) assert excinfo.value.errno == errno.EBADF + else: + # test failue-to-close on Windows + w._closed = False + with pytest.raises(OSError) as excinfo: + await w.aclose() + + +async def test_close_during_write(): + w, r = await make_pipe() + async with _core.open_nursery() as nursery: + + async def write_forever(): + with pytest.raises(_core.ClosedResourceError) as excinfo: + while True: + await w.send_all(b"x" * 4096) + assert excinfo.value + assert "another task" in str(excinfo) + + nursery.start_soon(write_forever) + await wait_all_tasks_blocked(0.1) + await w.aclose() + async def make_clogged_pipe(): s, r = await make_pipe() diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index d6d2822079..b6e69a51dc 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -5,7 +5,7 @@ import sys import pytest -from .. import _core, move_on_after, sleep_forever, subprocess +from .. 
import _core, move_on_after, sleep, sleep_forever, subprocess from .._core.tests.tutil import slow from ..testing import wait_all_tasks_blocked @@ -52,7 +52,7 @@ async def test_kill_when_context_cancelled(): assert proc.poll() is None # Process context entry is synchronous, so this is the # only checkpoint: - await trio.sleep_forever() + await sleep_forever() assert scope.cancelled_caught assert got_signal(proc, SIGKILL) @@ -287,3 +287,44 @@ async def test_wait_reapable_fails(): assert proc.returncode == 0 # exit status unknowable, so... finally: signal.signal(signal.SIGCHLD, old_sigchld) + + +@pytest.mark.skipif(not posix, reason="POSIX specific") +@slow +async def test_wait_reapable_eintr(): + # This only matters on PyPy (where we're coding EINTR handling + # ourselves) but the test works on all waitid platforms. It does + # depend on having a second thread to interrupt a blocking call in, + # so we skip it elsewhere. + + got_alarm = False + + def on_alarm(sig, frame): + nonlocal got_alarm + got_alarm = True + + # make sure SIGALRM gets delivered to the waitid thread, not ours + old_sigalrm = signal.signal(signal.SIGALRM, on_alarm) + old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM]) + try: + async with subprocess.Process(SLEEP(3600)) as proc: + async with _core.open_nursery() as nursery: + nursery.start_soon(proc.wait) + await wait_all_tasks_blocked() + + # waitid platforms only + if hasattr(proc._proc, "@trio_wait_event"): + # thread has been created at this point, so we can + # mask the signal for us without affecting them + signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM]) + signal.alarm(1) + await sleep(1.5) + assert got_alarm + + proc.kill() + nursery.cancel_scope.deadline = _core.current_time() + 1.0 + assert not nursery.cancel_scope.cancelled_caught + assert proc.returncode == -signal.SIGKILL + finally: + signal.pthread_sigmask(signal.SIG_SETMASK, old_mask) + signal.signal(signal.SIGALRM, old_sigalrm) From 
054951dafff0ee1e6eb8d789ce1c75244126502b Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 15 Dec 2018 17:58:32 -0500 Subject: [PATCH 25/49] more small coverage tweaks --- .travis.yml | 6 +++--- ci/travis.sh | 2 +- trio/_subprocess_platform/__init__.py | 10 ++++++---- trio/tests/test_pipes.py | 1 - 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index 263928320b..53738b4c9b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,9 @@ matrix: env: PYPY_NIGHTLY_BRANCH=py3.5 - language: generic env: PYPY_NIGHTLY_BRANCH=py3.6 + - os: osx + language: generic + env: PYPY_NIGHTLY_BRANCH=py3.6 MAC=1 # 3.5.0 and 3.5.1 have different __aiter__ semantics than all # other versions, so we need to test them specially. Travis's # xenial dist only provides 3.5.2+. @@ -34,9 +37,6 @@ matrix: - os: osx language: generic env: MACPYTHON=3.7.1 - - os: osx - language: generic - env: PYPY_NIGHTLY_BRANCH=py3.6 script: - ci/travis.sh diff --git a/ci/travis.sh b/ci/travis.sh index 0886f200f7..5dac329b10 100755 --- a/ci/travis.sh +++ b/ci/travis.sh @@ -30,7 +30,7 @@ if [ "$PYPY_NIGHTLY_BRANCH" != "" ]; then echo "Skipping testing against the nightly build for right now." exit 0 fi - tar xaf pypy.tar.bz2 + tar xjf pypy.tar.bz2 # something like "pypy-c-jit-89963-748aa3022295-linux64" PYPY_DIR=$(echo pypy-c-jit-*) PYTHON_EXE=$PYPY_DIR/bin/pypy3 diff --git a/trio/_subprocess_platform/__init__.py b/trio/_subprocess_platform/__init__.py index 2fbeb96870..db8536d9cc 100644 --- a/trio/_subprocess_platform/__init__.py +++ b/trio/_subprocess_platform/__init__.py @@ -36,8 +36,9 @@ async def create_pipe_to_child_stdin() -> Tuple[SendStream, int]: something suitable for passing as the ``stdin`` argument of :class:`subprocess.Popen`. 
""" - # pragma: no cover - raise NotImplementedError from create_pipe_to_child_stdin._error + raise NotImplementedError from ( # pragma: no cover + create_pipe_to_child_stdin._error + ) async def create_pipe_from_child_output() -> Tuple[ReceiveStream, int]: @@ -51,8 +52,9 @@ async def create_pipe_from_child_output() -> Tuple[ReceiveStream, int]: something suitable for passing as the ``stdin`` argument of :class:`subprocess.Popen`. """ - # pragma: no cover - raise NotImplementedError from create_pipe_to_child_stdin._error + raise NotImplementedError from ( # pragma: no cover + create_pipe_to_child_stdin._error + ) try: diff --git a/trio/tests/test_pipes.py b/trio/tests/test_pipes.py index cbd6596447..669fdd7f12 100644 --- a/trio/tests/test_pipes.py +++ b/trio/tests/test_pipes.py @@ -133,7 +133,6 @@ async def write_forever(): with pytest.raises(_core.ClosedResourceError) as excinfo: while True: await w.send_all(b"x" * 4096) - assert excinfo.value assert "another task" in str(excinfo) nursery.start_soon(write_forever) From a79eca7e3a69f3d6ea01d7f85366b1d9c5d8ac71 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 15 Dec 2018 18:11:40 -0500 Subject: [PATCH 26/49] see if this will fix pypy+mac build --- ci/travis.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/travis.sh b/ci/travis.sh index 5dac329b10..31793b9f41 100755 --- a/ci/travis.sh +++ b/ci/travis.sh @@ -17,6 +17,9 @@ if [ "$TRAVIS_OS_NAME" = "osx" -a "$PYPY_NIGHTLY_BRANCH" = "" ]; then fi if [ "$PYPY_NIGHTLY_BRANCH" != "" ]; then + if [ "$TRAVIS_OS_NAME" = "osx" ]; then + sudo brew install openssl + fi curl -fLo pypy.tar.bz2 http://buildbot.pypy.org/nightly/${PYPY_NIGHTLY_BRANCH}/pypy-c-jit-latest-${TRAVIS_OS_NAME}64.tar.bz2 if [ ! 
-s pypy.tar.bz2 ]; then # We know: From 09c0f43b559c39eefb248900c1ce058a7c136d49 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 15 Dec 2018 18:25:34 -0500 Subject: [PATCH 27/49] try again --- ci/travis.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/travis.sh b/ci/travis.sh index 31793b9f41..7dacc75d11 100755 --- a/ci/travis.sh +++ b/ci/travis.sh @@ -18,7 +18,7 @@ fi if [ "$PYPY_NIGHTLY_BRANCH" != "" ]; then if [ "$TRAVIS_OS_NAME" = "osx" ]; then - sudo brew install openssl + brew install openssl fi curl -fLo pypy.tar.bz2 http://buildbot.pypy.org/nightly/${PYPY_NIGHTLY_BRANCH}/pypy-c-jit-latest-${TRAVIS_OS_NAME}64.tar.bz2 if [ ! -s pypy.tar.bz2 ]; then From b3b2bb0eac74fc367ef551cb48bfa19717cf12d9 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sun, 16 Dec 2018 00:35:49 -0500 Subject: [PATCH 28/49] Stop trying to run a PyPy on Travis OS X -- couldn't get the installation of dependencies to find openssl --- .travis.yml | 3 --- ci/travis.sh | 9 +++------ 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index 53738b4c9b..1a193f6e18 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,9 +13,6 @@ matrix: env: PYPY_NIGHTLY_BRANCH=py3.5 - language: generic env: PYPY_NIGHTLY_BRANCH=py3.6 - - os: osx - language: generic - env: PYPY_NIGHTLY_BRANCH=py3.6 MAC=1 # 3.5.0 and 3.5.1 have different __aiter__ semantics than all # other versions, so we need to test them specially. Travis's # xenial dist only provides 3.5.2+. 
diff --git a/ci/travis.sh b/ci/travis.sh index 7dacc75d11..e125407d21 100755 --- a/ci/travis.sh +++ b/ci/travis.sh @@ -4,7 +4,7 @@ set -ex git rev-parse HEAD -if [ "$TRAVIS_OS_NAME" = "osx" -a "$PYPY_NIGHTLY_BRANCH" = "" ]; then +if [ "$TRAVIS_OS_NAME" = "osx" ]; then curl -Lo macpython.pkg https://www.python.org/ftp/python/${MACPYTHON}/python-${MACPYTHON}-macosx10.6.pkg sudo installer -pkg macpython.pkg -target / ls /Library/Frameworks/Python.framework/Versions/*/bin/ @@ -17,10 +17,7 @@ if [ "$TRAVIS_OS_NAME" = "osx" -a "$PYPY_NIGHTLY_BRANCH" = "" ]; then fi if [ "$PYPY_NIGHTLY_BRANCH" != "" ]; then - if [ "$TRAVIS_OS_NAME" = "osx" ]; then - brew install openssl - fi - curl -fLo pypy.tar.bz2 http://buildbot.pypy.org/nightly/${PYPY_NIGHTLY_BRANCH}/pypy-c-jit-latest-${TRAVIS_OS_NAME}64.tar.bz2 + curl -fLo pypy.tar.bz2 http://buildbot.pypy.org/nightly/${PYPY_NIGHTLY_BRANCH}/pypy-c-jit-latest-linux64.tar.bz2 if [ ! -s pypy.tar.bz2 ]; then # We know: # - curl succeeded (200 response code; -f means "exit with error if @@ -33,7 +30,7 @@ if [ "$PYPY_NIGHTLY_BRANCH" != "" ]; then echo "Skipping testing against the nightly build for right now." exit 0 fi - tar xjf pypy.tar.bz2 + tar xaf pypy.tar.bz2 # something like "pypy-c-jit-89963-748aa3022295-linux64" PYPY_DIR=$(echo pypy-c-jit-*) PYTHON_EXE=$PYPY_DIR/bin/pypy3 From 6380aea7c0f9b13f62c4dea9ce04555329324dc5 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sun, 16 Dec 2018 00:38:24 -0500 Subject: [PATCH 29/49] hopefully fix windows flapping; no-cover the line of pypy+OSX only code --- trio/_core/tests/test_windows.py | 7 ++++++- trio/_subprocess_platform/kqueue.py | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/trio/_core/tests/test_windows.py b/trio/_core/tests/test_windows.py index 04fd3b5226..1c6a1d6a0e 100644 --- a/trio/_core/tests/test_windows.py +++ b/trio/_core/tests/test_windows.py @@ -9,7 +9,9 @@ from ... 
import _core if on_windows: - from .._windows_cffi import ffi, kernel32 + from .._windows_cffi import ( + ffi, kernel32, INVALID_HANDLE_VALUE, raise_winerror + ) # The undocumented API that this is testing should be changed to stop using @@ -56,6 +58,7 @@ async def test_readinto_overlapped(): fp.write(data) fp.flush() + await trio.sleep(0.5) rawname = tfile.encode("utf-16le") handle = kernel32.CreateFileW( ffi.cast("LPCWSTR", ffi.from_buffer(rawname)), @@ -66,6 +69,8 @@ async def test_readinto_overlapped(): 0x40000000, # FILE_FLAG_OVERLAPPED ffi.NULL, # no template file ) + if handle == INVALID_HANDLE_VALUE: # pragma: no cover + raise_winerror() _core.register_with_iocp(handle) async def read_region(start, end): diff --git a/trio/_subprocess_platform/kqueue.py b/trio/_subprocess_platform/kqueue.py index 95625ec285..76b339024d 100644 --- a/trio/_subprocess_platform/kqueue.py +++ b/trio/_subprocess_platform/kqueue.py @@ -7,7 +7,7 @@ async def wait_child_exiting(process: subprocess.Popen) -> None: kqueue = _core.current_kqueue() try: from select import KQ_NOTE_EXIT - except ImportError: + except ImportError: # pragma: no cover # pypy doesn't define KQ_NOTE_EXIT: # https://bitbucket.org/pypy/pypy/issues/2921/ # I verified this value against both Darwin and FreeBSD From 0eb001cb5565e4c171e55e6076d38aa7d6f20aee Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sun, 16 Dec 2018 01:04:20 -0500 Subject: [PATCH 30/49] fix careless error in Windows test --- trio/_core/tests/test_windows.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/trio/_core/tests/test_windows.py b/trio/_core/tests/test_windows.py index 1c6a1d6a0e..7039455463 100644 --- a/trio/_core/tests/test_windows.py +++ b/trio/_core/tests/test_windows.py @@ -7,7 +7,7 @@ # Mark all the tests in this file as being windows-only pytestmark = pytest.mark.skipif(not on_windows, reason="windows only") -from ... import _core +from ... 
import _core, sleep if on_windows: from .._windows_cffi import ( ffi, kernel32, INVALID_HANDLE_VALUE, raise_winerror @@ -58,7 +58,7 @@ async def test_readinto_overlapped(): fp.write(data) fp.flush() - await trio.sleep(0.5) + await sleep(0.5) rawname = tfile.encode("utf-16le") handle = kernel32.CreateFileW( ffi.cast("LPCWSTR", ffi.from_buffer(rawname)), From ad0bc6705584b35fd5b94e04067faa67e91a38f5 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Thu, 20 Dec 2018 04:22:01 -0800 Subject: [PATCH 31/49] updates after code review - make overlapped I/O operations on a non-registered handle still be cancellable - test too-late-to-cancel for overlapped I/O - privatize `WindowsIOManager._perform_overlapped` - more careful memory management of FFI buffers in overlapped I/O - export some CreateFile flag constants in windows_cffi - fix inconsistent readinto_overlapped test (probably was a missing trailing NUL) - remove unnecessary EINVAL test when writing to subprocess - chain BrokenResourceError with the underlying OS exception - reexport Windows-only subprocess module constants - use Python snippets as our test subprocesses on all platforms - add a subprocess test that does some back-and-forth interaction - split test_pipes into unix and windows versions --- trio/_core/_io_windows.py | 162 +++++++++++++++--- trio/_core/_windows_cffi.py | 13 ++ trio/_core/tests/test_windows.py | 111 +++++++++--- trio/_subprocess.py | 6 - trio/_windows_pipes.py | 4 +- trio/hazmat.py | 1 - trio/subprocess.py | 26 ++- trio/tests/test_subprocess.py | 128 ++++++++++---- .../{test_pipes.py => test_unix_pipes.py} | 35 ++-- trio/tests/test_windows_pipes.py | 88 ++++++++++ 10 files changed, 457 insertions(+), 117 deletions(-) rename trio/tests/{test_pipes.py => test_unix_pipes.py} (77%) create mode 100644 trio/tests/test_windows_pipes.py diff --git a/trio/_core/_io_windows.py b/trio/_core/_io_windows.py index a20a337a1a..87acd463d6 100644 --- a/trio/_core/_io_windows.py +++ 
b/trio/_core/_io_windows.py @@ -65,7 +65,12 @@ # - when binding handles to the IOCP, we always set the completion key to 0. # when dispatching received events, when the completion key is 0 we dispatch # based on lpOverlapped -# - thread-safe wakeup uses completion key 1 +# - when we try to cancel an I/O operation and the cancellation fails, +# we post a completion with completion key 1; if this arrives before the +# real completion (with completion key 0) we assume the user forgot to +# call register_with_iocp on their handle, and raise an error accordingly +# (without this logic we'd hang forever uninterruptibly waiting for the +# completion that never arrives) # - other completion keys are available for user use # handles: @@ -127,9 +132,13 @@ def __init__(self): self._iocp_queue = deque() self._iocp_thread = None self._overlapped_waiters = {} + self._posted_too_late_to_cancel = set() self._completion_key_queues = {} - # Completion key 0 is reserved for regular IO events - self._completion_key_counter = itertools.count(1) + # Completion key 0 is reserved for regular IO events. + # Completion key 1 is used by the fallback post from a regular + # IO event's abort_fn to catch the user forgetting to call + # register_wiht_iocp. + self._completion_key_counter = itertools.count(2) # {stdlib socket object: task} # except that wakeup socket is mapped to None @@ -239,6 +248,39 @@ def do_select(): # Regular I/O event, dispatch on lpOverlapped waiter = self._overlapped_waiters.pop(entry.lpOverlapped) _core.reschedule(waiter) + elif entry.lpCompletionKey == 1: + # Post made by a regular I/O event's abort_fn + # after it failed to cancel the I/O. If we still + # have a waiter with this lpOverlapped, we didn't + # get the regular I/O completion and almost + # certainly the user forgot to call + # register_with_iocp. 
+ self._posted_too_late_to_cancel.remove(entry.lpOverlapped) + try: + waiter = self._overlapped_waiters.pop( + entry.lpOverlapped + ) + except KeyError: + # Looks like the actual completion got here + # before this fallback post did -- we're in + # the "expected" case of too-late-to-cancel, + # where the user did nothing wrong and the + # main thread just got backlogged relative to + # the IOCP thread somehow. Nothing more to do. + pass + else: + _core.reschedule( + waiter, + outcome.Error( + _core.TrioInternalError( + "Failed to cancel overlapped I/O and " + "didn't receive the completion either. Did " + "you forget to call register_with_iocp()? " + "The operation may or may not have " + "succeeded; we can't tell." + ) + ) + ) else: # dispatch on lpCompletionKey queue = self._completion_key_queues[entry.lpCompletionKey] @@ -318,15 +360,35 @@ def abort(raise_cancel_): raise_cancel = raise_cancel_ try: _check(kernel32.CancelIoEx(handle, lpOverlapped)) - except OSError as exc: # pragma: no cover + except OSError as exc: if exc.winerror == ErrorCodes.ERROR_NOT_FOUND: - # Too late to cancel. Presumably the completion will - # be coming in soon. (Unfortunately, if you make an - # overlapped request through this IOManager on a - # handle not registered with our IOCP, you wind up - # hanging here when you try to cancel it.) - pass - else: + # Too late to cancel. If this happens because the + # operation is already completed, we don't need to + # do anything; presumably the IOCP thread will be + # reporting back about that completion soon. But + # another possibility is that the operation was + # performed on a handle that wasn't registered + # with our IOCP (ie, the user forgot to call + # register_with_iocp), in which case we're just + # never going to see the completion. 
To avoid an + # uncancellable infinite sleep in the latter case, + # we'll PostQueuedCompletionStatus here, and if + # our post arrives before the original completion + # does, we'll assume the handle wasn't registered. + _check( + kernel32.PostQueuedCompletionStatus( + self._iocp, 0, 1, lpOverlapped + ) + ) + + # Keep the lpOverlapped referenced so its address + # doesn't get reused until our posted completion + # status has been processed. Otherwise, we can + # get confused about which completion goes with + # which I/O. + self._posted_too_late_to_cancel.add(lpOverlapped) + + else: # pragma: no cover raise TrioInternalError( "CancelIoEx failed with unexpected error" ) from exc @@ -399,8 +461,7 @@ def notify_socket_close(self, sock): ) _core.reschedule(task, outcome.Error(exc)) - @_public - async def perform_overlapped(self, handle, submit_fn): + async def _perform_overlapped(self, handle, submit_fn): # submit_fn(lpOverlapped) submits some I/O # it may raise an OSError with ERROR_IO_PENDING # the handle must already be registered using @@ -418,6 +479,12 @@ async def perform_overlapped(self, handle, submit_fn): @_public async def write_overlapped(self, handle, data, file_offset=0): + # Make sure we keep our buffer referenced until the I/O completes. + # For typical types of `data` (bytes, bytearray) the memory we + # pass is part of the existing allocation, but the buffer protocol + # allows for other possibilities. 
+ cbuf_register = [ffi.from_buffer(data)] + def submit_write(lpOverlapped): # yes, these are the real documented names offset_fields = lpOverlapped.DUMMYUNIONNAME.DUMMYSTRUCTNAME @@ -426,23 +493,62 @@ def submit_write(lpOverlapped): _check( kernel32.WriteFile( _handle(handle), - ffi.cast("LPCVOID", ffi.from_buffer(data)), - len(data), + ffi.cast("LPCVOID", cbuf_register[0]), + len(cbuf_register[0]), ffi.NULL, lpOverlapped, ) ) - lpOverlapped = await self.perform_overlapped(handle, submit_write) - # this is "number of bytes transferred" - return lpOverlapped.InternalHigh + try: + lpOverlapped = await self._perform_overlapped(handle, submit_write) + # this is "number of bytes transferred" + return lpOverlapped.InternalHigh + finally: + # There's a trap here. Let's say the incoming `data` is a + # memoryview, and our caller is bounding its lifetime with + # a context manager, maybe because they want to break a + # larger buffer into multiple writes without copying: + # + # with memoryview(big_buffer) as view: + # total_sent = 0 + # while total_sent < len(view): + # total_sent += await write_overlapped( + # handle, big_buffer[total_sent : total_sent + 8192] + # ) + # + # Our FFI buffer object holds a reference to the memoryview's + # buffer, and the memoryview knows about this. If the + # memoryview context is exited (equivalent to calling + # memoryview.release()) while that reference is still held, + # we get a BufferError. + # + # Unfortunately, there seems to be no way to drop that + # reference without destroying the FFI buffer object. + # We can't even call __del__, because there's no __del__ + # exposed. On CPython, when we return normally, the frame + # and its locals are destroyed, but when we throw an + # exception, they remain referenced by the traceback. + # So, we need to drop the reference to the FFI buffer + # explicitly when unwinding. + # + # This probably doesn't help on PyPy, but we don't really + # support PyPy on Windows anyway, so... 
+ # + del cbuf_register[:] @_public async def readinto_overlapped(self, handle, buffer, file_offset=0): - if memoryview(buffer).readonly: - raise TypeError( - "you must pass a mutable buffer such as a bytearray, not bytes" - ) + # This will throw a reasonable error if `buffer` is read-only + # or doesn't support the buffer protocol, and perform no + # operation otherwise. A future release of CFFI will support + # ffi.from_buffer(foo, require_writable=True) to do the same + # thing less circumlocutiously. + ffi.memmove(buffer, b"", 0) + + # As in write_overlapped, we want to ensure the buffer stays + # alive for the duration of the I/O. + cbuf_register = [ffi.from_buffer(buffer)] def submit_read(lpOverlapped): offset_fields = lpOverlapped.DUMMYUNIONNAME.DUMMYSTRUCTNAME @@ -451,12 +557,16 @@ def submit_read(lpOverlapped): _check( kernel32.ReadFile( _handle(handle), - ffi.cast("LPVOID", ffi.from_buffer(buffer)), - len(buffer), + ffi.cast("LPVOID", cbuf_register[0]), + len(cbuf_register[0]), ffi.NULL, lpOverlapped, ) ) - lpOverlapped = await self.perform_overlapped(handle, submit_read) - return lpOverlapped.InternalHigh + try: + lpOverlapped = await self._perform_overlapped(handle, submit_read) + return lpOverlapped.InternalHigh + finally: + # See discussion in write_overlapped() + del cbuf_register[:] diff --git a/trio/_core/_windows_cffi.py b/trio/_core/_windows_cffi.py index 5c59c2cafe..f05e871ce3 100644 --- a/trio/_core/_windows_cffi.py +++ b/trio/_core/_windows_cffi.py @@ -204,3 +204,16 @@ class ErrorCodes(enum.IntEnum): ERROR_INVALID_HANDLE = 6 ERROR_INVALID_PARMETER = 87 ERROR_NOT_FOUND = 1168 + + +class FileFlags(enum.IntEnum): + GENERIC_READ = 0x80000000 + FILE_FLAG_OVERLAPPED = 0x40000000 + FILE_SHARE_READ = 1 + FILE_SHARE_WRITE = 2 + FILE_SHARE_DELETE = 4 + CREATE_NEW = 1 + CREATE_ALWAYS = 2 + OPEN_EXISTING = 3 + OPEN_ALWAYS = 4 + TRUNCATE_EXISTING = 5 diff --git a/trio/_core/tests/test_windows.py b/trio/_core/tests/test_windows.py index 
7039455463..c4de83606c 100644 --- a/trio/_core/tests/test_windows.py +++ b/trio/_core/tests/test_windows.py @@ -1,5 +1,6 @@ import os import tempfile +from contextlib import contextmanager import pytest @@ -7,10 +8,11 @@ # Mark all the tests in this file as being windows-only pytestmark = pytest.mark.skipif(not on_windows, reason="windows only") -from ... import _core, sleep +from ... import _core, sleep, move_on_after +from ...testing import wait_all_tasks_blocked if on_windows: from .._windows_cffi import ( - ffi, kernel32, INVALID_HANDLE_VALUE, raise_winerror + ffi, kernel32, INVALID_HANDLE_VALUE, raise_winerror, FileFlags ) @@ -58,39 +60,96 @@ async def test_readinto_overlapped(): fp.write(data) fp.flush() - await sleep(0.5) - rawname = tfile.encode("utf-16le") + rawname = tfile.encode("utf-16le") + b"\0\0" + rawname_buf = ffi.from_buffer(rawname) handle = kernel32.CreateFileW( - ffi.cast("LPCWSTR", ffi.from_buffer(rawname)), - 0x80000000, # GENERIC_READ - 0, # no sharing + ffi.cast("LPCWSTR", rawname_buf), + FileFlags.GENERIC_READ, + FileFlags.FILE_SHARE_READ, ffi.NULL, # no security attributes - 3, # OPEN_EXISTING - 0x40000000, # FILE_FLAG_OVERLAPPED + FileFlags.OPEN_EXISTING, + FileFlags.FILE_FLAG_OVERLAPPED, ffi.NULL, # no template file ) if handle == INVALID_HANDLE_VALUE: # pragma: no cover raise_winerror() - _core.register_with_iocp(handle) - - async def read_region(start, end): - await _core.readinto_overlapped( - handle, - memoryview(buffer)[start:end], start - ) try: - async with _core.open_nursery() as nursery: - for start in range(0, 4096, 512): - nursery.start_soon(read_region, start, start + 512) - - assert buffer == data - - with pytest.raises(TypeError): - await _core.readinto_overlapped(handle, b"immutable") + with memoryview(buffer) as buffer_view: + + async def read_region(start, end): + await _core.readinto_overlapped( + handle, + buffer_view[start:end], + start, + ) + + # Make sure reading before the handle is registered + # fails rather 
than hanging forever + with pytest.raises(_core.TrioInternalError) as exc_info: + with move_on_after(0.5): + await read_region(0, 512) + assert "Did you forget to call register_with_iocp()?" in str( + exc_info.value + ) + + _core.register_with_iocp(handle) + + async with _core.open_nursery() as nursery: + for start in range(0, 4096, 512): + nursery.start_soon(read_region, start, start + 512) + + assert buffer == data + + with pytest.raises(BufferError): + await _core.readinto_overlapped(handle, b"immutable") finally: kernel32.CloseHandle(handle) -# XX test setting the iomanager._iocp to something weird to make sure that the -# IOCP thread can send exceptions back to the main thread +@contextmanager +def pipe_with_overlapped_read(): + from asyncio.windows_utils import pipe + import msvcrt + + read_handle, write_handle = pipe(overlapped=(True, False)) + _core.register_with_iocp(read_handle) + try: + write_fd = msvcrt.open_osfhandle(write_handle, 0) + yield os.fdopen(write_fd, "wb", closefd=False), read_handle + finally: + kernel32.CloseHandle(ffi.cast("HANDLE", read_handle)) + kernel32.CloseHandle(ffi.cast("HANDLE", write_handle)) + + +async def test_too_late_to_cancel(): + import time + + with pipe_with_overlapped_read() as (write_fp, read_handle): + target = bytearray(6) + async with _core.open_nursery() as nursery: + # Start an async read in the background + nursery.start_soon(_core.readinto_overlapped, read_handle, target) + await wait_all_tasks_blocked() + + # Synchronous write to the other end of the pipe + with write_fp: + write_fp.write(b"test1\ntest2\n") + + # Note: not trio.sleep! We're making sure the OS level + # ReadFile completes, before trio has a chance to execute + # another checkpoint and notice it completed. + time.sleep(1) + nursery.cancel_scope.cancel() + assert target[:6] == b"test1\n" + + # Do another I/O to make sure we've actually processed the + # fallback completion that was posted when CancelIoEx failed. 
+ assert await _core.readinto_overlapped(read_handle, target) == 6 + assert target[:6] == b"test2\n" + + +# XX: test setting the iomanager._iocp to something weird to make +# sure that the IOCP thread can send exceptions back to the main thread. +# --> it's not clear if this is actually possible? we just get +# ERROR_INVALID_HANDLE which looks like the IOCP was closed (not an error) diff --git a/trio/_subprocess.py b/trio/_subprocess.py index 593e90270d..eba792908b 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -221,12 +221,6 @@ async def feed_input(): await proc.stdin.send_all(input) except _core.BrokenResourceError: pass - except OSError as e: # pragma: no cover - # According to the stdlib subprocess module, EINVAL can - # occur on Windows if the child closes its end of the - # pipe, and must be ignored. - if e.errno != errno.EINVAL: - raise await proc.stdin.aclose() async def read_output(stream, chunks): diff --git a/trio/_windows_pipes.py b/trio/_windows_pipes.py index 3c32fc9c20..2b57de7692 100644 --- a/trio/_windows_pipes.py +++ b/trio/_windows_pipes.py @@ -63,8 +63,8 @@ async def send_all(self, data: bytes): total_sent += await _core.write_overlapped( self._pipe, chunk ) - except BrokenPipeError: - raise _core.BrokenResourceError from None + except BrokenPipeError as ex: + raise _core.BrokenResourceError from ex async def wait_send_all_might_not_block(self) -> None: async with self._conflict_detector: diff --git a/trio/hazmat.py b/trio/hazmat.py index 014386e132..2c19bd81ad 100644 --- a/trio/hazmat.py +++ b/trio/hazmat.py @@ -50,7 +50,6 @@ register_with_iocp, wait_overlapped, monitor_completion_key, - perform_overlapped, readinto_overlapped, write_overlapped, ) diff --git a/trio/subprocess.py b/trio/subprocess.py index b37a9f43c5..9b40b98920 100644 --- a/trio/subprocess.py +++ b/trio/subprocess.py @@ -1,6 +1,30 @@ from ._subprocess import Process, run + +Popen = Process + +# Reexport constants and exceptions from the stdlib subprocess module 
from subprocess import ( PIPE, STDOUT, DEVNULL, CalledProcessError, SubprocessError, TimeoutExpired, CompletedProcess ) -Popen = Process + +# Windows only +try: + from subprocess import ( + STARTUPINFO, STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, STD_ERROR_HANDLE, + SW_HIDE, STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW, + CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP + ) +except ImportError: + pass + +# Windows 3.7+ only +try: + from subprocess import ( + ABOVE_NORMAL_PRIORITY_CLASS, BELOW_NORMAL_PRIORITY_CLASS, + HIGH_PRIORITY_CLASS, IDLE_PRIORITY_CLASS, NORMAL_PRIORITY_CLASS, + REALTIME_PRIORITY_CLASS, CREATE_NO_WINDOW, DETACHED_PROCESS, + CREATE_DEFAULT_ERROR_MODE, CREATE_BREAKAWAY_FROM_JOB + ) +except ImportError: + pass diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index b6e69a51dc..21b115cfc1 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -5,7 +5,9 @@ import sys import pytest -from .. import _core, move_on_after, sleep, sleep_forever, subprocess +from .. import ( + _core, move_on_after, fail_after, sleep, sleep_forever, subprocess +) from .._core.tests.tutil import slow from ..testing import wait_all_tasks_blocked @@ -16,21 +18,17 @@ SIGKILL, SIGTERM, SIGINT = None, None, None -def cmd_switch(posix_option, windows_pycode): - return posix_option if posix else [ - sys.executable, "-c", "import sys; " + windows_pycode - ] +# Since Windows has very few command-line utilities generally available, +# all of our subprocesses are Python processes running short bits of +# (mostly) cross-platform code. 
+def python(code): + return [sys.executable, "-u", "-c", "import sys; " + code] -EXIT_TRUE = cmd_switch(["true"], "sys.exit(0)") -EXIT_FALSE = cmd_switch(["false"], "sys.exit(1)") -CAT = cmd_switch(["cat"], "sys.stdout.buffer.write(sys.stdin.buffer.read())") - - -def SLEEP(seconds): - return cmd_switch( - ["sleep", str(seconds)], "import time; time.sleep({})".format(seconds) - ) +EXIT_TRUE = python("sys.exit(0)") +EXIT_FALSE = python("sys.exit(1)") +CAT = python("sys.stdout.buffer.write(sys.stdin.buffer.read())") +SLEEP = lambda seconds: python("import time; time.sleep({})".format(seconds)) def got_signal(proc, sig): @@ -57,12 +55,11 @@ async def test_kill_when_context_cancelled(): assert got_signal(proc, SIGKILL) -COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR = [ - sys.executable, "-c", "import sys\n" - "data = sys.stdin.buffer.read()\n" - "sys.stdout.buffer.write(data)\n" +COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR = python( + "data = sys.stdin.buffer.read(); " + "sys.stdout.buffer.write(data); " "sys.stderr.buffer.write(data[::-1])" -] +) async def test_pipes(): @@ -98,6 +95,76 @@ async def check_output(stream, expected): assert 0 == await proc.wait() +async def test_interactive(): + # Test some back-and-forth with a subprocess. 
This one works like so: + # in: 32\n + # out: 0000...0000\n (32 zeroes) + # err: 1111...1111\n (64 ones) + # in: 10\n + # out: 2222222222\n (10 twos) + # err: 3333....3333\n (20 threes) + # in: EOF + # out: EOF + # err: EOF + + async with subprocess.Process( + python( + "idx = 0\n" + "while True:\n" + " line = sys.stdin.readline()\n" + " if line == '': break\n" + " request = int(line.strip())\n" + " print(str(idx * 2) * request)\n" + " print(str(idx * 2 + 1) * request * 2, file=sys.stderr)\n" + " idx += 1\n" + ), + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) as proc: + + newline = b"\n" if posix else b"\r\n" + + async def expect(idx, request): + async with _core.open_nursery() as nursery: + + async def drain_one(stream, count, digit): + while count > 0: + result = await stream.receive_some(count) + assert result == ( + "{}".format(digit).encode("utf-8") * len(result) + ) + count -= len(result) + assert count == 0 + assert await stream.receive_some(len(newline)) == newline + + nursery.start_soon(drain_one, proc.stdout, request, idx * 2) + nursery.start_soon( + drain_one, proc.stderr, request * 2, idx * 2 + 1 + ) + + with fail_after(5): + await proc.stdin.send_all(b"12") + await sleep(0.1) + await proc.stdin.send_all(b"345" + newline) + await expect(0, 12345) + await proc.stdin.send_all(b"100" + newline + b"200" + newline) + await expect(1, 100) + await expect(2, 200) + await proc.stdin.send_all(b"0" + newline) + await expect(3, 0) + await proc.stdin.send_all(b"999999") + with move_on_after(0.1) as scope: + await expect(4, 0) + assert scope.cancelled_caught + await proc.stdin.send_all(newline) + await expect(4, 999999) + await proc.stdin.aclose() + assert await proc.stdout.receive_some(1) == b"" + assert await proc.stderr.receive_some(1) == b"" + assert proc.returncode == 0 + + async def test_run(): data = bytes(random.randint(0, 255) for _ in range(2**18)) @@ -168,18 +235,9 @@ async def test_run_timeout(): async def 
test_run_check(): - cmd = cmd_switch( - "echo test >&2; false", - "sys.stderr.buffer.write(b'test\\n'); sys.exit(1)", - ) - + cmd = python("sys.stderr.buffer.write(b'test\\n'); sys.exit(1)") with pytest.raises(subprocess.CalledProcessError) as excinfo: - await subprocess.run( - cmd, - shell=posix, - stderr=subprocess.PIPE, - check=True, - ) + await subprocess.run(cmd, stderr=subprocess.PIPE, check=True) assert excinfo.value.cmd == cmd assert excinfo.value.returncode == 1 assert excinfo.value.stderr == b"test\n" @@ -328,3 +386,13 @@ def on_alarm(sig, frame): finally: signal.pthread_sigmask(signal.SIG_SETMASK, old_mask) signal.signal(signal.SIGALRM, old_sigalrm) + + +def test_all_constants_reexported(): + trio_subprocess_exports = set(dir(subprocess)) + import subprocess as stdlib_subprocess + + for name in dir(stdlib_subprocess): + if name.isupper() and name[0] != "_": + stdlib_constant = name + assert stdlib_constant in trio_subprocess_exports diff --git a/trio/tests/test_pipes.py b/trio/tests/test_unix_pipes.py similarity index 77% rename from trio/tests/test_pipes.py rename to trio/tests/test_unix_pipes.py index 669fdd7f12..2a0683a3fc 100644 --- a/trio/tests/test_pipes.py +++ b/trio/tests/test_unix_pipes.py @@ -9,13 +9,11 @@ from ..testing import wait_all_tasks_blocked, check_one_way_stream posix = os.name == "posix" +pytestmark = pytest.mark.skipif(not posix, reason="posix only") if posix: from .._unix_pipes import PipeSendStream, PipeReceiveStream, make_pipe -else: - from .._windows_pipes import PipeSendStream, PipeReceiveStream, make_pipe -@pytest.mark.skipif(not posix, reason="uses posix file descriptors") async def test_send_pipe(): r, w = os.pipe() send = PipeSendStream(w) @@ -28,7 +26,6 @@ async def test_send_pipe(): send._closed = True -@pytest.mark.skipif(not posix, reason="uses posix file descriptors") async def test_receive_pipe(): r, w = os.pipe() recv = PipeReceiveStream(r) @@ -79,13 +76,10 @@ async def test_pipe_errors(): with 
pytest.raises(TypeError): PipeReceiveStream(None) - if posix: - # can only construct with invalid fd on posix - with pytest.raises(ValueError): - await PipeReceiveStream(0).receive_some(0) + with pytest.raises(ValueError): + await PipeReceiveStream(0).receive_some(0) -@pytest.mark.skipif(not posix, reason="uses posix file descriptors") async def test_del(): w, r = await make_pipe() f1, f2 = w.fileno(), r.fileno() @@ -109,20 +103,13 @@ async def test_async_with(): assert w._closed assert r._closed - if posix: - with pytest.raises(OSError) as excinfo: - os.close(w.fileno()) - assert excinfo.value.errno == errno.EBADF - - with pytest.raises(OSError) as excinfo: - os.close(r.fileno()) - assert excinfo.value.errno == errno.EBADF + with pytest.raises(OSError) as excinfo: + os.close(w.fileno()) + assert excinfo.value.errno == errno.EBADF - else: - # test failue-to-close on Windows - w._closed = False - with pytest.raises(OSError) as excinfo: - await w.aclose() + with pytest.raises(OSError) as excinfo: + os.close(r.fileno()) + assert excinfo.value.errno == errno.EBADF async def test_close_during_write(): @@ -167,6 +154,4 @@ async def make_clogged_pipe(): async def test_pipe_fully(): - # passing make_clogged_pipe tests wait_send_all_might_not_block, and we - # can't implement that on Windows - await check_one_way_stream(make_pipe, make_clogged_pipe if posix else None) + await check_one_way_stream(make_pipe, make_clogged_pipe) diff --git a/trio/tests/test_windows_pipes.py b/trio/tests/test_windows_pipes.py new file mode 100644 index 0000000000..ca09e23eae --- /dev/null +++ b/trio/tests/test_windows_pipes.py @@ -0,0 +1,88 @@ +import errno +import select + +import os +import pytest + +from .._core.tests.tutil import gc_collect_harder +from .. 
import _core, move_on_after +from ..testing import wait_all_tasks_blocked, check_one_way_stream + +windows = os.name == "nt" +pytestmark = pytest.mark.skipif(not windows, reason="windows only") +if windows: + from .._windows_pipes import PipeSendStream, PipeReceiveStream, make_pipe + + +async def test_pipes_combined(): + write, read = await make_pipe() + count = 2**20 + + async def sender(): + big = bytearray(count) + await write.send_all(big) + + async def reader(): + await wait_all_tasks_blocked() + received = 0 + while received < count: + received += len(await read.receive_some(4096)) + + assert received == count + + async with _core.open_nursery() as n: + n.start_soon(sender) + n.start_soon(reader) + + await read.aclose() + await write.aclose() + + +async def test_send_on_closed_pipe(): + write, read = await make_pipe() + await write.aclose() + + with pytest.raises(_core.ClosedResourceError): + await write.send_all(b"123") + + await read.aclose() + + +async def test_pipe_errors(): + with pytest.raises(TypeError): + PipeReceiveStream(None) + + +async def test_async_with(): + w, r = await make_pipe() + async with w, r: + pass + + assert w._closed + assert r._closed + + # test failue-to-close + w._closed = False + with pytest.raises(OSError): + await w.aclose() + + +async def test_close_during_write(): + w, r = await make_pipe() + async with _core.open_nursery() as nursery: + + async def write_forever(): + with pytest.raises(_core.ClosedResourceError) as excinfo: + while True: + await w.send_all(b"x" * 4096) + assert "another task" in str(excinfo) + + nursery.start_soon(write_forever) + await wait_all_tasks_blocked(0.1) + await w.aclose() + + +async def test_pipe_fully(): + # passing make_clogged_pipe tests wait_send_all_might_not_block, and we + # can't implement that on Windows + await check_one_way_stream(make_pipe, None) From a87a87c4ce432c92a5818a6ec53a917d3c8fd468 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Thu, 20 Dec 2018 04:52:17 -0800 Subject: 
[PATCH 32/49] streamline waitid EINTR test in the hopes that it starts working on CI too --- trio/tests/test_subprocess.py | 41 +++++++++++++---------------------- 1 file changed, 15 insertions(+), 26 deletions(-) diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index 21b115cfc1..4df06456a3 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -347,44 +347,33 @@ async def test_wait_reapable_fails(): signal.signal(signal.SIGCHLD, old_sigchld) -@pytest.mark.skipif(not posix, reason="POSIX specific") @slow -async def test_wait_reapable_eintr(): +def test_waitid_eintr(): # This only matters on PyPy (where we're coding EINTR handling - # ourselves) but the test works on all waitid platforms. It does - # depend on having a second thread to interrupt a blocking call in, - # so we skip it elsewhere. + # ourselves) but the test works on all waitid platforms. + from .._subprocess_platform import wait_child_exiting + if not wait_child_exiting.__module__.endswith("waitid"): + pytest.skip("waitid only") + from .._subprocess_platform.waitid import sync_wait_reapable + import subprocess as stdlib_subprocess got_alarm = False + sleeper = stdlib_subprocess.Popen(["sleep", "3600"]) def on_alarm(sig, frame): nonlocal got_alarm got_alarm = True + sleeper.kill() - # make sure SIGALRM gets delivered to the waitid thread, not ours old_sigalrm = signal.signal(signal.SIGALRM, on_alarm) - old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM]) try: - async with subprocess.Process(SLEEP(3600)) as proc: - async with _core.open_nursery() as nursery: - nursery.start_soon(proc.wait) - await wait_all_tasks_blocked() - - # waitid platforms only - if hasattr(proc._proc, "@trio_wait_event"): - # thread has been created at this point, so we can - # mask the signal for us without affecting them - signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM]) - signal.alarm(1) - await sleep(1.5) - assert got_alarm - - proc.kill() - 
nursery.cancel_scope.deadline = _core.current_time() + 1.0 - assert not nursery.cancel_scope.cancelled_caught - assert proc.returncode == -signal.SIGKILL + signal.alarm(1) + sync_wait_reapable(sleeper.pid) + assert sleeper.wait(timeout=1) == -9 finally: - signal.pthread_sigmask(signal.SIG_SETMASK, old_mask) + if sleeper.returncode is not None: + sleeper.kill() + sleeper.wait() signal.signal(signal.SIGALRM, old_sigalrm) From 82253caa8bd59b5b3bbb0ba61d196289c967f838 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Thu, 20 Dec 2018 05:11:19 -0800 Subject: [PATCH 33/49] fix sense of check --- trio/tests/test_subprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index 4df06456a3..e7b0c4d816 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -371,7 +371,7 @@ def on_alarm(sig, frame): sync_wait_reapable(sleeper.pid) assert sleeper.wait(timeout=1) == -9 finally: - if sleeper.returncode is not None: + if sleeper.returncode is None: sleeper.kill() sleeper.wait() signal.signal(signal.SIGALRM, old_sigalrm) From 4be26090c146bf22fedc62fd3f79b11189df74c5 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Thu, 20 Dec 2018 05:34:21 -0800 Subject: [PATCH 34/49] one more coverage nit --- trio/tests/test_subprocess.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index e7b0c4d816..3558b07a97 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -371,7 +371,9 @@ def on_alarm(sig, frame): sync_wait_reapable(sleeper.pid) assert sleeper.wait(timeout=1) == -9 finally: - if sleeper.returncode is None: + if sleeper.returncode is None: # pragma: no cover + # We only get here if something fails in the above; + # if the test passes, wait() will reap the process sleeper.kill() sleeper.wait() signal.signal(signal.SIGALRM, old_sigalrm) From 
5722d4b64730ab95554c7ab32539bff51de89f99 Mon Sep 17 00:00:00 2001
From: Joshua Oreman
Date: Thu, 20 Dec 2018 06:06:58 -0800
Subject: [PATCH 35/49] add newsfragment

---
 newsfragments/4.feature.rst | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
 create mode 100644 newsfragments/4.feature.rst

diff --git a/newsfragments/4.feature.rst b/newsfragments/4.feature.rst
new file mode 100644
index 0000000000..3511b477f1
--- /dev/null
+++ b/newsfragments/4.feature.rst
@@ -0,0 +1,17 @@
+Subprocess support! Add :mod:`trio.subprocess`, an async wrapper around the
+stdlib :mod:`subprocess` module with a similar interface:
+:class:`~trio.subprocess.Process` (renamed from :class:`~subprocess.Popen`)
+to create a process and interact with it at your leisure,
+:func:`~trio.subprocess.run` to run a process to completion and report the
+results, and reexports of all the stdlib :mod:`subprocess` exceptions and
+constants. There is a :class:`subprocess.Popen` object hiding inside each
+:class:`trio.subprocess.Process`, so all the stdlib options for starting
+processes in various wacky ways work unmodified. (Currently we only
+support unbuffered byte streams for communicating with subprocess
+pipes, so the ``universal_newlines``, ``encoding``, ``errors``, and
+``bufsize`` arguments to :class:`subprocess.Popen` are not supported by the
+Trio version.) Instead of file objects, the ``stdin``, ``stdout``, and
+``stderr`` attributes of a :class:`trio.subprocess.Process` are Trio
+streams. There is no :meth:`trio.subprocess.Process.communicate` because the
+stdlib version has confusing cancellation semantics - use
+:func:`trio.subprocess.run` or interact with the pipe streams directly.
From 9aea79b6e2f86015a4e8edd23d2a2fe171f38cb8 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Thu, 20 Dec 2018 08:18:17 -0800 Subject: [PATCH 36/49] Add docs and capture_output parameter --- docs/source/reference-io.rst | 126 ++++++++++++++++++++-- trio/_subprocess.py | 192 +++++++++++++++++++++++++--------- trio/tests/test_subprocess.py | 11 +- 3 files changed, 271 insertions(+), 58 deletions(-) diff --git a/docs/source/reference-io.rst b/docs/source/reference-io.rst index 4167e23364..eaf5957e2b 100644 --- a/docs/source/reference-io.rst +++ b/docs/source/reference-io.rst @@ -24,7 +24,7 @@ create complex transport configurations. Here's some examples: speak SSL over the network is to wrap an :class:`~trio.ssl.SSLStream` around a :class:`~trio.SocketStream`. -* If you spawn a subprocess then you can get a +* If you spawn a :ref:`subprocess`, you can get a :class:`~trio.abc.SendStream` that lets you write to its stdin, and a :class:`~trio.abc.ReceiveStream` that lets you read from its stdout. If for some reason you wanted to speak SSL to a subprocess, @@ -36,9 +36,6 @@ create complex transport configurations. Here's some examples: ssl_context.check_hostname = False s = SSLStream(StapledStream(process.stdin, process.stdout), ssl_context) - [Note: subprocess support is not implemented yet, but that's the - plan. Unless it is implemented, and I forgot to remove this note.] - * It sometimes happens that you want to connect to an HTTPS server, but you have to go through a web proxy... and the proxy also uses HTTPS. So you end up having to do `SSL-on-top-of-SSL @@ -641,10 +638,125 @@ Asynchronous file objects The underlying synchronous file object. -Subprocesses ------------- +.. module:: trio.subprocess +.. 
_subprocess:
+
+Spawning subprocesses with :mod:`trio.subprocess`
+-------------------------------------------------
+
+The :mod:`trio.subprocess` module provides support for spawning
+other programs, communicating with them via pipes, sending them signals,
+and waiting for them to exit. Its interface is based on the
+:mod:`subprocess` module in the standard library; differences
+are noted below.
+
+The constants and exceptions from the standard :mod:`subprocess`
+module are re-exported by :mod:`trio.subprocess` unchanged.
+So, if you like, you can say ``from trio import subprocess``
+and continue referring to ``subprocess.PIPE``,
+:exc:`subprocess.CalledProcessError`, and so on, in the same
+way you would in synchronous code.
+
+
+.. _subprocess-options:
+
+Options for starting subprocesses
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The standard :mod:`subprocess` module supports a dizzying array
+of `options `__
+for controlling the environment in which a process starts and the
+mechanisms used for communicating with it. (If you find that list
+overwhelming, you're not alone; you might prefer to start with
+just the `frequently used ones
+`__.)
+
+Trio makes use of the :mod:`subprocess` module's logic for spawning processes,
+so almost all of these options can be used with their same semantics when
+starting subprocesses under Trio. The exceptions are ``encoding``, ``errors``,
+``universal_newlines`` (and its 3.7+ alias ``text``), and ``bufsize``;
+Trio always uses unbuffered byte streams for communicating with a process,
+so these options don't make sense. Text I/O should use a layer
+on top of the raw byte streams, just as it does with sockets.
+[This layer does not yet exist, but is in the works.]
+
+
+Running a process and waiting for it to finish
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The basic interface for running a subprocess start-to-finish is
+:func:`trio.subprocess.run`. 
It always waits for the subprocess to +exit before returning, so there's no need to worry about leaving +a process running by mistake after you've gone on to do other things. +:func:`~trio.subprocess.run` can supply input or read output from +the subprocess, and can optionally throw an exception if the subprocess +runs for too long or exits with a failure indication. + +.. autofunction:: trio.subprocess.run + + +Interacting with a process as it runs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If :func:`~trio.subprocess.run` doesn't suit your needs, you can spawn +a subprocess by creating an instance of :class:`trio.subprocess.Process` +and then interact with it using its :attr:`~trio.subprocess.Process.stdin`, +:attr:`~trio.subprocess.Process.stdout`, and/or +:attr:`~trio.subprocess.Process.stderr` streams. + +.. autoclass:: trio.subprocess.Process + :members: + -`Not implemented yet! `__ +Differences from the standard library +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* All arguments to :class:`~trio.subprocess.run` and the constructor of + :class:`~trio.subprocess.Process`, except the command to run, must be + passed using keywords. + +* :func:`~subprocess.call`, :func:`~subprocess.check_call`, and + :func:`~subprocess.check_output` are not provided; use + :func:`~trio.subprocess.run` instead. + +* :meth:`~subprocess.Popen.communicate` is not provided as a method on + :class:`~trio.subprocess.Process` objects; use + :func:`~trio.subprocess.run` instead, or write the loop yourself if + you have unusual needs (the implementation of + :func:`~trio.subprocess.run` is fairly straightforward given trio's + concurrency primitives). :meth:`~subprocess.Popen.communicate` has + quite unusual cancellation behavior in the standard library (on some + platforms it spawns a background thread which continues to read from + the child process even after the timeout has expired) and we wanted + to provide an interface with fewer surprises. 
+ +* :meth:`~trio.subprocess.Process.wait` is an async function that does + not take a ``timeout`` argument; combine it with + :func:`~trio.fail_after` if you want a timeout. + +* Text I/O is not supported: you may not use the + :class:`~trio.subprocess.Process` constructor arguments + ``universal_newlines`` (or its 3.7+ alias ``text``), ``encoding``, + or ``errors``. + +* :attr:`~trio.subprocess.Process.stdin` is a :class:`~trio.abc.SendStream` and + :attr:`~trio.subprocess.Process.stdout` and :attr:`~trio.subprocess.Process.stderr` + are :class:`~trio.abc.ReceiveStream`\s, rather than file objects. The + :class:`~trio.subprocess.Process` constructor argument ``bufsize`` is + not supported since there would be no file object to pass it to. + +* :meth:`~trio.subprocess.Process.aclose` (and thus also + ``__aexit__``) behave like the standard :class:`~subprocess.Popen` + context manager exit (close pipes to the process, then wait for it + to exit), but add additional behavior if cancelled: kill the process + and wait for it to finish terminating. This is useful for scoping + the lifetime of a simple subprocess that doesn't spawn any children + of its own. (For subprocesses that do in turn spawn their own + subprocesses, there is not currently any way to clean up the whole + tree; moreover, using the :class:`Process` context manager in such + cases is likely to be counterproductive as killing the top-level + subprocess leaves it no chance to do any cleanup of its children + that might be desired. A better solution for this is in the works.) Signals diff --git a/trio/_subprocess.py b/trio/_subprocess.py index eba792908b..158511da20 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -17,42 +17,79 @@ class Process(AsyncResource): - """Like :class:`subprocess.Popen`, but async. 
- - :class:`Process` has a public API identical to that of - :class:`subprocess.Popen`, except for the following differences: - - * All constructor arguments except the command to execute - must be passed as keyword arguments. - - * Text I/O is not supported: you may not use the constructor - arguments ``universal_newlines``, ``encoding``, or ``errors``. - - * :attr:`stdin` is a :class:`~trio.abc.SendStream` and - :attr:`stdout` and :attr:`stderr` are :class:`~trio.abc.ReceiveStream`s, - rather than file objects. The constructor argument ``bufsize`` is - not supported since there would be no file object to pass it to. - - * :meth:`wait` is an async function that does not take a ``timeout`` - argument; combine it with :func:`~trio.fail_after` if you want a timeout. - - * :meth:`~subprocess.Popen.communicate` does not exist due to the confusing - cancellation behavior exhibited by the stdlib version. Use :func:`run` - instead, or interact with :attr:`stdin` / :attr:`stdout` / :attr:`stderr` - directly. - - * :meth:`aclose` (and thus also ``__aexit__``) behave like the - standard :class:`Popen` context manager exit (close pipes to the - process, then wait for it to exit), but add additional behavior - if cancelled: kill the process and wait for it to finish - terminating. This is useful for scoping the lifetime of a - simple subprocess that doesn't spawn any children of its - own. (For subprocesses that do in turn spawn their own - subprocesses, there is not currently any way to clean up the - whole tree; moreover, using the :class:`Process` context manager - in such cases is likely to be counterproductive as killing the - top-level subprocess leaves it no chance to do any cleanup of - its children that might be desired.) + """Execute a child program in a new process. + + Like :class:`subprocess.Popen`, but async. 
+ + Constructing a :class:`Process` immediately spawns the child + process, or throws an :exc:`OSError` if the spawning fails (for + example, if the specified command could not be found). + After construction, you can interact with the child process + by writing data to its :attr:`stdin` stream (a + :class:`~trio.abc.SendStream`), reading data from its :attr:`stdout` + and/or :attr:`stderr` streams (both :class:`~trio.abc.ReceiveStream`\s), + sending it signals using :meth:`terminate`, :meth:`kill`, or + :meth:`send_signal`, and waiting for it to exit using :meth:`wait`. + + Each standard stream is only available if it was specified at + :class:`Process` construction time that a pipe should be created for it: + if you constructed with ``stdin=subprocess.PIPE``, you can write to + the :attr:`stdin` stream, else :attr:`stdin` will be ``None``. + + :class:`Process` implements :class:`~trio.abc.AsyncResource`, + so you can use it as an async context manager or call its + :meth:`aclose` method directly. "Closing" a :class:`Process` + will close any pipes to the child and wait for it to exit; + if cancelled, the child will be forcibly killed and we will + ensure it has finished exiting before allowing the cancellation + to propagate. It is *strongly recommended* that process lifetime + be scoped using an ``async with`` block wherever possible, to + avoid winding up with processes hanging around longer than you + were planning on. + + Args: + command (str or list): The command to run. Typically this is a + list of strings such as ``['ls', '-l', 'directory with spaces']``, + where the first element names the executable to invoke and the other + elements specify its arguments. If ``shell=True`` is given as + an option, ``command`` should be a single string like + ``"ls -l 'directory with spaces'"``, which will + split into words following the shell's quoting rules. 
+ stdin: Specifies what the child process's standard input + stream should connect to: output written by the parent + (``subprocess.PIPE``), nothing (``subprocess.DEVNULL``), + or an open file (pass a file descriptor or something whose + ``fileno`` method returns one). If ``stdin`` is unspecified, + the child process will have the same standard input stream + as its parent. + stdout: Like ``stdin``, but for the child process's standard output + stream. + stderr: Like ``stdin``, but for the child process's standard error + stream. An additional value ``subprocess.STDOUT`` is supported, + which causes the child's standard output and standard error + messages to be intermixed on a single standard output stream, + attached to whatever the ``stdout`` option says to attach it to. + **options: Other :ref:`general subprocess options ` + are also accepted. + + Attributes: + args (string or list): The ``command`` passed at construction time, + speifying the process to execute and its arguments. + pid (int): The process ID of the child process managed by this object. + stdin (trio.abc.SendStream or None): A stream connected to the child's + standard input stream: when you write bytes here, they become available + for the child to read. Only available if the :class:`Process` + was constructed using ``stdin=PIPE``; otherwise this will be None. + stdout (trio.abc.ReceiveStream or None): A stream connected to + the child's standard output stream: when the child writes to + standard output, the written bytes become available for you + to read here. Only available if the :class:`Process` was + constructed using ``stdout=PIPE``; otherwise this will be None. + stderr (trio.abc.ReceiveStream or None): A stream connected to + the child's standard error stream: when the child writes to + standard error, the written bytes become available for you + to read here. Only available if the :class:`Process` was + constructed using ``stderr=PIPE``; otherwise this will be None. 
""" @@ -60,15 +97,17 @@ class Process(AsyncResource): encoding = None errors = None - def __init__(self, args, *, stdin=None, stdout=None, stderr=None, **kwds): + def __init__(self, args, *, stdin=None, stdout=None, stderr=None, **options): if any( - kwds.get(key) - for key in ('universal_newlines', 'encoding', 'errors') + options.get(key) + for key in ('universal_newlines', "text", 'encoding', 'errors') ): - raise NotImplementedError( - "trio.Process does not support text I/O yet" + raise ValueError( + "trio.subprocess.Process only supports communicating over " + "unbuffered byte streams; text encoding and newline " + "translation must be supplied separately" ) - if kwds.get('bufsize', -1) != -1: + if options.get('bufsize', -1) != -1: raise ValueError("bufsize does not make sense for trio subprocess") self.stdin = None @@ -94,7 +133,7 @@ def __init__(self, args, *, stdin=None, stdout=None, stderr=None, **kwds): try: self._proc = subprocess.Popen( - args, stdin=stdin, stdout=stdout, stderr=stderr, **kwds + args, stdin=stdin, stdout=stdout, stderr=stderr, **options ) finally: # Close the parent's handle for each child side of a pipe; @@ -177,9 +216,13 @@ def kill(self): async def run( - *popenargs, input=None, timeout=None, deadline=None, check=False, **kwargs + command, *, input=None, capture_output=False, check=False, timeout=None, deadline=None, **options ): - """Like :func:`subprocess.run`, but async. + """Run ``command`` in a subprocess, wait for it to complete, and + return a :class:`subprocess.CompletedProcess` instance describing + the results. + + Like :func:`subprocess.run`, but async. Unlike most Trio adaptations of standard library functions, this one keeps the ``timeout`` parameter, so that it can provide you @@ -189,6 +232,52 @@ async def run( partial output on a timeout, you can of course also nest run() inside a normal Trio cancel scope. + Args: + command (str or list): The command to run. 
Typically this is a + list of strings such as ``['ls', '-l', 'directory with spaces']``, + where the first element names the executable to invoke and the other + elements specify its arguments. If ``shell=True`` is given as + an option, ``command`` should be a single string like + ``"ls -l 'directory with spaces'"``, which will + split into words following the shell's quoting rules. + input (bytes): If specified, set up the subprocess to + read its standard input stream from a pipe, and feed that + pipe with the given bytes while the subprocess is running. + Once the supplied input is exhausted, the pipe will be + closed so that the subprocess receives an end-of-file + indication. Note that ``input=b""`` and ``input=None`` + behave differently: ``input=b""`` sets up the subprocess + to read its input from a pipe with no data in it, while + ``input=None`` performs no input redirection at all, so + that the subprocess's standard input stream is the same + as the parent process's. + capture_output (bool): If true, set up the subprocess to write + its standard output and standard error streams to pipes, + the contents of which will be consumed in the parent process + and ultimately rendered as the ``stdout`` and ``stderr`` + attributes of the returned :class:`~subprocess.CompletedProcess` + object. (This argument is supported by Trio on all + Python versions, but was only added to the standard library + in 3.7.) + check (bool): If true, validate that the subprocess returns an exit + status of zero (success). Any nonzero exit status will be + converted into a :exc:`subprocess.CalledProcessError` + exception. + timeout (float): If specified, do not allow the process to run for + longer than ``timeout`` seconds; if the timeout is reached, + kill the subprocess and raise a :exc:`subprocess.TimeoutExpired` + exception containing the output that was provided thus far. 
+ deadline (float): Like ``timeout``, but specified in terms of an + absolute time on the current :ref:`clock ` + at which to kill the process. It is an error to specify both + ``timeout`` and ``deadline``. + **options: :func:`run` also accepts any :ref:`general subprocess + options ` and passes them onto the + :class:`~trio.subprocess.Process` constructor. It is an + error to specify ``stdin`` if ``input`` was also specified, + or to specify ``stdout`` or ``stderr`` if ``capture_output`` + was also specified. + Returns: A :class:`subprocess.CompletedProcess` instance describing the return code and outputs. @@ -203,9 +292,16 @@ async def run( """ if input is not None: - if 'stdin' in kwargs: + if 'stdin' in options: raise ValueError('stdin and input arguments may not both be used') - kwargs['stdin'] = subprocess.PIPE + options['stdin'] = subprocess.PIPE + + if capture_output: + if 'stdout' in options or 'stderr' in options: + raise ValueError( + 'capture_output and stdout/stderr arguments may not both be used' + ) + options['stdout'] = options['stderr'] = subprocess.PIPE if timeout is not None and deadline is not None: raise ValueError('timeout and deadline arguments may not both be used') @@ -213,7 +309,7 @@ async def run( stdout_chunks = [] stderr_chunks = [] - async with Process(*popenargs, **kwargs) as proc: + async with Process(command, **options) as proc: async def feed_input(): if input: diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index 3558b07a97..7344544c88 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -185,8 +185,7 @@ async def test_run(): result = await subprocess.run( COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, input=data, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + capture_output=True, ) assert result.args == COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR assert result.returncode == 0 @@ -197,6 +196,12 @@ async def test_run(): # can't use both input and stdin await 
subprocess.run(CAT, input=b"la di dah", stdin=subprocess.PIPE) + with pytest.raises(ValueError): + # can't use both capture_output and stdout + await subprocess.run( + CAT, input=b"la di dah", capture_output=True, stdout=subprocess.PIPE + ) + with pytest.raises(ValueError): # can't use both timeout and deadline await subprocess.run( @@ -305,7 +310,7 @@ async def test_stderr_stdout(): async def test_errors(): - with pytest.raises(NotImplementedError): + with pytest.raises(ValueError): subprocess.Process(["ls"], universal_newlines=True) with pytest.raises(ValueError): subprocess.Process(["ls"], bufsize=4096) From 3bde9a434569c4c24375373dc430537e542405a4 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Thu, 20 Dec 2018 08:31:31 -0800 Subject: [PATCH 37/49] yapf --- trio/_subprocess.py | 13 +++++++++++-- trio/tests/test_subprocess.py | 5 ++++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/trio/_subprocess.py b/trio/_subprocess.py index 158511da20..1d7ade5073 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -97,7 +97,9 @@ class Process(AsyncResource): encoding = None errors = None - def __init__(self, args, *, stdin=None, stdout=None, stderr=None, **options): + def __init__( + self, args, *, stdin=None, stdout=None, stderr=None, **options + ): if any( options.get(key) for key in ('universal_newlines', "text", 'encoding', 'errors') @@ -216,7 +218,14 @@ def kill(self): async def run( - command, *, input=None, capture_output=False, check=False, timeout=None, deadline=None, **options + command, + *, + input=None, + capture_output=False, + check=False, + timeout=None, + deadline=None, + **options ): """Run ``command`` in a subprocess, wait for it to complete, and return a :class:`subprocess.CompletedProcess` instance describing diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index 7344544c88..683ff43c66 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -199,7 +199,10 @@ async def 
test_run(): with pytest.raises(ValueError): # can't use both capture_output and stdout await subprocess.run( - CAT, input=b"la di dah", capture_output=True, stdout=subprocess.PIPE + CAT, + input=b"la di dah", + capture_output=True, + stdout=subprocess.PIPE ) with pytest.raises(ValueError): From 3386ed2208c22ff6ff0028f0d3706b1b6392f925 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Thu, 20 Dec 2018 10:27:54 -0800 Subject: [PATCH 38/49] fix newsfragment formatting --- newsfragments/4.feature.rst | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/newsfragments/4.feature.rst b/newsfragments/4.feature.rst index 3511b477f1..c37026f74e 100644 --- a/newsfragments/4.feature.rst +++ b/newsfragments/4.feature.rst @@ -1,17 +1,9 @@ -Subprocess support! Add :mod:`trio.subprocess`, an async wrapper around the -stdlib :mod:`subprocess` module with a similar interface: -:class:`~trio.subprocess.Process` (renamed from :class:`~subprocess.Popen`) -to create a process and interact with it at your leisure, -:func`~trio.subprocess.run` to run a process to completion and report the -results, and reexports of all the stdlib :mod:`subprocess` exceptions and -constants. There is a :class:`subprocess.Popen` object hiding inside each -:class:`trio.subprocess.Process`, so all the stdlib options for starting -processes in various wacky ways work unmodified. (Currently we only -support unbuffered byte streams for communicating with subprocess -pipes, so the ``universal_newlines``, ``encoding``, ``errors``, and -``bufsize`` arguments to :class:`subprocess.Popen` are not supported by the -Trio version.) Instead of file objects, the ``stdin``, ``stdout``, and -``stderr`` attributes of a :class:`trio.subprocess.Process` are Trio -streams. There is no :meth:`trio.subprocess.Process.communicate` because the -stdlib version has confusing cancellation semantics - use -:func:`trio.subprocess.run` or interact with the pipe streams directly. 
+:ref:`Subprocess support! ` Add :mod:`trio.subprocess`, an +async wrapper around the stdlib :mod:`subprocess` module with a +similar interface: :class:`~trio.subprocess.Process` (renamed from +:class:`~subprocess.Popen`) to create a process and interact with it +at your leisure, :func:`~trio.subprocess.run` to run a process to +completion and report the results, and reexports of all the stdlib +:mod:`subprocess` exceptions and constants. Instead of file objects, +the ``stdin``, ``stdout``, and ``stderr`` attributes of a +:class:`trio.subprocess.Process` are Trio streams. From 709eabff5cfa5bee943b23737edd146c95043664 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Thu, 20 Dec 2018 11:45:53 -0800 Subject: [PATCH 39/49] more doc nits --- docs/source/reference-io.rst | 2 +- trio/_subprocess.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/reference-io.rst b/docs/source/reference-io.rst index eaf5957e2b..6bc47d8d14 100644 --- a/docs/source/reference-io.rst +++ b/docs/source/reference-io.rst @@ -645,7 +645,7 @@ Spawning subprocesses with :mod:`trio.subprocess` ------------------------------------------------- The :mod:`trio.subprocess` module provides support for spawning -other programs, communicating with them via pipes, senading them signals, +other programs, communicating with them via pipes, sending them signals, and waiting for them to exit. Its interface is based on the :mod:`subprocess` module in the standard library; differences are noted below. diff --git a/trio/_subprocess.py b/trio/_subprocess.py index 1d7ade5073..0390e8ce9b 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -73,7 +73,9 @@ class Process(AsyncResource): are also accepted. Attributes: - args (string or list): The ``command`` passed at construction time, + args (str or list): The ``command`` passed at construction time,
stdin (trio.abc.SendStream or None): A stream connected to the child's From bd15874bbcbd712d2c5fce40aab90a184d1013e4 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Thu, 20 Dec 2018 20:42:00 -0800 Subject: [PATCH 40/49] wait_child_exiting takes a trio process, not a stdlib subprocess --- trio/_subprocess.py | 7 ++++++- trio/_subprocess_platform/__init__.py | 5 ++--- trio/_subprocess_platform/kqueue.py | 5 ++--- trio/_subprocess_platform/waitid.py | 18 +++++------------- trio/_subprocess_platform/windows.py | 6 +++--- 5 files changed, 18 insertions(+), 23 deletions(-) diff --git a/trio/_subprocess.py b/trio/_subprocess.py index 0390e8ce9b..1b0f15898e 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -97,6 +97,11 @@ class Process(AsyncResource): encoding = None errors = None + # Available for the per-platform wait_child_exiting() implementations + # to stash some state; waitid platforms use this to avoid spawning + # arbitrarily many threads if wait() keeps getting cancelled. + _wait_for_exit_data = None + def __init__( self, args, *, stdin=None, stdout=None, stderr=None, **options ): @@ -194,7 +199,7 @@ async def wait(self): as the negative of that signal number, e.g., -11 for ``SIGSEGV``. """ if self.poll() is None: - await wait_child_exiting(self._proc) + await wait_child_exiting(self) self._proc.wait() else: await _core.checkpoint() diff --git a/trio/_subprocess_platform/__init__.py b/trio/_subprocess_platform/__init__.py index db8536d9cc..63ac754bcc 100644 --- a/trio/_subprocess_platform/__init__.py +++ b/trio/_subprocess_platform/__init__.py @@ -1,17 +1,16 @@ # Platform-specific subprocess bits'n'pieces. import os -import subprocess import sys from typing import Tuple -from .. import _core +from .. import _core, _subprocess from .._abc import SendStream, ReceiveStream # Fallback versions of the functions provided -- implementations # per OS are imported atop these at the bottom of the module. 
-async def wait_child_exiting(process: subprocess.Popen) -> None: +async def wait_child_exiting(process: "_subprocess.Process") -> None: """Block until the child process managed by ``process`` is exiting. It is invalid to call this function if the process has already diff --git a/trio/_subprocess_platform/kqueue.py b/trio/_subprocess_platform/kqueue.py index 76b339024d..be60e52f41 100644 --- a/trio/_subprocess_platform/kqueue.py +++ b/trio/_subprocess_platform/kqueue.py @@ -1,9 +1,8 @@ import select -import subprocess -from .. import _core +from .. import _core, _subprocess -async def wait_child_exiting(process: subprocess.Popen) -> None: +async def wait_child_exiting(process: "_subprocess.Process") -> None: kqueue = _core.current_kqueue() try: from select import KQ_NOTE_EXIT diff --git a/trio/_subprocess_platform/waitid.py b/trio/_subprocess_platform/waitid.py index 615568a065..9b2f86f752 100644 --- a/trio/_subprocess_platform/waitid.py +++ b/trio/_subprocess_platform/waitid.py @@ -1,10 +1,9 @@ import errno import math import os -import subprocess import sys -from .. import _core +from .. import _core, _subprocess from .._sync import CapacityLimiter, Event from .._threads import run_sync_in_worker_thread @@ -92,7 +91,7 @@ async def _waitid_system_task(pid: int, event: Event) -> None: event.set() -async def wait_child_exiting(process: subprocess.Popen) -> None: +async def wait_child_exiting(process: "_subprocess.Process") -> None: # Logic of this function: # - The first time we get called, we create an Event and start # an instance of _waitid_system_task that will set the Event @@ -103,14 +102,7 @@ async def wait_child_exiting(process: subprocess.Popen) -> None: # create an arbitrary number of threads waiting on the same # process. 
- ATTR = "@trio_wait_event" # an unlikely attribute name, to be sure - try: - event = getattr(process, ATTR) - except AttributeError: - event = Event() - setattr(process, ATTR, event) + if process._wait_for_exit_data is None: + process._wait_for_exit_data = event = Event() _core.spawn_system_task(_waitid_system_task, process.pid, event) - - await event.wait() - # If not cancelled, there's no need to keep the event around anymore: - delattr(process, ATTR) + await process._wait_for_exit_data.wait() diff --git a/trio/_subprocess_platform/windows.py b/trio/_subprocess_platform/windows.py index b9b4e85d2b..ad9a9752db 100644 --- a/trio/_subprocess_platform/windows.py +++ b/trio/_subprocess_platform/windows.py @@ -1,6 +1,6 @@ -import subprocess +from .. import _subprocess from .._wait_for_object import WaitForSingleObject -async def wait_child_exiting(process: subprocess.Popen) -> None: - await WaitForSingleObject(int(process._handle)) +async def wait_child_exiting(process: _subprocess.Process) -> None: + await WaitForSingleObject(int(process._proc._handle)) From 8bdc21766ce1ad2f072b3bafb7d5c062a887b92f Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Thu, 20 Dec 2018 21:28:19 -0800 Subject: [PATCH 41/49] missed a spot --- trio/_subprocess_platform/windows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trio/_subprocess_platform/windows.py b/trio/_subprocess_platform/windows.py index ad9a9752db..958be8675c 100644 --- a/trio/_subprocess_platform/windows.py +++ b/trio/_subprocess_platform/windows.py @@ -2,5 +2,5 @@ from .._wait_for_object import WaitForSingleObject -async def wait_child_exiting(process: _subprocess.Process) -> None: +async def wait_child_exiting(process: "_subprocess.Process") -> None: await WaitForSingleObject(int(process._proc._handle)) From 32494e6b88ab772409eb49e9fbff3f9b8716e08e Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Fri, 21 Dec 2018 11:40:05 -0800 Subject: [PATCH 42/49] updates after code review --- 
docs/source/reference-io.rst | 5 +-- trio/_core/_io_windows.py | 54 +++++++++++++++++++------------- trio/_subprocess.py | 52 ++++++++++++++---------------- trio/subprocess.py | 2 -- trio/tests/test_subprocess.py | 8 ++--- trio/tests/test_unix_pipes.py | 25 --------------- trio/tests/test_windows_pipes.py | 10 ------ 7 files changed, 63 insertions(+), 93 deletions(-) diff --git a/docs/source/reference-io.rst b/docs/source/reference-io.rst index 6bc47d8d14..639e7f14ab 100644 --- a/docs/source/reference-io.rst +++ b/docs/source/reference-io.rst @@ -690,7 +690,7 @@ exit before returning, so there's no need to worry about leaving a process running by mistake after you've gone on to do other things. :func:`~trio.subprocess.run` can supply input or read output from the subprocess, and can optionally throw an exception if the subprocess -runs for too long or exits with a failure indication. +exits with a failure indication. .. autofunction:: trio.subprocess.run @@ -756,7 +756,8 @@ Differences from the standard library tree; moreover, using the :class:`Process` context manager in such cases is likely to be counterproductive as killing the top-level subprocess leaves it no chance to do any cleanup of its children - that might be desired. A better solution for this is in the works.) + that might be desired. You'll probably want to write your own + supervision logic in that case.) Signals diff --git a/trio/_core/_io_windows.py b/trio/_core/_io_windows.py index 87acd463d6..429ba9d9f7 100644 --- a/trio/_core/_io_windows.py +++ b/trio/_core/_io_windows.py @@ -269,18 +269,23 @@ def do_select(): # the IOCP thread somehow. Nothing more to do. pass else: - _core.reschedule( - waiter, - outcome.Error( - _core.TrioInternalError( - "Failed to cancel overlapped I/O and " - "didn't receive the completion either. Did " - "you forget to call register_with_iocp()? " - "The operation may or may not have " - "succeeded; we can't tell." 
- ) - ) + exc = _core.TrioInternalError( + "Failed to cancel overlapped I/O in {} and didn't " + "receive the completion either. Did you forget to " + "call register_with_iocp()?".format(waiter.name) ) + # Raising this out of handle_io ensures that + # the user will see our message even if some + # other task is in an uncancellable wait due + # to the same underlying forgot-to-register + # issue (if their CancelIoEx succeeds, we + # have no way of noticing that their completion + # won't arrive). Unfortunately it loses the + # task traceback. If you're debugging this + # error and can't tell where it's coming from, + # try changing this line to + # _core.reschedule(waiter, outcome.Error(exc)) + raise exc else: # dispatch on lpCompletionKey queue = self._completion_key_queues[entry.lpCompletionKey] @@ -483,7 +488,7 @@ async def write_overlapped(self, handle, data, file_offset=0): # For typical types of `data` (bytes, bytearray) the memory we # pass is part of the existing allocation, but the buffer protocol # allows for other possibilities. - cbuf_register = [ffi.from_buffer(data)] + cbuf = ffi.from_buffer(data) def submit_write(lpOverlapped): # yes, these are the real documented names @@ -493,8 +498,8 @@ def submit_write(lpOverlapped): _check( kernel32.WriteFile( _handle(handle), - ffi.cast("LPCVOID", cbuf_register[0]), - len(cbuf_register[0]), + ffi.cast("LPCVOID", cbuf), + len(cbuf), ffi.NULL, lpOverlapped, ) @@ -524,7 +529,9 @@ def submit_write(lpOverlapped): # we get a BufferError. # # Unfortunately, there seems to be no way to drop that - # reference without destroying the FFI buffer object. + # reference without destroying the FFI buffer object, + # alhough one might be coming soon. + # (https://bitbucket.org/cffi/cffi/issues/395/) # We can't even call __del__, because there's no __del__ # exposed. 
On CPython, when we return normally, the frame # and its locals are destroyed, but when we throw an @@ -532,10 +539,12 @@ def submit_write(lpOverlapped): # So, we need to drop the reference to the FFI buffer # explicitly when unwinding. # - # This probably doesn't help on PyPy, but we don't really - # support PyPy on Windows anyway, so... + # This doesn't help with destruction on PyPy, but PyPy + # doesn't currently track buffer references in the same + # way as CPython does, so there's no need for a workaround + # there. # - del cbuf_register[:] + del cbuf @_public async def readinto_overlapped(self, handle, buffer, file_offset=0): @@ -544,11 +553,12 @@ async def readinto_overlapped(self, handle, buffer, file_offset=0): # operation otherwise. A future release of CFFI will support # ffi.from_buffer(foo, require_writable=True) to do the same # thing less circumlocutiously. + # (https://bitbucket.org/cffi/cffi/issues/394/) ffi.memmove(buffer, b"", 0) # As in write_overlapped, we want to ensure the buffer stays # alive for the duration of the I/O. 
- cbuf_register = [ffi.from_buffer(buffer)] + cbuf = ffi.from_buffer(buffer) def submit_read(lpOverlapped): offset_fields = lpOverlapped.DUMMYUNIONNAME.DUMMYSTRUCTNAME @@ -557,8 +567,8 @@ def submit_read(lpOverlapped): _check( kernel32.ReadFile( _handle(handle), - ffi.cast("LPVOID", cbuf_register[0]), - len(cbuf_register[0]), + ffi.cast("LPVOID", cbuf), + len(cbuf), ffi.NULL, lpOverlapped, ) @@ -569,4 +579,4 @@ def submit_read(lpOverlapped): return lpOverlapped.InternalHigh finally: # See discussion in write_overlapped() - del cbuf_register[:] + del cbuf diff --git a/trio/_subprocess.py b/trio/_subprocess.py index 1b0f15898e..e084a7ec43 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -105,17 +105,15 @@ class Process(AsyncResource): def __init__( self, args, *, stdin=None, stdout=None, stderr=None, **options ): - if any( - options.get(key) - for key in ('universal_newlines', "text", 'encoding', 'errors') + for key in ( + 'universal_newlines', 'text', 'encoding', 'errors', 'bufsize' ): - raise ValueError( - "trio.subprocess.Process only supports communicating over " - "unbuffered byte streams; text encoding and newline " - "translation must be supplied separately" - ) - if options.get('bufsize', -1) != -1: - raise ValueError("bufsize does not make sense for trio subprocess") + if options.get(key): + raise TypeError( + "trio.subprocess.Process only supports communicating over " + "unbuffered byte streams; the '{}' option is not supported" + .format(key) + ) self.stdin = None self.stdout = None @@ -238,13 +236,10 @@ async def run( Like :func:`subprocess.run`, but async. - Unlike most Trio adaptations of standard library functions, this - one keeps the ``timeout`` parameter, so that it can provide you - with the process's partial output if it is killed due to a - timeout. It also adds ``deadline`` as an option if you prefer to - express your timeout absolutely. 
If you don't care about preserving - partial output on a timeout, you can of course also nest run() - inside a normal Trio cancel scope. + If cancelled, :func:`run` kills the subprocess and waits for it + to exit before propagating the cancellation, like :meth:`Process.aclose`. + If you need to be able to tell what partial output the process produced + before a timeout, see the ``timeout`` and ``deadline`` arguments. Args: command (str or list): The command to run. Typically this is a @@ -326,19 +321,20 @@ async def run( async with Process(command, **options) as proc: async def feed_input(): - if input: - try: - await proc.stdin.send_all(input) - except _core.BrokenResourceError: - pass - await proc.stdin.aclose() + async with proc.stdin: + if input: + try: + await proc.stdin.send_all(input) + except _core.BrokenResourceError: + pass async def read_output(stream, chunks): - while True: - chunk = await stream.receive_some(32768) - if not chunk: - break - chunks.append(chunk) + async with stream: + while True: + chunk = await stream.receive_some(32768) + if not chunk: + break + chunks.append(chunk) async with _core.open_nursery() as nursery: if proc.stdin is not None: diff --git a/trio/subprocess.py b/trio/subprocess.py index 9b40b98920..7f2ebdf08c 100644 --- a/trio/subprocess.py +++ b/trio/subprocess.py @@ -1,7 +1,5 @@ from ._subprocess import Process, run -Popen = Process - # Reexport constants and exceptions from the stdlib subprocess module from subprocess import ( PIPE, STDOUT, DEVNULL, CalledProcessError, SubprocessError, TimeoutExpired, diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index 683ff43c66..fe4e6192a0 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -313,10 +313,10 @@ async def test_stderr_stdout(): async def test_errors(): - with pytest.raises(ValueError): - subprocess.Process(["ls"], universal_newlines=True) - with pytest.raises(ValueError): - subprocess.Process(["ls"], bufsize=4096) + 
with pytest.raises(TypeError) as excinfo: + subprocess.Process(["ls"], encoding="utf-8") + assert "unbuffered byte streams" in str(excinfo.value) + assert "the 'encoding' option is not supported" in str(excinfo.value) async def test_signals(): diff --git a/trio/tests/test_unix_pipes.py b/trio/tests/test_unix_pipes.py index 2a0683a3fc..2e8815ca06 100644 --- a/trio/tests/test_unix_pipes.py +++ b/trio/tests/test_unix_pipes.py @@ -62,16 +62,6 @@ async def reader(): await write.aclose() -async def test_send_on_closed_pipe(): - write, read = await make_pipe() - await write.aclose() - - with pytest.raises(_core.ClosedResourceError): - await write.send_all(b"123") - - await read.aclose() - - async def test_pipe_errors(): with pytest.raises(TypeError): PipeReceiveStream(None) @@ -112,21 +102,6 @@ async def test_async_with(): assert excinfo.value.errno == errno.EBADF -async def test_close_during_write(): - w, r = await make_pipe() - async with _core.open_nursery() as nursery: - - async def write_forever(): - with pytest.raises(_core.ClosedResourceError) as excinfo: - while True: - await w.send_all(b"x" * 4096) - assert "another task" in str(excinfo) - - nursery.start_soon(write_forever) - await wait_all_tasks_blocked(0.1) - await w.aclose() - - async def make_clogged_pipe(): s, r = await make_pipe() try: diff --git a/trio/tests/test_windows_pipes.py b/trio/tests/test_windows_pipes.py index ca09e23eae..bba3cfb00b 100644 --- a/trio/tests/test_windows_pipes.py +++ b/trio/tests/test_windows_pipes.py @@ -38,16 +38,6 @@ async def reader(): await write.aclose() -async def test_send_on_closed_pipe(): - write, read = await make_pipe() - await write.aclose() - - with pytest.raises(_core.ClosedResourceError): - await write.send_all(b"123") - - await read.aclose() - - async def test_pipe_errors(): with pytest.raises(TypeError): PipeReceiveStream(None) From b175e62b16aad7c6ff3cef71a438c3fedf2a7c8b Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Fri, 21 Dec 2018 13:03:46 -0800 Subject: 
[PATCH 43/49] fix forgot-to-register test --- trio/_core/tests/test_windows.py | 34 +++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/trio/_core/tests/test_windows.py b/trio/_core/tests/test_windows.py index c4de83606c..6f6747c025 100644 --- a/trio/_core/tests/test_windows.py +++ b/trio/_core/tests/test_windows.py @@ -84,17 +84,7 @@ async def read_region(start, end): start, ) - # Make sure reading before the handle is registered - # fails rather than hanging forever - with pytest.raises(_core.TrioInternalError) as exc_info: - with move_on_after(0.5): - await read_region(0, 512) - assert "Did you forget to call register_with_iocp()?" in str( - exc_info.value - ) - _core.register_with_iocp(handle) - async with _core.open_nursery() as nursery: for start in range(0, 4096, 512): nursery.start_soon(read_region, start, start + 512) @@ -113,7 +103,6 @@ def pipe_with_overlapped_read(): import msvcrt read_handle, write_handle = pipe(overlapped=(True, False)) - _core.register_with_iocp(read_handle) try: write_fd = msvcrt.open_osfhandle(write_handle, 0) yield os.fdopen(write_fd, "wb", closefd=False), read_handle @@ -122,10 +111,33 @@ def pipe_with_overlapped_read(): kernel32.CloseHandle(ffi.cast("HANDLE", write_handle)) +def test_forgot_to_register_with_iocp(): + with pipe_with_overlapped_read() as (write_fp, read_handle): + write_fp.close() + left_run_yet = False + + async def main(): + target = bytearray(1) + try: + with move_on_after(0): + await _core.readinto_overlapped(read_handle, target) + finally: + # Run loop is exited without unwinding running tasks, so + # we don't get here until the main() coroutine is GC'ed + assert left_run_yet + + with pytest.raises(_core.TrioInternalError) as exc_info: + _core.run(main) + left_run_yet = True + assert "Failed to cancel overlapped I/O in main" in str(exc_info.value) + assert "forget to call register_with_iocp()?" 
in str(exc_info.value) + + async def test_too_late_to_cancel(): import time with pipe_with_overlapped_read() as (write_fp, read_handle): + _core.register_with_iocp(read_handle) target = bytearray(6) async with _core.open_nursery() as nursery: # Start an async read in the background From 5477ac155090e93ba9d15cc7c2d3d83449dba3dd Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Fri, 21 Dec 2018 13:20:55 -0800 Subject: [PATCH 44/49] wait for the I/O to probably complete before we try to cancel it --- trio/_core/tests/test_windows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trio/_core/tests/test_windows.py b/trio/_core/tests/test_windows.py index 6f6747c025..6352f12e07 100644 --- a/trio/_core/tests/test_windows.py +++ b/trio/_core/tests/test_windows.py @@ -119,7 +119,7 @@ def test_forgot_to_register_with_iocp(): async def main(): target = bytearray(1) try: - with move_on_after(0): + with move_on_after(0.5): await _core.readinto_overlapped(read_handle, target) finally: # Run loop is exited without unwinding running tasks, so From 3bc77567fc598ec376433c13a8f0d17849a7ce94 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 22 Dec 2018 17:01:28 -0800 Subject: [PATCH 45/49] fix test_forgot_to_register_with_iocp, make test_too_late_to_cancel @slow --- trio/_core/tests/test_windows.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/trio/_core/tests/test_windows.py b/trio/_core/tests/test_windows.py index 6352f12e07..e46a006da7 100644 --- a/trio/_core/tests/test_windows.py +++ b/trio/_core/tests/test_windows.py @@ -8,6 +8,7 @@ # Mark all the tests in this file as being windows-only pytestmark = pytest.mark.skipif(not on_windows, reason="windows only") +from .tutil import slow from ... 
import _core, sleep, move_on_after from ...testing import wait_all_tasks_blocked if on_windows: @@ -113,14 +114,22 @@ def pipe_with_overlapped_read(): def test_forgot_to_register_with_iocp(): with pipe_with_overlapped_read() as (write_fp, read_handle): - write_fp.close() + with write_fp: + write_fp.write(b"test\n") + left_run_yet = False + nursery = None async def main(): + nonlocal nursery target = bytearray(1) try: - with move_on_after(0.5): - await _core.readinto_overlapped(read_handle, target) + async with _core.open_nursery() as nursery: + nursery.start_soon( + _core.readinto_overlapped, read_handle, target, name="_" + ) + await wait_all_tasks_blocked() + nursery.cancel_scope.cancel() finally: # Run loop is exited without unwinding running tasks, so # we don't get here until the main() coroutine is GC'ed @@ -129,10 +138,16 @@ async def main(): with pytest.raises(_core.TrioInternalError) as exc_info: _core.run(main) left_run_yet = True - assert "Failed to cancel overlapped I/O in main" in str(exc_info.value) + assert "Failed to cancel overlapped I/O in _ " in str(exc_info.value) assert "forget to call register_with_iocp()?" 
in str(exc_info.value) + # Suppress the Nursery.__del__ assertion about dangling children, + # for both the nursery we created and the system nursery + nursery._children.clear() + nursery.parent_task.parent_nursery._children.clear() + +@slow async def test_too_late_to_cancel(): import time From 14c6991b39d45ad5356f0adb80133b94f65e7daa Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 22 Dec 2018 17:28:46 -0800 Subject: [PATCH 46/49] yapf --- trio/_core/tests/test_windows.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/trio/_core/tests/test_windows.py b/trio/_core/tests/test_windows.py index e46a006da7..a722ab8be7 100644 --- a/trio/_core/tests/test_windows.py +++ b/trio/_core/tests/test_windows.py @@ -126,7 +126,10 @@ async def main(): try: async with _core.open_nursery() as nursery: nursery.start_soon( - _core.readinto_overlapped, read_handle, target, name="_" + _core.readinto_overlapped, + read_handle, + target, + name="xyz" ) await wait_all_tasks_blocked() nursery.cancel_scope.cancel() @@ -138,7 +141,7 @@ async def main(): with pytest.raises(_core.TrioInternalError) as exc_info: _core.run(main) left_run_yet = True - assert "Failed to cancel overlapped I/O in _ " in str(exc_info.value) + assert "Failed to cancel overlapped I/O in xyz " in str(exc_info.value) assert "forget to call register_with_iocp()?" 
in str(exc_info.value) # Suppress the Nursery.__del__ assertion about dangling children, From ee7799203030dffbd6d2049230ea66236c0ece60 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Sat, 22 Dec 2018 20:59:54 -0800 Subject: [PATCH 47/49] Use gc_collect_harder instead of trying to fool Trio into not warning --- trio/_core/tests/test_windows.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/trio/_core/tests/test_windows.py b/trio/_core/tests/test_windows.py index a722ab8be7..42ee79060d 100644 --- a/trio/_core/tests/test_windows.py +++ b/trio/_core/tests/test_windows.py @@ -8,7 +8,7 @@ # Mark all the tests in this file as being windows-only pytestmark = pytest.mark.skipif(not on_windows, reason="windows only") -from .tutil import slow +from .tutil import slow, gc_collect_harder from ... import _core, sleep, move_on_after from ...testing import wait_all_tasks_blocked if on_windows: @@ -118,10 +118,8 @@ def test_forgot_to_register_with_iocp(): write_fp.write(b"test\n") left_run_yet = False - nursery = None async def main(): - nonlocal nursery target = bytearray(1) try: async with _core.open_nursery() as nursery: @@ -144,10 +142,10 @@ async def main(): assert "Failed to cancel overlapped I/O in xyz " in str(exc_info.value) assert "forget to call register_with_iocp()?" 
in str(exc_info.value) - # Suppress the Nursery.__del__ assertion about dangling children, - # for both the nursery we created and the system nursery - nursery._children.clear() - nursery.parent_task.parent_nursery._children.clear() + # Make sure the Nursery.__del__ assertion about dangling children + # gets put with the correct test + del exc_info + gc_collect_harder() @slow From 879b3b85c17dbd1b77d9aa92fd2c1eaf1d02c955 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Tue, 25 Dec 2018 18:37:23 -0800 Subject: [PATCH 48/49] Remove run() --- docs/source/reference-io.rst | 83 ++++++++++++----- newsfragments/4.feature.rst | 15 ++-- trio/_subprocess.py | 162 ++-------------------------------- trio/subprocess.py | 2 +- trio/tests/test_subprocess.py | 130 ++++----------------------- 5 files changed, 93 insertions(+), 299 deletions(-) diff --git a/docs/source/reference-io.rst b/docs/source/reference-io.rst index 639e7f14ab..6ed76b0c42 100644 --- a/docs/source/reference-io.rst +++ b/docs/source/reference-io.rst @@ -684,23 +684,69 @@ on top of the raw byte streams, just as it does with sockets. Running a process and waiting for it to finish ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The basic interface for running a subprocess start-to-finish is -:func:`trio.subprocess.run`. It always waits for the subprocess to -exit before returning, so there's no need to worry about leaving -a process running by mistake after you've gone on to do other things. -:func:`~trio.subprocess.run` can supply input or read output from -the subprocess, and can optionally throw an exception if the subprocess -exits with a failure indication. - -.. autofunction:: trio.subprocess.run +We're `working on ` +figuring out the best API for common higher-level subprocess operations. 
+In the meantime, you can implement something like the standard library +:func:`subprocess.run` in terms of :class:`trio.subprocess.Process` +as follows:: + + async def run( + command, *, input=None, capture_output=False, **options + ): + if input is not None: + options['stdin'] = subprocess.PIPE + if capture_output: + options['stdout'] = options['stderr'] = subprocess.PIPE + + stdout_chunks = [] + stderr_chunks = [] + + async with trio.subprocess.Process(command, **options) as proc: + + async def feed_input(): + async with proc.stdin: + if input: + try: + await proc.stdin.send_all(input) + except trio.BrokenResourceError: + pass + + async def read_output(stream, chunks): + async with stream: + while True: + chunk = await stream.receive_some(32768) + if not chunk: + break + chunks.append(chunk) + + async with trio.open_nursery() as nursery: + if proc.stdin is not None: + nursery.start_soon(feed_input) + if proc.stdout is not None: + nursery.start_soon(read_output, proc.stdout, stdout_chunks) + if proc.stderr is not None: + nursery.start_soon(read_output, proc.stderr, stderr_chunks) + await proc.wait() + + stdout = b"".join(stdout_chunks) if proc.stdout is not None else None + stderr = b"".join(stderr_chunks) if proc.stderr is not None else None + + if proc.returncode: + raise subprocess.CalledProcessError( + proc.returncode, proc.args, output=stdout, stderr=stderr + ) + else: + return subprocess.CompletedProcess( + proc.args, proc.returncode, stdout, stderr + ) Interacting with a process as it runs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If :func:`~trio.subprocess.run` doesn't suit your needs, you can spawn -a subprocess by creating an instance of :class:`trio.subprocess.Process` -and then interact with it using its :attr:`~trio.subprocess.Process.stdin`, +You can spawn a subprocess by creating an instance of +:class:`trio.subprocess.Process` and then interact with it using its +:attr:`~trio.subprocess.Process.stdin`, :attr:`~trio.subprocess.Process.stdout`, and/or 
:attr:`~trio.subprocess.Process.stderr` streams. @@ -711,20 +757,17 @@ and then interact with it using its :attr:`~trio.subprocess.Process.stdin`, Differences from the standard library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -* All arguments to :class:`~trio.subprocess.run` and the constructor of +* All arguments to the constructor of :class:`~trio.subprocess.Process`, except the command to run, must be passed using keywords. * :func:`~subprocess.call`, :func:`~subprocess.check_call`, and - :func:`~subprocess.check_output` are not provided; use - :func:`~trio.subprocess.run` instead. + :func:`~subprocess.check_output` are not provided. * :meth:`~subprocess.Popen.communicate` is not provided as a method on - :class:`~trio.subprocess.Process` objects; use - :func:`~trio.subprocess.run` instead, or write the loop yourself if - you have unusual needs (the implementation of - :func:`~trio.subprocess.run` is fairly straightforward given trio's - concurrency primitives). :meth:`~subprocess.Popen.communicate` has + :class:`~trio.subprocess.Process` objects; use a higher-level + function instead, or write the loop yourself if + you have unusual needs. :meth:`~subprocess.Popen.communicate` has quite unusual cancellation behavior in the standard library (on some platforms it spawns a background thread which continues to read from the child process even after the timeout has expired) and we wanted diff --git a/newsfragments/4.feature.rst b/newsfragments/4.feature.rst index c37026f74e..4acb42f88d 100644 --- a/newsfragments/4.feature.rst +++ b/newsfragments/4.feature.rst @@ -1,9 +1,6 @@ -:ref:`Subprocess support! 
` Add :mod:`trio.subprocess`, an -async wrapper around the stdlib :mod:`subprocess` module with a -similar interface: :class:`~trio.subprocess.Process` (renamed from -:class:`~subprocess.Popen`) to create a process and interact with it -at your leisure, :func`~trio.subprocess.run` to run a process to -completion and report the results, and reexports of all the stdlib -:mod:`subprocess` exceptions and constants. Instead of file objects, -the ``stdin``, ``stdout``, and ``stderr`` attributes of a -:class:`trio.subprocess.Process` are Trio streams. +Initial :ref:`subprocess support `. +Add :class:`trio.subprocess.Process`, an async wrapper around the stdlib +:mod:`subprocess.Popen` class, which permits spawning subprocesses +and communicating with them over standard Trio streams. +:mod:`trio.subprocess` also reexports all the stdlib :mod:`subprocess` +exceptions and constants for convenience. diff --git a/trio/_subprocess.py b/trio/_subprocess.py index e084a7ec43..b1ab84ffd2 100644 --- a/trio/_subprocess.py +++ b/trio/_subprocess.py @@ -13,7 +13,7 @@ create_pipe_from_child_output ) -__all__ = ["Process", "run"] +__all__ = ["Process"] class Process(AsyncResource): @@ -32,9 +32,10 @@ class Process(AsyncResource): :meth:`send_signal`, and waiting for it to exit using :meth:`wait`. Each standard stream is only available if it was specified at - :class:`Process` construction time that a pipe should be created for it: - if you constructed with ``stdin=subprocess.PIPE``, you can write to - the :attr:`stdin` stream, else :attr:`stdin` will be ``None``. + :class:`Process` construction time that a pipe should be created + for it. For example, if you constructed with + ``stdin=subprocess.PIPE``, you can write to the :attr:`stdin` + stream, else :attr:`stdin` will be ``None``. 
:class:`Process` implements :class:`~trio.abc.AsyncResource`, so you can use it as an async context manager or call its @@ -218,156 +219,3 @@ def terminate(self): def kill(self): """Forwards to :meth:`subprocess.Popen.kill`.""" self._proc.kill() - - -async def run( - command, - *, - input=None, - capture_output=False, - check=False, - timeout=None, - deadline=None, - **options -): - """Run ``command`` in a subprocess, wait for it to complete, and - return a :class:`subprocess.CompletedProcess` instance describing - the results. - - Like :func:`subprocess.run`, but async. - - If cancelled, :func:`run` kills the subprocess and waits for it - to exit before propagating the cancellation, like :meth:`Process.aclose`. - If you need to be able to tell what partial output the process produced - before a timeout, see the ``timeout`` and ``deadline`` arguments. - - Args: - command (str or list): The command to run. Typically this is a - list of strings such as ``['ls', '-l', 'directory with spaces']``, - where the first element names the executable to invoke and the other - elements specify its arguments. If ``shell=True`` is given as - an option, ``command`` should be a single string like - ``"ls -l 'directory with spaces'"``, which will - split into words following the shell's quoting rules. - input (bytes): If specified, set up the subprocess to - read its standard input stream from a pipe, and feed that - pipe with the given bytes while the subprocess is running. - Once the supplied input is exhausted, the pipe will be - closed so that the subprocess receives an end-of-file - indication. Note that ``input=b""`` and ``input=None`` - behave differently: ``input=b""`` sets up the subprocess - to read its input from a pipe with no data in it, while - ``input=None`` performs no input redirection at all, so - that the subprocess's standard input stream is the same - as the parent process's. 
- capture_output (bool): If true, set up the subprocess to write - its standard output and standard error streams to pipes, - the contents of which will be consumed in the parent process - and ultimately rendered as the ``stdout`` and ``stderr`` - attributes of the returned :class:`~subprocess.CompletedProcess` - object. (This argument is supported by Trio on all - Python versions, but was only added to the standard library - in 3.7.) - check (bool): If true, validate that the subprocess returns an exit - status of zero (success). Any nonzero exit status will be - converted into a :exc:`subprocess.CalledProcessError` - exception. - timeout (float): If specified, do not allow the process to run for - longer than ``timeout`` seconds; if the timeout is reached, - kill the subprocess and raise a :exc:`subprocess.TimeoutExpired` - exception containing the output that was provided thus far. - deadline (float): Like ``timeout``, but specified in terms of an - absolute time on the current :ref:`clock ` - at which to kill the process. It is an error to specify both - ``timeout`` and ``deadline``. - **options: :func:`run` also accepts any :ref:`general subprocess - options ` and passes them onto the - :class:`~trio.subprocess.Process` constructor. It is an - error to specify ``stdin`` if ``input`` was also specified, - or to specify ``stdout`` or ``stderr`` if ``capture_output`` - was also specified. - - Returns: - A :class:`subprocess.CompletedProcess` instance describing the - return code and outputs. 
- - Raises: - subprocess.TimeoutExpired: if the process is killed due to timeout - expiry - subprocess.CalledProcessError: if check=True is passed and the process - exits with a nonzero exit status - OSError: if an error is encountered starting or communicating with - the process - - """ - if input is not None: - if 'stdin' in options: - raise ValueError('stdin and input arguments may not both be used') - options['stdin'] = subprocess.PIPE - - if capture_output: - if 'stdout' in options or 'stderr' in options: - raise ValueError( - 'capture_output and stdout/stderr arguments may not both be used' - ) - options['stdout'] = options['stderr'] = subprocess.PIPE - - if timeout is not None and deadline is not None: - raise ValueError('timeout and deadline arguments may not both be used') - - stdout_chunks = [] - stderr_chunks = [] - - async with Process(command, **options) as proc: - - async def feed_input(): - async with proc.stdin: - if input: - try: - await proc.stdin.send_all(input) - except _core.BrokenResourceError: - pass - - async def read_output(stream, chunks): - async with stream: - while True: - chunk = await stream.receive_some(32768) - if not chunk: - break - chunks.append(chunk) - - async with _core.open_nursery() as nursery: - if proc.stdin is not None: - nursery.start_soon(feed_input) - if proc.stdout is not None: - nursery.start_soon(read_output, proc.stdout, stdout_chunks) - if proc.stderr is not None: - nursery.start_soon(read_output, proc.stderr, stderr_chunks) - - with _core.open_cancel_scope() as wait_scope: - if timeout is not None: - wait_scope.deadline = _core.current_time() + timeout - if deadline is not None: - wait_scope.deadline = deadline - timeout = deadline - _core.current_time() - await proc.wait() - - if wait_scope.cancelled_caught: - proc.kill() - nursery.cancel_scope.cancel() - - stdout = b"".join(stdout_chunks) if proc.stdout is not None else None - stderr = b"".join(stderr_chunks) if proc.stderr is not None else None - - if 
wait_scope.cancelled_caught: - raise subprocess.TimeoutExpired( - proc.args, timeout, output=stdout, stderr=stderr - ) - if check and proc.returncode: - raise subprocess.CalledProcessError( - proc.returncode, proc.args, output=stdout, stderr=stderr - ) - - return subprocess.CompletedProcess( - proc.args, proc.returncode, stdout, stderr - ) diff --git a/trio/subprocess.py b/trio/subprocess.py index 7f2ebdf08c..b91e28784a 100644 --- a/trio/subprocess.py +++ b/trio/subprocess.py @@ -1,4 +1,4 @@ -from ._subprocess import Process, run +from ._subprocess import Process # Reexport constants and exceptions from the stdlib subprocess module from subprocess import ( diff --git a/trio/tests/test_subprocess.py b/trio/tests/test_subprocess.py index fe4e6192a0..e0bd0857ed 100644 --- a/trio/tests/test_subprocess.py +++ b/trio/tests/test_subprocess.py @@ -165,104 +165,6 @@ async def drain_one(stream, count, digit): assert proc.returncode == 0 -async def test_run(): - data = bytes(random.randint(0, 255) for _ in range(2**18)) - - result = await subprocess.run(CAT, input=data, stdout=subprocess.PIPE) - assert result.args == CAT - assert result.returncode == 0 - assert result.stdout == data - assert result.stderr is None - - result = await subprocess.run( - CAT, stdin=subprocess.PIPE, stdout=subprocess.PIPE - ) - assert result.args == CAT - assert result.returncode == 0 - assert result.stdout == b"" - assert result.stderr is None - - result = await subprocess.run( - COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, - input=data, - capture_output=True, - ) - assert result.args == COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR - assert result.returncode == 0 - assert result.stdout == data - assert result.stderr == data[::-1] - - with pytest.raises(ValueError): - # can't use both input and stdin - await subprocess.run(CAT, input=b"la di dah", stdin=subprocess.PIPE) - - with pytest.raises(ValueError): - # can't use both capture_output and stdout - await subprocess.run( - CAT, - input=b"la di 
dah", - capture_output=True, - stdout=subprocess.PIPE - ) - - with pytest.raises(ValueError): - # can't use both timeout and deadline - await subprocess.run( - EXIT_TRUE, timeout=1, deadline=_core.current_time() - ) - - -@slow -async def test_run_timeout(): - data = b"1" * 65536 + b"2" * 65536 + b"3" * 65536 - child_script = """ -import sys, time -sys.stdout.buffer.write(sys.stdin.buffer.read(32768)) -time.sleep(10) -sys.stdout.buffer.write(sys.stdin.buffer.read()) -""" - - for make_timeout_arg in ( - lambda: {"timeout": 1.0}, - lambda: {"deadline": _core.current_time() + 1.0} - ): - with pytest.raises(subprocess.TimeoutExpired) as excinfo: - await subprocess.run( - [sys.executable, "-c", child_script], - input=data, - stdout=subprocess.PIPE, - **make_timeout_arg() - ) - assert excinfo.value.cmd == [sys.executable, "-c", child_script] - if "timeout" in make_timeout_arg(): - assert excinfo.value.timeout == 1.0 - else: - assert 0.9 < excinfo.value.timeout < 1.1 - assert excinfo.value.stdout == data[:32768] - assert excinfo.value.stderr is None - - -async def test_run_check(): - cmd = python("sys.stderr.buffer.write(b'test\\n'); sys.exit(1)") - with pytest.raises(subprocess.CalledProcessError) as excinfo: - await subprocess.run(cmd, stderr=subprocess.PIPE, check=True) - assert excinfo.value.cmd == cmd - assert excinfo.value.returncode == 1 - assert excinfo.value.stderr == b"test\n" - assert excinfo.value.stdout is None - - -async def test_run_with_broken_pipe(): - result = await subprocess.run( - [sys.executable, "-c", "import sys; sys.stdin.close()"], - input=b"x" * 131072, - stdout=subprocess.PIPE, - ) - assert result.returncode == 0 - assert result.stdout == b"" - assert result.stderr is None - - async def test_stderr_stdout(): async with subprocess.Process( COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, @@ -272,23 +174,27 @@ async def test_stderr_stdout(): ) as proc: assert proc.stdout is not None assert proc.stderr is None - - result = await subprocess.run( - 
COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR, - input=b"1234", - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - ) - assert result.returncode == 0 - assert result.stdout == b"12344321" - assert result.stderr is None + await proc.stdin.send_all(b"1234") + await proc.stdin.aclose() + + output = [] + while True: + chunk = await proc.stdout.receive_some(16) + if chunk == b"": + break + output.append(chunk) + assert b"".join(output) == b"12344321" + assert proc.returncode == 0 # this one hits the branch where stderr=STDOUT but stdout # is not redirected - result = await subprocess.run(CAT, input=b"", stderr=subprocess.STDOUT) - assert result.returncode == 0 - assert result.stdout is None - assert result.stderr is None + async with subprocess.Process( + CAT, stdin=subprocess.PIPE, stderr=subprocess.STDOUT + ) as proc: + assert proc.stdout is None + assert proc.stderr is None + await proc.stdin.aclose() + assert proc.returncode == 0 if posix: try: From 8ec3a6cea65412d67a495586c07fed7d921ac062 Mon Sep 17 00:00:00 2001 From: Joshua Oreman Date: Wed, 26 Dec 2018 10:56:51 -1000 Subject: [PATCH 49/49] fix newsfragment reference --- newsfragments/4.feature.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/newsfragments/4.feature.rst b/newsfragments/4.feature.rst index 4acb42f88d..0a34bf041d 100644 --- a/newsfragments/4.feature.rst +++ b/newsfragments/4.feature.rst @@ -1,6 +1,6 @@ Initial :ref:`subprocess support `. Add :class:`trio.subprocess.Process`, an async wrapper around the stdlib -:mod:`subprocess.Popen` class, which permits spawning subprocesses +:class:`subprocess.Popen` class, which permits spawning subprocesses and communicating with them over standard Trio streams. :mod:`trio.subprocess` also reexports all the stdlib :mod:`subprocess` exceptions and constants for convenience.