diff --git a/Lib/asyncio/__init__.py b/Lib/asyncio/__init__.py index 03165a425eb..32a5dbae03a 100644 --- a/Lib/asyncio/__init__.py +++ b/Lib/asyncio/__init__.py @@ -10,6 +10,7 @@ from .events import * from .exceptions import * from .futures import * +from .graph import * from .locks import * from .protocols import * from .runners import * @@ -27,6 +28,7 @@ events.__all__ + exceptions.__all__ + futures.__all__ + + graph.__all__ + locks.__all__ + protocols.__all__ + runners.__all__ + @@ -45,3 +47,28 @@ else: from .unix_events import * # pragma: no cover __all__ += unix_events.__all__ + +def __getattr__(name: str): + import warnings + + match name: + case "AbstractEventLoopPolicy": + warnings._deprecated(f"asyncio.{name}", remove=(3, 16)) + return events._AbstractEventLoopPolicy + case "DefaultEventLoopPolicy": + warnings._deprecated(f"asyncio.{name}", remove=(3, 16)) + if sys.platform == 'win32': + return windows_events._DefaultEventLoopPolicy + return unix_events._DefaultEventLoopPolicy + case "WindowsSelectorEventLoopPolicy": + if sys.platform == 'win32': + warnings._deprecated(f"asyncio.{name}", remove=(3, 16)) + return windows_events._WindowsSelectorEventLoopPolicy + # Else fall through to the AttributeError below. + case "WindowsProactorEventLoopPolicy": + if sys.platform == 'win32': + warnings._deprecated(f"asyncio.{name}", remove=(3, 16)) + return windows_events._WindowsProactorEventLoopPolicy + # Else fall through to the AttributeError below. + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/Lib/asyncio/__main__.py b/Lib/asyncio/__main__.py index 8e9af83cc8a..e07dd52a2a5 100644 --- a/Lib/asyncio/__main__.py +++ b/Lib/asyncio/__main__.py @@ -1,5 +1,7 @@ +import argparse import ast import asyncio +import asyncio.tools import concurrent.futures import contextvars import inspect @@ -10,7 +12,7 @@ import types import warnings -from _colorize import can_colorize, ANSIColors # type: ignore[import-not-found] +from _colorize import get_theme from _pyrepl.console import InteractiveColoredConsole from . 
import futures @@ -102,8 +104,9 @@ def run(self): exec(startup_code, console.locals) ps1 = getattr(sys, "ps1", ">>> ") - if can_colorize() and CAN_USE_PYREPL: - ps1 = f"{ANSIColors.BOLD_MAGENTA}{ps1}{ANSIColors.RESET}" + if CAN_USE_PYREPL: + theme = get_theme().syntax + ps1 = f"{theme.prompt}{ps1}{theme.reset}" console.write(f"{ps1}import asyncio\n") if CAN_USE_PYREPL: @@ -141,6 +144,37 @@ def interrupt(self) -> None: if __name__ == '__main__': + parser = argparse.ArgumentParser( + prog="python3 -m asyncio", + description="Interactive asyncio shell and CLI tools", + color=True, + ) + subparsers = parser.add_subparsers(help="sub-commands", dest="command") + ps = subparsers.add_parser( + "ps", help="Display a table of all pending tasks in a process" + ) + ps.add_argument("pid", type=int, help="Process ID to inspect") + pstree = subparsers.add_parser( + "pstree", help="Display a tree of all pending tasks in a process" + ) + pstree.add_argument("pid", type=int, help="Process ID to inspect") + args = parser.parse_args() + match args.command: + case "ps": + asyncio.tools.display_awaited_by_tasks_table(args.pid) + sys.exit(0) + case "pstree": + asyncio.tools.display_awaited_by_tasks_tree(args.pid) + sys.exit(0) + case None: + pass # continue to the interactive shell + case _: + # shouldn't happen as an invalid command-line wouldn't parse + # but let's keep it for the next person adding a command + print(f"error: unhandled command {args.command}", file=sys.stderr) + parser.print_usage(file=sys.stderr) + sys.exit(1) + sys.audit("cpython.run_stdin") if os.getenv('PYTHON_BASIC_REPL'): diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py index 356fc3d7913..8cbb71f7085 100644 --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -458,24 +458,18 @@ def create_future(self): """Create a Future object attached to the loop.""" return futures.Future(loop=self) - def create_task(self, coro, *, name=None, context=None, **kwargs): - """Schedule a coroutine object. + def create_task(self, coro, **kwargs): + """Schedule or begin executing a coroutine object. Return a task object. 
""" self._check_closed() if self._task_factory is not None: - if context is not None: - kwargs["context"] = context - - task = self._task_factory(self, coro, **kwargs) - task.set_name(name) - - else: - task = tasks.Task(coro, loop=self, name=name, context=context, **kwargs) - if task._source_traceback: - del task._source_traceback[-1] + return self._task_factory(self, coro, **kwargs) + task = tasks.Task(coro, loop=self, **kwargs) + if task._source_traceback: + del task._source_traceback[-1] try: return task finally: @@ -841,7 +835,7 @@ def call_soon(self, callback, *args, context=None): def _check_callback(self, callback, method): if (coroutines.iscoroutine(callback) or - coroutines.iscoroutinefunction(callback)): + coroutines._iscoroutinefunction(callback)): raise TypeError( f"coroutines cannot be used with {method}()") if not callable(callback): @@ -878,7 +872,10 @@ def call_soon_threadsafe(self, callback, *args, context=None): self._check_closed() if self._debug: self._check_callback(callback, 'call_soon_threadsafe') - handle = self._call_soon(callback, args, context) + handle = events._ThreadSafeHandle(callback, args, self, context) + self._ready.append(handle) + if handle._source_traceback: + del handle._source_traceback[-1] if handle._source_traceback: del handle._source_traceback[-1] self._write_to_self() @@ -1677,8 +1674,7 @@ async def connect_accepted_socket( raise ValueError( 'ssl_shutdown_timeout is only meaningful with ssl') - if sock is not None: - _check_ssl_socket(sock) + _check_ssl_socket(sock) transport, protocol = await self._create_connection_transport( sock, protocol_factory, ssl, '', server_side=True, diff --git a/Lib/asyncio/coroutines.py b/Lib/asyncio/coroutines.py index ab4f30eb51b..a51319cb72a 100644 --- a/Lib/asyncio/coroutines.py +++ b/Lib/asyncio/coroutines.py @@ -18,7 +18,16 @@ def _is_debug_mode(): def iscoroutinefunction(func): + import warnings """Return True if func is a decorated coroutine function.""" + warnings._deprecated("asyncio.iscoroutinefunction", + f"{warnings._DEPRECATED_MSG}; " + "use inspect.iscoroutinefunction() instead", + remove=(3,16)) + return _iscoroutinefunction(func) + + +def _iscoroutinefunction(func): return (inspect.iscoroutinefunction(func) or getattr(func, '_is_coroutine', None) is _is_coroutine) diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py index 0d1591e4604..a7fb55982ab 100644 --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -5,14 +5,18 @@ # SPDX-FileCopyrightText: Copyright (c) 2015-2021 MagicStack Inc. http://magic.io __all__ = ( - 'AbstractEventLoopPolicy', - 'AbstractEventLoop', 'AbstractServer', - 'Handle', 'TimerHandle', - 'get_event_loop_policy', 'set_event_loop_policy', - 'get_event_loop', 'set_event_loop', 'new_event_loop', - 'get_child_watcher', 'set_child_watcher', - '_set_running_loop', 'get_running_loop', - '_get_running_loop', + "AbstractEventLoop", + "AbstractServer", + "Handle", + "TimerHandle", + "get_event_loop_policy", + "set_event_loop_policy", + "get_event_loop", + "set_event_loop", + "new_event_loop", + "_set_running_loop", + "get_running_loop", + "_get_running_loop", ) import contextvars @@ -22,6 +26,7 @@ import subprocess import sys import threading +import warnings from . import format_helpers @@ -104,6 +109,34 @@ def _run(self): self._loop.call_exception_handler(context) self = None # Needed to break cycles when an exception occurs. +# _ThreadSafeHandle is used for callbacks scheduled with call_soon_threadsafe +# and is thread safe unlike Handle which is not thread safe. 
+class _ThreadSafeHandle(Handle): + + __slots__ = ('_lock',) + + def __init__(self, callback, args, loop, context=None): + super().__init__(callback, args, loop, context) + self._lock = threading.RLock() + + def cancel(self): + with self._lock: + return super().cancel() + + def cancelled(self): + with self._lock: + return super().cancelled() + + def _run(self): + # The event loop checks for cancellation without holding the lock + # It is possible that the handle is cancelled after the check + # but before the callback is called so check it again after acquiring + # the lock and return without calling the callback if it is cancelled. + with self._lock: + if self._cancelled: + return + return super()._run() + class TimerHandle(Handle): """Object returned by timed callback registration methods.""" @@ -629,7 +662,7 @@ def set_debug(self, enabled): raise NotImplementedError -class AbstractEventLoopPolicy: +class _AbstractEventLoopPolicy: """Abstract policy for accessing the event loop.""" def get_event_loop(self): @@ -652,18 +685,7 @@ def new_event_loop(self): the current context, set_event_loop must be called explicitly.""" raise NotImplementedError - # Child processes handling (Unix only). - - def get_child_watcher(self): - "Get the watcher for child processes." - raise NotImplementedError - - def set_child_watcher(self, watcher): - """Set the watcher for child processes.""" - raise NotImplementedError - - -class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy): +class _BaseDefaultEventLoopPolicy(_AbstractEventLoopPolicy): """Default policy implementation for accessing the event loop. In this policy, each thread has its own event loop. However, we @@ -680,7 +702,6 @@ class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy): class _Local(threading.local): _loop = None - _set_called = False def __init__(self): self._local = self._Local() @@ -690,28 +711,6 @@ def get_event_loop(self): Returns an instance of EventLoop or raises an exception. """ - if (self._local._loop is None and - not self._local._set_called and - threading.current_thread() is threading.main_thread()): - stacklevel = 2 - try: - f = sys._getframe(1) - except AttributeError: - pass - else: - # Move up the call stack so that the warning is attached - # to the line outside asyncio itself. - while f: - module = f.f_globals.get('__name__') - if not (module == 'asyncio' or module.startswith('asyncio.')): - break - f = f.f_back - stacklevel += 1 - import warnings - warnings.warn('There is no current event loop', - DeprecationWarning, stacklevel=stacklevel) - self.set_event_loop(self.new_event_loop()) - if self._local._loop is None: raise RuntimeError('There is no current event loop in thread %r.' % threading.current_thread().name) @@ -720,7 +719,6 @@ def get_event_loop(self): def set_event_loop(self, loop): """Set the event loop.""" - self._local._set_called = True if loop is not None and not isinstance(loop, AbstractEventLoop): raise TypeError(f"loop must be an instance of AbstractEventLoop or None, not '{type(loop).__name__}'") self._local._loop = loop @@ -790,34 +788,36 @@ def _init_event_loop_policy(): global _event_loop_policy with _lock: if _event_loop_policy is None: # pragma: no branch - from . 
import DefaultEventLoopPolicy - _event_loop_policy = DefaultEventLoopPolicy() + if sys.platform == 'win32': + from .windows_events import _DefaultEventLoopPolicy + else: + from .unix_events import _DefaultEventLoopPolicy + _event_loop_policy = _DefaultEventLoopPolicy() -def get_event_loop_policy(): +def _get_event_loop_policy(): """Get the current event loop policy.""" if _event_loop_policy is None: _init_event_loop_policy() return _event_loop_policy +def get_event_loop_policy(): + warnings._deprecated('asyncio.get_event_loop_policy', remove=(3, 16)) + return _get_event_loop_policy() def _set_event_loop_policy(policy): """Set the current event loop policy. If policy is None, the default policy is restored.""" global _event_loop_policy - if policy is not None and not isinstance(policy, AbstractEventLoopPolicy): + if policy is not None and not isinstance(policy, _AbstractEventLoopPolicy): raise TypeError(f"policy must be an instance of AbstractEventLoopPolicy or None, not '{type(policy).__name__}'") _event_loop_policy = policy - def set_event_loop_policy(policy): - """Set the current event loop policy. - - If policy is None, the default policy is restored.""" + warnings._deprecated('asyncio.set_event_loop_policy', remove=(3,16)) _set_event_loop_policy(policy) - def get_event_loop(): """Return an asyncio event loop. @@ -831,28 +831,17 @@ def get_event_loop(): current_loop = _get_running_loop() if current_loop is not None: return current_loop - return get_event_loop_policy().get_event_loop() + return _get_event_loop_policy().get_event_loop() def set_event_loop(loop): """Equivalent to calling get_event_loop_policy().set_event_loop(loop).""" - get_event_loop_policy().set_event_loop(loop) + _get_event_loop_policy().set_event_loop(loop) def new_event_loop(): """Equivalent to calling get_event_loop_policy().new_event_loop().""" - return get_event_loop_policy().new_event_loop() - - -def get_child_watcher(): - """Equivalent to calling get_event_loop_policy().get_child_watcher().""" - return get_event_loop_policy().get_child_watcher() - - -def set_child_watcher(watcher): - """Equivalent to calling - get_event_loop_policy().set_child_watcher(watcher).""" - return get_event_loop_policy().set_child_watcher(watcher) + return _get_event_loop_policy().new_event_loop() # Alias pure-Python implementations for testing purposes. @@ -882,7 +871,7 @@ def set_child_watcher(watcher): def on_fork(): # Reset the loop and wakeupfd in the forked child process. if _event_loop_policy is not None: - _event_loop_policy._local = BaseDefaultEventLoopPolicy._Local() + _event_loop_policy._local = _BaseDefaultEventLoopPolicy._Local() _set_running_loop(None) signal.set_wakeup_fd(-1) diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py index 51932639097..d1df6707302 100644 --- a/Lib/asyncio/futures.py +++ b/Lib/asyncio/futures.py @@ -2,6 +2,7 @@ __all__ = ( 'Future', 'wrap_future', 'isfuture', + 'future_add_to_awaited_by', 'future_discard_from_awaited_by', ) import concurrent.futures @@ -43,7 +44,6 @@ class Future: - This class is not compatible with the wait() and as_completed() methods in the concurrent.futures package. - (In Python 3.4 or later we may be able to unify the implementations.) """ # Class variables serving as defaults for instance variables. @@ -61,12 +61,15 @@ class Future: # the Future protocol (i.e. is intended to be duck-type compatible). # The value must also be not-None, to enable a subclass to declare # that it is not compatible by setting this to None. 
- # - It is set by __iter__() below so that Task._step() can tell + # - It is set by __iter__() below so that Task.__step() can tell # the difference between - # `await Future()` or`yield from Future()` (correct) vs. + # `await Future()` or `yield from Future()` (correct) vs. # `yield Future()` (incorrect). _asyncio_future_blocking = False + # Used by the capture_call_stack() API. + __asyncio_awaited_by = None + __log_traceback = False def __init__(self, *, loop=None): @@ -116,6 +119,12 @@ def _log_traceback(self, val): raise ValueError('_log_traceback can only be set to False') self.__log_traceback = False + @property + def _asyncio_awaited_by(self): + if self.__asyncio_awaited_by is None: + return None + return frozenset(self.__asyncio_awaited_by) + def get_loop(self): """Return the event loop the Future is bound to.""" loop = self._loop @@ -416,6 +425,49 @@ def wrap_future(future, *, loop=None): return new_future +def future_add_to_awaited_by(fut, waiter, /): + """Record that `fut` is awaited on by `waiter`.""" + # For the sake of keeping the implementation minimal and assuming + # that most of asyncio users use the built-in Futures and Tasks + # (or their subclasses), we only support native Future objects + # and their subclasses. + # + # Longer version: tracking requires storing the caller-callee + # dependency somewhere. One obvious choice is to store that + # information right in the future itself in a dedicated attribute. + # This means that we'd have to require all duck-type compatible + # futures to implement a specific attribute used by asyncio for + # the book keeping. Another solution would be to store that in + # a global dictionary. The downside here is that that would create + # strong references and any scenario where the "add" call isn't + # followed by a "discard" call would lead to a memory leak. + # Using WeakDict would resolve that issue, but would complicate + # the C code (_asynciomodule.c). The bottom line here is that + # it's not clear that all this work would be worth the effort. + # + # Note that there's an accelerated version of this function + # shadowing this implementation later in this file. + if isinstance(fut, _PyFuture) and isinstance(waiter, _PyFuture): + if fut._Future__asyncio_awaited_by is None: + fut._Future__asyncio_awaited_by = set() + fut._Future__asyncio_awaited_by.add(waiter) + + +def future_discard_from_awaited_by(fut, waiter, /): + """Record that `fut` is no longer awaited on by `waiter`.""" + # See the comment in "future_add_to_awaited_by()" body for + # details on implementation. + # + # Note that there's an accelerated version of this function + # shadowing this implementation later in this file. + if isinstance(fut, _PyFuture) and isinstance(waiter, _PyFuture): + if fut._Future__asyncio_awaited_by is not None: + fut._Future__asyncio_awaited_by.discard(waiter) + + +_py_future_add_to_awaited_by = future_add_to_awaited_by +_py_future_discard_from_awaited_by = future_discard_from_awaited_by + try: import _asyncio except ImportError: @@ -423,3 +475,7 @@ def wrap_future(future, *, loop=None): else: # _CFuture is needed for tests. 
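A minimal sketch (assuming this patch) of what the pure-Python bookkeeping above records, inspected through the private `_asyncio_awaited_by` view:

```python
import asyncio

async def main():
    loop = asyncio.get_running_loop()
    fut = loop.create_future()
    waiter = loop.create_future()

    # Record an "awaited by" edge, look at it, then drop it again.
    asyncio.future_add_to_awaited_by(fut, waiter)
    assert waiter in fut._asyncio_awaited_by

    asyncio.future_discard_from_awaited_by(fut, waiter)
    assert not fut._asyncio_awaited_by

asyncio.run(main())
```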
Future = _CFuture = _asyncio.Future + future_add_to_awaited_by = _asyncio.future_add_to_awaited_by + future_discard_from_awaited_by = _asyncio.future_discard_from_awaited_by + _c_future_add_to_awaited_by = future_add_to_awaited_by + _c_future_discard_from_awaited_by = future_discard_from_awaited_by diff --git a/Lib/asyncio/graph.py b/Lib/asyncio/graph.py new file mode 100644 index 00000000000..b5bfeb1630a --- /dev/null +++ b/Lib/asyncio/graph.py @@ -0,0 +1,276 @@ +"""Introspection utils for tasks call graphs.""" + +import dataclasses +import io +import sys +import types + +from . import events +from . import futures +from . import tasks + +__all__ = ( + 'capture_call_graph', + 'format_call_graph', + 'print_call_graph', + 'FrameCallGraphEntry', + 'FutureCallGraph', +) + +# Sadly, we can't re-use the traceback module's datastructures as those +# are tailored for error reporting, whereas we need to represent an +# async call graph. +# +# Going with pretty verbose names as we'd like to export them to the +# top level asyncio namespace, and want to avoid future name clashes. + + +@dataclasses.dataclass(frozen=True, slots=True) +class FrameCallGraphEntry: + frame: types.FrameType + + +@dataclasses.dataclass(frozen=True, slots=True) +class FutureCallGraph: + future: futures.Future + call_stack: tuple["FrameCallGraphEntry", ...] + awaited_by: tuple["FutureCallGraph", ...] + + +def _build_graph_for_future( + future: futures.Future, + *, + limit: int | None = None, +) -> FutureCallGraph: + if not isinstance(future, futures.Future): + raise TypeError( + f"{future!r} object does not appear to be compatible " + f"with asyncio.Future" + ) + + coro = None + if get_coro := getattr(future, 'get_coro', None): + coro = get_coro() if limit != 0 else None + + st: list[FrameCallGraphEntry] = [] + awaited_by: list[FutureCallGraph] = [] + + while coro is not None: + if hasattr(coro, 'cr_await'): + # A native coroutine or duck-type compatible iterator + st.append(FrameCallGraphEntry(coro.cr_frame)) + coro = coro.cr_await + elif hasattr(coro, 'ag_await'): + # A native async generator or duck-type compatible iterator + st.append(FrameCallGraphEntry(coro.cr_frame)) + coro = coro.ag_await + else: + break + + if future._asyncio_awaited_by: + for parent in future._asyncio_awaited_by: + awaited_by.append(_build_graph_for_future(parent, limit=limit)) + + if limit is not None: + if limit > 0: + st = st[:limit] + elif limit < 0: + st = st[limit:] + st.reverse() + return FutureCallGraph(future, tuple(st), tuple(awaited_by)) + + +def capture_call_graph( + future: futures.Future | None = None, + /, + *, + depth: int = 1, + limit: int | None = None, +) -> FutureCallGraph | None: + """Capture the async call graph for the current task or the provided Future. + + The graph is represented with three data structures: + + * FutureCallGraph(future, call_stack, awaited_by) + + Where 'future' is an instance of asyncio.Future or asyncio.Task. + + 'call_stack' is a tuple of FrameGraphEntry objects. + + 'awaited_by' is a tuple of FutureCallGraph objects. + + * FrameCallGraphEntry(frame) + + Where 'frame' is a frame object of a regular Python function + in the call stack. + + Receives an optional 'future' argument. If not passed, + the current task will be used. If there's no current task, the function + returns None. + + If "capture_call_graph()" is introspecting *the current task*, the + optional keyword-only 'depth' argument can be used to skip the specified + number of frames from top of the stack. 
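A short usage sketch of capture_call_graph() as described here (illustrative only; the task name is made up):

```python
import asyncio

async def worker():
    graph = asyncio.capture_call_graph()  # graph for the current task
    for entry in graph.call_stack:
        print("frame:", entry.frame.f_code.co_qualname)
    for awaiter in graph.awaited_by:
        print("awaited by:", awaiter.future.get_name())

async def outer():
    await asyncio.create_task(worker(), name="worker")

asyncio.run(outer())
```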
+ + If the optional keyword-only 'limit' argument is provided, each call stack + in the resulting graph is truncated to include at most ``abs(limit)`` + entries. If 'limit' is positive, the entries left are the closest to + the invocation point. If 'limit' is negative, the topmost entries are + left. If 'limit' is omitted or None, all entries are present. + If 'limit' is 0, the call stack is not captured at all, only + "awaited by" information is present. + """ + + loop = events._get_running_loop() + + if future is not None: + # Check if we're in a context of a running event loop; + # if yes - check if the passed future is the currently + # running task or not. + if loop is None or future is not tasks.current_task(loop=loop): + return _build_graph_for_future(future, limit=limit) + # else: future is the current task, move on. + else: + if loop is None: + raise RuntimeError( + 'capture_call_graph() is called outside of a running ' + 'event loop and no *future* to introspect was provided') + future = tasks.current_task(loop=loop) + + if future is None: + # This isn't a generic call stack introspection utility. If we + # can't determine the current task and none was provided, we + # just return. + return None + + if not isinstance(future, futures.Future): + raise TypeError( + f"{future!r} object does not appear to be compatible " + f"with asyncio.Future" + ) + + call_stack: list[FrameCallGraphEntry] = [] + + f = sys._getframe(depth) if limit != 0 else None + try: + while f is not None: + is_async = f.f_generator is not None + call_stack.append(FrameCallGraphEntry(f)) + + if is_async: + if f.f_back is not None and f.f_back.f_generator is None: + # We've reached the bottom of the coroutine stack, which + # must be the Task that runs it. + break + + f = f.f_back + finally: + del f + + awaited_by = [] + if future._asyncio_awaited_by: + for parent in future._asyncio_awaited_by: + awaited_by.append(_build_graph_for_future(parent, limit=limit)) + + if limit is not None: + limit *= -1 + if limit > 0: + call_stack = call_stack[:limit] + elif limit < 0: + call_stack = call_stack[limit:] + + return FutureCallGraph(future, tuple(call_stack), tuple(awaited_by)) + + +def format_call_graph( + future: futures.Future | None = None, + /, + *, + depth: int = 1, + limit: int | None = None, +) -> str: + """Return the async call graph as a string for `future`. + + If `future` is not provided, format the call graph for the current task. 
+ """ + + def render_level(st: FutureCallGraph, buf: list[str], level: int) -> None: + def add_line(line: str) -> None: + buf.append(level * ' ' + line) + + if isinstance(st.future, tasks.Task): + add_line( + f'* Task(name={st.future.get_name()!r}, id={id(st.future):#x})' + ) + else: + add_line( + f'* Future(id={id(st.future):#x})' + ) + + if st.call_stack: + add_line( + f' + Call stack:' + ) + for ste in st.call_stack: + f = ste.frame + + if f.f_generator is None: + f = ste.frame + add_line( + f' | File {f.f_code.co_filename!r},' + f' line {f.f_lineno}, in' + f' {f.f_code.co_qualname}()' + ) + else: + c = f.f_generator + + try: + f = c.cr_frame + code = c.cr_code + tag = 'async' + except AttributeError: + try: + f = c.ag_frame + code = c.ag_code + tag = 'async generator' + except AttributeError: + f = c.gi_frame + code = c.gi_code + tag = 'generator' + + add_line( + f' | File {f.f_code.co_filename!r},' + f' line {f.f_lineno}, in' + f' {tag} {code.co_qualname}()' + ) + + if st.awaited_by: + add_line( + f' + Awaited by:' + ) + for fut in st.awaited_by: + render_level(fut, buf, level + 1) + + graph = capture_call_graph(future, depth=depth + 1, limit=limit) + if graph is None: + return "" + + buf: list[str] = [] + try: + render_level(graph, buf, 0) + finally: + # 'graph' has references to frames so we should + # make sure it's GC'ed as soon as we don't need it. + del graph + return '\n'.join(buf) + +def print_call_graph( + future: futures.Future | None = None, + /, + *, + file: io.Writer[str] | None = None, + depth: int = 1, + limit: int | None = None, +) -> None: + """Print the async call graph for the current task or the provided Future.""" + print(format_call_graph(future, depth=depth, limit=limit), file=file) diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py index 3df4c693a91..fa3a94764b5 100644 --- a/Lib/asyncio/locks.py +++ b/Lib/asyncio/locks.py @@ -341,9 +341,9 @@ def _notify(self, n): fut.set_result(False) def notify_all(self): - """Wake up all threads waiting on this condition. This method acts - like notify(), but wakes up all waiting threads instead of one. If the - calling thread has not acquired the lock when this method is called, + """Wake up all tasks waiting on this condition. This method acts + like notify(), but wakes up all waiting tasks instead of one. If the + calling task has not acquired the lock when this method is called, a RuntimeError is raised. """ self.notify(len(self._waiters)) diff --git a/Lib/asyncio/runners.py b/Lib/asyncio/runners.py index 102ae78021b..ba37e003a65 100644 --- a/Lib/asyncio/runners.py +++ b/Lib/asyncio/runners.py @@ -3,6 +3,7 @@ import contextvars import enum import functools +import inspect import threading import signal from . 
import coroutines @@ -84,10 +85,7 @@ def get_loop(self): return self._loop def run(self, coro, *, context=None): - """Run a coroutine inside the embedded event loop.""" - if not coroutines.iscoroutine(coro): - raise ValueError("a coroutine was expected, got {!r}".format(coro)) - + """Run code in the embedded event loop.""" if events._get_running_loop() is not None: # fail fast with short traceback raise RuntimeError( @@ -95,8 +93,19 @@ def run(self, coro, *, context=None): self._lazy_init() + if not coroutines.iscoroutine(coro): + if inspect.isawaitable(coro): + async def _wrap_awaitable(awaitable): + return await awaitable + + coro = _wrap_awaitable(coro) + else: + raise TypeError('An asyncio.Future, a coroutine or an ' + 'awaitable is required') + if context is None: context = self._context + task = self._loop.create_task(coro, context=context) if (threading.current_thread() is threading.main_thread() diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py index 8701467d413..ff7e16df3c6 100644 --- a/Lib/asyncio/selector_events.py +++ b/Lib/asyncio/selector_events.py @@ -173,16 +173,20 @@ def _accept_connection( # listening socket has triggered an EVENT_READ. There may be multiple # connections waiting for an .accept() so it is called in a loop. # See https://bugs.python.org/issue27906 for more details. - for _ in range(backlog): + for _ in range(backlog + 1): try: conn, addr = sock.accept() if self._debug: logger.debug("%r got a new connection from %r: %r", server, addr, conn) conn.setblocking(False) - except (BlockingIOError, InterruptedError, ConnectionAbortedError): - # Early exit because the socket accept buffer is empty. - return None + except ConnectionAbortedError: + # Discard connections that were aborted before accept(). + continue + except (BlockingIOError, InterruptedError): + # Early exit because of a signal or + # the socket accept buffer is empty. + return except OSError as exc: # There's nowhere to send the error, so just log it. if exc.errno in (errno.EMFILE, errno.ENFILE, diff --git a/Lib/asyncio/staggered.py b/Lib/asyncio/staggered.py index 0afed64fdf9..2ad65d8648e 100644 --- a/Lib/asyncio/staggered.py +++ b/Lib/asyncio/staggered.py @@ -8,6 +8,7 @@ from . import exceptions as exceptions_mod from . import locks from . import tasks +from . import futures async def staggered_race(coro_fns, delay, *, loop=None): @@ -63,6 +64,7 @@ async def staggered_race(coro_fns, delay, *, loop=None): """ # TODO: when we have aiter() and anext(), allow async iterables in coro_fns. 
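A brief sketch of the relaxed Runner.run() contract above: any awaitable is now accepted and wrapped, where previously only a coroutine object was allowed (illustrative; the Awaitable class is made up):

```python
import asyncio

class Awaitable:
    """A non-coroutine awaitable: anything implementing __await__()."""
    def __await__(self):
        return (yield from asyncio.sleep(0, "done").__await__())

with asyncio.Runner() as runner:
    print(runner.run(Awaitable()))  # used to raise ValueError; now prints "done"
```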
loop = loop or events.get_running_loop() + parent_task = tasks.current_task(loop) enum_coro_fns = enumerate(coro_fns) winner_result = None winner_index = None @@ -73,6 +75,7 @@ async def staggered_race(coro_fns, delay, *, loop=None): def task_done(task): running_tasks.discard(task) + futures.future_discard_from_awaited_by(task, parent_task) if ( on_completed_fut is not None and not on_completed_fut.done() @@ -110,6 +113,7 @@ async def run_one_coro(ok_to_start, previous_failed) -> None: this_failed = locks.Event() next_ok_to_start = locks.Event() next_task = loop.create_task(run_one_coro(next_ok_to_start, this_failed)) + futures.future_add_to_awaited_by(next_task, parent_task) running_tasks.add(next_task) next_task.add_done_callback(task_done) # next_task has been appended to running_tasks so next_task is ok to @@ -148,6 +152,7 @@ async def run_one_coro(ok_to_start, previous_failed) -> None: try: ok_to_start = locks.Event() first_task = loop.create_task(run_one_coro(ok_to_start, None)) + futures.future_add_to_awaited_by(first_task, parent_task) running_tasks.add(first_task) first_task.add_done_callback(task_done) # first_task has been appended to running_tasks so first_task is ok to start. @@ -171,4 +176,4 @@ async def run_one_coro(ok_to_start, previous_failed) -> None: raise propagate_cancellation_error return winner_result, winner_index, exceptions finally: - del exceptions, propagate_cancellation_error, unhandled_exceptions + del exceptions, propagate_cancellation_error, unhandled_exceptions, parent_task diff --git a/Lib/asyncio/taskgroups.py b/Lib/asyncio/taskgroups.py index 8fda6c8d55e..00e8f6d5d1a 100644 --- a/Lib/asyncio/taskgroups.py +++ b/Lib/asyncio/taskgroups.py @@ -6,6 +6,7 @@ from . import events from . import exceptions +from . import futures from . import tasks @@ -178,7 +179,7 @@ async def _aexit(self, et, exc): exc = None - def create_task(self, coro, *, name=None, context=None): + def create_task(self, coro, **kwargs): """Create a new task in this group and return it. Similar to `asyncio.create_task`. @@ -192,10 +193,9 @@ def create_task(self, coro, *, name=None, context=None): if self._aborting: coro.close() raise RuntimeError(f"TaskGroup {self!r} is shutting down") - if context is None: - task = self._loop.create_task(coro, name=name) - else: - task = self._loop.create_task(coro, name=name, context=context) + task = self._loop.create_task(coro, **kwargs) + + futures.future_add_to_awaited_by(task, self._parent_task) # Always schedule the done callback even if the task is # already done (e.g. if the coro was able to complete eagerly), @@ -228,6 +228,8 @@ def _abort(self): def _on_task_done(self, task): self._tasks.discard(task) + futures.future_discard_from_awaited_by(task, self._parent_task) + if self._on_completed_fut is not None and not self._tasks: if not self._on_completed_fut.done(): self._on_completed_fut.set_result(True) diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py index dadcb5b5f36..fbd5c39a7c5 100644 --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -48,23 +48,8 @@ def all_tasks(loop=None): # capturing the set of eager tasks first, so if an eager task "graduates" # to a regular task in another thread, we don't risk missing it. eager_tasks = list(_eager_tasks) - # Looping over the WeakSet isn't safe as it can be updated from another - # thread, therefore we cast it to list prior to filtering. The list cast - # itself requires iteration, so we repeat it several times ignoring - # RuntimeErrors (which are not very likely to occur). 
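Because loop.create_task() and TaskGroup.create_task() now forward arbitrary keyword arguments (see the changes above), a custom task factory can accept extras; a hypothetical sketch, with TaggedTask and tag being made-up names:

```python
import asyncio

class TaggedTask(asyncio.Task):
    """Hypothetical Task subclass taking an extra keyword argument."""
    def __init__(self, coro, *, tag=None, **kwargs):
        super().__init__(coro, **kwargs)
        self.tag = tag

def factory(loop, coro, **kwargs):
    return TaggedTask(coro, loop=loop, **kwargs)

async def main():
    asyncio.get_running_loop().set_task_factory(factory)
    async with asyncio.TaskGroup() as tg:
        task = tg.create_task(asyncio.sleep(0), tag="io", name="sleeper")
    print(task.tag, task.get_name())

asyncio.run(main())
```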
- # See issues 34970 and 36607 for details. - scheduled_tasks = None - i = 0 - while True: - try: - scheduled_tasks = list(_scheduled_tasks) - except RuntimeError: - i += 1 - if i >= 1000: - raise - else: - break - return {t for t in itertools.chain(scheduled_tasks, eager_tasks) + + return {t for t in itertools.chain(_scheduled_tasks, eager_tasks) if futures._get_loop(t) is loop and not t.done()} @@ -125,7 +110,7 @@ def __init__(self, coro, *, loop=None, name=None, context=None, self.__eager_start() else: self._loop.call_soon(self.__step, context=self._context) - _register_task(self) + _py_register_task(self) def __del__(self): if self._state == futures._PENDING and self._log_destroy_pending: @@ -260,39 +245,39 @@ def uncancel(self): return self._num_cancels_requested def __eager_start(self): - prev_task = _swap_current_task(self._loop, self) + prev_task = _py_swap_current_task(self._loop, self) try: - _register_eager_task(self) + _py_register_eager_task(self) try: self._context.run(self.__step_run_and_handle_result, None) finally: - _unregister_eager_task(self) + _py_unregister_eager_task(self) finally: try: - curtask = _swap_current_task(self._loop, prev_task) + curtask = _py_swap_current_task(self._loop, prev_task) assert curtask is self finally: if self.done(): self._coro = None self = None # Needed to break cycles when an exception occurs. else: - _register_task(self) + _py_register_task(self) def __step(self, exc=None): if self.done(): raise exceptions.InvalidStateError( - f'_step(): already done: {self!r}, {exc!r}') + f'__step(): already done: {self!r}, {exc!r}') if self._must_cancel: if not isinstance(exc, exceptions.CancelledError): exc = self._make_cancelled_error() self._must_cancel = False self._fut_waiter = None - _enter_task(self._loop, self) + _py_enter_task(self._loop, self) try: self.__step_run_and_handle_result(exc) finally: - _leave_task(self._loop, self) + _py_leave_task(self._loop, self) self = None # Needed to break cycles when an exception occurs. def __step_run_and_handle_result(self, exc): @@ -337,6 +322,7 @@ def __step_run_and_handle_result(self, exc): self._loop.call_soon( self.__step, new_exc, context=self._context) else: + futures.future_add_to_awaited_by(result, self) result._asyncio_future_blocking = False result.add_done_callback( self.__wakeup, context=self._context) @@ -371,6 +357,7 @@ def __step_run_and_handle_result(self, exc): self = None # Needed to break cycles when an exception occurs. def __wakeup(self, future): + futures.future_discard_from_awaited_by(future, self) try: future.result() except BaseException as exc: @@ -379,7 +366,7 @@ def __wakeup(self, future): else: # Don't pass the value of `future.result()` explicitly, # as `Future.__iter__` and `Future.__await__` don't need it. - # If we call `_step(value, None)` instead of `_step()`, + # If we call `__step(value, None)` instead of `__step()`, # Python eval loop would use `.send(value)` method call, # instead of `__next__()`, which is slower for futures # that return non-generator iterators from their `__iter__`. @@ -399,19 +386,13 @@ def __wakeup(self, future): Task = _CTask = _asyncio.Task -def create_task(coro, *, name=None, context=None): +def create_task(coro, **kwargs): """Schedule the execution of a coroutine object in a spawn task. Return a Task object. 
""" loop = events.get_running_loop() - if context is None: - # Use legacy API if context is not needed - task = loop.create_task(coro, name=name) - else: - task = loop.create_task(coro, name=name, context=context) - - return task + return loop.create_task(coro, **kwargs) # wait() and as_completed() similar to those in PEP 3148. @@ -517,6 +498,7 @@ async def _wait(fs, timeout, return_when, loop): if timeout is not None: timeout_handle = loop.call_later(timeout, _release_waiter, waiter) counter = len(fs) + cur_task = current_task() def _on_completion(f): nonlocal counter @@ -529,9 +511,11 @@ def _on_completion(f): timeout_handle.cancel() if not waiter.done(): waiter.set_result(None) + futures.future_discard_from_awaited_by(f, cur_task) for f in fs: f.add_done_callback(_on_completion) + futures.future_add_to_awaited_by(f, cur_task) try: await waiter @@ -817,10 +801,19 @@ def gather(*coros_or_futures, return_exceptions=False): outer.set_result([]) return outer - def _done_callback(fut): + loop = events._get_running_loop() + if loop is not None: + cur_task = current_task(loop) + else: + cur_task = None + + def _done_callback(fut, cur_task=cur_task): nonlocal nfinished nfinished += 1 + if cur_task is not None: + futures.future_discard_from_awaited_by(fut, cur_task) + if outer is None or outer.done(): if not fut.cancelled(): # Mark exception retrieved. @@ -877,7 +870,6 @@ def _done_callback(fut): nfuts = 0 nfinished = 0 done_futs = [] - loop = None outer = None # bpo-46672 for arg in coros_or_futures: if arg not in arg_to_fut: @@ -890,12 +882,13 @@ def _done_callback(fut): # can't control it, disable the "destroy pending task" # warning. fut._log_destroy_pending = False - nfuts += 1 arg_to_fut[arg] = fut if fut.done(): done_futs.append(fut) else: + if cur_task is not None: + futures.future_add_to_awaited_by(fut, cur_task) fut.add_done_callback(_done_callback) else: @@ -915,6 +908,25 @@ def _done_callback(fut): return outer +def _log_on_exception(fut): + if fut.cancelled(): + return + + exc = fut.exception() + if exc is None: + return + + context = { + 'message': + f'{exc.__class__.__name__} exception in shielded future', + 'exception': exc, + 'future': fut, + } + if fut._source_traceback: + context['source_traceback'] = fut._source_traceback + fut._loop.call_exception_handler(context) + + def shield(arg): """Wait for a future, shielding it from cancellation. @@ -955,11 +967,16 @@ def shield(arg): loop = futures._get_loop(inner) outer = loop.create_future() + if loop is not None and (cur_task := current_task(loop)) is not None: + futures.future_add_to_awaited_by(inner, cur_task) + else: + cur_task = None + + def _clear_awaited_by_callback(inner): + futures.future_discard_from_awaited_by(inner, cur_task) + def _inner_done_callback(inner): if outer.cancelled(): - if not inner.cancelled(): - # Mark inner's result as retrieved. - inner.exception() return if inner.cancelled(): @@ -971,10 +988,16 @@ def _inner_done_callback(inner): else: outer.set_result(inner.result()) - def _outer_done_callback(outer): if not inner.done(): inner.remove_done_callback(_inner_done_callback) + # Keep only one callback to log on cancel + inner.remove_done_callback(_log_on_exception) + inner.add_done_callback(_log_on_exception) + + if cur_task is not None: + inner.add_done_callback(_clear_awaited_by_callback) + inner.add_done_callback(_inner_done_callback) outer.add_done_callback(_outer_done_callback) @@ -1023,9 +1046,9 @@ def create_eager_task_factory(custom_task_constructor): used. E.g. 
`loop.set_task_factory(asyncio.eager_task_factory)`. """ - def factory(loop, coro, *, name=None, context=None): + def factory(loop, coro, *, eager_start=True, **kwargs): return custom_task_constructor( - coro, loop=loop, name=name, context=context, eager_start=True) + coro, loop=loop, eager_start=eager_start, **kwargs) return factory @@ -1097,14 +1120,13 @@ def _unregister_eager_task(task): _py_enter_task = _enter_task _py_leave_task = _leave_task _py_swap_current_task = _swap_current_task - +_py_all_tasks = all_tasks try: from _asyncio import (_register_task, _register_eager_task, _unregister_task, _unregister_eager_task, _enter_task, _leave_task, _swap_current_task, - _scheduled_tasks, _eager_tasks, _current_tasks, - current_task) + current_task, all_tasks) except ImportError: pass else: @@ -1116,3 +1138,4 @@ def _unregister_eager_task(task): _c_enter_task = _enter_task _c_leave_task = _leave_task _c_swap_current_task = _swap_current_task + _c_all_tasks = all_tasks diff --git a/Lib/asyncio/timeouts.py b/Lib/asyncio/timeouts.py index e6f5100691d..09342dc7c13 100644 --- a/Lib/asyncio/timeouts.py +++ b/Lib/asyncio/timeouts.py @@ -1,7 +1,6 @@ import enum from types import TracebackType -from typing import final, Optional, Type from . import events from . import exceptions @@ -23,14 +22,13 @@ class _State(enum.Enum): EXITED = "finished" -@final class Timeout: """Asynchronous context manager for cancelling overdue coroutines. Use `timeout()` or `timeout_at()` rather than instantiating this class directly. """ - def __init__(self, when: Optional[float]) -> None: + def __init__(self, when: float | None) -> None: """Schedule a timeout that will trigger at a given loop time. - If `when` is `None`, the timeout will never trigger. @@ -39,15 +37,15 @@ def __init__(self, when: Optional[float]) -> None: """ self._state = _State.CREATED - self._timeout_handler: Optional[events.TimerHandle] = None - self._task: Optional[tasks.Task] = None + self._timeout_handler: events.TimerHandle | None = None + self._task: tasks.Task | None = None self._when = when - def when(self) -> Optional[float]: + def when(self) -> float | None: """Return the current deadline.""" return self._when - def reschedule(self, when: Optional[float]) -> None: + def reschedule(self, when: float | None) -> None: """Reschedule the timeout.""" if self._state is not _State.ENTERED: if self._state is _State.CREATED: @@ -96,10 +94,10 @@ async def __aenter__(self) -> "Timeout": async def __aexit__( self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> Optional[bool]: + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool | None: assert self._state in (_State.ENTERED, _State.EXPIRING) if self._timeout_handler is not None: @@ -142,7 +140,7 @@ def _insert_timeout_error(exc_val: BaseException) -> None: exc_val = exc_val.__context__ -def timeout(delay: Optional[float]) -> Timeout: +def timeout(delay: float | None) -> Timeout: """Timeout async context manager. Useful in cases when you want to apply timeout logic around block @@ -162,7 +160,7 @@ def timeout(delay: Optional[float]) -> Timeout: return Timeout(loop.time() + delay if delay is not None else None) -def timeout_at(when: Optional[float]) -> Timeout: +def timeout_at(when: float | None) -> Timeout: """Schedule the timeout at absolute time. 
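A small sketch of the updated eager factory: eager_start still defaults to True but is now forwarded along with any other keywords (illustrative only):

```python
import asyncio

async def quick():
    return "ran eagerly"

async def main():
    loop = asyncio.get_running_loop()
    loop.set_task_factory(asyncio.eager_task_factory)
    task = loop.create_task(quick(), name="quick")
    print(task.done())  # True: the coroutine completed during create_task()
    print(await task)

asyncio.run(main())
```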
Like timeout() but argument gives absolute time in the same clock system diff --git a/Lib/asyncio/tools.py b/Lib/asyncio/tools.py new file mode 100644 index 00000000000..f39e11fdd51 --- /dev/null +++ b/Lib/asyncio/tools.py @@ -0,0 +1,276 @@ +"""Tools to analyze tasks running in asyncio programs.""" + +from collections import defaultdict, namedtuple +from itertools import count +from enum import Enum +import sys +from _remote_debugging import RemoteUnwinder, FrameInfo + +class NodeType(Enum): + COROUTINE = 1 + TASK = 2 + + +class CycleFoundException(Exception): + """Raised when there is a cycle when drawing the call tree.""" + def __init__( + self, + cycles: list[list[int]], + id2name: dict[int, str], + ) -> None: + super().__init__(cycles, id2name) + self.cycles = cycles + self.id2name = id2name + + + +# ─── indexing helpers ─────────────────────────────────────────── +def _format_stack_entry(elem: str|FrameInfo) -> str: + if not isinstance(elem, str): + if elem.lineno == 0 and elem.filename == "": + return f"{elem.funcname}" + else: + return f"{elem.funcname} {elem.filename}:{elem.lineno}" + return elem + + +def _index(result): + id2name, awaits, task_stacks = {}, [], {} + for awaited_info in result: + for task_info in awaited_info.awaited_by: + task_id = task_info.task_id + task_name = task_info.task_name + id2name[task_id] = task_name + + # Store the internal coroutine stack for this task + if task_info.coroutine_stack: + for coro_info in task_info.coroutine_stack: + call_stack = coro_info.call_stack + internal_stack = [_format_stack_entry(frame) for frame in call_stack] + task_stacks[task_id] = internal_stack + + # Add the awaited_by relationships (external dependencies) + if task_info.awaited_by: + for coro_info in task_info.awaited_by: + call_stack = coro_info.call_stack + parent_task_id = coro_info.task_name + stack = [_format_stack_entry(frame) for frame in call_stack] + awaits.append((parent_task_id, stack, task_id)) + return id2name, awaits, task_stacks + + +def _build_tree(id2name, awaits, task_stacks): + id2label = {(NodeType.TASK, tid): name for tid, name in id2name.items()} + children = defaultdict(list) + cor_nodes = defaultdict(dict) # Maps parent -> {frame_name: node_key} + next_cor_id = count(1) + + def get_or_create_cor_node(parent, frame): + """Get existing coroutine node or create new one under parent""" + if frame in cor_nodes[parent]: + return cor_nodes[parent][frame] + + node_key = (NodeType.COROUTINE, f"c{next(next_cor_id)}") + id2label[node_key] = frame + children[parent].append(node_key) + cor_nodes[parent][frame] = node_key + return node_key + + # Build task dependency tree with coroutine frames + for parent_id, stack, child_id in awaits: + cur = (NodeType.TASK, parent_id) + for frame in reversed(stack): + cur = get_or_create_cor_node(cur, frame) + + child_key = (NodeType.TASK, child_id) + if child_key not in children[cur]: + children[cur].append(child_key) + + # Add coroutine stacks for leaf tasks + awaiting_tasks = {parent_id for parent_id, _, _ in awaits} + for task_id in id2name: + if task_id not in awaiting_tasks and task_id in task_stacks: + cur = (NodeType.TASK, task_id) + for frame in reversed(task_stacks[task_id]): + cur = get_or_create_cor_node(cur, frame) + + return id2label, children + + +def _roots(id2label, children): + all_children = {c for kids in children.values() for c in kids} + return [n for n in id2label if n not in all_children] + +# ─── detect cycles in the task-to-task graph ─────────────────────── +def _task_graph(awaits): + """Return 
{parent_task_id: {child_task_id, …}, …}.""" + g = defaultdict(set) + for parent_id, _stack, child_id in awaits: + g[parent_id].add(child_id) + return g + + +def _find_cycles(graph): + """ + Depth-first search for back-edges. + + Returns a list of cycles (each cycle is a list of task-ids) or an + empty list if the graph is acyclic. + """ + WHITE, GREY, BLACK = 0, 1, 2 + color = defaultdict(lambda: WHITE) + path, cycles = [], [] + + def dfs(v): + color[v] = GREY + path.append(v) + for w in graph.get(v, ()): + if color[w] == WHITE: + dfs(w) + elif color[w] == GREY: # back-edge → cycle! + i = path.index(w) + cycles.append(path[i:] + [w]) # make a copy + color[v] = BLACK + path.pop() + + for v in list(graph): + if color[v] == WHITE: + dfs(v) + return cycles + + +# ─── PRINT TREE FUNCTION ─────────────────────────────────────── +def get_all_awaited_by(pid): + unwinder = RemoteUnwinder(pid) + return unwinder.get_all_awaited_by() + + +def build_async_tree(result, task_emoji="(T)", cor_emoji=""): + """ + Build a list of strings for pretty-print an async call tree. + + The call tree is produced by `get_all_async_stacks()`, prefixing tasks + with `task_emoji` and coroutine frames with `cor_emoji`. + """ + id2name, awaits, task_stacks = _index(result) + g = _task_graph(awaits) + cycles = _find_cycles(g) + if cycles: + raise CycleFoundException(cycles, id2name) + labels, children = _build_tree(id2name, awaits, task_stacks) + + def pretty(node): + flag = task_emoji if node[0] == NodeType.TASK else cor_emoji + return f"{flag} {labels[node]}" + + def render(node, prefix="", last=True, buf=None): + if buf is None: + buf = [] + buf.append(f"{prefix}{'└── ' if last else '├── '}{pretty(node)}") + new_pref = prefix + (" " if last else "│ ") + kids = children.get(node, []) + for i, kid in enumerate(kids): + render(kid, new_pref, i == len(kids) - 1, buf) + return buf + + return [render(root) for root in _roots(labels, children)] + + +def build_task_table(result): + id2name, _, _ = _index(result) + table = [] + + for awaited_info in result: + thread_id = awaited_info.thread_id + for task_info in awaited_info.awaited_by: + # Get task info + task_id = task_info.task_id + task_name = task_info.task_name + + # Build coroutine stack string + frames = [frame for coro in task_info.coroutine_stack + for frame in coro.call_stack] + coro_stack = " -> ".join(_format_stack_entry(x).split(" ")[0] + for x in frames) + + # Handle tasks with no awaiters + if not task_info.awaited_by: + table.append([thread_id, hex(task_id), task_name, coro_stack, + "", "", "0x0"]) + continue + + # Handle tasks with awaiters + for coro_info in task_info.awaited_by: + parent_id = coro_info.task_name + awaiter_frames = [_format_stack_entry(x).split(" ")[0] + for x in coro_info.call_stack] + awaiter_chain = " -> ".join(awaiter_frames) + awaiter_name = id2name.get(parent_id, "Unknown") + parent_id_str = (hex(parent_id) if isinstance(parent_id, int) + else str(parent_id)) + + table.append([thread_id, hex(task_id), task_name, coro_stack, + awaiter_chain, awaiter_name, parent_id_str]) + + return table + +def _print_cycle_exception(exception: CycleFoundException): + print("ERROR: await-graph contains cycles - cannot print a tree!", file=sys.stderr) + print("", file=sys.stderr) + for c in exception.cycles: + inames = " → ".join(exception.id2name.get(tid, hex(tid)) for tid in c) + print(f"cycle: {inames}", file=sys.stderr) + + +def exit_with_permission_help_text(): + """ + Prints a message pointing to platform-specific permission help text and exits the 
program. + This function is called when a PermissionError is encountered while trying + to attach to a process. + """ + print( + "Error: The specified process cannot be attached to due to insufficient permissions.\n" + "See the Python documentation for details on required privileges and troubleshooting:\n" + "https://docs.python.org/3.14/howto/remote_debugging.html#permission-requirements\n" + ) + sys.exit(1) + + +def _get_awaited_by_tasks(pid: int) -> list: + try: + return get_all_awaited_by(pid) + except RuntimeError as e: + while e.__context__ is not None: + e = e.__context__ + print(f"Error retrieving tasks: {e}") + sys.exit(1) + except PermissionError as e: + exit_with_permission_help_text() + + +def display_awaited_by_tasks_table(pid: int) -> None: + """Build and print a table of all pending tasks under `pid`.""" + + tasks = _get_awaited_by_tasks(pid) + table = build_task_table(tasks) + # Print the table in a simple tabular format + print( + f"{'tid':<10} {'task id':<20} {'task name':<20} {'coroutine stack':<50} {'awaiter chain':<50} {'awaiter name':<15} {'awaiter id':<15}" + ) + print("-" * 180) + for row in table: + print(f"{row[0]:<10} {row[1]:<20} {row[2]:<20} {row[3]:<50} {row[4]:<50} {row[5]:<15} {row[6]:<15}") + + +def display_awaited_by_tasks_tree(pid: int) -> None: + """Build and print a tree of all pending tasks under `pid`.""" + + tasks = _get_awaited_by_tasks(pid) + try: + result = build_async_tree(tasks) + except CycleFoundException as e: + _print_cycle_exception(e) + sys.exit(1) + + for tree in result: + print("\n".join(tree)) diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py index 41ccf1b78fb..1c1458127db 100644 --- a/Lib/asyncio/unix_events.py +++ b/Lib/asyncio/unix_events.py @@ -28,10 +28,6 @@ __all__ = ( 'SelectorEventLoop', - 'AbstractChildWatcher', 'SafeChildWatcher', - 'FastChildWatcher', 'PidfdChildWatcher', - 'MultiLoopChildWatcher', 'ThreadedChildWatcher', - 'DefaultEventLoopPolicy', 'EventLoop', ) @@ -65,6 +61,10 @@ def __init__(self, selector=None): super().__init__(selector) self._signal_handlers = {} self._unix_server_sockets = {} + if can_use_pidfd(): + self._watcher = _PidfdChildWatcher() + else: + self._watcher = _ThreadedChildWatcher() def close(self): super().close() @@ -94,7 +94,7 @@ def add_signal_handler(self, sig, callback, *args): Raise RuntimeError if there is a problem setting up the handler. """ if (coroutines.iscoroutine(callback) or - coroutines.iscoroutinefunction(callback)): + coroutines._iscoroutinefunction(callback)): raise TypeError("coroutines cannot be used " "with add_signal_handler()") self._check_signal(sig) @@ -197,33 +197,22 @@ def _make_write_pipe_transport(self, pipe, protocol, waiter=None, async def _make_subprocess_transport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, extra=None, **kwargs): - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - watcher = events.get_child_watcher() - - with watcher: - if not watcher.is_active(): - # Check early. - # Raising exception before process creation - # prevents subprocess execution if the watcher - # is not ready to handle it. 
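For reference, the asyncio.tools entry points above back the new ps/pstree subcommands of `python -m asyncio`; a programmatic sketch, with the PID being hypothetical:

```python
import asyncio.tools

PID = 12345  # hypothetical PID of a separately running asyncio program

# Equivalent to `python -m asyncio ps 12345` and `python -m asyncio pstree 12345`.
asyncio.tools.display_awaited_by_tasks_table(PID)
asyncio.tools.display_awaited_by_tasks_tree(PID)
```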
- raise RuntimeError("asyncio.get_child_watcher() is not activated, " - "subprocess support is not installed.") - waiter = self.create_future() - transp = _UnixSubprocessTransport(self, protocol, args, shell, - stdin, stdout, stderr, bufsize, - waiter=waiter, extra=extra, - **kwargs) - watcher.add_child_handler(transp.get_pid(), - self._child_watcher_callback, transp) - try: - await waiter - except (SystemExit, KeyboardInterrupt): - raise - except BaseException: - transp.close() - await transp._wait() - raise + watcher = self._watcher + waiter = self.create_future() + transp = _UnixSubprocessTransport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=waiter, extra=extra, + **kwargs) + watcher.add_child_handler(transp.get_pid(), + self._child_watcher_callback, transp) + try: + await waiter + except (SystemExit, KeyboardInterrupt): + raise + except BaseException: + transp.close() + await transp._wait() + raise return transp @@ -865,93 +854,7 @@ def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): stdin_w.close() -class AbstractChildWatcher: - """Abstract base class for monitoring child processes. - - Objects derived from this class monitor a collection of subprocesses and - report their termination or interruption by a signal. - - New callbacks are registered with .add_child_handler(). Starting a new - process must be done within a 'with' block to allow the watcher to suspend - its activity until the new process if fully registered (this is needed to - prevent a race condition in some implementations). - - Example: - with watcher: - proc = subprocess.Popen("sleep 1") - watcher.add_child_handler(proc.pid, callback) - - Notes: - Implementations of this class must be thread-safe. - - Since child watcher objects may catch the SIGCHLD signal and call - waitpid(-1), there should be only one active object per process. - """ - - def __init_subclass__(cls) -> None: - if cls.__module__ != __name__: - warnings._deprecated("AbstractChildWatcher", - "{name!r} is deprecated as of Python 3.12 and will be " - "removed in Python {remove}.", - remove=(3, 14)) - - def add_child_handler(self, pid, callback, *args): - """Register a new child handler. - - Arrange for callback(pid, returncode, *args) to be called when - process 'pid' terminates. Specifying another callback for the same - process replaces the previous handler. - - Note: callback() must be thread-safe. - """ - raise NotImplementedError() - - def remove_child_handler(self, pid): - """Removes the handler for process 'pid'. - - The function returns True if the handler was successfully removed, - False if there was nothing to remove.""" - - raise NotImplementedError() - - def attach_loop(self, loop): - """Attach the watcher to an event loop. - - If the watcher was previously attached to an event loop, then it is - first detached before attaching to the new loop. - - Note: loop may be None. - """ - raise NotImplementedError() - - def close(self): - """Close the watcher. - - This must be called to make sure that any underlying resource is freed. - """ - raise NotImplementedError() - - def is_active(self): - """Return ``True`` if the watcher is active and is used by the event loop. - - Return True if the watcher is installed and ready to handle process exit - notifications. 
- - """ - raise NotImplementedError() - - def __enter__(self): - """Enter the watcher's context and allow starting new processes - - This function must return self""" - raise NotImplementedError() - - def __exit__(self, a, b, c): - """Exit the watcher's context""" - raise NotImplementedError() - - -class PidfdChildWatcher(AbstractChildWatcher): +class _PidfdChildWatcher: """Child watcher implementation using Linux's pid file descriptors. This child watcher polls process file descriptors (pidfds) to await child @@ -963,21 +866,6 @@ class PidfdChildWatcher(AbstractChildWatcher): recent (5.3+) kernels. """ - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_traceback): - pass - - def is_active(self): - return True - - def close(self): - pass - - def attach_loop(self, loop): - pass - def add_child_handler(self, pid, callback, *args): loop = events.get_running_loop() pidfd = os.pidfd_open(pid) @@ -1002,386 +890,7 @@ def _do_wait(self, pid, pidfd, callback, args): os.close(pidfd) callback(pid, returncode, *args) - def remove_child_handler(self, pid): - # asyncio never calls remove_child_handler() !!! - # The method is no-op but is implemented because - # abstract base classes require it. - return True - - -class BaseChildWatcher(AbstractChildWatcher): - - def __init__(self): - self._loop = None - self._callbacks = {} - - def close(self): - self.attach_loop(None) - - def is_active(self): - return self._loop is not None and self._loop.is_running() - - def _do_waitpid(self, expected_pid): - raise NotImplementedError() - - def _do_waitpid_all(self): - raise NotImplementedError() - - def attach_loop(self, loop): - assert loop is None or isinstance(loop, events.AbstractEventLoop) - - if self._loop is not None and loop is None and self._callbacks: - warnings.warn( - 'A loop is being detached ' - 'from a child watcher with pending handlers', - RuntimeWarning) - - if self._loop is not None: - self._loop.remove_signal_handler(signal.SIGCHLD) - - self._loop = loop - if loop is not None: - loop.add_signal_handler(signal.SIGCHLD, self._sig_chld) - - # Prevent a race condition in case a child terminated - # during the switch. - self._do_waitpid_all() - - def _sig_chld(self): - try: - self._do_waitpid_all() - except (SystemExit, KeyboardInterrupt): - raise - except BaseException as exc: - # self._loop should always be available here - # as '_sig_chld' is added as a signal handler - # in 'attach_loop' - self._loop.call_exception_handler({ - 'message': 'Unknown exception in SIGCHLD handler', - 'exception': exc, - }) - - -class SafeChildWatcher(BaseChildWatcher): - """'Safe' child watcher implementation. - - This implementation avoids disrupting other code spawning processes by - polling explicitly each process in the SIGCHLD handler instead of calling - os.waitpid(-1). - - This is a safe solution but it has a significant overhead when handling a - big number of children (O(n) each time SIGCHLD is raised) - """ - - def __init__(self): - super().__init__() - warnings._deprecated("SafeChildWatcher", - "{name!r} is deprecated as of Python 3.12 and will be " - "removed in Python {remove}.", - remove=(3, 14)) - - def close(self): - self._callbacks.clear() - super().close() - - def __enter__(self): - return self - - def __exit__(self, a, b, c): - pass - - def add_child_handler(self, pid, callback, *args): - self._callbacks[pid] = (callback, args) - - # Prevent a race condition in case the child is already terminated. 
- self._do_waitpid(pid) - - def remove_child_handler(self, pid): - try: - del self._callbacks[pid] - return True - except KeyError: - return False - - def _do_waitpid_all(self): - - for pid in list(self._callbacks): - self._do_waitpid(pid) - - def _do_waitpid(self, expected_pid): - assert expected_pid > 0 - - try: - pid, status = os.waitpid(expected_pid, os.WNOHANG) - except ChildProcessError: - # The child process is already reaped - # (may happen if waitpid() is called elsewhere). - pid = expected_pid - returncode = 255 - logger.warning( - "Unknown child process pid %d, will report returncode 255", - pid) - else: - if pid == 0: - # The child process is still alive. - return - - returncode = waitstatus_to_exitcode(status) - if self._loop.get_debug(): - logger.debug('process %s exited with returncode %s', - expected_pid, returncode) - - try: - callback, args = self._callbacks.pop(pid) - except KeyError: # pragma: no cover - # May happen if .remove_child_handler() is called - # after os.waitpid() returns. - if self._loop.get_debug(): - logger.warning("Child watcher got an unexpected pid: %r", - pid, exc_info=True) - else: - callback(pid, returncode, *args) - - -class FastChildWatcher(BaseChildWatcher): - """'Fast' child watcher implementation. - - This implementation reaps every terminated processes by calling - os.waitpid(-1) directly, possibly breaking other code spawning processes - and waiting for their termination. - - There is no noticeable overhead when handling a big number of children - (O(1) each time a child terminates). - """ - def __init__(self): - super().__init__() - self._lock = threading.Lock() - self._zombies = {} - self._forks = 0 - warnings._deprecated("FastChildWatcher", - "{name!r} is deprecated as of Python 3.12 and will be " - "removed in Python {remove}.", - remove=(3, 14)) - - def close(self): - self._callbacks.clear() - self._zombies.clear() - super().close() - - def __enter__(self): - with self._lock: - self._forks += 1 - - return self - - def __exit__(self, a, b, c): - with self._lock: - self._forks -= 1 - - if self._forks or not self._zombies: - return - - collateral_victims = str(self._zombies) - self._zombies.clear() - - logger.warning( - "Caught subprocesses termination from unknown pids: %s", - collateral_victims) - - def add_child_handler(self, pid, callback, *args): - assert self._forks, "Must use the context manager" - - with self._lock: - try: - returncode = self._zombies.pop(pid) - except KeyError: - # The child is running. - self._callbacks[pid] = callback, args - return - - # The child is dead already. We can fire the callback. - callback(pid, returncode, *args) - - def remove_child_handler(self, pid): - try: - del self._callbacks[pid] - return True - except KeyError: - return False - - def _do_waitpid_all(self): - # Because of signal coalescing, we must keep calling waitpid() as - # long as we're able to reap a child. - while True: - try: - pid, status = os.waitpid(-1, os.WNOHANG) - except ChildProcessError: - # No more child processes exist. - return - else: - if pid == 0: - # A child process is still alive. - return - - returncode = waitstatus_to_exitcode(status) - - with self._lock: - try: - callback, args = self._callbacks.pop(pid) - except KeyError: - # unknown child - if self._forks: - # It may not be registered yet. 
- self._zombies[pid] = returncode - if self._loop.get_debug(): - logger.debug('unknown process %s exited ' - 'with returncode %s', - pid, returncode) - continue - callback = None - else: - if self._loop.get_debug(): - logger.debug('process %s exited with returncode %s', - pid, returncode) - - if callback is None: - logger.warning( - "Caught subprocess termination from unknown pid: " - "%d -> %d", pid, returncode) - else: - callback(pid, returncode, *args) - - -class MultiLoopChildWatcher(AbstractChildWatcher): - """A watcher that doesn't require running loop in the main thread. - - This implementation registers a SIGCHLD signal handler on - instantiation (which may conflict with other code that - install own handler for this signal). - - The solution is safe but it has a significant overhead when - handling a big number of processes (*O(n)* each time a - SIGCHLD is received). - """ - - # Implementation note: - # The class keeps compatibility with AbstractChildWatcher ABC - # To achieve this it has empty attach_loop() method - # and doesn't accept explicit loop argument - # for add_child_handler()/remove_child_handler() - # but retrieves the current loop by get_running_loop() - - def __init__(self): - self._callbacks = {} - self._saved_sighandler = None - warnings._deprecated("MultiLoopChildWatcher", - "{name!r} is deprecated as of Python 3.12 and will be " - "removed in Python {remove}.", - remove=(3, 14)) - - def is_active(self): - return self._saved_sighandler is not None - - def close(self): - self._callbacks.clear() - if self._saved_sighandler is None: - return - - handler = signal.getsignal(signal.SIGCHLD) - if handler != self._sig_chld: - logger.warning("SIGCHLD handler was changed by outside code") - else: - signal.signal(signal.SIGCHLD, self._saved_sighandler) - self._saved_sighandler = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - def add_child_handler(self, pid, callback, *args): - loop = events.get_running_loop() - self._callbacks[pid] = (loop, callback, args) - - # Prevent a race condition in case the child is already terminated. - self._do_waitpid(pid) - - def remove_child_handler(self, pid): - try: - del self._callbacks[pid] - return True - except KeyError: - return False - - def attach_loop(self, loop): - # Don't save the loop but initialize itself if called first time - # The reason to do it here is that attach_loop() is called from - # unix policy only for the main thread. - # Main thread is required for subscription on SIGCHLD signal - if self._saved_sighandler is not None: - return - - self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld) - if self._saved_sighandler is None: - logger.warning("Previous SIGCHLD handler was set by non-Python code, " - "restore to default handler on watcher close.") - self._saved_sighandler = signal.SIG_DFL - - # Set SA_RESTART to limit EINTR occurrences. - signal.siginterrupt(signal.SIGCHLD, False) - - def _do_waitpid_all(self): - for pid in list(self._callbacks): - self._do_waitpid(pid) - - def _do_waitpid(self, expected_pid): - assert expected_pid > 0 - - try: - pid, status = os.waitpid(expected_pid, os.WNOHANG) - except ChildProcessError: - # The child process is already reaped - # (may happen if waitpid() is called elsewhere). - pid = expected_pid - returncode = 255 - logger.warning( - "Unknown child process pid %d, will report returncode 255", - pid) - debug_log = False - else: - if pid == 0: - # The child process is still alive. 
- return - - returncode = waitstatus_to_exitcode(status) - debug_log = True - try: - loop, callback, args = self._callbacks.pop(pid) - except KeyError: # pragma: no cover - # May happen if .remove_child_handler() is called - # after os.waitpid() returns. - logger.warning("Child watcher got an unexpected pid: %r", - pid, exc_info=True) - else: - if loop.is_closed(): - logger.warning("Loop %r that handles pid %r is closed", loop, pid) - else: - if debug_log and loop.get_debug(): - logger.debug('process %s exited with returncode %s', - expected_pid, returncode) - loop.call_soon_threadsafe(callback, pid, returncode, *args) - - def _sig_chld(self, signum, frame): - try: - self._do_waitpid_all() - except (SystemExit, KeyboardInterrupt): - raise - except BaseException: - logger.warning('Unknown exception in SIGCHLD handler', exc_info=True) - - -class ThreadedChildWatcher(AbstractChildWatcher): +class _ThreadedChildWatcher: """Threaded child watcher implementation. The watcher uses a thread per process @@ -1398,18 +907,6 @@ def __init__(self): self._pid_counter = itertools.count(0) self._threads = {} - def is_active(self): - return True - - def close(self): - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - def __del__(self, _warn=warnings.warn): threads = [thread for thread in list(self._threads.values()) if thread.is_alive()] @@ -1427,15 +924,6 @@ def add_child_handler(self, pid, callback, *args): self._threads[pid] = thread thread.start() - def remove_child_handler(self, pid): - # asyncio never calls remove_child_handler() !!! - # The method is no-op but is implemented because - # abstract base classes require it. - return True - - def attach_loop(self, loop): - pass - def _do_waitpid(self, loop, expected_pid, callback, args): assert expected_pid > 0 @@ -1474,63 +962,11 @@ def can_use_pidfd(): return True -class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy): - """UNIX event loop policy with a watcher for child processes.""" +class _UnixDefaultEventLoopPolicy(events._BaseDefaultEventLoopPolicy): + """UNIX event loop policy""" _loop_factory = _UnixSelectorEventLoop - def __init__(self): - super().__init__() - self._watcher = None - - def _init_watcher(self): - with events._lock: - if self._watcher is None: # pragma: no branch - if can_use_pidfd(): - self._watcher = PidfdChildWatcher() - else: - self._watcher = ThreadedChildWatcher() - - def set_event_loop(self, loop): - """Set the event loop. - - As a side effect, if a child watcher was set before, then calling - .set_event_loop() from the main thread will call .attach_loop(loop) on - the child watcher. - """ - - super().set_event_loop(loop) - - if (self._watcher is not None and - threading.current_thread() is threading.main_thread()): - self._watcher.attach_loop(loop) - - def get_child_watcher(self): - """Get the watcher for child processes. - - If not yet set, a ThreadedChildWatcher object is automatically created. 
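With the watcher classes removed from the public API, the choice between the pidfd and threaded watchers is made internally via can_use_pidfd() (partially visible above). A sketch of the kind of runtime probe that implies; the body below is an assumption for illustration, not copied from the patch:

    import os


    def pidfd_supported() -> bool:
        # The call must exist (Linux, Python 3.9+) and must not be blocked by
        # the kernel or by a seccomp policy.
        if not hasattr(os, "pidfd_open"):
            return False
        try:
            os.close(os.pidfd_open(os.getpid()))
        except OSError:
            return False
        return True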
- """ - if self._watcher is None: - self._init_watcher() - - warnings._deprecated("get_child_watcher", - "{name!r} is deprecated as of Python 3.12 and will be " - "removed in Python {remove}.", remove=(3, 14)) - return self._watcher - - def set_child_watcher(self, watcher): - """Set the watcher for child processes.""" - - assert watcher is None or isinstance(watcher, AbstractChildWatcher) - - if self._watcher is not None: - self._watcher.close() - - self._watcher = watcher - warnings._deprecated("set_child_watcher", - "{name!r} is deprecated as of Python 3.12 and will be " - "removed in Python {remove}.", remove=(3, 14)) - SelectorEventLoop = _UnixSelectorEventLoop -DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy +_DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy EventLoop = SelectorEventLoop diff --git a/Lib/asyncio/windows_events.py b/Lib/asyncio/windows_events.py index bf99bc271c7..5f75b17d8ca 100644 --- a/Lib/asyncio/windows_events.py +++ b/Lib/asyncio/windows_events.py @@ -29,8 +29,8 @@ __all__ = ( 'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor', - 'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy', - 'WindowsProactorEventLoopPolicy', 'EventLoop', + '_DefaultEventLoopPolicy', '_WindowsSelectorEventLoopPolicy', + '_WindowsProactorEventLoopPolicy', 'EventLoop', ) @@ -891,13 +891,13 @@ def callback(f): SelectorEventLoop = _WindowsSelectorEventLoop -class WindowsSelectorEventLoopPolicy(events.BaseDefaultEventLoopPolicy): +class _WindowsSelectorEventLoopPolicy(events._BaseDefaultEventLoopPolicy): _loop_factory = SelectorEventLoop -class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy): +class _WindowsProactorEventLoopPolicy(events._BaseDefaultEventLoopPolicy): _loop_factory = ProactorEventLoop -DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy +_DefaultEventLoopPolicy = _WindowsProactorEventLoopPolicy EventLoop = ProactorEventLoop diff --git a/Lib/test/certdata/keycert3.pem.reference b/Lib/test/certdata/keycert3.pem.reference new file mode 100644 index 00000000000..84d2ca29953 --- /dev/null +++ b/Lib/test/certdata/keycert3.pem.reference @@ -0,0 +1,15 @@ +{'OCSP': ('http://testca.pythontest.net/testca/ocsp/',), + 'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',), + 'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',), + 'issuer': ((('countryName', 'XY'),), + (('organizationName', 'Python Software Foundation CA'),), + (('commonName', 'our-ca-server'),)), + 'notAfter': 'Oct 28 14:23:16 2525 GMT', + 'notBefore': 'Aug 29 14:23:16 2018 GMT', + 'serialNumber': 'CB2D80995A69525C', + 'subject': ((('countryName', 'XY'),), + (('localityName', 'Castle Anthrax'),), + (('organizationName', 'Python Software Foundation'),), + (('commonName', 'localhost'),)), + 'subjectAltName': (('DNS', 'localhost'),), + 'version': 3} diff --git a/Lib/test/test_asyncgen.py b/Lib/test/test_asyncgen.py index 45d220e3e02..d26b2a703e2 100644 --- a/Lib/test/test_asyncgen.py +++ b/Lib/test/test_asyncgen.py @@ -1026,6 +1026,7 @@ async def run(): fut.cancel() self.loop.run_until_complete(asyncio.sleep(0.01)) + @unittest.expectedFailure # TODO: RUSTPYTHON; gc_collect doesn't finalize async generators def test_async_gen_asyncio_gc_aclose_09(self): DONE = 0 @@ -1512,6 +1513,7 @@ async def main(): self.assertIn('an error occurred during closing of asynchronous generator', message['message']) + @unittest.expectedFailure # TODO: RUSTPYTHON; gc_collect doesn't finalize async generators, different cleanup path def 
test_async_gen_asyncio_shutdown_exception_02(self): messages = [] diff --git a/Lib/test/test_asyncio/functional.py b/Lib/test/test_asyncio/functional.py index d19c7a612cc..96dc9ab4401 100644 --- a/Lib/test/test_asyncio/functional.py +++ b/Lib/test/test_asyncio/functional.py @@ -217,16 +217,15 @@ def stop(self): pass finally: super().stop() - - def run(self): - try: - with self._sock: - self._sock.setblocking(False) - self._run() - finally: + self._sock.close() self._s1.close() self._s2.close() + + def run(self): + self._sock.setblocking(False) + self._run() + def _run(self): while self._active: if self._clients >= self._max_clients: diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py index 870be745098..92895bbb420 100644 --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -29,7 +29,7 @@ class CustomError(Exception): def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) def mock_socket_module(): @@ -1019,8 +1019,7 @@ async def iter_one(): asyncio.create_task(iter_one()) return status - # TODO: RUSTPYTHON - GC doesn't finalize async generators - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - GC doesn't finalize async generators def test_asyncgen_finalization_by_gc(self): # Async generators should be finalized when garbage collected. self.loop._process_events = mock.Mock() @@ -1036,8 +1035,7 @@ def test_asyncgen_finalization_by_gc(self): test_utils.run_briefly(self.loop) self.assertTrue(status['finalized']) - # TODO: RUSTPYTHON - GC doesn't finalize async generators - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - GC doesn't finalize async generators def test_asyncgen_finalization_by_gc_in_other_thread(self): # Python issue 34769: If garbage collector runs in another # thread, async generators will not finalize in debug @@ -1413,7 +1411,7 @@ def getaddrinfo_task(*args, **kwds): with self.assertRaises(OSError) as cm: self.loop.run_until_complete(coro) - self.assertTrue(str(cm.exception).startswith('Multiple exceptions: ')) + self.assertStartsWith(str(cm.exception), 'Multiple exceptions: ') self.assertTrue(m_socket.socket.return_value.close.called) coro = self.loop.create_connection( diff --git a/Lib/test/test_asyncio/test_buffered_proto.py b/Lib/test/test_asyncio/test_buffered_proto.py index f24e363ebfc..6d3edcc36f5 100644 --- a/Lib/test/test_asyncio/test_buffered_proto.py +++ b/Lib/test/test_asyncio/test_buffered_proto.py @@ -5,7 +5,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class ReceiveStuffProto(asyncio.BufferedProtocol): diff --git a/Lib/test/test_asyncio/test_context.py b/Lib/test/test_asyncio/test_context.py index 6b80721873d..f85f39839cb 100644 --- a/Lib/test/test_asyncio/test_context.py +++ b/Lib/test/test_asyncio/test_context.py @@ -4,7 +4,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) @unittest.skipUnless(decimal.HAVE_CONTEXTVAR, "decimal is built with a thread-local context") diff --git a/Lib/test/test_asyncio/test_eager_task_factory.py b/Lib/test/test_asyncio/test_eager_task_factory.py index 179a6e44b59..0561b54a3f1 100644 --- a/Lib/test/test_asyncio/test_eager_task_factory.py +++ b/Lib/test/test_asyncio/test_eager_task_factory.py @@ -13,7 +13,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + 
asyncio.events._set_event_loop_policy(None) class EagerTaskFactoryLoopTests: @@ -263,19 +263,62 @@ async def run(): self.run_coro(run()) + def test_eager_start_false(self): + name = None + + async def asyncfn(): + nonlocal name + name = asyncio.current_task().get_name() + + async def main(): + t = asyncio.get_running_loop().create_task( + asyncfn(), eager_start=False, name="example" + ) + self.assertFalse(t.done()) + self.assertIsNone(name) + await t + self.assertEqual(name, "example") + + self.run_coro(main()) + class PyEagerTaskFactoryLoopTests(EagerTaskFactoryLoopTests, test_utils.TestCase): Task = tasks._PyTask + def setUp(self): + self._all_tasks = asyncio.all_tasks + self._current_task = asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = asyncio.tasks._py_current_task + asyncio.all_tasks = asyncio.tasks.all_tasks = asyncio.tasks._py_all_tasks + return super().setUp() + + def tearDown(self): + asyncio.current_task = asyncio.tasks.current_task = self._current_task + asyncio.all_tasks = asyncio.tasks.all_tasks = self._all_tasks + return super().tearDown() + + @unittest.skipUnless(hasattr(tasks, '_CTask'), 'requires the C _asyncio module') class CEagerTaskFactoryLoopTests(EagerTaskFactoryLoopTests, test_utils.TestCase): Task = getattr(tasks, '_CTask', None) + def setUp(self): + self._current_task = asyncio.current_task + self._all_tasks = asyncio.all_tasks + asyncio.current_task = asyncio.tasks.current_task = asyncio.tasks._c_current_task + asyncio.all_tasks = asyncio.tasks.all_tasks = asyncio.tasks._c_all_tasks + return super().setUp() + + def tearDown(self): + asyncio.current_task = asyncio.tasks.current_task = self._current_task + asyncio.all_tasks = asyncio.tasks.all_tasks = self._all_tasks + return super().tearDown() + def test_issue105987(self): code = """if 1: - from _asyncio import _swap_current_task + from _asyncio import _swap_current_task, _set_running_loop class DummyTask: pass @@ -284,6 +327,7 @@ class DummyLoop: pass l = DummyLoop() + _set_running_loop(l) _swap_current_task(l, DummyTask()) t = _swap_current_task(l, None) """ @@ -400,31 +444,102 @@ class BaseEagerTaskFactoryTests(BaseTaskCountingTests): class NonEagerTests(BaseNonEagerTaskFactoryTests, test_utils.TestCase): - Task = asyncio.Task + Task = asyncio.tasks._CTask + def setUp(self): + self._current_task = asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = asyncio.tasks._c_current_task + return super().setUp() + + def tearDown(self): + asyncio.current_task = asyncio.tasks.current_task = self._current_task + return super().tearDown() class EagerTests(BaseEagerTaskFactoryTests, test_utils.TestCase): - Task = asyncio.Task + Task = asyncio.tasks._CTask + + def setUp(self): + self._current_task = asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = asyncio.tasks._c_current_task + return super().setUp() + + def tearDown(self): + asyncio.current_task = asyncio.tasks.current_task = self._current_task + return super().tearDown() class NonEagerPyTaskTests(BaseNonEagerTaskFactoryTests, test_utils.TestCase): Task = tasks._PyTask + def setUp(self): + self._current_task = asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = asyncio.tasks._py_current_task + return super().setUp() + + def tearDown(self): + asyncio.current_task = asyncio.tasks.current_task = self._current_task + return super().tearDown() + class EagerPyTaskTests(BaseEagerTaskFactoryTests, test_utils.TestCase): Task = tasks._PyTask + def setUp(self): + self._current_task = 
asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = asyncio.tasks._py_current_task + return super().setUp() + + def tearDown(self): + asyncio.current_task = asyncio.tasks.current_task = self._current_task + return super().tearDown() @unittest.skipUnless(hasattr(tasks, '_CTask'), 'requires the C _asyncio module') class NonEagerCTaskTests(BaseNonEagerTaskFactoryTests, test_utils.TestCase): Task = getattr(tasks, '_CTask', None) + def setUp(self): + self._current_task = asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = asyncio.tasks._c_current_task + return super().setUp() + + def tearDown(self): + asyncio.current_task = asyncio.tasks.current_task = self._current_task + return super().tearDown() + @unittest.skipUnless(hasattr(tasks, '_CTask'), 'requires the C _asyncio module') class EagerCTaskTests(BaseEagerTaskFactoryTests, test_utils.TestCase): Task = getattr(tasks, '_CTask', None) + def setUp(self): + self._current_task = asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = asyncio.tasks._c_current_task + return super().setUp() + + def tearDown(self): + asyncio.current_task = asyncio.tasks.current_task = self._current_task + return super().tearDown() + + +class DefaultTaskFactoryEagerStart(test_utils.TestCase): + def test_eager_start_true_with_default_factory(self): + name = None + + async def asyncfn(): + nonlocal name + name = asyncio.current_task().get_name() + + async def main(): + t = asyncio.get_running_loop().create_task( + asyncfn(), eager_start=True, name="example" + ) + self.assertTrue(t.done()) + self.assertEqual(name, "example") + await t + + asyncio.run(main(), loop_factory=asyncio.EventLoop) + if __name__ == '__main__': unittest.main() diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py index 2cbb96da696..1a06b426f71 100644 --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -23,7 +23,6 @@ import unittest from unittest import mock import weakref -import warnings if sys.platform not in ('win32', 'vxworks'): import tty @@ -38,9 +37,8 @@ from test.support import threading_helper from test.support import ALWAYS_EQ, LARGEST, SMALLEST - def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) def broken_unix_getsockname(): @@ -275,8 +273,7 @@ def tearDown(self): support.gc_collect() super().tearDown() - # TODO: RUSTPYTHON - RuntimeWarning for unawaited coroutine not triggered - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - RuntimeWarning for unawaited coroutine not triggered def test_run_until_complete_nesting(self): async def coro1(): await asyncio.sleep(0) @@ -356,6 +353,124 @@ def run_in_thread(): t.join() self.assertEqual(results, ['hello', 'world']) + def test_call_soon_threadsafe_handle_block_check_cancelled(self): + results = [] + + callback_started = threading.Event() + callback_finished = threading.Event() + def callback(arg): + callback_started.set() + results.append(arg) + time.sleep(1) + callback_finished.set() + + def run_in_thread(): + handle = self.loop.call_soon_threadsafe(callback, 'hello') + self.assertIsInstance(handle, events._ThreadSafeHandle) + callback_started.wait() + # callback started so it should block checking for cancellation + # until it finishes + self.assertFalse(handle.cancelled()) + self.assertTrue(callback_finished.is_set()) + self.loop.call_soon_threadsafe(self.loop.stop) + + t = threading.Thread(target=run_in_thread) + 
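The eager-task tests above depend on create_task(..., eager_start=...) being accepted by the default task factory. A small sketch mirroring what test_eager_start_false and DefaultTaskFactoryEagerStart assert, assuming the Python 3.14 semantics those tests describe:

    import asyncio


    async def work():
        return asyncio.current_task().get_name()


    async def main():
        loop = asyncio.get_running_loop()
        eager = loop.create_task(work(), eager_start=True, name="eager")
        lazy = loop.create_task(work(), eager_start=False, name="lazy")
        assert eager.done()          # ran synchronously at creation: no awaits inside
        assert not lazy.done()       # waits for the loop to schedule its first step
        print(await eager, await lazy)


    asyncio.run(main(), loop_factory=asyncio.EventLoop)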
t.start() + + self.loop.run_forever() + t.join() + self.assertEqual(results, ['hello']) + + def test_call_soon_threadsafe_handle_block_cancellation(self): + results = [] + + callback_started = threading.Event() + callback_finished = threading.Event() + def callback(arg): + callback_started.set() + results.append(arg) + time.sleep(1) + callback_finished.set() + + def run_in_thread(): + handle = self.loop.call_soon_threadsafe(callback, 'hello') + self.assertIsInstance(handle, events._ThreadSafeHandle) + callback_started.wait() + # callback started so it cannot be cancelled from other thread until + # it finishes + handle.cancel() + self.assertTrue(callback_finished.is_set()) + self.loop.call_soon_threadsafe(self.loop.stop) + + t = threading.Thread(target=run_in_thread) + t.start() + + self.loop.run_forever() + t.join() + self.assertEqual(results, ['hello']) + + def test_call_soon_threadsafe_handle_cancel_same_thread(self): + results = [] + callback_started = threading.Event() + callback_finished = threading.Event() + + fut = concurrent.futures.Future() + def callback(arg): + callback_started.set() + handle = fut.result() + handle.cancel() + results.append(arg) + callback_finished.set() + self.loop.stop() + + def run_in_thread(): + handle = self.loop.call_soon_threadsafe(callback, 'hello') + fut.set_result(handle) + self.assertIsInstance(handle, events._ThreadSafeHandle) + callback_started.wait() + # callback cancels itself from same thread so it has no effect + # it runs to completion + self.assertTrue(handle.cancelled()) + self.assertTrue(callback_finished.is_set()) + self.loop.call_soon_threadsafe(self.loop.stop) + + t = threading.Thread(target=run_in_thread) + t.start() + + self.loop.run_forever() + t.join() + self.assertEqual(results, ['hello']) + + def test_call_soon_threadsafe_handle_cancel_other_thread(self): + results = [] + ev = threading.Event() + + callback_finished = threading.Event() + def callback(arg): + results.append(arg) + callback_finished.set() + self.loop.stop() + + def run_in_thread(): + handle = self.loop.call_soon_threadsafe(callback, 'hello') + # handle can be cancelled from other thread if not started yet + self.assertIsInstance(handle, events._ThreadSafeHandle) + handle.cancel() + self.assertTrue(handle.cancelled()) + self.assertFalse(callback_finished.is_set()) + ev.set() + self.loop.call_soon_threadsafe(self.loop.stop) + + # block the main loop until the callback is added and cancelled in the + # other thread + self.loop.call_soon(ev.wait) + t = threading.Thread(target=run_in_thread) + t.start() + self.loop.run_forever() + t.join() + self.assertEqual(results, []) + self.assertFalse(callback_finished.is_set()) + def test_call_soon_threadsafe_same_thread(self): results = [] @@ -445,8 +560,7 @@ def writer(data): r.close() self.assertEqual(read, data) - # TODO: RUSTPYTHON - signal handler implementation differs - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - signal handler implementation differs @unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL') def test_add_signal_handler(self): caught = 0 @@ -1174,8 +1288,7 @@ def test_create_unix_server_ssl_verified(self): server.close() self.loop.run_until_complete(proto.done) - # TODO: RUSTPYTHON - SSL peer certificate format differs - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - SSL peer certificate format differs @unittest.skipIf(ssl is None, 'No ssl module') def test_create_server_ssl_verified(self): proto = MyProto(loop=self.loop) @@ -2073,7 +2186,7 @@ 
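These tests exercise the handle returned by call_soon_threadsafe(): another thread may cancel it only while the callback has not started running. A condensed sketch of the cancel-from-another-thread case, reusing the tests' trick of parking the loop on a threading.Event so the outcome is deterministic:

    import asyncio
    import threading

    loop = asyncio.new_event_loop()
    scheduled = threading.Event()


    def from_other_thread():
        handle = loop.call_soon_threadsafe(print, "never runs")
        handle.cancel()                    # callback has not started, so this wins
        assert handle.cancelled()
        scheduled.set()
        loop.call_soon_threadsafe(loop.stop)


    # Block the loop's first iteration until the worker has scheduled and
    # cancelled its callback, then let the loop drain and stop.
    loop.call_soon(scheduled.wait)
    worker = threading.Thread(target=from_other_thread)
    worker.start()
    loop.run_forever()
    worker.join()
    loop.close()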
def test_subprocess_stderr(self): transp.close() self.assertEqual(b'OUT:test', proto.data[1]) - self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2]) + self.assertStartsWith(proto.data[2], b'ERR:test') self.assertEqual(0, proto.returncode) @support.requires_subprocess() @@ -2095,8 +2208,7 @@ def test_subprocess_stderr_redirect_to_stdout(self): stdin.write(b'test') self.loop.run_until_complete(proto.completed) - self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'), - proto.data[1]) + self.assertStartsWith(proto.data[1], b'OUT:testERR:test') self.assertEqual(b'', proto.data[2]) transp.close() @@ -2216,24 +2328,8 @@ def test_remove_fds_after_closing(self): else: import selectors - class UnixEventLoopTestsMixin(EventLoopTestsMixin): - def setUp(self): - super().setUp() - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - watcher = asyncio.SafeChildWatcher() - watcher.attach_loop(self.loop) - asyncio.set_child_watcher(watcher) - - def tearDown(self): - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - asyncio.set_child_watcher(None) - super().tearDown() - - if hasattr(selectors, 'KqueueSelector'): - class KqueueEventLoopTests(UnixEventLoopTestsMixin, + class KqueueEventLoopTests(EventLoopTestsMixin, SubprocessTestsMixin, test_utils.TestCase): @@ -2258,7 +2354,7 @@ def test_write_pty(self): super().test_write_pty() if hasattr(selectors, 'EpollSelector'): - class EPollEventLoopTests(UnixEventLoopTestsMixin, + class EPollEventLoopTests(EventLoopTestsMixin, SubprocessTestsMixin, test_utils.TestCase): @@ -2266,7 +2362,7 @@ def create_event_loop(self): return asyncio.SelectorEventLoop(selectors.EpollSelector()) if hasattr(selectors, 'PollSelector'): - class PollEventLoopTests(UnixEventLoopTestsMixin, + class PollEventLoopTests(EventLoopTestsMixin, SubprocessTestsMixin, test_utils.TestCase): @@ -2274,7 +2370,7 @@ def create_event_loop(self): return asyncio.SelectorEventLoop(selectors.PollSelector()) # Should always exist. - class SelectEventLoopTests(UnixEventLoopTestsMixin, + class SelectEventLoopTests(EventLoopTestsMixin, SubprocessTestsMixin, test_utils.TestCase): @@ -2420,7 +2516,7 @@ def test_handle_repr_debug(self): self.assertRegex(repr(h), regex) def test_handle_source_traceback(self): - loop = asyncio.get_event_loop_policy().new_event_loop() + loop = asyncio.new_event_loop() loop.set_debug(True) self.set_event_loop(loop) @@ -2559,33 +2655,46 @@ def callback(*args): h1 = asyncio.TimerHandle(when, callback, (), self.loop) h2 = asyncio.TimerHandle(when, callback, (), self.loop) - # TODO: Use assertLess etc. 
- self.assertFalse(h1 < h2) - self.assertFalse(h2 < h1) - self.assertTrue(h1 <= h2) - self.assertTrue(h2 <= h1) - self.assertFalse(h1 > h2) - self.assertFalse(h2 > h1) - self.assertTrue(h1 >= h2) - self.assertTrue(h2 >= h1) - self.assertTrue(h1 == h2) - self.assertFalse(h1 != h2) + with self.assertRaises(AssertionError): + self.assertLess(h1, h2) + with self.assertRaises(AssertionError): + self.assertLess(h2, h1) + with self.assertRaises(AssertionError): + self.assertGreater(h1, h2) + with self.assertRaises(AssertionError): + self.assertGreater(h2, h1) + with self.assertRaises(AssertionError): + self.assertNotEqual(h1, h2) + + self.assertLessEqual(h1, h2) + self.assertLessEqual(h2, h1) + self.assertGreaterEqual(h1, h2) + self.assertGreaterEqual(h2, h1) + self.assertEqual(h1, h2) h2.cancel() - self.assertFalse(h1 == h2) + with self.assertRaises(AssertionError): + self.assertEqual(h1, h2) + self.assertNotEqual(h1, h2) h1 = asyncio.TimerHandle(when, callback, (), self.loop) h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop) - self.assertTrue(h1 < h2) - self.assertFalse(h2 < h1) - self.assertTrue(h1 <= h2) - self.assertFalse(h2 <= h1) - self.assertFalse(h1 > h2) - self.assertTrue(h2 > h1) - self.assertFalse(h1 >= h2) - self.assertTrue(h2 >= h1) - self.assertFalse(h1 == h2) - self.assertTrue(h1 != h2) + with self.assertRaises(AssertionError): + self.assertLess(h2, h1) + with self.assertRaises(AssertionError): + self.assertLessEqual(h2, h1) + with self.assertRaises(AssertionError): + self.assertGreater(h1, h2) + with self.assertRaises(AssertionError): + self.assertGreaterEqual(h1, h2) + with self.assertRaises(AssertionError): + self.assertEqual(h1, h2) + + self.assertLess(h1, h2) + self.assertGreater(h2, h1) + self.assertLessEqual(h1, h2) + self.assertGreaterEqual(h2, h1) + self.assertNotEqual(h1, h2) h3 = asyncio.Handle(callback, (), self.loop) self.assertIs(NotImplemented, h1.__eq__(h3)) @@ -2599,19 +2708,25 @@ def callback(*args): h1 <= () with self.assertRaises(TypeError): h1 >= () - self.assertFalse(h1 == ()) - self.assertTrue(h1 != ()) - - self.assertTrue(h1 == ALWAYS_EQ) - self.assertFalse(h1 != ALWAYS_EQ) - self.assertTrue(h1 < LARGEST) - self.assertFalse(h1 > LARGEST) - self.assertTrue(h1 <= LARGEST) - self.assertFalse(h1 >= LARGEST) - self.assertFalse(h1 < SMALLEST) - self.assertTrue(h1 > SMALLEST) - self.assertFalse(h1 <= SMALLEST) - self.assertTrue(h1 >= SMALLEST) + with self.assertRaises(AssertionError): + self.assertEqual(h1, ()) + with self.assertRaises(AssertionError): + self.assertNotEqual(h1, ALWAYS_EQ) + with self.assertRaises(AssertionError): + self.assertGreater(h1, LARGEST) + with self.assertRaises(AssertionError): + self.assertGreaterEqual(h1, LARGEST) + with self.assertRaises(AssertionError): + self.assertLess(h1, SMALLEST) + with self.assertRaises(AssertionError): + self.assertLessEqual(h1, SMALLEST) + + self.assertNotEqual(h1, ()) + self.assertEqual(h1, ALWAYS_EQ) + self.assertLess(h1, LARGEST) + self.assertLessEqual(h1, LARGEST) + self.assertGreaterEqual(h1, SMALLEST) + self.assertGreater(h1, SMALLEST) class AbstractEventLoopTests(unittest.TestCase): @@ -2718,58 +2833,55 @@ async def inner(): class PolicyTests(unittest.TestCase): + def test_abstract_event_loop_policy_deprecation(self): + with self.assertWarnsRegex( + DeprecationWarning, "'asyncio.AbstractEventLoopPolicy' is deprecated"): + policy = asyncio.AbstractEventLoopPolicy() + self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy) + + def test_default_event_loop_policy_deprecation(self): + 
with self.assertWarnsRegex( + DeprecationWarning, "'asyncio.DefaultEventLoopPolicy' is deprecated"): + policy = asyncio.DefaultEventLoopPolicy() + self.assertIsInstance(policy, asyncio.DefaultEventLoopPolicy) + def test_event_loop_policy(self): - policy = asyncio.AbstractEventLoopPolicy() + policy = asyncio.events._AbstractEventLoopPolicy() self.assertRaises(NotImplementedError, policy.get_event_loop) self.assertRaises(NotImplementedError, policy.set_event_loop, object()) self.assertRaises(NotImplementedError, policy.new_event_loop) - self.assertRaises(NotImplementedError, policy.get_child_watcher) - self.assertRaises(NotImplementedError, policy.set_child_watcher, - object()) def test_get_event_loop(self): - policy = asyncio.DefaultEventLoopPolicy() + policy = test_utils.DefaultEventLoopPolicy() self.assertIsNone(policy._local._loop) - with self.assertWarns(DeprecationWarning) as cm: - loop = policy.get_event_loop() - self.assertEqual(cm.filename, __file__) - self.assertIsInstance(loop, asyncio.AbstractEventLoop) - self.assertIs(policy._local._loop, loop) - self.assertIs(loop, policy.get_event_loop()) - loop.close() + with self.assertRaises(RuntimeError): + loop = policy.get_event_loop() + self.assertIsNone(policy._local._loop) - def test_get_event_loop_calls_set_event_loop(self): - policy = asyncio.DefaultEventLoopPolicy() + def test_get_event_loop_does_not_call_set_event_loop(self): + policy = test_utils.DefaultEventLoopPolicy() with mock.patch.object( policy, "set_event_loop", wraps=policy.set_event_loop) as m_set_event_loop: - with self.assertWarns(DeprecationWarning) as cm: + with self.assertRaises(RuntimeError): loop = policy.get_event_loop() - self.addCleanup(loop.close) - self.assertEqual(cm.filename, __file__) - # policy._local._loop must be set through .set_event_loop() - # (the unix DefaultEventLoopPolicy needs this call to attach - # the child watcher correctly) - m_set_event_loop.assert_called_with(loop) - - loop.close() + m_set_event_loop.assert_not_called() def test_get_event_loop_after_set_none(self): - policy = asyncio.DefaultEventLoopPolicy() + policy = test_utils.DefaultEventLoopPolicy() policy.set_event_loop(None) self.assertRaises(RuntimeError, policy.get_event_loop) - # TODO: RUSTPYTHON - mock.patch doesn't work correctly with threading.current_thread - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - mock.patch doesn't work correctly with threading.current_thread @mock.patch('asyncio.events.threading.current_thread') def test_get_event_loop_thread(self, m_current_thread): def f(): - policy = asyncio.DefaultEventLoopPolicy() + policy = test_utils.DefaultEventLoopPolicy() self.assertRaises(RuntimeError, policy.get_event_loop) th = threading.Thread(target=f) @@ -2777,14 +2889,14 @@ def f(): th.join() def test_new_event_loop(self): - policy = asyncio.DefaultEventLoopPolicy() + policy = test_utils.DefaultEventLoopPolicy() loop = policy.new_event_loop() self.assertIsInstance(loop, asyncio.AbstractEventLoop) loop.close() def test_set_event_loop(self): - policy = asyncio.DefaultEventLoopPolicy() + policy = test_utils.DefaultEventLoopPolicy() old_loop = policy.new_event_loop() policy.set_event_loop(old_loop) @@ -2798,20 +2910,31 @@ def test_set_event_loop(self): old_loop.close() def test_get_event_loop_policy(self): - policy = asyncio.get_event_loop_policy() - self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy) - self.assertIs(policy, asyncio.get_event_loop_policy()) + with self.assertWarnsRegex( + DeprecationWarning, 
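The updated PolicyTests expect the public policy helpers to emit DeprecationWarning while the concrete policy classes move behind underscore names. A short sketch of what user code would observe under that behaviour; on interpreters without this change the list printed at the end is simply empty:

    import asyncio
    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        policy = asyncio.get_event_loop_policy()      # deprecated entry point

    print([str(w.message) for w in caught
           if issubclass(w.category, DeprecationWarning)])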
"'asyncio.get_event_loop_policy' is deprecated"): + policy = asyncio.get_event_loop_policy() + self.assertIsInstance(policy, asyncio.events._AbstractEventLoopPolicy) + self.assertIs(policy, asyncio.get_event_loop_policy()) def test_set_event_loop_policy(self): - self.assertRaises( - TypeError, asyncio.set_event_loop_policy, object()) + with self.assertWarnsRegex( + DeprecationWarning, "'asyncio.set_event_loop_policy' is deprecated"): + self.assertRaises( + TypeError, asyncio.set_event_loop_policy, object()) + + with self.assertWarnsRegex( + DeprecationWarning, "'asyncio.get_event_loop_policy' is deprecated"): + old_policy = asyncio.get_event_loop_policy() - old_policy = asyncio.get_event_loop_policy() + policy = test_utils.DefaultEventLoopPolicy() + with self.assertWarnsRegex( + DeprecationWarning, "'asyncio.set_event_loop_policy' is deprecated"): + asyncio.set_event_loop_policy(policy) - policy = asyncio.DefaultEventLoopPolicy() - asyncio.set_event_loop_policy(policy) - self.assertIs(policy, asyncio.get_event_loop_policy()) - self.assertIsNot(policy, old_policy) + with self.assertWarnsRegex( + DeprecationWarning, "'asyncio.get_event_loop_policy' is deprecated"): + self.assertIs(policy, asyncio.get_event_loop_policy()) + self.assertIsNot(policy, old_policy) class GetEventLoopTestsMixin: @@ -2821,11 +2944,16 @@ class GetEventLoopTestsMixin: get_running_loop_impl = None get_event_loop_impl = None + Task = None + Future = None + def setUp(self): self._get_running_loop_saved = events._get_running_loop self._set_running_loop_saved = events._set_running_loop self.get_running_loop_saved = events.get_running_loop self.get_event_loop_saved = events.get_event_loop + self._Task_saved = asyncio.Task + self._Future_saved = asyncio.Future events._get_running_loop = type(self)._get_running_loop_impl events._set_running_loop = type(self)._set_running_loop_impl @@ -2837,25 +2965,15 @@ def setUp(self): asyncio.get_running_loop = type(self).get_running_loop_impl asyncio.get_event_loop = type(self).get_event_loop_impl + asyncio.Task = asyncio.tasks.Task = type(self).Task + asyncio.Future = asyncio.futures.Future = type(self).Future super().setUp() self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) - if sys.platform != 'win32': - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - watcher = asyncio.SafeChildWatcher() - watcher.attach_loop(self.loop) - asyncio.set_child_watcher(watcher) - def tearDown(self): try: - if sys.platform != 'win32': - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - asyncio.set_child_watcher(None) - super().tearDown() finally: self.loop.close() @@ -2871,8 +2989,10 @@ def tearDown(self): asyncio.get_running_loop = self.get_running_loop_saved asyncio.get_event_loop = self.get_event_loop_saved - if sys.platform != 'win32': + asyncio.Task = asyncio.tasks.Task = self._Task_saved + asyncio.Future = asyncio.futures.Future = self._Future_saved + if sys.platform != 'win32': def test_get_event_loop_new_process(self): # bpo-32126: The multiprocessing module used by # ProcessPoolExecutor is not functional when the @@ -2918,13 +3038,13 @@ def test_get_event_loop_returns_running_loop(self): class TestError(Exception): pass - class Policy(asyncio.DefaultEventLoopPolicy): + class Policy(test_utils.DefaultEventLoopPolicy): def get_event_loop(self): raise TestError - old_policy = asyncio.get_event_loop_policy() + old_policy = asyncio.events._get_event_loop_policy() try: - 
asyncio.set_event_loop_policy(Policy()) + asyncio.events._set_event_loop_policy(Policy()) loop = asyncio.new_event_loop() with self.assertRaises(TestError): @@ -2952,7 +3072,7 @@ async def func(): asyncio.get_event_loop() finally: - asyncio.set_event_loop_policy(old_policy) + asyncio.events._set_event_loop_policy(old_policy) if loop is not None: loop.close() @@ -2962,23 +3082,18 @@ async def func(): self.assertIs(asyncio._get_running_loop(), None) def test_get_event_loop_returns_running_loop2(self): - old_policy = asyncio.get_event_loop_policy() + old_policy = asyncio.events._get_event_loop_policy() try: - asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy()) + asyncio.events._set_event_loop_policy(test_utils.DefaultEventLoopPolicy()) loop = asyncio.new_event_loop() self.addCleanup(loop.close) - with self.assertWarns(DeprecationWarning) as cm: - loop2 = asyncio.get_event_loop() - self.addCleanup(loop2.close) - self.assertEqual(cm.filename, __file__) - asyncio.set_event_loop(None) with self.assertRaisesRegex(RuntimeError, 'no current'): asyncio.get_event_loop() - with self.assertRaisesRegex(RuntimeError, 'no running'): - asyncio.get_running_loop() - self.assertIs(asyncio._get_running_loop(), None) + asyncio.set_event_loop(None) + with self.assertRaisesRegex(RuntimeError, 'no current'): + asyncio.get_event_loop() async def func(): self.assertIs(asyncio.get_event_loop(), loop) @@ -2995,7 +3110,7 @@ async def func(): asyncio.get_event_loop() finally: - asyncio.set_event_loop_policy(old_policy) + asyncio.events._set_event_loop_policy(old_policy) if loop is not None: loop.close() @@ -3012,6 +3127,8 @@ class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase): get_running_loop_impl = events._py_get_running_loop get_event_loop_impl = events._py_get_event_loop + Task = asyncio.tasks._PyTask + Future = asyncio.futures._PyFuture try: import _asyncio # NoQA @@ -3026,6 +3143,8 @@ class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase): get_running_loop_impl = events._c_get_running_loop get_event_loop_impl = events._c_get_event_loop + Task = asyncio.tasks._CTask + Future = asyncio.futures._CFuture class TestServer(unittest.TestCase): diff --git a/Lib/test/test_asyncio/test_free_threading.py b/Lib/test/test_asyncio/test_free_threading.py new file mode 100644 index 00000000000..86ba9e68b3b --- /dev/null +++ b/Lib/test/test_asyncio/test_free_threading.py @@ -0,0 +1,243 @@ +import asyncio +import threading +import unittest +from threading import Thread +from unittest import TestCase +import weakref +from test import support +from test.support import threading_helper + +threading_helper.requires_working_threading(module=True) + + +class MyException(Exception): + pass + + +def tearDownModule(): + asyncio.events._set_event_loop_policy(None) + + +class TestFreeThreading: + def test_all_tasks_race(self) -> None: + async def main(): + loop = asyncio.get_running_loop() + future = loop.create_future() + + async def coro(): + await future + + tasks = set() + + async with asyncio.TaskGroup() as tg: + for _ in range(100): + tasks.add(tg.create_task(coro())) + + all_tasks = asyncio.all_tasks(loop) + self.assertEqual(len(all_tasks), 101) + + for task in all_tasks: + self.assertEqual(task.get_loop(), loop) + self.assertFalse(task.done()) + + current = asyncio.current_task() + self.assertEqual(current.get_loop(), loop) + self.assertSetEqual(all_tasks, tasks | {current}) + future.set_result(None) + + def runner(): + with asyncio.Runner() as runner: + loop = runner.get_loop() + 
loop.set_task_factory(self.factory) + runner.run(main()) + + threads = [] + + for _ in range(10): + thread = Thread(target=runner) + threads.append(thread) + + with threading_helper.start_threads(threads): + pass + + def test_all_tasks_different_thread(self) -> None: + loop = None + started = threading.Event() + done = threading.Event() # used for main task not finishing early + async def coro(): + await asyncio.Future() + + lock = threading.Lock() + tasks = set() + + async def main(): + nonlocal tasks, loop + loop = asyncio.get_running_loop() + started.set() + for i in range(1000): + with lock: + asyncio.create_task(coro()) + tasks = asyncio.all_tasks(loop) + done.wait() + + runner = threading.Thread(target=lambda: asyncio.run(main())) + + def check(): + started.wait() + with lock: + self.assertSetEqual(tasks & asyncio.all_tasks(loop), tasks) + + threads = [threading.Thread(target=check) for _ in range(10)] + runner.start() + + with threading_helper.start_threads(threads): + pass + + done.set() + runner.join() + + def test_task_different_thread_finalized(self) -> None: + task = None + async def func(): + nonlocal task + task = asyncio.current_task() + def runner(): + with asyncio.Runner() as runner: + loop = runner.get_loop() + loop.set_task_factory(self.factory) + runner.run(func()) + thread = Thread(target=runner) + thread.start() + thread.join() + wr = weakref.ref(task) + del thread + del task + # task finalization in different thread shouldn't crash + support.gc_collect() + self.assertIsNone(wr()) + + def test_run_coroutine_threadsafe(self) -> None: + results = [] + + def in_thread(loop: asyncio.AbstractEventLoop): + coro = asyncio.sleep(0.1, result=42) + fut = asyncio.run_coroutine_threadsafe(coro, loop) + result = fut.result() + self.assertEqual(result, 42) + results.append(result) + + async def main(): + loop = asyncio.get_running_loop() + async with asyncio.TaskGroup() as tg: + for _ in range(10): + tg.create_task(asyncio.to_thread(in_thread, loop)) + self.assertEqual(results, [42] * 10) + + with asyncio.Runner() as r: + loop = r.get_loop() + loop.set_task_factory(self.factory) + r.run(main()) + + def test_run_coroutine_threadsafe_exception(self) -> None: + async def coro(): + await asyncio.sleep(0) + raise MyException("test") + + def in_thread(loop: asyncio.AbstractEventLoop): + fut = asyncio.run_coroutine_threadsafe(coro(), loop) + return fut.result() + + async def main(): + loop = asyncio.get_running_loop() + tasks = [] + for _ in range(10): + task = loop.create_task(asyncio.to_thread(in_thread, loop)) + tasks.append(task) + results = await asyncio.gather(*tasks, return_exceptions=True) + + self.assertEqual(len(results), 10) + for result in results: + self.assertIsInstance(result, MyException) + self.assertEqual(str(result), "test") + + with asyncio.Runner() as r: + loop = r.get_loop() + loop.set_task_factory(self.factory) + r.run(main()) + + +class TestPyFreeThreading(TestFreeThreading, TestCase): + + def setUp(self): + self._old_current_task = asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = asyncio.tasks._py_current_task + self._old_all_tasks = asyncio.all_tasks + asyncio.all_tasks = asyncio.tasks.all_tasks = asyncio.tasks._py_all_tasks + self._old_Task = asyncio.Task + asyncio.Task = asyncio.tasks.Task = asyncio.tasks._PyTask + self._old_Future = asyncio.Future + asyncio.Future = asyncio.futures.Future = asyncio.futures._PyFuture + return super().setUp() + + def tearDown(self): + asyncio.current_task = asyncio.tasks.current_task = 
self._old_current_task + asyncio.all_tasks = asyncio.tasks.all_tasks = self._old_all_tasks + asyncio.Task = asyncio.tasks.Task = self._old_Task + asyncio.Future = asyncio.tasks.Future = self._old_Future + return super().tearDown() + + def factory(self, loop, coro, **kwargs): + return asyncio.tasks._PyTask(coro, loop=loop, **kwargs) + + @unittest.expectedFailure # TODO: RUSTPYTHON; AssertionError: .func() done, defined at /Users/al03219714/Projects/RustPython2/crates/pylib/Lib/test/test_asyncio/test_free_threading.py:101> result=None> is not None + def test_task_different_thread_finalized(self): + return super().test_task_different_thread_finalized() + + @unittest.skip("TODO: RUSTPYTHON; hangs - Python _current_tasks dict not thread-safe") + def test_all_tasks_race(self): + return super().test_all_tasks_race() + + +@unittest.skipUnless(hasattr(asyncio.tasks, "_c_all_tasks"), "requires _asyncio") +class TestCFreeThreading(TestFreeThreading, TestCase): + + def setUp(self): + self._old_current_task = asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = asyncio.tasks._c_current_task + self._old_all_tasks = asyncio.all_tasks + asyncio.all_tasks = asyncio.tasks.all_tasks = asyncio.tasks._c_all_tasks + self._old_Task = asyncio.Task + asyncio.Task = asyncio.tasks.Task = asyncio.tasks._CTask + self._old_Future = asyncio.Future + asyncio.Future = asyncio.futures.Future = asyncio.futures._CFuture + return super().setUp() + + def tearDown(self): + asyncio.current_task = asyncio.tasks.current_task = self._old_current_task + asyncio.all_tasks = asyncio.tasks.all_tasks = self._old_all_tasks + asyncio.Task = asyncio.tasks.Task = self._old_Task + asyncio.Future = asyncio.futures.Future = self._old_Future + return super().tearDown() + + + def factory(self, loop, coro, **kwargs): + return asyncio.tasks._CTask(coro, loop=loop, **kwargs) + + +class TestEagerPyFreeThreading(TestPyFreeThreading): + def factory(self, loop, coro, eager_start=True, **kwargs): + return asyncio.tasks._PyTask(coro, loop=loop, **kwargs, eager_start=eager_start) + + @unittest.expectedFailure # TODO: RUSTPYTHON; AssertionError: .func() done, defined at /Users/al03219714/Projects/RustPython2/crates/pylib/Lib/test/test_asyncio/test_free_threading.py:101> result=None> is not None + def test_task_different_thread_finalized(self): + return super().test_task_different_thread_finalized() + + @unittest.skip("TODO: RUSTPYTHON; hangs - Python _current_tasks dict not thread-safe") + def test_all_tasks_race(self): + return super().test_all_tasks_race() + + +@unittest.skipUnless(hasattr(asyncio.tasks, "_c_all_tasks"), "requires _asyncio") +class TestEagerCFreeThreading(TestCFreeThreading, TestCase): + def factory(self, loop, coro, eager_start=True, **kwargs): + return asyncio.tasks._CTask(coro, loop=loop, **kwargs, eager_start=eager_start) diff --git a/Lib/test/test_asyncio/test_futures.py b/Lib/test/test_asyncio/test_futures.py index 571fbace020..0acd7aead8c 100644 --- a/Lib/test/test_asyncio/test_futures.py +++ b/Lib/test/test_asyncio/test_futures.py @@ -17,7 +17,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) def _fakefunc(f): @@ -242,7 +242,7 @@ def test_uninitialized(self): def test_future_cancel_message_getter(self): f = self._new_future(loop=self.loop) - self.assertTrue(hasattr(f, '_cancel_message')) + self.assertHasAttr(f, '_cancel_message') self.assertEqual(f._cancel_message, None) f.cancel('my message') @@ -678,8 +678,7 @@ def __del__(self): fut = 
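The free-threading tests above drive asyncio.run_coroutine_threadsafe() from many worker threads at once. Stripped of the task-factory plumbing, the core pattern they exercise looks roughly like this:

    import asyncio


    def blocking_worker(loop: asyncio.AbstractEventLoop) -> int:
        # Submit a coroutine to the running loop from a worker thread and block
        # this thread (not the loop) on the concurrent.futures.Future it returns.
        fut = asyncio.run_coroutine_threadsafe(asyncio.sleep(0.1, result=42), loop)
        return fut.result()


    async def main():
        loop = asyncio.get_running_loop()
        results = await asyncio.gather(
            *(asyncio.to_thread(blocking_worker, loop) for _ in range(10))
        )
        assert results == [42] * 10


    asyncio.run(main())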
self._new_future(loop=self.loop) fut.set_result(Evil()) - # TODO: RUSTPYTHON - gc.get_referrers not implemented - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - gc.get_referrers not implemented def test_future_cancelled_result_refcycles(self): f = self._new_future(loop=self.loop) f.cancel() @@ -691,8 +690,7 @@ def test_future_cancelled_result_refcycles(self): self.assertIsNotNone(exc) self.assertListEqual(gc.get_referrers(exc), []) - # TODO: RUSTPYTHON - gc.get_referrers not implemented - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - gc.get_referrers not implemented def test_future_cancelled_exception_refcycles(self): f = self._new_future(loop=self.loop) f.cancel() @@ -720,16 +718,6 @@ def test_future_del_segfault(self): with self.assertRaises(AttributeError): del fut._log_traceback - # TODO: RUSTPYTHON - gc.get_referents not implemented - @unittest.expectedFailure - def test_future_iter_get_referents_segfault(self): - # See https://github.com/python/cpython/issues/122695 - import _asyncio - it = iter(self._new_future(loop=self.loop)) - del it - evil = gc.get_referents(_asyncio) - gc.collect() - def test_callbacks_copy(self): # See https://github.com/python/cpython/issues/125789 # In C implementation, the `_callbacks` attribute @@ -748,6 +736,10 @@ def test_callbacks_copy(self): fut.remove_done_callback(f2) self.assertIsNone(fut._callbacks) + @unittest.expectedFailure # TODO: RUSTPYTHON; - gc.get_referents not implemented + def test_future_iter_get_referents_segfault(self): + return super().test_future_iter_get_referents_segfault() + @unittest.skipUnless(hasattr(futures, '_CFuture'), 'requires the C _asyncio module') diff --git a/Lib/test/test_asyncio/test_futures2.py b/Lib/test/test_asyncio/test_futures2.py index b7cfffb76bd..c7c0ebdac1b 100644 --- a/Lib/test/test_asyncio/test_futures2.py +++ b/Lib/test/test_asyncio/test_futures2.py @@ -7,7 +7,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class FutureTests: diff --git a/Lib/test/test_asyncio/test_graph.py b/Lib/test/test_asyncio/test_graph.py new file mode 100644 index 00000000000..2f22fbccba4 --- /dev/null +++ b/Lib/test/test_asyncio/test_graph.py @@ -0,0 +1,445 @@ +import asyncio +import io +import unittest + + +# To prevent a warning "test altered the execution environment" +def tearDownModule(): + asyncio.events._set_event_loop_policy(None) + + +def capture_test_stack(*, fut=None, depth=1): + + def walk(s): + ret = [ + (f"T<{n}>" if '-' not in (n := s.future.get_name()) else 'T') + if isinstance(s.future, asyncio.Task) else 'F' + ] + + ret.append( + [ + ( + f"s {entry.frame.f_code.co_name}" + if entry.frame.f_generator is None else + ( + f"a {entry.frame.f_generator.cr_code.co_name}" + if hasattr(entry.frame.f_generator, 'cr_code') else + f"ag {entry.frame.f_generator.ag_code.co_name}" + ) + ) for entry in s.call_stack + ] + ) + + ret.append( + sorted([ + walk(ab) for ab in s.awaited_by + ], key=lambda entry: entry[0]) + ) + + return ret + + buf = io.StringIO() + asyncio.print_call_graph(fut, file=buf, depth=depth+1) + + stack = asyncio.capture_call_graph(fut, depth=depth) + return walk(stack), buf.getvalue() + + +class CallStackTestBase: + + async def test_stack_tgroup(self): + + stack_for_c5 = None + + def c5(): + nonlocal stack_for_c5 + stack_for_c5 = capture_test_stack(depth=2) + + async def c4(): + await asyncio.sleep(0) + c5() + + async def c3(): + await c4() + + async def c2(): + await c3() + + async def 
c1(task): + await task + + async def main(): + async with asyncio.TaskGroup() as tg: + task = tg.create_task(c2(), name="c2_root") + tg.create_task(c1(task), name="sub_main_1") + tg.create_task(c1(task), name="sub_main_2") + + await main() + + self.assertEqual(stack_for_c5[0], [ + # task name + 'T', + # call stack + ['s c5', 'a c4', 'a c3', 'a c2'], + # awaited by + [ + ['T', + ['a _aexit', 'a __aexit__', 'a main', 'a test_stack_tgroup'], [] + ], + ['T', + ['a c1'], + [ + ['T', + ['a _aexit', 'a __aexit__', 'a main', 'a test_stack_tgroup'], [] + ] + ] + ], + ['T', + ['a c1'], + [ + ['T', + ['a _aexit', 'a __aexit__', 'a main', 'a test_stack_tgroup'], [] + ] + ] + ] + ] + ]) + + self.assertIn( + ' async CallStackTestBase.test_stack_tgroup()', + stack_for_c5[1]) + + + async def test_stack_async_gen(self): + + stack_for_gen_nested_call = None + + async def gen_nested_call(): + nonlocal stack_for_gen_nested_call + stack_for_gen_nested_call = capture_test_stack() + + async def gen(): + for num in range(2): + yield num + if num == 1: + await gen_nested_call() + + async def main(): + async for el in gen(): + pass + + await main() + + self.assertEqual(stack_for_gen_nested_call[0], [ + 'T', + [ + 's capture_test_stack', + 'a gen_nested_call', + 'ag gen', + 'a main', + 'a test_stack_async_gen' + ], + [] + ]) + + self.assertIn( + 'async generator CallStackTestBase.test_stack_async_gen..gen()', + stack_for_gen_nested_call[1]) + + async def test_stack_gather(self): + + stack_for_deep = None + + async def deep(): + await asyncio.sleep(0) + nonlocal stack_for_deep + stack_for_deep = capture_test_stack() + + async def c1(): + await asyncio.sleep(0) + await deep() + + async def c2(): + await asyncio.sleep(0) + + async def main(): + await asyncio.gather(c1(), c2()) + + await main() + + self.assertEqual(stack_for_deep[0], [ + 'T', + ['s capture_test_stack', 'a deep', 'a c1'], + [ + ['T', ['a main', 'a test_stack_gather'], []] + ] + ]) + + async def test_stack_shield(self): + + stack_for_shield = None + + async def deep(): + await asyncio.sleep(0) + nonlocal stack_for_shield + stack_for_shield = capture_test_stack() + + async def c1(): + await asyncio.sleep(0) + await deep() + + async def main(): + await asyncio.shield(c1()) + + await main() + + self.assertEqual(stack_for_shield[0], [ + 'T', + ['s capture_test_stack', 'a deep', 'a c1'], + [ + ['T', ['a main', 'a test_stack_shield'], []] + ] + ]) + + async def test_stack_timeout(self): + + stack_for_inner = None + + async def inner(): + await asyncio.sleep(0) + nonlocal stack_for_inner + stack_for_inner = capture_test_stack() + + async def c1(): + async with asyncio.timeout(1): + await asyncio.sleep(0) + await inner() + + async def main(): + await asyncio.shield(c1()) + + await main() + + self.assertEqual(stack_for_inner[0], [ + 'T', + ['s capture_test_stack', 'a inner', 'a c1'], + [ + ['T', ['a main', 'a test_stack_timeout'], []] + ] + ]) + + async def test_stack_wait(self): + + stack_for_inner = None + + async def inner(): + await asyncio.sleep(0) + nonlocal stack_for_inner + stack_for_inner = capture_test_stack() + + async def c1(): + async with asyncio.timeout(1): + await asyncio.sleep(0) + await inner() + + async def c2(): + for i in range(3): + await asyncio.sleep(0) + + async def main(t1, t2): + while True: + _, pending = await asyncio.wait([t1, t2]) + if not pending: + break + + t1 = asyncio.create_task(c1()) + t2 = asyncio.create_task(c2()) + try: + await main(t1, t2) + finally: + await t1 + await t2 + + self.assertEqual(stack_for_inner[0], [ + 'T', + 
['s capture_test_stack', 'a inner', 'a c1'], + [ + ['T', + ['a _wait', 'a wait', 'a main', 'a test_stack_wait'], + [] + ] + ] + ]) + + async def test_stack_task(self): + + stack_for_inner = None + + async def inner(): + await asyncio.sleep(0) + nonlocal stack_for_inner + stack_for_inner = capture_test_stack() + + async def c1(): + await inner() + + async def c2(): + await asyncio.create_task(c1(), name='there there') + + async def main(): + await c2() + + await main() + + self.assertEqual(stack_for_inner[0], [ + 'T', + ['s capture_test_stack', 'a inner', 'a c1'], + [['T', ['a c2', 'a main', 'a test_stack_task'], []]] + ]) + + async def test_stack_future(self): + + stack_for_fut = None + + async def a2(fut): + await fut + + async def a1(fut): + await a2(fut) + + async def b1(fut): + await fut + + async def main(): + nonlocal stack_for_fut + + fut = asyncio.Future() + async with asyncio.TaskGroup() as g: + g.create_task(a1(fut), name="task A") + g.create_task(b1(fut), name='task B') + + for _ in range(5): + # Do a few iterations to ensure that both a1 and b1 + # await on the future + await asyncio.sleep(0) + + stack_for_fut = capture_test_stack(fut=fut) + fut.set_result(None) + + await main() + + self.assertEqual(stack_for_fut[0], + ['F', + [], + [ + ['T', + ['a a2', 'a a1'], + [['T', ['a test_stack_future'], []]] + ], + ['T', + ['a b1'], + [['T', ['a test_stack_future'], []]] + ], + ]] + ) + + self.assertTrue(stack_for_fut[1].startswith('* Future(id=')) + + +@unittest.skipIf( + not hasattr(asyncio.futures, "_c_future_add_to_awaited_by"), + "C-accelerated asyncio call graph backend missing", +) +class TestCallStackC(CallStackTestBase, unittest.IsolatedAsyncioTestCase): + def setUp(self): + futures = asyncio.futures + tasks = asyncio.tasks + + self._Future = asyncio.Future + asyncio.Future = futures.Future = futures._CFuture + + self._Task = asyncio.Task + asyncio.Task = tasks.Task = tasks._CTask + + self._future_add_to_awaited_by = asyncio.future_add_to_awaited_by + futures.future_add_to_awaited_by = futures._c_future_add_to_awaited_by + asyncio.future_add_to_awaited_by = futures.future_add_to_awaited_by + + self._future_discard_from_awaited_by = asyncio.future_discard_from_awaited_by + futures.future_discard_from_awaited_by = futures._c_future_discard_from_awaited_by + asyncio.future_discard_from_awaited_by = futures.future_discard_from_awaited_by + + self._current_task = asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = tasks._c_current_task + + def tearDown(self): + futures = asyncio.futures + tasks = asyncio.tasks + + futures.future_discard_from_awaited_by = self._future_discard_from_awaited_by + asyncio.future_discard_from_awaited_by = self._future_discard_from_awaited_by + del self._future_discard_from_awaited_by + + futures.future_add_to_awaited_by = self._future_add_to_awaited_by + asyncio.future_add_to_awaited_by = self._future_add_to_awaited_by + del self._future_add_to_awaited_by + + asyncio.Task = self._Task + tasks.Task = self._Task + del self._Task + + asyncio.Future = self._Future + futures.Future = self._Future + del self._Future + + asyncio.current_task = asyncio.tasks.current_task = self._current_task + + +@unittest.skipIf( + not hasattr(asyncio.futures, "_py_future_add_to_awaited_by"), + "Pure Python asyncio call graph backend missing", +) +class TestCallStackPy(CallStackTestBase, unittest.IsolatedAsyncioTestCase): + def setUp(self): + futures = asyncio.futures + tasks = asyncio.tasks + + self._Future = asyncio.Future + asyncio.Future = 
futures.Future = futures._PyFuture + + self._Task = asyncio.Task + asyncio.Task = tasks.Task = tasks._PyTask + + self._future_add_to_awaited_by = asyncio.future_add_to_awaited_by + futures.future_add_to_awaited_by = futures._py_future_add_to_awaited_by + asyncio.future_add_to_awaited_by = futures.future_add_to_awaited_by + + self._future_discard_from_awaited_by = asyncio.future_discard_from_awaited_by + futures.future_discard_from_awaited_by = futures._py_future_discard_from_awaited_by + asyncio.future_discard_from_awaited_by = futures.future_discard_from_awaited_by + + self._current_task = asyncio.current_task + asyncio.current_task = asyncio.tasks.current_task = tasks._py_current_task + + + def tearDown(self): + futures = asyncio.futures + tasks = asyncio.tasks + + futures.future_discard_from_awaited_by = self._future_discard_from_awaited_by + asyncio.future_discard_from_awaited_by = self._future_discard_from_awaited_by + del self._future_discard_from_awaited_by + + futures.future_add_to_awaited_by = self._future_add_to_awaited_by + asyncio.future_add_to_awaited_by = self._future_add_to_awaited_by + del self._future_add_to_awaited_by + + asyncio.Task = self._Task + tasks.Task = self._Task + del self._Task + + asyncio.Future = self._Future + futures.Future = self._Future + del self._Future + + asyncio.current_task = asyncio.tasks.current_task = self._current_task diff --git a/Lib/test/test_asyncio/test_pep492.py b/Lib/test/test_asyncio/test_pep492.py index 95bdc283df0..a0c8434c945 100644 --- a/Lib/test/test_asyncio/test_pep492.py +++ b/Lib/test/test_asyncio/test_pep492.py @@ -126,9 +126,8 @@ def foo(): yield def test_iscoroutinefunction(self): async def foo(): pass - # TODO: RUSTPYTHON; no DeprecationWarning for iscoroutinefunction - # with self.assertWarns(DeprecationWarning): - self.assertTrue(asyncio.iscoroutinefunction(foo)) + with self.assertWarns(DeprecationWarning): + self.assertTrue(asyncio.iscoroutinefunction(foo)) def test_async_def_coroutines(self): async def bar(): diff --git a/Lib/test/test_asyncio/test_proactor_events.py b/Lib/test/test_asyncio/test_proactor_events.py index 40cb5213c63..edfad5e11db 100644 --- a/Lib/test/test_asyncio/test_proactor_events.py +++ b/Lib/test/test_asyncio/test_proactor_events.py @@ -18,7 +18,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) def close_transport(transport): diff --git a/Lib/test/test_asyncio/test_protocols.py b/Lib/test/test_asyncio/test_protocols.py index 0f232631867..29d3bd22705 100644 --- a/Lib/test/test_asyncio/test_protocols.py +++ b/Lib/test/test_asyncio/test_protocols.py @@ -7,7 +7,7 @@ def tearDownModule(): # not needed for the test file but added for uniformness with all other # asyncio test files for the sake of unified cleanup - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class ProtocolsAbsTests(unittest.TestCase): @@ -19,7 +19,7 @@ def test_base_protocol(self): self.assertIsNone(p.connection_lost(f)) self.assertIsNone(p.pause_writing()) self.assertIsNone(p.resume_writing()) - self.assertFalse(hasattr(p, '__dict__')) + self.assertNotHasAttr(p, '__dict__') def test_protocol(self): f = mock.Mock() @@ -30,7 +30,7 @@ def test_protocol(self): self.assertIsNone(p.eof_received()) self.assertIsNone(p.pause_writing()) self.assertIsNone(p.resume_writing()) - self.assertFalse(hasattr(p, '__dict__')) + self.assertNotHasAttr(p, '__dict__') def test_buffered_protocol(self): f = mock.Mock() @@ -41,7 +41,7 @@ def 
test_buffered_protocol(self): self.assertIsNone(p.buffer_updated(150)) self.assertIsNone(p.pause_writing()) self.assertIsNone(p.resume_writing()) - self.assertFalse(hasattr(p, '__dict__')) + self.assertNotHasAttr(p, '__dict__') def test_datagram_protocol(self): f = mock.Mock() @@ -50,7 +50,7 @@ def test_datagram_protocol(self): self.assertIsNone(dp.connection_lost(f)) self.assertIsNone(dp.error_received(f)) self.assertIsNone(dp.datagram_received(f, f)) - self.assertFalse(hasattr(dp, '__dict__')) + self.assertNotHasAttr(dp, '__dict__') def test_subprocess_protocol(self): f = mock.Mock() @@ -60,7 +60,7 @@ def test_subprocess_protocol(self): self.assertIsNone(sp.pipe_data_received(1, f)) self.assertIsNone(sp.pipe_connection_lost(1, f)) self.assertIsNone(sp.process_exited()) - self.assertFalse(hasattr(sp, '__dict__')) + self.assertNotHasAttr(sp, '__dict__') if __name__ == '__main__': diff --git a/Lib/test/test_asyncio/test_queues.py b/Lib/test/test_asyncio/test_queues.py index 5019e9a2935..54bbe79f81f 100644 --- a/Lib/test/test_asyncio/test_queues.py +++ b/Lib/test/test_asyncio/test_queues.py @@ -6,7 +6,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class QueueBasicTests(unittest.IsolatedAsyncioTestCase): @@ -18,7 +18,7 @@ async def _test_repr_or_str(self, fn, expect_id): appear in fn(Queue()). """ q = asyncio.Queue() - self.assertTrue(fn(q).startswith('= self._max_clients: diff --git a/Lib/test/test_asyncio/test_sslproto.py b/Lib/test/test_asyncio/test_sslproto.py index ed9e0b344ec..7ab6e1511d7 100644 --- a/Lib/test/test_asyncio/test_sslproto.py +++ b/Lib/test/test_asyncio/test_sslproto.py @@ -21,7 +21,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) @unittest.skipIf(ssl is None, 'No ssl module') @@ -282,8 +282,7 @@ def buffer_updated(self, nsize): with self.assertRaisesRegex(RuntimeError, 'empty buffer'): protocols._feed_data_to_buffered_proto(proto, b'12345') - # TODO: RUSTPYTHON - gc.collect() doesn't release SSLContext properly - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - gc.collect() doesn't release SSLContext properly def test_start_tls_client_reg_proto_1(self): HELLO_MSG = b'1' * self.PAYLOAD_SIZE @@ -350,8 +349,7 @@ async def client(addr): support.gc_collect() self.assertIsNone(client_context()) - # TODO: RUSTPYTHON - gc.collect() doesn't release SSLContext properly - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - gc.collect() doesn't release SSLContext properly def test_create_connection_memory_leak(self): HELLO_MSG = b'1' * self.PAYLOAD_SIZE @@ -670,8 +668,7 @@ async def main(): self.loop.run_until_complete(main()) - # TODO: RUSTPYTHON - gc.collect() doesn't release SSLContext properly - @unittest.expectedFailure + @unittest.expectedFailure # TODO: RUSTPYTHON; - gc.collect() doesn't release SSLContext properly def test_handshake_timeout(self): # bpo-29970: Check that a connection is aborted if handshake is not # completed in timeout period, instead of remaining open indefinitely diff --git a/Lib/test/test_asyncio/test_staggered.py b/Lib/test/test_asyncio/test_staggered.py index 40455a3804e..32e4817b70d 100644 --- a/Lib/test/test_asyncio/test_staggered.py +++ b/Lib/test/test_asyncio/test_staggered.py @@ -8,7 +8,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class StaggeredTests(unittest.IsolatedAsyncioTestCase): diff --git 
a/Lib/test/test_asyncio/test_streams.py b/Lib/test/test_asyncio/test_streams.py index dcf8bc79a2f..39b6c8aaf05 100644 --- a/Lib/test/test_asyncio/test_streams.py +++ b/Lib/test/test_asyncio/test_streams.py @@ -1,15 +1,12 @@ """Tests for streams.py.""" import gc -import os import queue import pickle import socket -import sys import threading import unittest from unittest import mock -import warnings try: import ssl except ImportError: @@ -17,11 +14,11 @@ import asyncio from test.test_asyncio import utils as test_utils -from test.support import requires_subprocess, socket_helper +from test.support import socket_helper def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class StreamTests(test_utils.TestCase): @@ -50,7 +47,7 @@ def _basetest_open_connection(self, open_connection_fut): self.assertEqual(data, b'HTTP/1.0 200 OK\r\n') f = reader.read() data = self.loop.run_until_complete(f) - self.assertTrue(data.endswith(b'\r\n\r\nTest message')) + self.assertEndsWith(data, b'\r\n\r\nTest message') writer.close() self.assertEqual(messages, []) @@ -75,7 +72,7 @@ def _basetest_open_connection_no_loop_ssl(self, open_connection_fut): writer.write(b'GET / HTTP/1.0\r\n\r\n') f = reader.read() data = self.loop.run_until_complete(f) - self.assertTrue(data.endswith(b'\r\n\r\nTest message')) + self.assertEndsWith(data, b'\r\n\r\nTest message') writer.close() self.assertEqual(messages, []) @@ -822,52 +819,6 @@ async def client(addr): self.assertEqual(msg1, b"hello world 1!\n") self.assertEqual(msg2, b"hello world 2!\n") - @unittest.skipIf(sys.platform == 'win32', "Don't have pipes") - @requires_subprocess() - def test_read_all_from_pipe_reader(self): - # See asyncio issue 168. This test is derived from the example - # subprocess_attach_read_pipe.py, but we configure the - # StreamReader's limit so that twice it is less than the size - # of the data writer. Also we must explicitly attach a child - # watcher to the event loop. 
- - code = """\ -import os, sys -fd = int(sys.argv[1]) -os.write(fd, b'data') -os.close(fd) -""" - rfd, wfd = os.pipe() - args = [sys.executable, '-c', code, str(wfd)] - - pipe = open(rfd, 'rb', 0) - reader = asyncio.StreamReader(loop=self.loop, limit=1) - protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop) - transport, _ = self.loop.run_until_complete( - self.loop.connect_read_pipe(lambda: protocol, pipe)) - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - watcher = asyncio.SafeChildWatcher() - watcher.attach_loop(self.loop) - try: - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - asyncio.set_child_watcher(watcher) - create = asyncio.create_subprocess_exec( - *args, - pass_fds={wfd}, - ) - proc = self.loop.run_until_complete(create) - self.loop.run_until_complete(proc.wait()) - finally: - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - asyncio.set_child_watcher(None) - - os.close(wfd) - data = self.loop.run_until_complete(reader.read(-1)) - self.assertEqual(data, b'data') - def test_streamreader_constructor_without_loop(self): with self.assertRaisesRegex(RuntimeError, 'no current event loop'): asyncio.StreamReader() @@ -1048,7 +999,7 @@ def test_wait_closed_on_close(self): self.assertEqual(data, b'HTTP/1.0 200 OK\r\n') f = rd.read() data = self.loop.run_until_complete(f) - self.assertTrue(data.endswith(b'\r\n\r\nTest message')) + self.assertEndsWith(data, b'\r\n\r\nTest message') self.assertFalse(wr.is_closing()) wr.close() self.assertTrue(wr.is_closing()) @@ -1074,7 +1025,7 @@ async def inner(httpd): data = await rd.readline() self.assertEqual(data, b'HTTP/1.0 200 OK\r\n') data = await rd.read() - self.assertTrue(data.endswith(b'\r\n\r\nTest message')) + self.assertEndsWith(data, b'\r\n\r\nTest message') wr.close() await wr.wait_closed() @@ -1094,7 +1045,7 @@ async def inner(httpd): data = await rd.readline() self.assertEqual(data, b'HTTP/1.0 200 OK\r\n') data = await rd.read() - self.assertTrue(data.endswith(b'\r\n\r\nTest message')) + self.assertEndsWith(data, b'\r\n\r\nTest message') wr.close() with self.assertRaises(ConnectionResetError): wr.write(b'data') @@ -1127,6 +1078,7 @@ def test_eof_feed_when_closing_writer(self): self.assertEqual(messages, []) + @unittest.expectedFailure # TODO: RUSTPYTHON; AssertionError: 0 != 1 def test_unclosed_resource_warnings(self): async def inner(httpd): rd, wr = await asyncio.open_connection(*httpd.address) @@ -1135,12 +1087,12 @@ async def inner(httpd): data = await rd.readline() self.assertEqual(data, b'HTTP/1.0 200 OK\r\n') data = await rd.read() - self.assertTrue(data.endswith(b'\r\n\r\nTest message')) + self.assertEndsWith(data, b'\r\n\r\nTest message') with self.assertWarns(ResourceWarning) as cm: del wr gc.collect() self.assertEqual(len(cm.warnings), 1) - self.assertTrue(str(cm.warnings[0].message).startswith("unclosed None: def test_subprocess_protocol_events(self): # gh-108973: Test that all subprocess protocol methods are called. - # The protocol methods are not called in a determistic order. + # The protocol methods are not called in a deterministic order. # The order depends on the event loop and the operating system. 
events = [] fds = [1, 2] @@ -931,55 +922,31 @@ async def main(): # Unix class SubprocessWatcherMixin(SubprocessMixin): - Watcher = None - def setUp(self): super().setUp() - policy = asyncio.get_event_loop_policy() - self.loop = policy.new_event_loop() + self.loop = asyncio.new_event_loop() self.set_event_loop(self.loop) - watcher = self._get_watcher() - watcher.attach_loop(self.loop) - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - policy.set_child_watcher(watcher) + def test_watcher_implementation(self): + loop = self.loop + watcher = loop._watcher + if unix_events.can_use_pidfd(): + self.assertIsInstance(watcher, unix_events._PidfdChildWatcher) + else: + self.assertIsInstance(watcher, unix_events._ThreadedChildWatcher) - def tearDown(self): - super().tearDown() - policy = asyncio.get_event_loop_policy() - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - watcher = policy.get_child_watcher() - policy.set_child_watcher(None) - watcher.attach_loop(None) - watcher.close() class SubprocessThreadedWatcherTests(SubprocessWatcherMixin, test_utils.TestCase): + def setUp(self): + self._original_can_use_pidfd = unix_events.can_use_pidfd + # Force the use of the threaded child watcher + unix_events.can_use_pidfd = mock.Mock(return_value=False) + super().setUp() - def _get_watcher(self): - return unix_events.ThreadedChildWatcher() - - class SubprocessSafeWatcherTests(SubprocessWatcherMixin, - test_utils.TestCase): - - def _get_watcher(self): - with self.assertWarns(DeprecationWarning): - return unix_events.SafeChildWatcher() - - class MultiLoopChildWatcherTests(test_utils.TestCase): - - def test_warns(self): - with self.assertWarns(DeprecationWarning): - unix_events.MultiLoopChildWatcher() - - class SubprocessFastWatcherTests(SubprocessWatcherMixin, - test_utils.TestCase): - - def _get_watcher(self): - with self.assertWarns(DeprecationWarning): - return unix_events.FastChildWatcher() + def tearDown(self): + unix_events.can_use_pidfd = self._original_can_use_pidfd + return super().tearDown() @unittest.skipUnless( unix_events.can_use_pidfd(), @@ -988,70 +955,8 @@ def _get_watcher(self): class SubprocessPidfdWatcherTests(SubprocessWatcherMixin, test_utils.TestCase): - def _get_watcher(self): - return unix_events.PidfdChildWatcher() + pass - - class GenericWatcherTests(test_utils.TestCase): - - def test_create_subprocess_fails_with_inactive_watcher(self): - watcher = mock.create_autospec(asyncio.AbstractChildWatcher) - watcher.is_active.return_value = False - - async def execute(): - asyncio.set_child_watcher(watcher) - - with self.assertRaises(RuntimeError): - await subprocess.create_subprocess_exec( - os_helper.FakePath(sys.executable), '-c', 'pass') - - watcher.add_child_handler.assert_not_called() - - with asyncio.Runner(loop_factory=asyncio.new_event_loop) as runner: - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - self.assertIsNone(runner.run(execute())) - self.assertListEqual(watcher.mock_calls, [ - mock.call.__enter__(), - mock.call.is_active(), - mock.call.__exit__(RuntimeError, mock.ANY, mock.ANY), - ], watcher.mock_calls) - - - @unittest.skipUnless( - unix_events.can_use_pidfd(), - "operating system does not support pidfds", - ) - def test_create_subprocess_with_pidfd(self): - async def in_thread(): - proc = await asyncio.create_subprocess_exec( - *PROGRAM_CAT, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - ) - stdout, stderr = await proc.communicate(b"some data") - 
return proc.returncode, stdout - - async def main(): - # asyncio.Runner did not call asyncio.set_event_loop() - with warnings.catch_warnings(): - warnings.simplefilter('error', DeprecationWarning) - # get_event_loop() raises DeprecationWarning if - # set_event_loop() was never called and RuntimeError if - # it was called at least once. - with self.assertRaises((RuntimeError, DeprecationWarning)): - asyncio.get_event_loop_policy().get_event_loop() - return await asyncio.to_thread(asyncio.run, in_thread()) - with self.assertWarns(DeprecationWarning): - asyncio.set_child_watcher(asyncio.PidfdChildWatcher()) - try: - with asyncio.Runner(loop_factory=asyncio.new_event_loop) as runner: - returncode, stdout = runner.run(main()) - self.assertEqual(returncode, 0) - self.assertEqual(stdout, b'some data') - finally: - with self.assertWarns(DeprecationWarning): - asyncio.set_child_watcher(None) else: # Windows class SubprocessProactorTests(SubprocessMixin, test_utils.TestCase): diff --git a/Lib/test/test_asyncio/test_taskgroups.py b/Lib/test/test_asyncio/test_taskgroups.py index 5910d616610..d4b2554dda9 100644 --- a/Lib/test/test_asyncio/test_taskgroups.py +++ b/Lib/test/test_asyncio/test_taskgroups.py @@ -15,7 +15,7 @@ # To prevent a warning "test altered the execution environment" def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class MyExc(Exception): @@ -30,6 +30,15 @@ def get_error_types(eg): return {type(exc) for exc in eg.exceptions} +def no_other_refs(): + # due to gh-124392 coroutines now refer to their locals + coro = asyncio.current_task().get_coro() + frame = sys._getframe(1) + while coro.cr_frame != frame: + coro = coro.cr_await + return [coro] + + def set_gc_state(enabled): was_enabled = gc.isenabled() if enabled: @@ -918,7 +927,6 @@ async def outer(): await outer() - @unittest.skip("RUSTPYTHON: gc.get_referrers not implemented") async def test_exception_refcycles_direct(self): """Test that TaskGroup doesn't keep a reference to the raised ExceptionGroup""" tg = asyncio.TaskGroup() @@ -934,10 +942,9 @@ class _Done(Exception): exc = e self.assertIsNotNone(exc) - self.assertListEqual(gc.get_referrers(exc), []) + self.assertListEqual(gc.get_referrers(exc), no_other_refs()) - @unittest.skip("RUSTPYTHON: gc.get_referrers not implemented") async def test_exception_refcycles_errors(self): """Test that TaskGroup deletes self._errors, and __aexit__ args""" tg = asyncio.TaskGroup() @@ -953,10 +960,9 @@ class _Done(Exception): exc = excs.exceptions[0] self.assertIsInstance(exc, _Done) - self.assertListEqual(gc.get_referrers(exc), []) + self.assertListEqual(gc.get_referrers(exc), no_other_refs()) - @unittest.skip("RUSTPYTHON: gc.get_referrers not implemented") async def test_exception_refcycles_parent_task(self): """Test that TaskGroup deletes self._parent_task""" tg = asyncio.TaskGroup() @@ -976,10 +982,9 @@ async def coro_fn(): exc = excs.exceptions[0].exceptions[0] self.assertIsInstance(exc, _Done) - self.assertListEqual(gc.get_referrers(exc), []) + self.assertListEqual(gc.get_referrers(exc), no_other_refs()) - @unittest.skip("RUSTPYTHON: gc.get_referrers not implemented") async def test_exception_refcycles_parent_task_wr(self): """Test that TaskGroup deletes self._parent_task and create_task() deletes task""" tg = asyncio.TaskGroup() @@ -1001,9 +1006,8 @@ async def coro_fn(): self.assertIsNone(task_wr()) self.assertIsInstance(exc, _Done) - self.assertListEqual(gc.get_referrers(exc), []) + self.assertListEqual(gc.get_referrers(exc), 
no_other_refs()) - @unittest.skip("RUSTPYTHON: gc.get_referrers not implemented") async def test_exception_refcycles_propagate_cancellation_error(self): """Test that TaskGroup deletes propagate_cancellation_error""" tg = asyncio.TaskGroup() @@ -1017,9 +1021,8 @@ async def test_exception_refcycles_propagate_cancellation_error(self): exc = e.__cause__ self.assertIsInstance(exc, asyncio.CancelledError) - self.assertListEqual(gc.get_referrers(exc), []) + self.assertListEqual(gc.get_referrers(exc), no_other_refs()) - @unittest.skip("RUSTPYTHON: gc.get_referrers not implemented") async def test_exception_refcycles_base_error(self): """Test that TaskGroup deletes self._base_error""" class MyKeyboardInterrupt(KeyboardInterrupt): @@ -1035,7 +1038,19 @@ class MyKeyboardInterrupt(KeyboardInterrupt): exc = e self.assertIsNotNone(exc) - self.assertListEqual(gc.get_referrers(exc), []) + self.assertListEqual(gc.get_referrers(exc), no_other_refs()) + + async def test_name(self): + name = None + + async def asyncfn(): + nonlocal name + name = asyncio.current_task().get_name() + + async with asyncio.TaskGroup() as tg: + tg.create_task(asyncfn(), name="example name") + + self.assertEqual(name, "example name") async def test_cancels_task_if_created_during_creation(self): @@ -1091,6 +1106,30 @@ async def throw_error(): class TestTaskGroup(BaseTestTaskGroup, unittest.IsolatedAsyncioTestCase): loop_factory = asyncio.EventLoop + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup deletes propagate_cancellation_error + async def test_exception_refcycles_propagate_cancellation_error(self): + return await super().test_exception_refcycles_propagate_cancellation_error() + + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup deletes self._base_error + async def test_exception_refcycles_base_error(self): + return await super().test_exception_refcycles_base_error() + + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup deletes self._errors, and __aexit__ args + async def test_exception_refcycles_errors(self): + return await super().test_exception_refcycles_errors() + + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup deletes self._parent_task + async def test_exception_refcycles_parent_task(self): + return await super().test_exception_refcycles_parent_task() + + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup deletes self._parent_task and create_task() deletes task + async def test_exception_refcycles_parent_task_wr(self): + return await super().test_exception_refcycles_parent_task_wr() + + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup doesn't keep a reference to the raised ExceptionGroup + async def test_exception_refcycles_direct(self): + return await super().test_exception_refcycles_direct() + class TestEagerTaskTaskGroup(BaseTestTaskGroup, unittest.IsolatedAsyncioTestCase): @staticmethod def loop_factory(): @@ -1098,6 +1137,30 @@ def loop_factory(): loop.set_task_factory(asyncio.eager_task_factory) return loop + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup deletes propagate_cancellation_error + async def test_exception_refcycles_propagate_cancellation_error(self): + return await super().test_exception_refcycles_propagate_cancellation_error() + + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup deletes self._base_error + async def test_exception_refcycles_base_error(self): + return await super().test_exception_refcycles_base_error() + + @unittest.expectedFailure # TODO: RUSTPYTHON; 
Test that TaskGroup deletes self._errors, and __aexit__ args + async def test_exception_refcycles_errors(self): + return await super().test_exception_refcycles_errors() + + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup deletes self._parent_task + async def test_exception_refcycles_parent_task(self): + return await super().test_exception_refcycles_parent_task() + + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup deletes self._parent_task and create_task() deletes task + async def test_exception_refcycles_parent_task_wr(self): + return await super().test_exception_refcycles_parent_task_wr() + + @unittest.expectedFailure # TODO: RUSTPYTHON; Test that TaskGroup doesn't keep a reference to the raised ExceptionGroup + async def test_exception_refcycles_direct(self): + return await super().test_exception_refcycles_direct() + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py index 7a830230a7d..b0ca67d6716 100644 --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -20,10 +20,11 @@ from test.test_asyncio import utils as test_utils from test import support from test.support.script_helper import assert_python_ok +from test.support.warnings_helper import ignore_warnings def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) async def coroutine_function(): @@ -86,9 +87,10 @@ class BaseTaskTests: Task = None Future = None + all_tasks = None - def new_task(self, loop, coro, name='TestTask', context=None): - return self.__class__.Task(coro, loop=loop, name=name, context=context) + def new_task(self, loop, coro, name='TestTask', context=None, eager_start=None): + return self.__class__.Task(coro, loop=loop, name=name, context=context, eager_start=eager_start) def new_future(self, loop): return self.__class__.Future(loop=loop) @@ -108,7 +110,7 @@ def test_task_cancel_message_getter(self): async def coro(): pass t = self.new_task(self.loop, coro()) - self.assertTrue(hasattr(t, '_cancel_message')) + self.assertHasAttr(t, '_cancel_message') self.assertEqual(t._cancel_message, None) t.cancel('my message') @@ -544,7 +546,7 @@ async def task(): try: await asyncio.sleep(10) except asyncio.CancelledError: - asyncio.current_task().uncancel() + self.current_task().uncancel() await asyncio.sleep(10) try: @@ -596,7 +598,7 @@ def test_uncancel_structured_blocks(self): loop = asyncio.new_event_loop() async def make_request_with_timeout(*, sleep: float, timeout: float): - task = asyncio.current_task() + task = self.current_task() loop = task.get_loop() timed_out = False @@ -1938,6 +1940,7 @@ async def notmutch(): self.assertFalse(task.cancelled()) self.assertIs(task.exception(), base_exc) + @ignore_warnings(category=DeprecationWarning) def test_iscoroutinefunction(self): def fn(): pass @@ -1955,6 +1958,7 @@ async def fn2(): self.assertFalse(asyncio.iscoroutinefunction(mock.Mock())) self.assertTrue(asyncio.iscoroutinefunction(mock.AsyncMock())) + @ignore_warnings(category=DeprecationWarning) def test_coroutine_non_gen_function(self): async def func(): return 'test' @@ -1983,41 +1987,41 @@ async def coro(): self.assertIsNone(t2.result()) def test_current_task(self): - self.assertIsNone(asyncio.current_task(loop=self.loop)) + self.assertIsNone(self.current_task(loop=self.loop)) async def coro(loop): - self.assertIs(asyncio.current_task(), task) + self.assertIs(self.current_task(), task) - 
self.assertIs(asyncio.current_task(None), task) - self.assertIs(asyncio.current_task(), task) + self.assertIs(self.current_task(None), task) + self.assertIs(self.current_task(), task) task = self.new_task(self.loop, coro(self.loop)) self.loop.run_until_complete(task) - self.assertIsNone(asyncio.current_task(loop=self.loop)) + self.assertIsNone(self.current_task(loop=self.loop)) def test_current_task_with_interleaving_tasks(self): - self.assertIsNone(asyncio.current_task(loop=self.loop)) + self.assertIsNone(self.current_task(loop=self.loop)) fut1 = self.new_future(self.loop) fut2 = self.new_future(self.loop) async def coro1(loop): - self.assertTrue(asyncio.current_task() is task1) + self.assertTrue(self.current_task() is task1) await fut1 - self.assertTrue(asyncio.current_task() is task1) + self.assertTrue(self.current_task() is task1) fut2.set_result(True) async def coro2(loop): - self.assertTrue(asyncio.current_task() is task2) + self.assertTrue(self.current_task() is task2) fut1.set_result(True) await fut2 - self.assertTrue(asyncio.current_task() is task2) + self.assertTrue(self.current_task() is task2) task1 = self.new_task(self.loop, coro1(self.loop)) task2 = self.new_task(self.loop, coro2(self.loop)) self.loop.run_until_complete(asyncio.wait((task1, task2))) - self.assertIsNone(asyncio.current_task(loop=self.loop)) + self.assertIsNone(self.current_task(loop=self.loop)) # Some thorough tests for cancellation propagation through # coroutines, tasks and wait(). @@ -2112,6 +2116,46 @@ def test_shield_cancel_outer(self): self.assertTrue(outer.cancelled()) self.assertEqual(0, 0 if outer._callbacks is None else len(outer._callbacks)) + def test_shield_cancel_outer_result(self): + mock_handler = mock.Mock() + self.loop.set_exception_handler(mock_handler) + inner = self.new_future(self.loop) + outer = asyncio.shield(inner) + test_utils.run_briefly(self.loop) + outer.cancel() + test_utils.run_briefly(self.loop) + inner.set_result(1) + test_utils.run_briefly(self.loop) + mock_handler.assert_not_called() + + def test_shield_cancel_outer_exception(self): + mock_handler = mock.Mock() + self.loop.set_exception_handler(mock_handler) + inner = self.new_future(self.loop) + outer = asyncio.shield(inner) + test_utils.run_briefly(self.loop) + outer.cancel() + test_utils.run_briefly(self.loop) + inner.set_exception(Exception('foo')) + test_utils.run_briefly(self.loop) + mock_handler.assert_called_once() + + def test_shield_duplicate_log_once(self): + mock_handler = mock.Mock() + self.loop.set_exception_handler(mock_handler) + inner = self.new_future(self.loop) + outer = asyncio.shield(inner) + test_utils.run_briefly(self.loop) + outer.cancel() + test_utils.run_briefly(self.loop) + outer = asyncio.shield(inner) + test_utils.run_briefly(self.loop) + outer.cancel() + test_utils.run_briefly(self.loop) + inner.set_exception(Exception('foo')) + test_utils.run_briefly(self.loop) + mock_handler.assert_called_once() + def test_shield_shortcut(self): fut = self.new_future(self.loop) fut.set_result(42) @@ -2249,9 +2293,7 @@ def test_wait_invalid_args(self): self.assertRaises(ValueError, self.loop.run_until_complete, asyncio.wait([])) - @unittest.skip("RUSTPYTHON: requires cyclic garbage collection") def test_log_destroyed_pending_task(self): - Task = self.__class__.Task async def kill_me(loop): future = self.new_future(loop) @@ -2266,14 +2308,12 @@ async def kill_me(loop): # schedule the task coro = kill_me(self.loop) - task = asyncio.ensure_future(coro, loop=self.loop) - - 
self.assertEqual(asyncio.all_tasks(loop=self.loop), {task}) + task = self.new_task(self.loop, coro) - asyncio.set_event_loop(None) + self.assertEqual(self.all_tasks(loop=self.loop), {task}) # execute the task so it waits for future - self.loop._run_once() + self.loop.run_until_complete(asyncio.sleep(0)) self.assertEqual(len(self.loop._ready), 0) coro = None @@ -2283,14 +2323,36 @@ async def kill_me(loop): # no more reference to kill_me() task: the task is destroyed by the GC support.gc_collect() - self.assertEqual(asyncio.all_tasks(loop=self.loop), set()) - mock_handler.assert_called_with(self.loop, { 'message': 'Task was destroyed but it is pending!', 'task': mock.ANY, 'source_traceback': source_traceback, }) mock_handler.reset_mock() + # task got resurrected by the exception handler + support.gc_collect() + + self.assertEqual(self.all_tasks(loop=self.loop), set()) + + def test_task_not_crash_without_finalization(self): + Task = self.__class__.Task + + class Subclass(Task): + def __del__(self): + pass + + async def corofn(): + await asyncio.sleep(0.01) + + coro = corofn() + task = Subclass(coro, loop = self.loop) + task._log_destroy_pending = False + + del task + + support.gc_collect() + + coro.close() @mock.patch('asyncio.base_events.logger') def test_tb_logger_not_called_after_cancel(self, m_log): @@ -2432,7 +2494,7 @@ async def coro(): message = m_log.error.call_args[0][0] self.assertIn('Task was destroyed but it is pending', message) - self.assertEqual(asyncio.all_tasks(self.loop), set()) + self.assertEqual(self.all_tasks(self.loop), set()) def test_create_task_with_noncoroutine(self): with self.assertRaisesRegex(TypeError, @@ -2664,6 +2726,35 @@ async def main(): self.assertEqual([None, 1, 2], ret) + def test_eager_start_true(self): + name = None + + async def asyncfn(): + nonlocal name + name = self.current_task().get_name() + + async def main(): + t = self.new_task(coro=asyncfn(), loop=asyncio.get_running_loop(), eager_start=True, name="example") + self.assertTrue(t.done()) + self.assertEqual(name, "example") + await t + + def test_eager_start_false(self): + name = None + + async def asyncfn(): + nonlocal name + name = self.current_task().get_name() + + async def main(): + t = self.new_task(coro=asyncfn(), loop=asyncio.get_running_loop(), eager_start=False, name="example") + self.assertFalse(t.done()) + self.assertIsNone(name) + await t + self.assertEqual(name, "example") + + asyncio.run(main(), loop_factory=asyncio.EventLoop) + def test_get_coro(self): loop = asyncio.new_event_loop() coro = coroutine_function() @@ -2697,12 +2788,12 @@ def __str__(self): coro = coroutine_function() with contextlib.closing(asyncio.EventLoop()) as loop: task = asyncio.Task.__new__(asyncio.Task) - for _ in range(5): with self.assertRaisesRegex(RuntimeError, 'break'): task.__init__(coro, loop=loop, context=obj, name=Break()) coro.close() + task._log_destroy_pending = False del task self.assertEqual(sys.getrefcount(obj), initial_refcount) @@ -2827,6 +2918,8 @@ class CTask_CFuture_Tests(BaseTaskTests, SetMethodsTest, Task = getattr(tasks, '_CTask', None) Future = getattr(futures, '_CFuture', None) + all_tasks = getattr(tasks, '_c_all_tasks', None) + current_task = staticmethod(getattr(tasks, '_c_current_task', None)) @support.refcount_test def test_refleaks_in_task___init__(self): @@ -2849,6 +2942,10 @@ async def coro(): with self.assertRaises(AttributeError): del task._log_destroy_pending + @unittest.expectedFailure # TODO: RUSTPYTHON; Actual: not called. 
+ def test_log_destroyed_pending_task(self): + return super().test_log_destroyed_pending_task() + @unittest.skipUnless(hasattr(futures, '_CFuture') and hasattr(tasks, '_CTask'), @@ -2858,6 +2955,12 @@ class CTask_CFuture_SubclassTests(BaseTaskTests, test_utils.TestCase): Task = getattr(tasks, '_CTask', None) Future = getattr(futures, '_CFuture', None) + all_tasks = getattr(tasks, '_c_all_tasks', None) + current_task = staticmethod(getattr(tasks, '_c_current_task', None)) + + @unittest.expectedFailure # TODO: RUSTPYTHON; Actual: not called. + def test_log_destroyed_pending_task(self): + return super().test_log_destroyed_pending_task() @unittest.skipUnless(hasattr(tasks, '_CTask'), @@ -2867,6 +2970,12 @@ class CTaskSubclass_PyFuture_Tests(BaseTaskTests, test_utils.TestCase): Task = getattr(tasks, '_CTask', None) Future = futures._PyFuture + all_tasks = getattr(tasks, '_c_all_tasks', None) + current_task = staticmethod(getattr(tasks, '_c_current_task', None)) + + @unittest.expectedFailure # TODO: RUSTPYTHON; Actual: not called. + def test_log_destroyed_pending_task(self): + return super().test_log_destroyed_pending_task() @unittest.skipUnless(hasattr(futures, '_CFuture'), @@ -2876,6 +2985,12 @@ class PyTask_CFutureSubclass_Tests(BaseTaskTests, test_utils.TestCase): Future = getattr(futures, '_CFuture', None) Task = tasks._PyTask + all_tasks = staticmethod(tasks._py_all_tasks) + current_task = staticmethod(tasks._py_current_task) + + @unittest.expectedFailure # TODO: RUSTPYTHON; Actual: not called. + def test_log_destroyed_pending_task(self): + return super().test_log_destroyed_pending_task() @unittest.skipUnless(hasattr(tasks, '_CTask'), @@ -2884,6 +2999,12 @@ class CTask_PyFuture_Tests(BaseTaskTests, test_utils.TestCase): Task = getattr(tasks, '_CTask', None) Future = futures._PyFuture + all_tasks = getattr(tasks, '_c_all_tasks', None) + current_task = staticmethod(getattr(tasks, '_c_current_task', None)) + + @unittest.expectedFailure # TODO: RUSTPYTHON; Actual: not called. + def test_log_destroyed_pending_task(self): + return super().test_log_destroyed_pending_task() @unittest.skipUnless(hasattr(futures, '_CFuture'), @@ -2892,6 +3013,12 @@ class PyTask_CFuture_Tests(BaseTaskTests, test_utils.TestCase): Task = tasks._PyTask Future = getattr(futures, '_CFuture', None) + all_tasks = staticmethod(tasks._py_all_tasks) + current_task = staticmethod(tasks._py_current_task) + + @unittest.expectedFailure # TODO: RUSTPYTHON; Actual: not called. + def test_log_destroyed_pending_task(self): + return super().test_log_destroyed_pending_task() class PyTask_PyFuture_Tests(BaseTaskTests, SetMethodsTest, @@ -2899,13 +3026,24 @@ class PyTask_PyFuture_Tests(BaseTaskTests, SetMethodsTest, Task = tasks._PyTask Future = futures._PyFuture + all_tasks = staticmethod(tasks._py_all_tasks) + current_task = staticmethod(tasks._py_current_task) + + @unittest.expectedFailure # TODO: RUSTPYTHON; Actual: not called. + def test_log_destroyed_pending_task(self): + return super().test_log_destroyed_pending_task() @add_subclass_tests class PyTask_PyFuture_SubclassTests(BaseTaskTests, test_utils.TestCase): Task = tasks._PyTask Future = futures._PyFuture + all_tasks = staticmethod(tasks._py_all_tasks) + current_task = staticmethod(tasks._py_current_task) + @unittest.expectedFailure # TODO: RUSTPYTHON; Actual: not called. 
+ def test_log_destroyed_pending_task(self): + return super().test_log_destroyed_pending_task() @unittest.skipUnless(hasattr(tasks, '_CTask'), 'requires the C _asyncio module') @@ -2938,6 +3076,7 @@ class BaseTaskIntrospectionTests: _unregister_task = None _enter_task = None _leave_task = None + all_tasks = None def test__register_task_1(self): class TaskLike: @@ -2951,9 +3090,9 @@ def done(self): task = TaskLike() loop = mock.Mock() - self.assertEqual(asyncio.all_tasks(loop), set()) + self.assertEqual(self.all_tasks(loop), set()) self._register_task(task) - self.assertEqual(asyncio.all_tasks(loop), {task}) + self.assertEqual(self.all_tasks(loop), {task}) self._unregister_task(task) def test__register_task_2(self): @@ -2967,9 +3106,9 @@ def done(self): task = TaskLike() loop = mock.Mock() - self.assertEqual(asyncio.all_tasks(loop), set()) + self.assertEqual(self.all_tasks(loop), set()) self._register_task(task) - self.assertEqual(asyncio.all_tasks(loop), {task}) + self.assertEqual(self.all_tasks(loop), {task}) self._unregister_task(task) def test__register_task_3(self): @@ -2983,52 +3122,68 @@ def done(self): task = TaskLike() loop = mock.Mock() - self.assertEqual(asyncio.all_tasks(loop), set()) + self.assertEqual(self.all_tasks(loop), set()) self._register_task(task) - self.assertEqual(asyncio.all_tasks(loop), set()) + self.assertEqual(self.all_tasks(loop), set()) self._unregister_task(task) def test__enter_task(self): task = mock.Mock() loop = mock.Mock() - self.assertIsNone(asyncio.current_task(loop)) + # _enter_task is called by Task.__step while the loop + # is running, so set the loop as the running loop + # for a more realistic test. + asyncio._set_running_loop(loop) + self.assertIsNone(self.current_task(loop)) self._enter_task(loop, task) - self.assertIs(asyncio.current_task(loop), task) + self.assertIs(self.current_task(loop), task) self._leave_task(loop, task) + asyncio._set_running_loop(None) def test__enter_task_failure(self): task1 = mock.Mock() task2 = mock.Mock() loop = mock.Mock() + asyncio._set_running_loop(loop) self._enter_task(loop, task1) with self.assertRaises(RuntimeError): self._enter_task(loop, task2) - self.assertIs(asyncio.current_task(loop), task1) + self.assertIs(self.current_task(loop), task1) self._leave_task(loop, task1) + asyncio._set_running_loop(None) def test__leave_task(self): task = mock.Mock() loop = mock.Mock() + asyncio._set_running_loop(loop) self._enter_task(loop, task) self._leave_task(loop, task) - self.assertIsNone(asyncio.current_task(loop)) + self.assertIsNone(self.current_task(loop)) + asyncio._set_running_loop(None) def test__leave_task_failure1(self): task1 = mock.Mock() task2 = mock.Mock() loop = mock.Mock() + # _leave_task is called by Task.__step while the loop + # is running, so set the loop as the running loop + # for a more realistic test. 
+ asyncio._set_running_loop(loop) self._enter_task(loop, task1) with self.assertRaises(RuntimeError): self._leave_task(loop, task2) - self.assertIs(asyncio.current_task(loop), task1) + self.assertIs(self.current_task(loop), task1) self._leave_task(loop, task1) + asyncio._set_running_loop(None) def test__leave_task_failure2(self): task = mock.Mock() loop = mock.Mock() + asyncio._set_running_loop(loop) with self.assertRaises(RuntimeError): self._leave_task(loop, task) - self.assertIsNone(asyncio.current_task(loop)) + self.assertIsNone(self.current_task(loop)) + asyncio._set_running_loop(None) def test__unregister_task(self): task = mock.Mock() @@ -3036,13 +3191,13 @@ def test__unregister_task(self): task.get_loop = lambda: loop self._register_task(task) self._unregister_task(task) - self.assertEqual(asyncio.all_tasks(loop), set()) + self.assertEqual(self.all_tasks(loop), set()) def test__unregister_task_not_registered(self): task = mock.Mock() loop = mock.Mock() self._unregister_task(task) - self.assertEqual(asyncio.all_tasks(loop), set()) + self.assertEqual(self.all_tasks(loop), set()) class PyIntrospectionTests(test_utils.TestCase, BaseTaskIntrospectionTests): @@ -3050,6 +3205,8 @@ class PyIntrospectionTests(test_utils.TestCase, BaseTaskIntrospectionTests): _unregister_task = staticmethod(tasks._py_unregister_task) _enter_task = staticmethod(tasks._py_enter_task) _leave_task = staticmethod(tasks._py_leave_task) + all_tasks = staticmethod(tasks._py_all_tasks) + current_task = staticmethod(tasks._py_current_task) @unittest.skipUnless(hasattr(tasks, '_c_register_task'), @@ -3060,6 +3217,8 @@ class CIntrospectionTests(test_utils.TestCase, BaseTaskIntrospectionTests): _unregister_task = staticmethod(tasks._c_unregister_task) _enter_task = staticmethod(tasks._c_enter_task) _leave_task = staticmethod(tasks._c_leave_task) + all_tasks = staticmethod(tasks._c_all_tasks) + current_task = staticmethod(tasks._c_current_task) else: _register_task = _unregister_task = _enter_task = _leave_task = None @@ -3117,7 +3276,7 @@ def new_task(self, coro): class GenericTaskTests(test_utils.TestCase): def test_future_subclass(self): - self.assertTrue(issubclass(asyncio.Task, asyncio.Future)) + self.assertIsSubclass(asyncio.Task, asyncio.Future) @support.cpython_only def test_asyncio_module_compiled(self): @@ -3126,14 +3285,14 @@ def test_asyncio_module_compiled(self): # fail on systems where C modules were successfully compiled # (hence the test for _functools etc), but _asyncio somehow didn't. 
try: - import _functools - import _json - import _pickle + import _functools # noqa: F401 + import _json # noqa: F401 + import _pickle # noqa: F401 except ImportError: self.skipTest('C modules are not available') else: try: - import _asyncio + import _asyncio # noqa: F401 except ImportError: self.fail('_asyncio module is missing') diff --git a/Lib/test/test_asyncio/test_threads.py b/Lib/test/test_asyncio/test_threads.py index 774380270a7..8ad5f9b2c9e 100644 --- a/Lib/test/test_asyncio/test_threads.py +++ b/Lib/test/test_asyncio/test_threads.py @@ -8,7 +8,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class ToThreadTests(unittest.IsolatedAsyncioTestCase): diff --git a/Lib/test/test_asyncio/test_timeouts.py b/Lib/test/test_asyncio/test_timeouts.py index 1f7f9ee696a..f60722c48b7 100644 --- a/Lib/test/test_asyncio/test_timeouts.py +++ b/Lib/test/test_asyncio/test_timeouts.py @@ -9,7 +9,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class TimeoutTests(unittest.IsolatedAsyncioTestCase): @@ -220,7 +220,7 @@ async def test_nested_timeouts_loop_busy(self): # Pretend the loop is busy for a while. time.sleep(0.1) await asyncio.sleep(1) - # TimeoutError was cought by (2) + # TimeoutError was caught by (2) await asyncio.sleep(10) # This sleep should be interrupted by (1) t1 = loop.time() self.assertTrue(t0 <= t1 <= t0 + 1) diff --git a/Lib/test/test_asyncio/test_tools.py b/Lib/test/test_asyncio/test_tools.py new file mode 100644 index 00000000000..34e94830204 --- /dev/null +++ b/Lib/test/test_asyncio/test_tools.py @@ -0,0 +1,1706 @@ +import unittest + +from asyncio import tools + +from collections import namedtuple + +FrameInfo = namedtuple('FrameInfo', ['funcname', 'filename', 'lineno']) +CoroInfo = namedtuple('CoroInfo', ['call_stack', 'task_name']) +TaskInfo = namedtuple('TaskInfo', ['task_id', 'task_name', 'coroutine_stack', 'awaited_by']) +AwaitedInfo = namedtuple('AwaitedInfo', ['thread_id', 'awaited_by']) + + +# mock output of get_all_awaited_by function. +TEST_INPUTS_TREE = [ + [ + # test case containing a task called timer being awaited in two + # different subtasks part of a TaskGroup (root1 and root2) which call + # awaiter functions. 
+ ( + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=3, + task_name="timer", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("awaiter3", "/path/to/app.py", 130), + FrameInfo("awaiter2", "/path/to/app.py", 120), + FrameInfo("awaiter", "/path/to/app.py", 110) + ], + task_name=4 + ), + CoroInfo( + call_stack=[ + FrameInfo("awaiterB3", "/path/to/app.py", 190), + FrameInfo("awaiterB2", "/path/to/app.py", 180), + FrameInfo("awaiterB", "/path/to/app.py", 170) + ], + task_name=5 + ), + CoroInfo( + call_stack=[ + FrameInfo("awaiterB3", "/path/to/app.py", 190), + FrameInfo("awaiterB2", "/path/to/app.py", 180), + FrameInfo("awaiterB", "/path/to/app.py", 170) + ], + task_name=6 + ), + CoroInfo( + call_stack=[ + FrameInfo("awaiter3", "/path/to/app.py", 130), + FrameInfo("awaiter2", "/path/to/app.py", 120), + FrameInfo("awaiter", "/path/to/app.py", 110) + ], + task_name=7 + ) + ] + ), + TaskInfo( + task_id=8, + task_name="root1", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("main", "", 0) + ], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=9, + task_name="root2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("main", "", 0) + ], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=4, + task_name="child1_1", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("blocho_caller", "", 0), + FrameInfo("bloch", "", 0) + ], + task_name=8 + ) + ] + ), + TaskInfo( + task_id=6, + task_name="child2_1", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("blocho_caller", "", 0), + FrameInfo("bloch", "", 0) + ], + task_name=8 + ) + ] + ), + TaskInfo( + task_id=7, + task_name="child1_2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("blocho_caller", "", 0), + FrameInfo("bloch", "", 0) + ], + task_name=9 + ) + ] + ), + TaskInfo( + task_id=5, + task_name="child2_2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("blocho_caller", "", 0), + FrameInfo("bloch", "", 0) + ], + task_name=9 + ) + ] + ) + ] + ), + AwaitedInfo(thread_id=0, awaited_by=[]) + ), + ( + [ + [ + "└── (T) Task-1", + " └── main", + " └── __aexit__", + " └── _aexit", + " ├── (T) root1", + " │ └── bloch", + " │ └── blocho_caller", + " │ └── __aexit__", + " │ └── _aexit", + " │ ├── (T) child1_1", + " │ │ └── awaiter /path/to/app.py:110", + " │ │ └── awaiter2 /path/to/app.py:120", + " │ │ └── awaiter3 /path/to/app.py:130", + " │ │ └── (T) timer", + " │ └── (T) child2_1", + " │ └── awaiterB /path/to/app.py:170", + " │ └── awaiterB2 /path/to/app.py:180", + " │ └── awaiterB3 /path/to/app.py:190", + " │ └── (T) timer", + " └── (T) root2", + " └── bloch", + " └── blocho_caller", + " └── __aexit__", + " └── _aexit", + " ├── (T) child1_2", + " │ └── awaiter /path/to/app.py:110", + " │ └── awaiter2 /path/to/app.py:120", + " │ └── awaiter3 /path/to/app.py:130", + " │ └── (T) timer", + " └── (T) child2_2", + " └── awaiterB /path/to/app.py:170", + " └── awaiterB2 /path/to/app.py:180", + " └── awaiterB3 
/path/to/app.py:190", + " └── (T) timer", + ] + ] + ), + ], + [ + # test case containing two roots + ( + AwaitedInfo( + thread_id=9, + awaited_by=[ + TaskInfo( + task_id=5, + task_name="Task-5", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=6, + task_name="Task-6", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main2", "", 0)], + task_name=5 + ) + ] + ), + TaskInfo( + task_id=7, + task_name="Task-7", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main2", "", 0)], + task_name=5 + ) + ] + ), + TaskInfo( + task_id=8, + task_name="Task-8", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main2", "", 0)], + task_name=5 + ) + ] + ) + ] + ), + AwaitedInfo( + thread_id=10, + awaited_by=[ + TaskInfo( + task_id=1, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=2, + task_name="Task-2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=1 + ) + ] + ), + TaskInfo( + task_id=3, + task_name="Task-3", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=1 + ) + ] + ), + TaskInfo( + task_id=4, + task_name="Task-4", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=1 + ) + ] + ) + ] + ), + AwaitedInfo(thread_id=11, awaited_by=[]), + AwaitedInfo(thread_id=0, awaited_by=[]) + ), + ( + [ + [ + "└── (T) Task-5", + " └── main2", + " ├── (T) Task-6", + " ├── (T) Task-7", + " └── (T) Task-8", + ], + [ + "└── (T) Task-1", + " └── main", + " ├── (T) Task-2", + " ├── (T) Task-3", + " └── (T) Task-4", + ], + ] + ), + ], + [ + # test case containing two roots, one of them without subtasks + ( + [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-5", + coroutine_stack=[], + awaited_by=[] + ) + ] + ), + AwaitedInfo( + thread_id=3, + awaited_by=[ + TaskInfo( + task_id=4, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=5, + task_name="Task-2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=4 + ) + ] + ), + TaskInfo( + task_id=6, + task_name="Task-3", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=4 + ) + ] + ), + TaskInfo( + task_id=7, + task_name="Task-4", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=4 + ) + ] + ) + ] + ), + AwaitedInfo(thread_id=8, awaited_by=[]), + AwaitedInfo(thread_id=0, awaited_by=[]) + ] + ), + ( + [ + ["└── (T) Task-5"], + [ + "└── (T) Task-1", + " └── main", + " ├── (T) Task-2", + " ├── (T) Task-3", + " └── (T) Task-4", + ], + ] + ), + ], +] + +TEST_INPUTS_CYCLES_TREE = [ + [ + # this test case contains a cycle: two tasks awaiting each other. 
+ ( + [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=3, + task_name="a", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("awaiter2", "", 0)], + task_name=4 + ), + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=4, + task_name="b", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("awaiter", "", 0)], + task_name=3 + ) + ] + ) + ] + ), + AwaitedInfo(thread_id=0, awaited_by=[]) + ] + ), + ([[4, 3, 4]]), + ], + [ + # this test case contains two cycles + ( + [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=3, + task_name="A", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("nested", "", 0), + FrameInfo("nested", "", 0), + FrameInfo("task_b", "", 0) + ], + task_name=4 + ) + ] + ), + TaskInfo( + task_id=4, + task_name="B", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("nested", "", 0), + FrameInfo("nested", "", 0), + FrameInfo("task_c", "", 0) + ], + task_name=5 + ), + CoroInfo( + call_stack=[ + FrameInfo("nested", "", 0), + FrameInfo("nested", "", 0), + FrameInfo("task_a", "", 0) + ], + task_name=3 + ) + ] + ), + TaskInfo( + task_id=5, + task_name="C", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("nested", "", 0), + FrameInfo("nested", "", 0) + ], + task_name=6 + ) + ] + ), + TaskInfo( + task_id=6, + task_name="Task-2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("nested", "", 0), + FrameInfo("nested", "", 0), + FrameInfo("task_b", "", 0) + ], + task_name=4 + ) + ] + ) + ] + ), + AwaitedInfo(thread_id=0, awaited_by=[]) + ] + ), + ([[4, 3, 4], [4, 6, 5, 4]]), + ], +] + +TEST_INPUTS_TABLE = [ + [ + # test case containing a task called timer being awaited in two + # different subtasks part of a TaskGroup (root1 and root2) which call + # awaiter functions. 
+ ( + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=3, + task_name="timer", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("awaiter3", "", 0), + FrameInfo("awaiter2", "", 0), + FrameInfo("awaiter", "", 0) + ], + task_name=4 + ), + CoroInfo( + call_stack=[ + FrameInfo("awaiter1_3", "", 0), + FrameInfo("awaiter1_2", "", 0), + FrameInfo("awaiter1", "", 0) + ], + task_name=5 + ), + CoroInfo( + call_stack=[ + FrameInfo("awaiter1_3", "", 0), + FrameInfo("awaiter1_2", "", 0), + FrameInfo("awaiter1", "", 0) + ], + task_name=6 + ), + CoroInfo( + call_stack=[ + FrameInfo("awaiter3", "", 0), + FrameInfo("awaiter2", "", 0), + FrameInfo("awaiter", "", 0) + ], + task_name=7 + ) + ] + ), + TaskInfo( + task_id=8, + task_name="root1", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("main", "", 0) + ], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=9, + task_name="root2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("main", "", 0) + ], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=4, + task_name="child1_1", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("blocho_caller", "", 0), + FrameInfo("bloch", "", 0) + ], + task_name=8 + ) + ] + ), + TaskInfo( + task_id=6, + task_name="child2_1", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("blocho_caller", "", 0), + FrameInfo("bloch", "", 0) + ], + task_name=8 + ) + ] + ), + TaskInfo( + task_id=7, + task_name="child1_2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("blocho_caller", "", 0), + FrameInfo("bloch", "", 0) + ], + task_name=9 + ) + ] + ), + TaskInfo( + task_id=5, + task_name="child2_2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("_aexit", "", 0), + FrameInfo("__aexit__", "", 0), + FrameInfo("blocho_caller", "", 0), + FrameInfo("bloch", "", 0) + ], + task_name=9 + ) + ] + ) + ] + ), + AwaitedInfo(thread_id=0, awaited_by=[]) + ), + ( + [ + [1, "0x2", "Task-1", "", "", "", "0x0"], + [ + 1, + "0x3", + "timer", + "", + "awaiter3 -> awaiter2 -> awaiter", + "child1_1", + "0x4", + ], + [ + 1, + "0x3", + "timer", + "", + "awaiter1_3 -> awaiter1_2 -> awaiter1", + "child2_2", + "0x5", + ], + [ + 1, + "0x3", + "timer", + "", + "awaiter1_3 -> awaiter1_2 -> awaiter1", + "child2_1", + "0x6", + ], + [ + 1, + "0x3", + "timer", + "", + "awaiter3 -> awaiter2 -> awaiter", + "child1_2", + "0x7", + ], + [ + 1, + "0x8", + "root1", + "", + "_aexit -> __aexit__ -> main", + "Task-1", + "0x2", + ], + [ + 1, + "0x9", + "root2", + "", + "_aexit -> __aexit__ -> main", + "Task-1", + "0x2", + ], + [ + 1, + "0x4", + "child1_1", + "", + "_aexit -> __aexit__ -> blocho_caller -> bloch", + "root1", + "0x8", + ], + [ + 1, + "0x6", + "child2_1", + "", + "_aexit -> __aexit__ -> blocho_caller -> bloch", + "root1", + "0x8", + ], + [ + 1, + "0x7", + "child1_2", + "", + "_aexit -> __aexit__ -> blocho_caller -> bloch", + "root2", + "0x9", + ], + [ + 1, + "0x5", + "child2_2", + "", + "_aexit -> __aexit__ -> blocho_caller -> bloch", + "root2", + "0x9", + ], + ] + ), + ], + [ 
+ # test case containing two roots + ( + AwaitedInfo( + thread_id=9, + awaited_by=[ + TaskInfo( + task_id=5, + task_name="Task-5", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=6, + task_name="Task-6", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main2", "", 0)], + task_name=5 + ) + ] + ), + TaskInfo( + task_id=7, + task_name="Task-7", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main2", "", 0)], + task_name=5 + ) + ] + ), + TaskInfo( + task_id=8, + task_name="Task-8", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main2", "", 0)], + task_name=5 + ) + ] + ) + ] + ), + AwaitedInfo( + thread_id=10, + awaited_by=[ + TaskInfo( + task_id=1, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=2, + task_name="Task-2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=1 + ) + ] + ), + TaskInfo( + task_id=3, + task_name="Task-3", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=1 + ) + ] + ), + TaskInfo( + task_id=4, + task_name="Task-4", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=1 + ) + ] + ) + ] + ), + AwaitedInfo(thread_id=11, awaited_by=[]), + AwaitedInfo(thread_id=0, awaited_by=[]) + ), + ( + [ + [9, "0x5", "Task-5", "", "", "", "0x0"], + [9, "0x6", "Task-6", "", "main2", "Task-5", "0x5"], + [9, "0x7", "Task-7", "", "main2", "Task-5", "0x5"], + [9, "0x8", "Task-8", "", "main2", "Task-5", "0x5"], + [10, "0x1", "Task-1", "", "", "", "0x0"], + [10, "0x2", "Task-2", "", "main", "Task-1", "0x1"], + [10, "0x3", "Task-3", "", "main", "Task-1", "0x1"], + [10, "0x4", "Task-4", "", "main", "Task-1", "0x1"], + ] + ), + ], + [ + # test case containing two roots, one of them without subtasks + ( + [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-5", + coroutine_stack=[], + awaited_by=[] + ) + ] + ), + AwaitedInfo( + thread_id=3, + awaited_by=[ + TaskInfo( + task_id=4, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=5, + task_name="Task-2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=4 + ) + ] + ), + TaskInfo( + task_id=6, + task_name="Task-3", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=4 + ) + ] + ), + TaskInfo( + task_id=7, + task_name="Task-4", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=4 + ) + ] + ) + ] + ), + AwaitedInfo(thread_id=8, awaited_by=[]), + AwaitedInfo(thread_id=0, awaited_by=[]) + ] + ), + ( + [ + [1, "0x2", "Task-5", "", "", "", "0x0"], + [3, "0x4", "Task-1", "", "", "", "0x0"], + [3, "0x5", "Task-2", "", "main", "Task-1", "0x4"], + [3, "0x6", "Task-3", "", "main", "Task-1", "0x4"], + [3, "0x7", "Task-4", "", "main", "Task-1", "0x4"], + ] + ), + ], + # CASES WITH CYCLES + [ + # this test case contains a cycle: two tasks awaiting each other. 
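For orientation, the fixtures above pair a hand-built AwaitedInfo snapshot with the output expected from the builders in asyncio.tools. A minimal sketch (assuming asyncio.tools and the _remote_debugging struct sequences are importable under the names these tests use) might look like the following; note that build_async_tree raises tools.CycleFoundException for cyclic inputs such as the case that follows.

from asyncio import tools
from _remote_debugging import AwaitedInfo, TaskInfo, CoroInfo, FrameInfo

# One thread, one root task, and one worker task awaited by it.
snapshot = [
    AwaitedInfo(
        thread_id=1,
        awaited_by=[
            TaskInfo(task_id=2, task_name="Task-1",
                     coroutine_stack=[], awaited_by=[]),
            TaskInfo(task_id=3, task_name="worker", coroutine_stack=[],
                     awaited_by=[CoroInfo(call_stack=[FrameInfo("main", "", 0)],
                                          task_name=2)]),
        ],
    )
]

print(tools.build_async_tree(snapshot))   # rendered tree, one list of lines per root
print(tools.build_task_table(snapshot))   # rows: [tid, id, name, stack, chain, awaiter, awaiter id]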
+ ( + [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=3, + task_name="a", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("awaiter2", "", 0)], + task_name=4 + ), + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=4, + task_name="b", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("awaiter", "", 0)], + task_name=3 + ) + ] + ) + ] + ), + AwaitedInfo(thread_id=0, awaited_by=[]) + ] + ), + ( + [ + [1, "0x2", "Task-1", "", "", "", "0x0"], + [1, "0x3", "a", "", "awaiter2", "b", "0x4"], + [1, "0x3", "a", "", "main", "Task-1", "0x2"], + [1, "0x4", "b", "", "awaiter", "a", "0x3"], + ] + ), + ], + [ + # this test case contains two cycles + ( + [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=3, + task_name="A", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("nested", "", 0), + FrameInfo("nested", "", 0), + FrameInfo("task_b", "", 0) + ], + task_name=4 + ) + ] + ), + TaskInfo( + task_id=4, + task_name="B", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("nested", "", 0), + FrameInfo("nested", "", 0), + FrameInfo("task_c", "", 0) + ], + task_name=5 + ), + CoroInfo( + call_stack=[ + FrameInfo("nested", "", 0), + FrameInfo("nested", "", 0), + FrameInfo("task_a", "", 0) + ], + task_name=3 + ) + ] + ), + TaskInfo( + task_id=5, + task_name="C", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("nested", "", 0), + FrameInfo("nested", "", 0) + ], + task_name=6 + ) + ] + ), + TaskInfo( + task_id=6, + task_name="Task-2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("nested", "", 0), + FrameInfo("nested", "", 0), + FrameInfo("task_b", "", 0) + ], + task_name=4 + ) + ] + ) + ] + ), + AwaitedInfo(thread_id=0, awaited_by=[]) + ] + ), + ( + [ + [1, "0x2", "Task-1", "", "", "", "0x0"], + [ + 1, + "0x3", + "A", + "", + "nested -> nested -> task_b", + "B", + "0x4", + ], + [ + 1, + "0x4", + "B", + "", + "nested -> nested -> task_c", + "C", + "0x5", + ], + [ + 1, + "0x4", + "B", + "", + "nested -> nested -> task_a", + "A", + "0x3", + ], + [ + 1, + "0x5", + "C", + "", + "nested -> nested", + "Task-2", + "0x6", + ], + [ + 1, + "0x6", + "Task-2", + "", + "nested -> nested -> task_b", + "B", + "0x4", + ], + ] + ), + ], +] + + +class TestAsyncioToolsTree(unittest.TestCase): + def test_asyncio_utils(self): + for input_, tree in TEST_INPUTS_TREE: + with self.subTest(input_): + result = tools.build_async_tree(input_) + self.assertEqual(result, tree) + + def test_asyncio_utils_cycles(self): + for input_, cycles in TEST_INPUTS_CYCLES_TREE: + with self.subTest(input_): + try: + tools.build_async_tree(input_) + except tools.CycleFoundException as e: + self.assertEqual(e.cycles, cycles) + + +class TestAsyncioToolsTable(unittest.TestCase): + def test_asyncio_utils(self): + for input_, table in TEST_INPUTS_TABLE: + with self.subTest(input_): + result = tools.build_task_table(input_) + self.assertEqual(result, table) + + +class TestAsyncioToolsBasic(unittest.TestCase): + def test_empty_input_tree(self): + """Test build_async_tree with empty input.""" + result = [] + expected_output = [] + self.assertEqual(tools.build_async_tree(result), expected_output) + + def test_empty_input_table(self): + """Test 
build_task_table with empty input.""" + result = [] + expected_output = [] + self.assertEqual(tools.build_task_table(result), expected_output) + + def test_only_independent_tasks_tree(self): + input_ = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=10, + task_name="taskA", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=11, + task_name="taskB", + coroutine_stack=[], + awaited_by=[] + ) + ] + ) + ] + expected = [["└── (T) taskA"], ["└── (T) taskB"]] + result = tools.build_async_tree(input_) + self.assertEqual(sorted(result), sorted(expected)) + + def test_only_independent_tasks_table(self): + input_ = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=10, + task_name="taskA", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=11, + task_name="taskB", + coroutine_stack=[], + awaited_by=[] + ) + ] + ) + ] + self.assertEqual( + tools.build_task_table(input_), + [[1, '0xa', 'taskA', '', '', '', '0x0'], [1, '0xb', 'taskB', '', '', '', '0x0']] + ) + + def test_single_task_tree(self): + """Test build_async_tree with a single task and no awaits.""" + result = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ) + ] + ) + ] + expected_output = [ + [ + "└── (T) Task-1", + ] + ] + self.assertEqual(tools.build_async_tree(result), expected_output) + + def test_single_task_table(self): + """Test build_task_table with a single task and no awaits.""" + result = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ) + ] + ) + ] + expected_output = [[1, '0x2', 'Task-1', '', '', '', '0x0']] + self.assertEqual(tools.build_task_table(result), expected_output) + + def test_cycle_detection(self): + """Test build_async_tree raises CycleFoundException for cyclic input.""" + result = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=3 + ) + ] + ), + TaskInfo( + task_id=3, + task_name="Task-2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=2 + ) + ] + ) + ] + ) + ] + with self.assertRaises(tools.CycleFoundException) as context: + tools.build_async_tree(result) + self.assertEqual(context.exception.cycles, [[3, 2, 3]]) + + def test_complex_tree(self): + """Test build_async_tree with a more complex tree structure.""" + result = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=3, + task_name="Task-2", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=4, + task_name="Task-3", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=3 + ) + ] + ) + ] + ) + ] + expected_output = [ + [ + "└── (T) Task-1", + " └── main", + " └── (T) Task-2", + " └── main", + " └── (T) Task-3", + ] + ] + self.assertEqual(tools.build_async_tree(result), expected_output) + + def test_complex_table(self): + """Test build_task_table with a more complex tree structure.""" + result = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=2, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=3, + task_name="Task-2", + 
coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=4, + task_name="Task-3", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("main", "", 0)], + task_name=3 + ) + ] + ) + ] + ) + ] + expected_output = [ + [1, '0x2', 'Task-1', '', '', '', '0x0'], + [1, '0x3', 'Task-2', '', 'main', 'Task-1', '0x2'], + [1, '0x4', 'Task-3', '', 'main', 'Task-2', '0x3'] + ] + self.assertEqual(tools.build_task_table(result), expected_output) + + def test_deep_coroutine_chain(self): + input_ = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=10, + task_name="leaf", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("c1", "", 0), + FrameInfo("c2", "", 0), + FrameInfo("c3", "", 0), + FrameInfo("c4", "", 0), + FrameInfo("c5", "", 0) + ], + task_name=11 + ) + ] + ), + TaskInfo( + task_id=11, + task_name="root", + coroutine_stack=[], + awaited_by=[] + ) + ] + ) + ] + expected = [ + [ + "└── (T) root", + " └── c5", + " └── c4", + " └── c3", + " └── c2", + " └── c1", + " └── (T) leaf", + ] + ] + result = tools.build_async_tree(input_) + self.assertEqual(result, expected) + + def test_multiple_cycles_same_node(self): + input_ = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=1, + task_name="Task-A", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("call1", "", 0)], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=2, + task_name="Task-B", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("call2", "", 0)], + task_name=3 + ) + ] + ), + TaskInfo( + task_id=3, + task_name="Task-C", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("call3", "", 0)], + task_name=1 + ), + CoroInfo( + call_stack=[FrameInfo("call4", "", 0)], + task_name=2 + ) + ] + ) + ] + ) + ] + with self.assertRaises(tools.CycleFoundException) as ctx: + tools.build_async_tree(input_) + cycles = ctx.exception.cycles + self.assertTrue(any(set(c) == {1, 2, 3} for c in cycles)) + + def test_table_output_format(self): + input_ = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=1, + task_name="Task-A", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("foo", "", 0)], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=2, + task_name="Task-B", + coroutine_stack=[], + awaited_by=[] + ) + ] + ) + ] + table = tools.build_task_table(input_) + for row in table: + self.assertEqual(len(row), 7) + self.assertIsInstance(row[0], int) # thread ID + self.assertTrue( + isinstance(row[1], str) and row[1].startswith("0x") + ) # hex task ID + self.assertIsInstance(row[2], str) # task name + self.assertIsInstance(row[3], str) # coroutine stack + self.assertIsInstance(row[4], str) # coroutine chain + self.assertIsInstance(row[5], str) # awaiter name + self.assertTrue( + isinstance(row[6], str) and row[6].startswith("0x") + ) # hex awaiter ID + + +class TestAsyncioToolsEdgeCases(unittest.TestCase): + + def test_task_awaits_self(self): + """A task directly awaits itself - should raise a cycle.""" + input_ = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=1, + task_name="Self-Awaiter", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("loopback", "", 0)], + task_name=1 + ) + ] + ) + ] + ) + ] + with self.assertRaises(tools.CycleFoundException) as ctx: + tools.build_async_tree(input_) + self.assertIn([1, 1], ctx.exception.cycles) + + def 
test_task_with_missing_awaiter_id(self): + """Awaiter ID not in task list - should not crash, just show 'Unknown'.""" + input_ = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=1, + task_name="Task-A", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("coro", "", 0)], + task_name=999 + ) + ] + ) + ] + ) + ] + table = tools.build_task_table(input_) + self.assertEqual(len(table), 1) + self.assertEqual(table[0][5], "Unknown") + + def test_duplicate_coroutine_frames(self): + """Same coroutine frame repeated under a parent - should deduplicate.""" + input_ = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=1, + task_name="Task-1", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("frameA", "", 0)], + task_name=2 + ), + CoroInfo( + call_stack=[FrameInfo("frameA", "", 0)], + task_name=3 + ) + ] + ), + TaskInfo( + task_id=2, + task_name="Task-2", + coroutine_stack=[], + awaited_by=[] + ), + TaskInfo( + task_id=3, + task_name="Task-3", + coroutine_stack=[], + awaited_by=[] + ) + ] + ) + ] + tree = tools.build_async_tree(input_) + # Both children should be under the same coroutine node + flat = "\n".join(tree[0]) + self.assertIn("frameA", flat) + self.assertIn("Task-2", flat) + self.assertIn("Task-1", flat) + + flat = "\n".join(tree[1]) + self.assertIn("frameA", flat) + self.assertIn("Task-3", flat) + self.assertIn("Task-1", flat) + + def test_task_with_no_name(self): + """Task with no name in id2name - should still render with fallback.""" + input_ = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=1, + task_name="root", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[FrameInfo("f1", "", 0)], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=2, + task_name=None, + coroutine_stack=[], + awaited_by=[] + ) + ] + ) + ] + # If name is None, fallback to string should not crash + tree = tools.build_async_tree(input_) + self.assertIn("(T) None", "\n".join(tree[0])) + + def test_tree_rendering_with_custom_emojis(self): + """Pass custom emojis to the tree renderer.""" + input_ = [ + AwaitedInfo( + thread_id=1, + awaited_by=[ + TaskInfo( + task_id=1, + task_name="MainTask", + coroutine_stack=[], + awaited_by=[ + CoroInfo( + call_stack=[ + FrameInfo("f1", "", 0), + FrameInfo("f2", "", 0) + ], + task_name=2 + ) + ] + ), + TaskInfo( + task_id=2, + task_name="SubTask", + coroutine_stack=[], + awaited_by=[] + ) + ] + ) + ] + tree = tools.build_async_tree(input_, task_emoji="🧵", cor_emoji="🔁") + flat = "\n".join(tree[0]) + self.assertIn("🧵 MainTask", flat) + self.assertIn("🔁 f1", flat) + self.assertIn("🔁 f2", flat) + self.assertIn("🧵 SubTask", flat) diff --git a/Lib/test/test_asyncio/test_transports.py b/Lib/test/test_asyncio/test_transports.py index bbdb218efaa..dbb572e2e15 100644 --- a/Lib/test/test_asyncio/test_transports.py +++ b/Lib/test/test_asyncio/test_transports.py @@ -10,7 +10,7 @@ def tearDownModule(): # not needed for the test file but added for uniformness with all other # asyncio test files for the sake of unified cleanup - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class TransportTests(unittest.TestCase): diff --git a/Lib/test/test_asyncio/test_unix_events.py b/Lib/test/test_asyncio/test_unix_events.py index 0e5488da272..520f5c733c3 100644 --- a/Lib/test/test_asyncio/test_unix_events.py +++ b/Lib/test/test_asyncio/test_unix_events.py @@ -10,11 +10,9 @@ import socket import stat import sys -import threading import time import unittest from 
unittest import mock -import warnings from test import support from test.support import os_helper @@ -27,13 +25,12 @@ import asyncio -from asyncio import log from asyncio import unix_events from test.test_asyncio import utils as test_utils def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) MOCK_ANY = mock.ANY @@ -1112,697 +1109,6 @@ def test_write_eof_pending(self): self.assertFalse(self.protocol.connection_lost.called) -class AbstractChildWatcherTests(unittest.TestCase): - - def test_warns_on_subclassing(self): - with self.assertWarns(DeprecationWarning): - class MyWatcher(asyncio.AbstractChildWatcher): - pass - - def test_not_implemented(self): - f = mock.Mock() - watcher = asyncio.AbstractChildWatcher() - self.assertRaises( - NotImplementedError, watcher.add_child_handler, f, f) - self.assertRaises( - NotImplementedError, watcher.remove_child_handler, f) - self.assertRaises( - NotImplementedError, watcher.attach_loop, f) - self.assertRaises( - NotImplementedError, watcher.close) - self.assertRaises( - NotImplementedError, watcher.is_active) - self.assertRaises( - NotImplementedError, watcher.__enter__) - self.assertRaises( - NotImplementedError, watcher.__exit__, f, f, f) - - -class BaseChildWatcherTests(unittest.TestCase): - - def test_not_implemented(self): - f = mock.Mock() - watcher = unix_events.BaseChildWatcher() - self.assertRaises( - NotImplementedError, watcher._do_waitpid, f) - - -class ChildWatcherTestsMixin: - - ignore_warnings = mock.patch.object(log.logger, "warning") - - def setUp(self): - super().setUp() - self.loop = self.new_test_loop() - self.running = False - self.zombies = {} - - with mock.patch.object( - self.loop, "add_signal_handler") as self.m_add_signal_handler: - self.watcher = self.create_watcher() - self.watcher.attach_loop(self.loop) - - def waitpid(self, pid, flags): - if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1: - self.assertGreater(pid, 0) - try: - if pid < 0: - return self.zombies.popitem() - else: - return pid, self.zombies.pop(pid) - except KeyError: - pass - if self.running: - return 0, 0 - else: - raise ChildProcessError() - - def add_zombie(self, pid, status): - self.zombies[pid] = status - - def waitstatus_to_exitcode(self, status): - if status > 32768: - return status - 32768 - elif 32700 < status < 32768: - return status - 32768 - else: - return status - - def test_create_watcher(self): - self.m_add_signal_handler.assert_called_once_with( - signal.SIGCHLD, self.watcher._sig_chld) - - def waitpid_mocks(func): - def wrapped_func(self): - def patch(target, wrapper): - return mock.patch(target, wraps=wrapper, - new_callable=mock.Mock) - - with patch('asyncio.unix_events.waitstatus_to_exitcode', self.waitstatus_to_exitcode), \ - patch('os.waitpid', self.waitpid) as m_waitpid: - func(self, m_waitpid) - return wrapped_func - - @waitpid_mocks - def test_sigchld(self, m_waitpid): - # register a child - callback = mock.Mock() - - with self.watcher: - self.running = True - self.watcher.add_child_handler(42, callback, 9, 10, 14) - - self.assertFalse(callback.called) - - # child is running - self.watcher._sig_chld() - - self.assertFalse(callback.called) - - # child terminates (returncode 12) - self.running = False - self.add_zombie(42, EXITCODE(12)) - self.watcher._sig_chld() - - callback.assert_called_once_with(42, 12, 9, 10, 14) - - callback.reset_mock() - - # ensure that the child is effectively reaped - self.add_zombie(42, EXITCODE(13)) - with self.ignore_warnings: - 
self.watcher._sig_chld() - - self.assertFalse(callback.called) - - # sigchld called again - self.zombies.clear() - self.watcher._sig_chld() - - self.assertFalse(callback.called) - - @waitpid_mocks - def test_sigchld_two_children(self, m_waitpid): - callback1 = mock.Mock() - callback2 = mock.Mock() - - # register child 1 - with self.watcher: - self.running = True - self.watcher.add_child_handler(43, callback1, 7, 8) - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - # register child 2 - with self.watcher: - self.watcher.add_child_handler(44, callback2, 147, 18) - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - # children are running - self.watcher._sig_chld() - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - # child 1 terminates (signal 3) - self.add_zombie(43, SIGNAL(3)) - self.watcher._sig_chld() - - callback1.assert_called_once_with(43, -3, 7, 8) - self.assertFalse(callback2.called) - - callback1.reset_mock() - - # child 2 still running - self.watcher._sig_chld() - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - # child 2 terminates (code 108) - self.add_zombie(44, EXITCODE(108)) - self.running = False - self.watcher._sig_chld() - - callback2.assert_called_once_with(44, 108, 147, 18) - self.assertFalse(callback1.called) - - callback2.reset_mock() - - # ensure that the children are effectively reaped - self.add_zombie(43, EXITCODE(14)) - self.add_zombie(44, EXITCODE(15)) - with self.ignore_warnings: - self.watcher._sig_chld() - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - # sigchld called again - self.zombies.clear() - self.watcher._sig_chld() - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - @waitpid_mocks - def test_sigchld_two_children_terminating_together(self, m_waitpid): - callback1 = mock.Mock() - callback2 = mock.Mock() - - # register child 1 - with self.watcher: - self.running = True - self.watcher.add_child_handler(45, callback1, 17, 8) - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - # register child 2 - with self.watcher: - self.watcher.add_child_handler(46, callback2, 1147, 18) - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - # children are running - self.watcher._sig_chld() - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - # child 1 terminates (code 78) - # child 2 terminates (signal 5) - self.add_zombie(45, EXITCODE(78)) - self.add_zombie(46, SIGNAL(5)) - self.running = False - self.watcher._sig_chld() - - callback1.assert_called_once_with(45, 78, 17, 8) - callback2.assert_called_once_with(46, -5, 1147, 18) - - callback1.reset_mock() - callback2.reset_mock() - - # ensure that the children are effectively reaped - self.add_zombie(45, EXITCODE(14)) - self.add_zombie(46, EXITCODE(15)) - with self.ignore_warnings: - self.watcher._sig_chld() - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - @waitpid_mocks - def test_sigchld_race_condition(self, m_waitpid): - # register a child - callback = mock.Mock() - - with self.watcher: - # child terminates before being registered - self.add_zombie(50, EXITCODE(4)) - self.watcher._sig_chld() - - self.watcher.add_child_handler(50, callback, 1, 12) - - callback.assert_called_once_with(50, 4, 1, 12) - callback.reset_mock() - - # ensure that the child is effectively reaped - self.add_zombie(50, SIGNAL(1)) - with self.ignore_warnings: - 
self.watcher._sig_chld() - - self.assertFalse(callback.called) - - @waitpid_mocks - def test_sigchld_replace_handler(self, m_waitpid): - callback1 = mock.Mock() - callback2 = mock.Mock() - - # register a child - with self.watcher: - self.running = True - self.watcher.add_child_handler(51, callback1, 19) - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - # register the same child again - with self.watcher: - self.watcher.add_child_handler(51, callback2, 21) - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - # child terminates (signal 8) - self.running = False - self.add_zombie(51, SIGNAL(8)) - self.watcher._sig_chld() - - callback2.assert_called_once_with(51, -8, 21) - self.assertFalse(callback1.called) - - callback2.reset_mock() - - # ensure that the child is effectively reaped - self.add_zombie(51, EXITCODE(13)) - with self.ignore_warnings: - self.watcher._sig_chld() - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - - @waitpid_mocks - def test_sigchld_remove_handler(self, m_waitpid): - callback = mock.Mock() - - # register a child - with self.watcher: - self.running = True - self.watcher.add_child_handler(52, callback, 1984) - - self.assertFalse(callback.called) - - # unregister the child - self.watcher.remove_child_handler(52) - - self.assertFalse(callback.called) - - # child terminates (code 99) - self.running = False - self.add_zombie(52, EXITCODE(99)) - with self.ignore_warnings: - self.watcher._sig_chld() - - self.assertFalse(callback.called) - - @waitpid_mocks - def test_sigchld_unknown_status(self, m_waitpid): - callback = mock.Mock() - - # register a child - with self.watcher: - self.running = True - self.watcher.add_child_handler(53, callback, -19) - - self.assertFalse(callback.called) - - # terminate with unknown status - self.zombies[53] = 1178 - self.running = False - self.watcher._sig_chld() - - callback.assert_called_once_with(53, 1178, -19) - - callback.reset_mock() - - # ensure that the child is effectively reaped - self.add_zombie(53, EXITCODE(101)) - with self.ignore_warnings: - self.watcher._sig_chld() - - self.assertFalse(callback.called) - - @waitpid_mocks - def test_remove_child_handler(self, m_waitpid): - callback1 = mock.Mock() - callback2 = mock.Mock() - callback3 = mock.Mock() - - # register children - with self.watcher: - self.running = True - self.watcher.add_child_handler(54, callback1, 1) - self.watcher.add_child_handler(55, callback2, 2) - self.watcher.add_child_handler(56, callback3, 3) - - # remove child handler 1 - self.assertTrue(self.watcher.remove_child_handler(54)) - - # remove child handler 2 multiple times - self.assertTrue(self.watcher.remove_child_handler(55)) - self.assertFalse(self.watcher.remove_child_handler(55)) - self.assertFalse(self.watcher.remove_child_handler(55)) - - # all children terminate - self.add_zombie(54, EXITCODE(0)) - self.add_zombie(55, EXITCODE(1)) - self.add_zombie(56, EXITCODE(2)) - self.running = False - with self.ignore_warnings: - self.watcher._sig_chld() - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - callback3.assert_called_once_with(56, 2, 3) - - @waitpid_mocks - def test_sigchld_unhandled_exception(self, m_waitpid): - callback = mock.Mock() - - # register a child - with self.watcher: - self.running = True - self.watcher.add_child_handler(57, callback) - - # raise an exception - m_waitpid.side_effect = ValueError - - with mock.patch.object(log.logger, - 'error') as m_error: - - 
self.assertEqual(self.watcher._sig_chld(), None) - self.assertTrue(m_error.called) - - @waitpid_mocks - def test_sigchld_child_reaped_elsewhere(self, m_waitpid): - # register a child - callback = mock.Mock() - - with self.watcher: - self.running = True - self.watcher.add_child_handler(58, callback) - - self.assertFalse(callback.called) - - # child terminates - self.running = False - self.add_zombie(58, EXITCODE(4)) - - # waitpid is called elsewhere - os.waitpid(58, os.WNOHANG) - - m_waitpid.reset_mock() - - # sigchld - with self.ignore_warnings: - self.watcher._sig_chld() - - if isinstance(self.watcher, asyncio.FastChildWatcher): - # here the FastChildWatcher enters a deadlock - # (there is no way to prevent it) - self.assertFalse(callback.called) - else: - callback.assert_called_once_with(58, 255) - - @waitpid_mocks - def test_sigchld_unknown_pid_during_registration(self, m_waitpid): - # register two children - callback1 = mock.Mock() - callback2 = mock.Mock() - - with self.ignore_warnings, self.watcher: - self.running = True - # child 1 terminates - self.add_zombie(591, EXITCODE(7)) - # an unknown child terminates - self.add_zombie(593, EXITCODE(17)) - - self.watcher._sig_chld() - - self.watcher.add_child_handler(591, callback1) - self.watcher.add_child_handler(592, callback2) - - callback1.assert_called_once_with(591, 7) - self.assertFalse(callback2.called) - - @waitpid_mocks - def test_set_loop(self, m_waitpid): - # register a child - callback = mock.Mock() - - with self.watcher: - self.running = True - self.watcher.add_child_handler(60, callback) - - # attach a new loop - old_loop = self.loop - self.loop = self.new_test_loop() - patch = mock.patch.object - - with patch(old_loop, "remove_signal_handler") as m_old_remove, \ - patch(self.loop, "add_signal_handler") as m_new_add: - - self.watcher.attach_loop(self.loop) - - m_old_remove.assert_called_once_with( - signal.SIGCHLD) - m_new_add.assert_called_once_with( - signal.SIGCHLD, self.watcher._sig_chld) - - # child terminates - self.running = False - self.add_zombie(60, EXITCODE(9)) - self.watcher._sig_chld() - - callback.assert_called_once_with(60, 9) - - @waitpid_mocks - def test_set_loop_race_condition(self, m_waitpid): - # register 3 children - callback1 = mock.Mock() - callback2 = mock.Mock() - callback3 = mock.Mock() - - with self.watcher: - self.running = True - self.watcher.add_child_handler(61, callback1) - self.watcher.add_child_handler(62, callback2) - self.watcher.add_child_handler(622, callback3) - - # detach the loop - old_loop = self.loop - self.loop = None - - with mock.patch.object( - old_loop, "remove_signal_handler") as m_remove_signal_handler: - - with self.assertWarnsRegex( - RuntimeWarning, 'A loop is being detached'): - self.watcher.attach_loop(None) - - m_remove_signal_handler.assert_called_once_with( - signal.SIGCHLD) - - # child 1 & 2 terminate - self.add_zombie(61, EXITCODE(11)) - self.add_zombie(62, SIGNAL(5)) - - # SIGCHLD was not caught - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - self.assertFalse(callback3.called) - - # attach a new loop - self.loop = self.new_test_loop() - - with mock.patch.object( - self.loop, "add_signal_handler") as m_add_signal_handler: - - self.watcher.attach_loop(self.loop) - - m_add_signal_handler.assert_called_once_with( - signal.SIGCHLD, self.watcher._sig_chld) - callback1.assert_called_once_with(61, 11) # race condition! - callback2.assert_called_once_with(62, -5) # race condition! 
- self.assertFalse(callback3.called) - - callback1.reset_mock() - callback2.reset_mock() - - # child 3 terminates - self.running = False - self.add_zombie(622, EXITCODE(19)) - self.watcher._sig_chld() - - self.assertFalse(callback1.called) - self.assertFalse(callback2.called) - callback3.assert_called_once_with(622, 19) - - @waitpid_mocks - def test_close(self, m_waitpid): - # register two children - callback1 = mock.Mock() - - with self.watcher: - self.running = True - # child 1 terminates - self.add_zombie(63, EXITCODE(9)) - # other child terminates - self.add_zombie(65, EXITCODE(18)) - self.watcher._sig_chld() - - self.watcher.add_child_handler(63, callback1) - self.watcher.add_child_handler(64, callback1) - - self.assertEqual(len(self.watcher._callbacks), 1) - if isinstance(self.watcher, asyncio.FastChildWatcher): - self.assertEqual(len(self.watcher._zombies), 1) - - with mock.patch.object( - self.loop, - "remove_signal_handler") as m_remove_signal_handler: - - self.watcher.close() - - m_remove_signal_handler.assert_called_once_with( - signal.SIGCHLD) - self.assertFalse(self.watcher._callbacks) - if isinstance(self.watcher, asyncio.FastChildWatcher): - self.assertFalse(self.watcher._zombies) - - -class SafeChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase): - def create_watcher(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - return asyncio.SafeChildWatcher() - - -class FastChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase): - def create_watcher(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - return asyncio.FastChildWatcher() - - -class PolicyTests(unittest.TestCase): - - def create_policy(self): - return asyncio.DefaultEventLoopPolicy() - - @mock.patch('asyncio.unix_events.can_use_pidfd') - def test_get_default_child_watcher(self, m_can_use_pidfd): - m_can_use_pidfd.return_value = False - policy = self.create_policy() - self.assertIsNone(policy._watcher) - with self.assertWarns(DeprecationWarning): - watcher = policy.get_child_watcher() - self.assertIsInstance(watcher, asyncio.ThreadedChildWatcher) - - self.assertIs(policy._watcher, watcher) - with self.assertWarns(DeprecationWarning): - self.assertIs(watcher, policy.get_child_watcher()) - - m_can_use_pidfd.return_value = True - policy = self.create_policy() - self.assertIsNone(policy._watcher) - with self.assertWarns(DeprecationWarning): - watcher = policy.get_child_watcher() - self.assertIsInstance(watcher, asyncio.PidfdChildWatcher) - - self.assertIs(policy._watcher, watcher) - with self.assertWarns(DeprecationWarning): - self.assertIs(watcher, policy.get_child_watcher()) - - def test_get_child_watcher_after_set(self): - policy = self.create_policy() - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - watcher = asyncio.FastChildWatcher() - policy.set_child_watcher(watcher) - - self.assertIs(policy._watcher, watcher) - with self.assertWarns(DeprecationWarning): - self.assertIs(watcher, policy.get_child_watcher()) - - def test_get_child_watcher_thread(self): - - def f(): - policy.set_event_loop(policy.new_event_loop()) - - self.assertIsInstance(policy.get_event_loop(), - asyncio.AbstractEventLoop) - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - watcher = policy.get_child_watcher() - - self.assertIsInstance(watcher, asyncio.SafeChildWatcher) - self.assertIsNone(watcher._loop) - - policy.get_event_loop().close() - - policy = 
self.create_policy() - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - policy.set_child_watcher(asyncio.SafeChildWatcher()) - - th = threading.Thread(target=f) - th.start() - th.join() - - def test_child_watcher_replace_mainloop_existing(self): - policy = self.create_policy() - loop = policy.new_event_loop() - policy.set_event_loop(loop) - - # Explicitly setup SafeChildWatcher, - # default ThreadedChildWatcher has no _loop property - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - watcher = asyncio.SafeChildWatcher() - policy.set_child_watcher(watcher) - watcher.attach_loop(loop) - - self.assertIs(watcher._loop, loop) - - new_loop = policy.new_event_loop() - policy.set_event_loop(new_loop) - - self.assertIs(watcher._loop, new_loop) - - policy.set_event_loop(None) - - self.assertIs(watcher._loop, None) - - loop.close() - new_loop.close() - - class TestFunctional(unittest.TestCase): def setUp(self): @@ -1874,11 +1180,46 @@ async def runner(): @support.requires_fork() -class TestFork(unittest.IsolatedAsyncioTestCase): +class TestFork(unittest.TestCase): + + def test_fork_not_share_current_task(self): + loop = object() + task = object() + asyncio._set_running_loop(loop) + self.addCleanup(asyncio._set_running_loop, None) + asyncio.tasks._enter_task(loop, task) + self.addCleanup(asyncio.tasks._leave_task, loop, task) + self.assertIs(asyncio.current_task(), task) + r, w = os.pipe() + self.addCleanup(os.close, r) + self.addCleanup(os.close, w) + pid = os.fork() + if pid == 0: + # child + try: + asyncio._set_running_loop(loop) + current_task = asyncio.current_task() + if current_task is None: + os.write(w, b'NO TASK') + else: + os.write(w, b'TASK:' + str(id(current_task)).encode()) + except BaseException as e: + os.write(w, b'ERROR:' + ascii(e).encode()) + finally: + asyncio._set_running_loop(None) + os._exit(0) + else: + # parent + result = os.read(r, 100) + self.assertEqual(result, b'NO TASK') + wait_process(pid, exitcode=0) - async def test_fork_not_share_event_loop(self): + def test_fork_not_share_event_loop(self): # The forked process should not share the event loop with the parent - loop = asyncio.get_running_loop() + loop = object() + asyncio._set_running_loop(loop) + self.assertIs(asyncio.get_running_loop(), loop) + self.addCleanup(asyncio._set_running_loop, None) r, w = os.pipe() self.addCleanup(os.close, r) self.addCleanup(os.close, w) @@ -1886,8 +1227,7 @@ async def test_fork_not_share_event_loop(self): if pid == 0: # child try: - with self.assertWarns(DeprecationWarning): - loop = asyncio.get_event_loop_policy().get_event_loop() + loop = asyncio.get_event_loop() os.write(w, b'LOOP:' + str(id(loop)).encode()) except RuntimeError: os.write(w, b'NO LOOP') @@ -1898,8 +1238,7 @@ async def test_fork_not_share_event_loop(self): else: # parent result = os.read(r, 100) - self.assertEqual(result[:5], b'LOOP:', result) - self.assertNotEqual(int(result[5:]), id(loop)) + self.assertEqual(result, b'NO LOOP') wait_process(pid, exitcode=0) @hashlib_helper.requires_hashdigest('md5') diff --git a/Lib/test/test_asyncio/test_waitfor.py b/Lib/test/test_asyncio/test_waitfor.py index 11a8eeeab37..dedc6bf69d7 100644 --- a/Lib/test/test_asyncio/test_waitfor.py +++ b/Lib/test/test_asyncio/test_waitfor.py @@ -5,7 +5,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) # The following value can be used as a very small timeout: diff --git 
a/Lib/test/test_asyncio/test_windows_events.py b/Lib/test/test_asyncio/test_windows_events.py index 0c128c599ba..0af3368627a 100644 --- a/Lib/test/test_asyncio/test_windows_events.py +++ b/Lib/test/test_asyncio/test_windows_events.py @@ -19,7 +19,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class UpperProto(asyncio.Protocol): @@ -328,17 +328,18 @@ class WinPolicyTests(WindowsEventsTestCase): def test_selector_win_policy(self): async def main(): - self.assertIsInstance( - asyncio.get_running_loop(), - asyncio.SelectorEventLoop) + self.assertIsInstance(asyncio.get_running_loop(), asyncio.SelectorEventLoop) - old_policy = asyncio.get_event_loop_policy() + old_policy = asyncio.events._get_event_loop_policy() try: - asyncio.set_event_loop_policy( - asyncio.WindowsSelectorEventLoopPolicy()) + with self.assertWarnsRegex( + DeprecationWarning, + "'asyncio.WindowsSelectorEventLoopPolicy' is deprecated", + ): + asyncio.events._set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) asyncio.run(main()) finally: - asyncio.set_event_loop_policy(old_policy) + asyncio.events._set_event_loop_policy(old_policy) def test_proactor_win_policy(self): async def main(): @@ -346,13 +347,16 @@ async def main(): asyncio.get_running_loop(), asyncio.ProactorEventLoop) - old_policy = asyncio.get_event_loop_policy() + old_policy = asyncio.events._get_event_loop_policy() try: - asyncio.set_event_loop_policy( - asyncio.WindowsProactorEventLoopPolicy()) + with self.assertWarnsRegex( + DeprecationWarning, + "'asyncio.WindowsProactorEventLoopPolicy' is deprecated", + ): + asyncio.events._set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy()) asyncio.run(main()) finally: - asyncio.set_event_loop_policy(old_policy) + asyncio.events._set_event_loop_policy(old_policy) if __name__ == '__main__': diff --git a/Lib/test/test_asyncio/test_windows_utils.py b/Lib/test/test_asyncio/test_windows_utils.py index eafa5be3829..97f078ff911 100644 --- a/Lib/test/test_asyncio/test_windows_utils.py +++ b/Lib/test/test_asyncio/test_windows_utils.py @@ -16,7 +16,7 @@ def tearDownModule(): - asyncio.set_event_loop_policy(None) + asyncio.events._set_event_loop_policy(None) class PipeTests(unittest.TestCase): @@ -121,8 +121,8 @@ def test_popen(self): self.assertGreater(len(out), 0) self.assertGreater(len(err), 0) # allow for partial reads... 
- self.assertTrue(msg.upper().rstrip().startswith(out)) - self.assertTrue(b"stderr".startswith(err)) + self.assertStartsWith(msg.upper().rstrip(), out) + self.assertStartsWith(b"stderr", err) # The context manager calls wait() and closes resources with p: diff --git a/Lib/test/test_asyncio/utils.py b/Lib/test/test_asyncio/utils.py index d9a2939be13..a480e16e81b 100644 --- a/Lib/test/test_asyncio/utils.py +++ b/Lib/test/test_asyncio/utils.py @@ -14,7 +14,7 @@ import threading import unittest import weakref -import warnings +from ast import literal_eval from unittest import mock from http.server import HTTPServer @@ -55,24 +55,8 @@ def data_file(*filename): ONLYKEY = data_file('certdata', 'ssl_key.pem') SIGNED_CERTFILE = data_file('certdata', 'keycert3.pem') SIGNING_CA = data_file('certdata', 'pycacert.pem') -PEERCERT = { - 'OCSP': ('http://testca.pythontest.net/testca/ocsp/',), - 'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',), - 'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',), - 'issuer': ((('countryName', 'XY'),), - (('organizationName', 'Python Software Foundation CA'),), - (('commonName', 'our-ca-server'),)), - 'notAfter': 'Oct 28 14:23:16 2037 GMT', - 'notBefore': 'Aug 29 14:23:16 2018 GMT', - 'serialNumber': 'CB2D80995A69525C', - 'subject': ((('countryName', 'XY'),), - (('localityName', 'Castle Anthrax'),), - (('organizationName', 'Python Software Foundation'),), - (('commonName', 'localhost'),)), - 'subjectAltName': (('DNS', 'localhost'),), - 'version': 3 -} - +with open(data_file('certdata', 'keycert3.pem.reference')) as file: + PEERCERT = literal_eval(file.read()) def simple_server_sslcontext(): server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) @@ -551,25 +535,6 @@ def close_loop(loop): loop._default_executor.shutdown(wait=True) loop.close() - policy = support.maybe_get_event_loop_policy() - if policy is not None: - try: - with warnings.catch_warnings(): - warnings.simplefilter('ignore', DeprecationWarning) - watcher = policy.get_child_watcher() - except NotImplementedError: - # watcher is not implemented by EventLoopPolicy, e.g. 
Windows - pass - else: - if isinstance(watcher, asyncio.ThreadedChildWatcher): - # Wait for subprocess to finish, but not forever - for thread in list(watcher._threads.values()): - thread.join(timeout=support.SHORT_TIMEOUT) - if thread.is_alive(): - raise RuntimeError(f"thread {thread} still alive: " - "subprocess still running") - - def set_event_loop(self, loop, *, cleanup=True): if loop is None: raise AssertionError('loop is None') @@ -636,3 +601,9 @@ def func(): await asyncio.sleep(0) if exc is not None: raise exc + + +if sys.platform == 'win32': + DefaultEventLoopPolicy = asyncio.windows_events._DefaultEventLoopPolicy +else: + DefaultEventLoopPolicy = asyncio.unix_events._DefaultEventLoopPolicy diff --git a/Lib/test/test_copy.py b/Lib/test/test_copy.py index 3dec64cc9a2..64b93fa4b5f 100644 --- a/Lib/test/test_copy.py +++ b/Lib/test/test_copy.py @@ -837,12 +837,15 @@ class C(object): v[x] = y self.assertNotIn(x, u) + @unittest.expectedFailure # TODO: RUSTPYTHON; weakref callback not fired immediately by gc_collect def test_copy_weakkeydict(self): self._check_copy_weakdict(weakref.WeakKeyDictionary) + @unittest.expectedFailure # TODO: RUSTPYTHON; weakref callback not fired immediately by gc_collect def test_copy_weakvaluedict(self): self._check_copy_weakdict(weakref.WeakValueDictionary) + @unittest.expectedFailure # TODO: RUSTPYTHON; weakref callback not fired immediately by gc_collect def test_deepcopy_weakkeydict(self): class C(object): def __init__(self, i): @@ -863,6 +866,7 @@ def __init__(self, i): support.gc_collect() # For PyPy or other GCs. self.assertEqual(len(v), 1) + @unittest.expectedFailure # TODO: RUSTPYTHON; weakref callback not fired immediately by gc_collect def test_deepcopy_weakvaluedict(self): class C(object): def __init__(self, i): diff --git a/Lib/test/test_weakref.py b/Lib/test/test_weakref.py index bfecca43909..6777855e441 100644 --- a/Lib/test/test_weakref.py +++ b/Lib/test/test_weakref.py @@ -1373,6 +1373,7 @@ def test_weak_keyed_len_race(self): def test_weak_valued_len_race(self): self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k)) + @unittest.expectedFailure # TODO: RUSTPYTHON; weakref callback not fired immediately by gc_collect def test_weak_values(self): # # This exercises d.copy(), d.items(), d[], del d[], len(d). @@ -1405,6 +1406,7 @@ def test_weak_values(self): gc_collect() # For PyPy or other GCs. 
self.assertRaises(KeyError, dict.__getitem__, 2) + @unittest.expectedFailure # TODO: RUSTPYTHON; weakref callback not fired immediately by gc_collect def test_weak_keys(self): # # This exercises d.copy(), d.items(), d[] = v, d[], del d[], @@ -1770,6 +1772,7 @@ def test_weak_valued_dict_update(self): self.assertEqual(list(d.keys()), [kw]) self.assertEqual(d[kw], o) + @unittest.expectedFailure # TODO: RUSTPYTHON; weakref callback not fired immediately by gc_collect def test_weak_valued_union_operators(self): a = C() b = C() @@ -1822,6 +1825,7 @@ def test_weak_keyed_delitem(self): self.assertEqual(len(d), 1) self.assertEqual(list(d.keys()), [o2]) + @unittest.expectedFailure # TODO: RUSTPYTHON; weakref callback not fired immediately by gc_collect def test_weak_keyed_union_operators(self): o1 = C() o2 = C() diff --git a/crates/stdlib/src/_asyncio.rs b/crates/stdlib/src/_asyncio.rs index 7e8ae5292e2..6e7a8c6e0e5 100644 --- a/crates/stdlib/src/_asyncio.rs +++ b/crates/stdlib/src/_asyncio.rs @@ -17,6 +17,7 @@ pub(crate) mod _asyncio { extend_module, function::{FuncArgs, KwArgs, OptionalArg, OptionalOption, PySetterValue}, protocol::PyIterReturn, + recursion::ReprGuard, types::{ Callable, Constructor, Destructor, Initializer, IterNext, Iterable, Representable, SelfIter, @@ -45,6 +46,15 @@ pub(crate) mod _asyncio { "_current_tasks" => current_tasks, }); + // Register fork handler to clear task state in child process + #[cfg(unix)] + { + let on_fork = vm + .get_attribute_opt(module.to_owned().into(), vm.ctx.intern_str("_on_fork"))? + .expect("_on_fork not found in _asyncio module"); + vm.state.after_forkers_child.lock().push(on_fork); + } + Ok(()) } @@ -830,9 +840,13 @@ pub(crate) mod _asyncio { impl Representable for PyFuture { fn repr_str(zelf: &Py, vm: &VirtualMachine) -> PyResult { - let info = get_future_repr_info(zelf.as_object(), vm)?; let class_name = zelf.class().name().to_string(); - Ok(format!("<{} {}>", class_name, info)) + if let Some(_guard) = ReprGuard::enter(vm, zelf.as_object()) { + let info = get_future_repr_info(zelf.as_object(), vm)?; + Ok(format!("<{} {}>", class_name, info)) + } else { + Ok(format!("<{} ...>", class_name)) + } } } @@ -1915,34 +1929,38 @@ pub(crate) mod _asyncio { fn repr_str(zelf: &Py, vm: &VirtualMachine) -> PyResult { let class_name = zelf.class().name().to_string(); - // Try to use _task_repr_info if available - if let Ok(info) = get_task_repr_info(zelf.as_object(), vm) - && info != "state=unknown" - { - return Ok(format!("<{} {}>", class_name, info)); - } - - // Fallback: build repr from task properties directly - let state = zelf.base.fut_state.load().as_str().to_lowercase(); - let name = zelf - .task_name - .read() - .as_ref() - .and_then(|n| n.str(vm).ok()) - .map(|s| s.as_str().to_string()) - .unwrap_or_else(|| "?".to_string()); - let coro_repr = zelf - .task_coro - .read() - .as_ref() - .and_then(|c| c.repr(vm).ok()) - .map(|s| s.as_str().to_string()) - .unwrap_or_else(|| "?".to_string()); + if let Some(_guard) = ReprGuard::enter(vm, zelf.as_object()) { + // Try to use _task_repr_info if available + if let Ok(info) = get_task_repr_info(zelf.as_object(), vm) + && info != "state=unknown" + { + return Ok(format!("<{} {}>", class_name, info)); + } - Ok(format!( - "<{} {} name='{}' coro={}>", - class_name, state, name, coro_repr - )) + // Fallback: build repr from task properties directly + let state = zelf.base.fut_state.load().as_str().to_lowercase(); + let name = zelf + .task_name + .read() + .as_ref() + .and_then(|n| n.str(vm).ok()) + .map(|s| 
s.as_str().to_string()) + .unwrap_or_else(|| "?".to_string()); + let coro_repr = zelf + .task_coro + .read() + .as_ref() + .and_then(|c| c.repr(vm).ok()) + .map(|s| s.as_str().to_string()) + .unwrap_or_else(|| "?".to_string()); + + Ok(format!( + "<{} {} name='{}' coro={}>", + class_name, state, name, coro_repr + )) + } else { + Ok(format!("<{} ...>", class_name)) + } } } @@ -2203,9 +2221,12 @@ pub(crate) mod _asyncio { *task.task_fut_waiter.write() = Some(result.clone()); let task_obj: PyObjectRef = task.clone().into(); - let wakeup_wrapper = TaskWakeupMethWrapper::new(task_obj).into_ref(&vm.ctx); + let wakeup_wrapper = TaskWakeupMethWrapper::new(task_obj.clone()).into_ref(&vm.ctx); vm.call_method(&result, "add_done_callback", (wakeup_wrapper,))?; + // Track awaited_by relationship for introspection + future_add_to_awaited_by(result.clone(), task_obj, vm)?; + // If task_must_cancel is set, cancel the awaited future immediately // This propagates the cancellation through the future chain if task.task_must_cancel.load(Ordering::Relaxed) { @@ -2295,6 +2316,9 @@ pub(crate) mod _asyncio { .downcast() .map_err(|_| vm.new_type_error("task_wakeup called with non-Task object"))?; + // Remove awaited_by relationship before resuming + future_discard_from_awaited_by(fut.clone(), task.clone(), vm)?; + *task_ref.task_fut_waiter.write() = None; // Call result() on the awaited future to get either result or exception @@ -2376,7 +2400,6 @@ pub(crate) mod _asyncio { Some(l) if !vm.is_none(&l) => l, _ => { // When loop is None or not provided, use the running loop - // and raise an error if there's no running loop match vm.asyncio_running_loop.borrow().clone() { Some(l) => l, None => return Err(vm.new_runtime_error("no running event loop")), @@ -2384,6 +2407,23 @@ pub(crate) mod _asyncio { } }; + // Fast path: if the loop is the current thread's running loop, + // return the per-thread running task directly + let is_current_loop = vm + .asyncio_running_loop + .borrow() + .as_ref() + .is_some_and(|rl| rl.is(&loop_obj)); + + if is_current_loop { + return Ok(vm + .asyncio_running_task + .borrow() + .clone() + .unwrap_or_else(|| vm.ctx.none())); + } + + // Slow path: look up in the module-level dict for cross-thread queries let current_tasks = get_current_tasks_dict(vm)?; let dict: PyDictRef = current_tasks.downcast().unwrap(); @@ -2464,89 +2504,116 @@ pub(crate) mod _asyncio { #[pyfunction] fn _enter_task(loop_: PyObjectRef, task: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> { - let current_tasks = get_current_tasks_dict(vm)?; - let dict: PyDictRef = current_tasks.downcast().unwrap(); - - if dict.contains_key(&*loop_, vm) { - return Err( - vm.new_runtime_error("Cannot enter task while another task is already running") - ); + // Per-thread check, matching CPython's ts->asyncio_running_task + { + let running_task = vm.asyncio_running_task.borrow(); + if running_task.is_some() { + return Err(vm.new_runtime_error(format!( + "Cannot enter into task {:?} while another task {:?} is being executed.", + task, + running_task.as_ref().unwrap() + ))); + } } - dict.set_item(&*loop_, task, vm)?; + *vm.asyncio_running_task.borrow_mut() = Some(task.clone()); + + // Also update the module-level dict for cross-thread queries + if let Ok(current_tasks) = get_current_tasks_dict(vm) + && let Ok(dict) = current_tasks.downcast::() + { + let _ = dict.set_item(&*loop_, task, vm); + } Ok(()) } #[pyfunction] fn _leave_task(loop_: PyObjectRef, task: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> { - let current_tasks = 
get_current_tasks_dict(vm)?; - let dict: PyDictRef = current_tasks.downcast().unwrap(); - - let current = dict.get_item(&*loop_, vm).ok(); - match current { - None => { - return Err( - vm.new_runtime_error("_leave_task: task is not the current task".to_owned()) - ); - } - Some(current) if !current.is(&task) => { - return Err( - vm.new_runtime_error("_leave_task: task is not the current task".to_owned()) - ); + // Per-thread check, matching CPython's ts->asyncio_running_task + { + let running_task = vm.asyncio_running_task.borrow(); + match running_task.as_ref() { + None => { + return Err(vm.new_runtime_error( + "_leave_task: task is not the current task".to_owned(), + )); + } + Some(current) if !current.is(&task) => { + return Err(vm.new_runtime_error( + "_leave_task: task is not the current task".to_owned(), + )); + } + _ => {} } - _ => {} } - dict.del_item(&*loop_, vm)?; + *vm.asyncio_running_task.borrow_mut() = None; + + // Also update the module-level dict + if let Ok(current_tasks) = get_current_tasks_dict(vm) + && let Ok(dict) = current_tasks.downcast::() + { + let _ = dict.del_item(&*loop_, vm); + } Ok(()) } #[pyfunction] fn _swap_current_task(loop_: PyObjectRef, task: PyObjectRef, vm: &VirtualMachine) -> PyResult { - let current_tasks = get_current_tasks_dict(vm)?; - let dict: PyDictRef = current_tasks.downcast().unwrap(); - - let prev = match dict.get_item(&*loop_, vm) { - Ok(t) => t, - Err(_) => vm.ctx.none(), - }; + // Per-thread swap, matching CPython's swap_current_task + let prev = vm + .asyncio_running_task + .borrow() + .clone() + .unwrap_or_else(|| vm.ctx.none()); if vm.is_none(&task) { - let _ = dict.del_item(&*loop_, vm); + *vm.asyncio_running_task.borrow_mut() = None; } else { - dict.set_item(&*loop_, task, vm)?; + *vm.asyncio_running_task.borrow_mut() = Some(task.clone()); + } + + // Also update the module-level dict for cross-thread queries + if let Ok(current_tasks) = get_current_tasks_dict(vm) + && let Ok(dict) = current_tasks.downcast::() + { + if vm.is_none(&task) { + let _ = dict.del_item(&*loop_, vm); + } else { + let _ = dict.set_item(&*loop_, task, vm); + } } Ok(prev) } + /// Reset task state after fork in child process. + #[pyfunction] + fn _on_fork(vm: &VirtualMachine) -> PyResult<()> { + // Clear current_tasks dict so child process doesn't inherit parent's tasks + if let Ok(current_tasks) = get_current_tasks_dict(vm) { + vm.call_method(¤t_tasks, "clear", ())?; + } + // Clear the running loop and task + *vm.asyncio_running_loop.borrow_mut() = None; + *vm.asyncio_running_task.borrow_mut() = None; + Ok(()) + } + #[pyfunction] fn future_add_to_awaited_by( fut: PyObjectRef, waiter: PyObjectRef, vm: &VirtualMachine, ) -> PyResult<()> { - // Use optimized method for native Future/Task + // Only operate on native Future/Task objects (including subclasses). + // Non-native objects are silently ignored. 
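As a rough Python-level illustration of what the fork handling above is meant to guarantee (POSIX only, using the same private helpers as the TestFork cases earlier in this patch; a sketch, not part of the change itself):

import asyncio, os

loop, task = object(), object()
asyncio._set_running_loop(loop)
asyncio.tasks._enter_task(loop, task)
assert asyncio.current_task() is task        # parent thread owns the task

pid = os.fork()
if pid == 0:  # child: the after-fork hook should have cleared task state
    asyncio._set_running_loop(loop)          # current_task() needs a running loop
    assert asyncio.current_task() is None
    os._exit(0)

os.waitpid(pid, 0)                           # parent: clean up
asyncio.tasks._leave_task(loop, task)
asyncio._set_running_loop(None)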
if let Some(task) = fut.downcast_ref::() { return task.base.awaited_by_add(waiter, vm); } if let Some(future) = fut.downcast_ref::() { return future.awaited_by_add(waiter, vm); } - - // Fallback for non-native futures (Python subclasses) - if let Ok(Some(awaited_by)) = - vm.get_attribute_opt(fut.clone(), vm.ctx.intern_str("_asyncio_awaited_by")) - && !vm.is_none(&awaited_by) - { - vm.call_method(&awaited_by, "add", (waiter,))?; - return Ok(()); - } - - let new_set = PySet::default().into_ref(&vm.ctx); - new_set.add(waiter, vm)?; - fut.set_attr(vm.ctx.intern_str("_asyncio_awaited_by"), new_set, vm)?; - Ok(()) } @@ -2556,21 +2623,14 @@ pub(crate) mod _asyncio { waiter: PyObjectRef, vm: &VirtualMachine, ) -> PyResult<()> { - // Use optimized method for native Future/Task + // Only operate on native Future/Task objects (including subclasses). + // Non-native objects are silently ignored. if let Some(task) = fut.downcast_ref::() { return task.base.awaited_by_discard(&waiter, vm); } if let Some(future) = fut.downcast_ref::() { return future.awaited_by_discard(&waiter, vm); } - - // Fallback for non-native futures - if let Ok(Some(awaited_by)) = - vm.get_attribute_opt(fut.clone(), vm.ctx.intern_str("_asyncio_awaited_by")) - && !vm.is_none(&awaited_by) - { - vm.call_method(&awaited_by, "discard", (waiter,))?; - } Ok(()) } @@ -2596,6 +2656,14 @@ pub(crate) mod _asyncio { fn __self__(&self, vm: &VirtualMachine) -> PyObjectRef { self.task.read().clone().unwrap_or_else(|| vm.ctx.none()) } + + #[pygetset] + fn __qualname__(&self, vm: &VirtualMachine) -> PyResult> { + match self.task.read().as_ref() { + Some(t) => vm.get_attribute_opt(t.clone(), vm.ctx.intern_str("__qualname__")), + None => Ok(None), + } + } } impl Callable for TaskStepMethWrapper { @@ -2610,12 +2678,12 @@ pub(crate) mod _asyncio { } impl Representable for TaskStepMethWrapper { - fn repr_str(zelf: &Py, vm: &VirtualMachine) -> PyResult { - let task = zelf.task.read().clone(); - match task { - Some(t) => Ok(t.repr(vm)?.as_str().to_string()), - None => Ok("".to_string()), - } + fn repr_str(zelf: &Py, _vm: &VirtualMachine) -> PyResult { + Ok(format!( + "<{} object at {:#x}>", + zelf.class().name(), + zelf.get_id() + )) } } @@ -2634,6 +2702,14 @@ pub(crate) mod _asyncio { task: PyRwLock::new(Some(task)), } } + + #[pygetset] + fn __qualname__(&self, vm: &VirtualMachine) -> PyResult> { + match self.task.read().as_ref() { + Some(t) => vm.get_attribute_opt(t.clone(), vm.ctx.intern_str("__qualname__")), + None => Ok(None), + } + } } impl Callable for TaskWakeupMethWrapper { @@ -2648,12 +2724,12 @@ pub(crate) mod _asyncio { } impl Representable for TaskWakeupMethWrapper { - fn repr_str(zelf: &Py, vm: &VirtualMachine) -> PyResult { - let task = zelf.task.read().clone(); - match task { - Some(t) => Ok(t.repr(vm)?.as_str().to_string()), - None => Ok("".to_string()), - } + fn repr_str(zelf: &Py, _vm: &VirtualMachine) -> PyResult { + Ok(format!( + "<{} object at {:#x}>", + zelf.class().name(), + zelf.get_id() + )) } } diff --git a/crates/stdlib/src/_remote_debugging.rs b/crates/stdlib/src/_remote_debugging.rs new file mode 100644 index 00000000000..57aa9876a01 --- /dev/null +++ b/crates/stdlib/src/_remote_debugging.rs @@ -0,0 +1,107 @@ +pub(crate) use _remote_debugging::module_def; + +#[pymodule] +mod _remote_debugging { + use crate::vm::{ + Py, PyObjectRef, PyResult, VirtualMachine, + builtins::PyType, + function::FuncArgs, + types::{Constructor, PyStructSequence}, + }; + + #[pystruct_sequence_data] + struct FrameInfoData { + filename: String, + 
diff --git a/crates/stdlib/src/_remote_debugging.rs b/crates/stdlib/src/_remote_debugging.rs
new file mode 100644
index 00000000000..57aa9876a01
--- /dev/null
+++ b/crates/stdlib/src/_remote_debugging.rs
@@ -0,0 +1,107 @@
+pub(crate) use _remote_debugging::module_def;
+
+#[pymodule]
+mod _remote_debugging {
+    use crate::vm::{
+        Py, PyObjectRef, PyResult, VirtualMachine,
+        builtins::PyType,
+        function::FuncArgs,
+        types::{Constructor, PyStructSequence},
+    };
+
+    #[pystruct_sequence_data]
+    struct FrameInfoData {
+        filename: String,
+        lineno: i64,
+        funcname: String,
+    }
+
+    #[pyattr]
+    #[pystruct_sequence(
+        name = "FrameInfo",
+        module = "_remote_debugging",
+        data = "FrameInfoData"
+    )]
+    struct FrameInfo;
+
+    #[pyclass(with(PyStructSequence))]
+    impl FrameInfo {}
+
+    #[pystruct_sequence_data]
+    struct TaskInfoData {
+        task_id: PyObjectRef,
+        task_name: PyObjectRef,
+        coroutine_stack: PyObjectRef,
+        awaited_by: PyObjectRef,
+    }
+
+    #[pyattr]
+    #[pystruct_sequence(name = "TaskInfo", module = "_remote_debugging", data = "TaskInfoData")]
+    struct TaskInfo;
+
+    #[pyclass(with(PyStructSequence))]
+    impl TaskInfo {}
+
+    #[pystruct_sequence_data]
+    struct CoroInfoData {
+        call_stack: PyObjectRef,
+        task_name: PyObjectRef,
+    }
+
+    #[pyattr]
+    #[pystruct_sequence(name = "CoroInfo", module = "_remote_debugging", data = "CoroInfoData")]
+    struct CoroInfo;
+
+    #[pyclass(with(PyStructSequence))]
+    impl CoroInfo {}
+
+    #[pystruct_sequence_data]
+    struct ThreadInfoData {
+        thread_id: PyObjectRef,
+        frame_info: PyObjectRef,
+    }
+
+    #[pyattr]
+    #[pystruct_sequence(
+        name = "ThreadInfo",
+        module = "_remote_debugging",
+        data = "ThreadInfoData"
+    )]
+    struct ThreadInfo;
+
+    #[pyclass(with(PyStructSequence))]
+    impl ThreadInfo {}
+
+    #[pystruct_sequence_data]
+    struct AwaitedInfoData {
+        thread_id: PyObjectRef,
+        awaited_by: PyObjectRef,
+    }
+
+    #[pyattr]
+    #[pystruct_sequence(
+        name = "AwaitedInfo",
+        module = "_remote_debugging",
+        data = "AwaitedInfoData"
+    )]
+    struct AwaitedInfo;
+
+    #[pyclass(with(PyStructSequence))]
+    impl AwaitedInfo {}
+
+    #[pyattr]
+    #[pyclass(name = "RemoteUnwinder", module = "_remote_debugging")]
+    #[derive(Debug, PyPayload)]
+    struct RemoteUnwinder {}
+
+    impl Constructor for RemoteUnwinder {
+        type Args = FuncArgs;
+
+        fn py_new(_cls: &Py<PyType>, _args: Self::Args, vm: &VirtualMachine) -> PyResult {
+            Err(vm.new_not_implemented_error("_remote_debugging is not available".to_owned()))
+        }
+    }
+
+    #[pyclass(with(Constructor))]
+    impl RemoteUnwinder {}
+}
diff --git a/crates/stdlib/src/lib.rs b/crates/stdlib/src/lib.rs
index e61ae918a78..64cf960f182 100644
--- a/crates/stdlib/src/lib.rs
+++ b/crates/stdlib/src/lib.rs
@@ -12,6 +12,7 @@ extern crate alloc;
 pub(crate) mod macros;
 
 mod _asyncio;
+mod _remote_debugging;
 pub mod array;
 mod binascii;
 mod bisect;
@@ -115,6 +116,7 @@ pub fn stdlib_module_defs(ctx: &Context) -> Vec<&'static builtins::PyModuleDef>
     vec![
         _asyncio::module_def(ctx),
         _opcode::module_def(ctx),
+        _remote_debugging::module_def(ctx),
         array::module_def(ctx),
         binascii::module_def(ctx),
         bisect::module_def(ctx),
diff --git a/crates/vm/src/builtins/asyncgenerator.rs b/crates/vm/src/builtins/asyncgenerator.rs
index 59f94ffb312..9903c9280ff 100644
--- a/crates/vm/src/builtins/asyncgenerator.rs
+++ b/crates/vm/src/builtins/asyncgenerator.rs
@@ -614,7 +614,7 @@ impl PyAnextAwaitable {
                 await_method?.call((), vm)?
             } else {
                 return Err(vm.new_type_error(format!(
-                    "object {} can't be used in 'await' expression",
+                    "'{}' object can't be awaited",
                     wrapped.class().name()
                 )));
             }
@@ -624,7 +624,7 @@ impl PyAnextAwaitable {
                 await_method?.call((), vm)?
             } else {
                 return Err(vm.new_type_error(format!(
-                    "object {} can't be used in 'await' expression",
+                    "'{}' object can't be awaited",
                     wrapped.class().name()
                 )));
             }
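The _remote_debugging module above is a stub: the struct-sequence result types are exposed so that asyncio.tools can import them, but constructing a RemoteUnwinder always fails. A quick sketch of what that looks like from Python, based solely on the definitions above:

import _remote_debugging

# The result types exist even though remote unwinding is stubbed out.
print(_remote_debugging.FrameInfo)
print(_remote_debugging.TaskInfo)
print(_remote_debugging.AwaitedInfo)

try:
    _remote_debugging.RemoteUnwinder(1234)  # any pid; the constructor is the stub above
except NotImplementedError as exc:
    print(exc)  # "_remote_debugging is not available"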
diff --git a/crates/vm/src/builtins/frame.rs b/crates/vm/src/builtins/frame.rs
index 28c4e751476..a6cd7514c4e 100644
--- a/crates/vm/src/builtins/frame.rs
+++ b/crates/vm/src/builtins/frame.rs
@@ -119,6 +119,11 @@ impl Frame {
 
 #[pyclass]
 impl Py<Frame> {
+    #[pygetset]
+    fn f_generator(&self) -> Option<PyObjectRef> {
+        self.generator.lock().clone()
+    }
+
     #[pygetset]
     pub fn f_back(&self, vm: &VirtualMachine) -> Option> {
         // TODO: actually store f_back inside Frame struct
diff --git a/crates/vm/src/builtins/function.rs b/crates/vm/src/builtins/function.rs
index 7eb74dec41e..14c393f0b89 100644
--- a/crates/vm/src/builtins/function.rs
+++ b/crates/vm/src/builtins/function.rs
@@ -529,13 +529,22 @@ impl Py {
         let is_coro = code.flags.contains(bytecode::CodeFlags::COROUTINE);
         match (is_gen, is_coro) {
             (true, false) => {
-                Ok(PyGenerator::new(frame, self.__name__(), self.__qualname__()).into_pyobject(vm))
+                let obj = PyGenerator::new(frame.clone(), self.__name__(), self.__qualname__())
+                    .into_pyobject(vm);
+                frame.set_generator(obj.clone());
+                Ok(obj)
             }
             (false, true) => {
-                Ok(PyCoroutine::new(frame, self.__name__(), self.__qualname__()).into_pyobject(vm))
+                let obj = PyCoroutine::new(frame.clone(), self.__name__(), self.__qualname__())
+                    .into_pyobject(vm);
+                frame.set_generator(obj.clone());
+                Ok(obj)
             }
             (true, true) => {
-                Ok(PyAsyncGen::new(frame, self.__name__(), self.__qualname__()).into_pyobject(vm))
+                let obj = PyAsyncGen::new(frame.clone(), self.__name__(), self.__qualname__())
+                    .into_pyobject(vm);
+                frame.set_generator(obj.clone());
+                Ok(obj)
             }
             (false, false) => vm.run_frame(frame),
         }
diff --git a/crates/vm/src/frame.rs b/crates/vm/src/frame.rs
index 844594f7859..711e184b9f3 100644
--- a/crates/vm/src/frame.rs
+++ b/crates/vm/src/frame.rs
@@ -84,6 +84,8 @@ pub struct Frame {
     // member
     pub trace_lines: PyMutex<bool>,
     pub temporary_refs: PyMutex<Vec<PyObjectRef>>,
+    /// Back-reference to owning generator/coroutine/async generator
+    pub generator: PyMutex<Option<PyObjectRef>>,
 }
 
 impl PyPayload for Frame {
@@ -111,6 +113,7 @@ unsafe impl Traverse for Frame {
         self.trace.traverse(tracer_fn);
         self.state.traverse(tracer_fn);
         self.temporary_refs.traverse(tracer_fn);
+        self.generator.traverse(tracer_fn);
     }
 }
 
@@ -170,9 +173,14 @@ impl Frame {
             trace: PyMutex::new(vm.ctx.none()),
             trace_lines: PyMutex::new(true),
             temporary_refs: PyMutex::new(vec![]),
+            generator: PyMutex::new(None),
         }
     }
 
+    pub fn set_generator(&self, generator: PyObjectRef) {
+        *self.generator.lock() = Some(generator);
+    }
+
     pub fn current_location(&self) -> SourceLocation {
         self.code.locations[self.lasti() as usize - 1].0
     }
@@ -274,7 +282,21 @@ impl Py {
     }
 
     pub fn yield_from_target(&self) -> Option<PyObjectRef> {
-        self.with_exec(|exec| exec.yield_from_target().map(PyObject::to_owned))
+        // Use try_lock to avoid deadlock when the frame is currently executing.
+        // A running coroutine has no yield-from target.
+        let mut state = self.state.try_lock()?;
+        let exec = ExecutingFrame {
+            code: &self.code,
+            fastlocals: &self.fastlocals,
+            cells_frees: &self.cells_frees,
+            locals: &self.locals,
+            globals: &self.globals,
+            builtins: &self.builtins,
+            lasti: &self.lasti,
+            object: self,
+            state: &mut state,
+        };
+        exec.yield_from_target().map(PyObject::to_owned)
     }
 
     pub fn is_internal_frame(&self) -> bool {
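The frame changes above give each generator, coroutine, and async-generator frame a back-reference to its owning object (exposed as f_generator, an attribute added by this diff rather than a CPython one), and yield_from_target now reports nothing for a frame that is currently executing. A small sketch of the observable await chain under those assumed semantics:

import types


@types.coroutine
def suspend():
    yield  # a bare suspension point


async def inner():
    await suspend()


async def outer():
    await inner()


c = outer()
c.send(None)  # drive the coroutine until suspend() yields

# Back-reference added by this diff: the frame knows which coroutine owns it.
print(c.cr_frame.f_generator is c)

# cr_await follows the await chain computed by yield_from_target().
print(c.cr_await)           # the inner() coroutine
print(c.cr_await.cr_await)  # the suspend() generator

c.close()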