From 5cefaa6410d854935aea8b81c89b4839751ccd7a Mon Sep 17 00:00:00 2001 From: Terry Luan Date: Tue, 6 Jan 2026 00:13:13 -0500 Subject: [PATCH 1/2] Updated asyncio to v3.13.11 --- Lib/asyncio/__main__.py | 126 +++++++++++++++---- Lib/asyncio/base_events.py | 221 +++++++++++++++++++++------------ Lib/asyncio/base_subprocess.py | 56 +++++++-- Lib/asyncio/events.py | 20 ++- Lib/asyncio/format_helpers.py | 22 ++-- Lib/asyncio/futures.py | 25 ++-- Lib/asyncio/locks.py | 153 ++++++++++++++--------- Lib/asyncio/proactor_events.py | 15 +-- Lib/asyncio/queues.py | 69 +++++++++- Lib/asyncio/runners.py | 1 + Lib/asyncio/selector_events.py | 117 ++++++++--------- Lib/asyncio/sslproto.py | 11 +- Lib/asyncio/staggered.py | 93 +++++++++----- Lib/asyncio/streams.py | 81 +++++++----- Lib/asyncio/taskgroups.py | 98 ++++++++++----- Lib/asyncio/tasks.py | 179 ++++++++++++++++---------- Lib/asyncio/timeouts.py | 20 ++- Lib/asyncio/transports.py | 2 + Lib/asyncio/unix_events.py | 38 +++++- Lib/asyncio/windows_events.py | 40 +++--- 20 files changed, 936 insertions(+), 451 deletions(-) diff --git a/Lib/asyncio/__main__.py b/Lib/asyncio/__main__.py index 18bb87a5bc4..8e9af83cc8a 100644 --- a/Lib/asyncio/__main__.py +++ b/Lib/asyncio/__main__.py @@ -1,41 +1,51 @@ import ast import asyncio -import code import concurrent.futures +import contextvars import inspect +import os +import site import sys import threading import types import warnings +from _colorize import can_colorize, ANSIColors # type: ignore[import-not-found] +from _pyrepl.console import InteractiveColoredConsole + from . import futures -class AsyncIOInteractiveConsole(code.InteractiveConsole): +class AsyncIOInteractiveConsole(InteractiveColoredConsole): def __init__(self, locals, loop): - super().__init__(locals) + super().__init__(locals, filename="") self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT self.loop = loop + self.context = contextvars.copy_context() def runcode(self, code): + global return_code future = concurrent.futures.Future() def callback(): + global return_code global repl_future - global repl_future_interrupted + global keyboard_interrupted repl_future = None - repl_future_interrupted = False + keyboard_interrupted = False func = types.FunctionType(code, self.locals) try: coro = func() - except SystemExit: - raise + except SystemExit as se: + return_code = se.code + self.loop.stop() + return except KeyboardInterrupt as ex: - repl_future_interrupted = True + keyboard_interrupted = True future.set_exception(ex) return except BaseException as ex: @@ -47,39 +57,71 @@ def callback(): return try: - repl_future = self.loop.create_task(coro) + repl_future = self.loop.create_task(coro, context=self.context) futures._chain_future(repl_future, future) except BaseException as exc: future.set_exception(exc) - loop.call_soon_threadsafe(callback) + self.loop.call_soon_threadsafe(callback, context=self.context) try: return future.result() - except SystemExit: - raise + except SystemExit as se: + return_code = se.code + self.loop.stop() + return except BaseException: - if repl_future_interrupted: - self.write("\nKeyboardInterrupt\n") + if keyboard_interrupted: + if not CAN_USE_PYREPL: + self.write("\nKeyboardInterrupt\n") else: self.showtraceback() - + return self.STATEMENT_FAILED class REPLThread(threading.Thread): def run(self): + global return_code + try: banner = ( f'asyncio REPL {sys.version} on {sys.platform}\n' f'Use "await" directly instead of "asyncio.run()".\n' f'Type "help", "copyright", "credits" or "license" ' f'for more 
information.\n' - f'{getattr(sys, "ps1", ">>> ")}import asyncio' ) - console.interact( - banner=banner, - exitmsg='exiting asyncio REPL...') + console.write(banner) + + if startup_path := os.getenv("PYTHONSTARTUP"): + sys.audit("cpython.run_startup", startup_path) + + import tokenize + with tokenize.open(startup_path) as f: + startup_code = compile(f.read(), startup_path, "exec") + exec(startup_code, console.locals) + + ps1 = getattr(sys, "ps1", ">>> ") + if can_colorize() and CAN_USE_PYREPL: + ps1 = f"{ANSIColors.BOLD_MAGENTA}{ps1}{ANSIColors.RESET}" + console.write(f"{ps1}import asyncio\n") + + if CAN_USE_PYREPL: + from _pyrepl.simple_interact import ( + run_multiline_interactive_console, + ) + try: + run_multiline_interactive_console(console) + except SystemExit: + # expected via the `exit` and `quit` commands + pass + except BaseException: + # unexpected issue + console.showtraceback() + console.write("Internal error, ") + return_code = 1 + else: + console.interact(banner="", exitmsg="") finally: warnings.filterwarnings( 'ignore', @@ -88,8 +130,25 @@ def run(self): loop.call_soon_threadsafe(loop.stop) + def interrupt(self) -> None: + if not CAN_USE_PYREPL: + return + + from _pyrepl.simple_interact import _get_reader + r = _get_reader() + if r.threading_hook is not None: + r.threading_hook.add("") # type: ignore + if __name__ == '__main__': + sys.audit("cpython.run_stdin") + + if os.getenv('PYTHON_BASIC_REPL'): + CAN_USE_PYREPL = False + else: + from _pyrepl.main import CAN_USE_PYREPL + + return_code = 0 loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) @@ -102,14 +161,31 @@ def run(self): console = AsyncIOInteractiveConsole(repl_locals, loop) repl_future = None - repl_future_interrupted = False + keyboard_interrupted = False try: import readline # NoQA except ImportError: - pass + readline = None + + interactive_hook = getattr(sys, "__interactivehook__", None) - repl_thread = REPLThread() + if interactive_hook is not None: + sys.audit("cpython.run_interactivehook", interactive_hook) + interactive_hook() + + if interactive_hook is site.register_readline: + # Fix the completer function to use the interactive console locals + try: + import rlcompleter + except: + pass + else: + if readline is not None: + completer = rlcompleter.Completer(console.locals) + readline.set_completer(completer.complete) + + repl_thread = REPLThread(name="Interactive thread") repl_thread.daemon = True repl_thread.start() @@ -117,9 +193,13 @@ def run(self): try: loop.run_forever() except KeyboardInterrupt: + keyboard_interrupted = True if repl_future and not repl_future.done(): repl_future.cancel() - repl_future_interrupted = True + repl_thread.interrupt() continue else: break + + console.write('exiting asyncio REPL...\n') + sys.exit(return_code) diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py index 29eff0499cb..356fc3d7913 100644 --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -17,7 +17,6 @@ import collections.abc import concurrent.futures import errno -import functools import heapq import itertools import os @@ -279,7 +278,9 @@ def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog, ssl_handshake_timeout, ssl_shutdown_timeout=None): self._loop = loop self._sockets = sockets - self._active_count = 0 + # Weak references so we don't break Transport's ability to + # detect abandoned transports + self._clients = weakref.WeakSet() self._waiters = [] self._protocol_factory = protocol_factory self._backlog = backlog @@ -292,14 +293,13 @@ def 
__init__(self, loop, sockets, protocol_factory, ssl_context, backlog, def __repr__(self): return f'<{self.__class__.__name__} sockets={self.sockets!r}>' - def _attach(self): + def _attach(self, transport): assert self._sockets is not None - self._active_count += 1 + self._clients.add(transport) - def _detach(self): - assert self._active_count > 0 - self._active_count -= 1 - if self._active_count == 0 and self._sockets is None: + def _detach(self, transport): + self._clients.discard(transport) + if len(self._clients) == 0 and self._sockets is None: self._wakeup() def _wakeup(self): @@ -348,9 +348,17 @@ def close(self): self._serving_forever_fut.cancel() self._serving_forever_fut = None - if self._active_count == 0: + if len(self._clients) == 0: self._wakeup() + def close_clients(self): + for transport in self._clients.copy(): + transport.close() + + def abort_clients(self): + for transport in self._clients.copy(): + transport.abort() + async def start_serving(self): self._start_serving() # Skip one loop iteration so that all 'loop.add_reader' @@ -422,6 +430,8 @@ def __init__(self): self._clock_resolution = time.get_clock_info('monotonic').resolution self._exception_handler = None self.set_debug(coroutines._is_debug_mode()) + # The preserved state of async generator hooks. + self._old_agen_hooks = None # In debug mode, if the execution of a callback or a step of a task # exceed this duration in seconds, the slow callback/task is logged. self.slow_callback_duration = 0.1 @@ -448,26 +458,30 @@ def create_future(self): """Create a Future object attached to the loop.""" return futures.Future(loop=self) - def create_task(self, coro, *, name=None, context=None): + def create_task(self, coro, *, name=None, context=None, **kwargs): """Schedule a coroutine object. Return a task object. """ self._check_closed() - if self._task_factory is None: - task = tasks.Task(coro, loop=self, name=name, context=context) + if self._task_factory is not None: + if context is not None: + kwargs["context"] = context + + task = self._task_factory(self, coro, **kwargs) + task.set_name(name) + + else: + task = tasks.Task(coro, loop=self, name=name, context=context, **kwargs) if task._source_traceback: del task._source_traceback[-1] - else: - if context is None: - # Use legacy API if context is not needed - task = self._task_factory(self, coro) - else: - task = self._task_factory(self, coro, context=context) - - tasks._set_task_name(task, name) - return task + try: + return task + finally: + # gh-128552: prevent a refcycle of + # task.exception().__traceback__->BaseEventLoop.create_task->task + del task def set_task_factory(self, factory): """Set a task factory that will be used by loop.create_task(). @@ -475,9 +489,10 @@ def set_task_factory(self, factory): If factory is None the default task factory will be set. If factory is a callable, it should have a signature matching - '(loop, coro)', where 'loop' will be a reference to the active - event loop, 'coro' will be a coroutine object. The callable - must return a Future. + '(loop, coro, **kwargs)', where 'loop' will be a reference to the active + event loop, 'coro' will be a coroutine object, and **kwargs will be + arbitrary keyword arguments that should be passed on to Task. + The callable must return a Task. 
""" if factory is not None and not callable(factory): raise TypeError('task factory must be a callable or None') @@ -624,29 +639,52 @@ def _check_running(self): raise RuntimeError( 'Cannot run the event loop while another loop is running') - def run_forever(self): - """Run until stop() is called.""" + def _run_forever_setup(self): + """Prepare the run loop to process events. + + This method exists so that custom event loop subclasses (e.g., event loops + that integrate a GUI event loop with Python's event loop) have access to all the + loop setup logic. + """ self._check_closed() self._check_running() self._set_coroutine_origin_tracking(self._debug) - old_agen_hooks = sys.get_asyncgen_hooks() - try: - self._thread_id = threading.get_ident() - sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook, - finalizer=self._asyncgen_finalizer_hook) + self._old_agen_hooks = sys.get_asyncgen_hooks() + self._thread_id = threading.get_ident() + sys.set_asyncgen_hooks( + firstiter=self._asyncgen_firstiter_hook, + finalizer=self._asyncgen_finalizer_hook + ) + + events._set_running_loop(self) + + def _run_forever_cleanup(self): + """Clean up after an event loop finishes the looping over events. + + This method exists so that custom event loop subclasses (e.g., event loops + that integrate a GUI event loop with Python's event loop) have access to all the + loop cleanup logic. + """ + self._stopping = False + self._thread_id = None + events._set_running_loop(None) + self._set_coroutine_origin_tracking(False) + # Restore any pre-existing async generator hooks. + if self._old_agen_hooks is not None: + sys.set_asyncgen_hooks(*self._old_agen_hooks) + self._old_agen_hooks = None - events._set_running_loop(self) + def run_forever(self): + """Run until stop() is called.""" + self._run_forever_setup() + try: while True: self._run_once() if self._stopping: break finally: - self._stopping = False - self._thread_id = None - events._set_running_loop(None) - self._set_coroutine_origin_tracking(False) - sys.set_asyncgen_hooks(*old_agen_hooks) + self._run_forever_cleanup() def run_until_complete(self, future): """Run until the Future is done. 
@@ -981,39 +1019,43 @@ async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
         family, type_, proto, _, address = addr_info
         sock = None
         try:
-            sock = socket.socket(family=family, type=type_, proto=proto)
-            sock.setblocking(False)
-            if local_addr_infos is not None:
-                for lfamily, _, _, _, laddr in local_addr_infos:
-                    # skip local addresses of different family
-                    if lfamily != family:
-                        continue
-                    try:
-                        sock.bind(laddr)
-                        break
-                    except OSError as exc:
-                        msg = (
-                            f'error while attempting to bind on '
-                            f'address {laddr!r}: '
-                            f'{exc.strerror.lower()}'
-                        )
-                        exc = OSError(exc.errno, msg)
-                        my_exceptions.append(exc)
-                else:  # all bind attempts failed
-                    if my_exceptions:
-                        raise my_exceptions.pop()
-                    else:
-                        raise OSError(f"no matching local address with {family=} found")
-            await self.sock_connect(sock, address)
-            return sock
-        except OSError as exc:
-            my_exceptions.append(exc)
-            if sock is not None:
-                sock.close()
-            raise
+            try:
+                sock = socket.socket(family=family, type=type_, proto=proto)
+                sock.setblocking(False)
+                if local_addr_infos is not None:
+                    for lfamily, _, _, _, laddr in local_addr_infos:
+                        # skip local addresses of different family
+                        if lfamily != family:
+                            continue
+                        try:
+                            sock.bind(laddr)
+                            break
+                        except OSError as exc:
+                            msg = (
+                                f'error while attempting to bind on '
+                                f'address {laddr!r}: {str(exc).lower()}'
+                            )
+                            exc = OSError(exc.errno, msg)
+                            my_exceptions.append(exc)
+                    else:  # all bind attempts failed
+                        if my_exceptions:
+                            raise my_exceptions.pop()
+                        else:
+                            raise OSError(f"no matching local address with {family=} found")
+                await self.sock_connect(sock, address)
+                return sock
+            except OSError as exc:
+                my_exceptions.append(exc)
+                raise
         except:
             if sock is not None:
-                sock.close()
+                try:
+                    sock.close()
+                except OSError:
+                    # An error when closing a newly created socket is
+                    # not important, but it can overwrite more important
+                    # non-OSError error. So ignore it.
+                    pass
             raise
         finally:
             exceptions = my_exceptions = None
@@ -1107,11 +1149,18 @@ async def create_connection(
             except OSError:
                 continue
         else:  # using happy eyeballs
-            sock, _, _ = await staggered.staggered_race(
-                (functools.partial(self._connect_sock,
-                                   exceptions, addrinfo, laddr_infos)
-                 for addrinfo in infos),
-                happy_eyeballs_delay, loop=self)
+            sock = (await staggered.staggered_race(
+                (
+                    # can't use functools.partial as it keeps a reference
+                    # to exceptions
+                    lambda addrinfo=addrinfo: self._connect_sock(
+                        exceptions, addrinfo, laddr_infos
+                    )
+                    for addrinfo in infos
+                ),
+                happy_eyeballs_delay,
+                loop=self,
+            ))[0]  # can't use sock, _, _ as it keeps a reference to exceptions
 
         if sock is None:
             exceptions = [exc for sub in exceptions for exc in sub]
@@ -1120,7 +1169,7 @@
             raise ExceptionGroup("create_connection failed", exceptions)
         if len(exceptions) == 1:
             raise exceptions[0]
-        else:
+        elif exceptions:
             # If they all have the same str(), raise one.
             model = str(exceptions[0])
             if all(str(exc) == model for exc in exceptions):
                 raise exceptions[0]
             # Raise a combined exception so the user can see all
             # the various error messages.
raise OSError('Multiple exceptions: {}'.format( ', '.join(str(exc) for exc in exceptions))) + else: + # No exceptions were collected, raise a timeout error + raise TimeoutError('create_connection failed') finally: exceptions = None @@ -1254,8 +1306,8 @@ async def _sendfile_fallback(self, transp, file, offset, count): read = await self.run_in_executor(None, file.readinto, view) if not read: return total_sent # EOF - await proto.drain() transp.write(view[:read]) + await proto.drain() total_sent += read finally: if total_sent > 0 and hasattr(file, 'seek'): @@ -1474,6 +1526,7 @@ async def create_server( ssl=None, reuse_address=None, reuse_port=None, + keep_alive=None, ssl_handshake_timeout=None, ssl_shutdown_timeout=None, start_serving=True): @@ -1545,8 +1598,13 @@ async def create_server( if reuse_address: sock.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, True) - if reuse_port: + # Since Linux 6.12.9, SO_REUSEPORT is not allowed + # on other address families than AF_INET/AF_INET6. + if reuse_port and af in (socket.AF_INET, socket.AF_INET6): _set_reuseport(sock) + if keep_alive: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) # Disable IPv4/IPv6 dual stack support (enabled by # default on Linux) which makes a single socket # listen on both address families. @@ -1561,7 +1619,7 @@ async def create_server( except OSError as err: msg = ('error while attempting ' 'to bind on address %r: %s' - % (sa, err.strerror.lower())) + % (sa, str(err).lower())) if err.errno == errno.EADDRNOTAVAIL: # Assume the family is not enabled (bpo-30945) sockets.pop() @@ -1833,6 +1891,8 @@ def call_exception_handler(self, context): - 'protocol' (optional): Protocol instance; - 'transport' (optional): Transport instance; - 'socket' (optional): Socket instance; + - 'source_traceback' (optional): Traceback of the source; + - 'handle_traceback' (optional): Traceback of the handle; - 'asyncgen' (optional): Asynchronous generator that caused the exception. @@ -1943,8 +2003,11 @@ def _run_once(self): timeout = 0 elif self._scheduled: # Compute the desired timeout. - when = self._scheduled[0]._when - timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT) + timeout = self._scheduled[0]._when - self.time() + if timeout > MAXIMUM_SELECT_TIMEOUT: + timeout = MAXIMUM_SELECT_TIMEOUT + elif timeout < 0: + timeout = 0 event_list = self._selector.select(timeout) self._process_events(event_list) diff --git a/Lib/asyncio/base_subprocess.py b/Lib/asyncio/base_subprocess.py index 4c9b0dd5653..321a4e5d5d1 100644 --- a/Lib/asyncio/base_subprocess.py +++ b/Lib/asyncio/base_subprocess.py @@ -1,6 +1,9 @@ import collections import subprocess import warnings +import os +import signal +import sys from . import protocols from . import transports @@ -23,6 +26,7 @@ def __init__(self, loop, protocol, args, shell, self._pending_calls = collections.deque() self._pipes = {} self._finished = False + self._pipes_connected = False if stdin == subprocess.PIPE: self._pipes[0] = None @@ -101,7 +105,12 @@ def close(self): for proto in self._pipes.values(): if proto is None: continue - proto.pipe.close() + # See gh-114177 + # skip closing the pipe if loop is already closed + # this can happen e.g. when loop is closed immediately after + # process is killed + if self._loop and not self._loop.is_closed(): + proto.pipe.close() if (self._proc is not None and # has the child process finished? 
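
Note on the close() guard above: it covers the gh-114177 case where a subprocess
transport outlives its event loop. A rough reproducer of that scenario, assuming
a POSIX 'sleep' executable is available (the guard keeps the late pipe cleanup
from touching the already-closed loop):

    import asyncio

    async def main():
        proc = await asyncio.create_subprocess_exec("sleep", "60")
        proc.kill()
        # Returning without awaiting proc.wait() lets asyncio.run() close
        # the loop while the subprocess transport still holds open pipes;
        # the transport's close() then runs against a closed loop.

    asyncio.run(main())
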
@@ -115,7 +124,8 @@ def close(self): try: self._proc.kill() - except ProcessLookupError: + except (ProcessLookupError, PermissionError): + # the process may have already exited or may be running setuid pass # Don't clear the _proc reference yet: _post_init() may still run @@ -141,17 +151,31 @@ def _check_proc(self): if self._proc is None: raise ProcessLookupError() - def send_signal(self, signal): - self._check_proc() - self._proc.send_signal(signal) + if sys.platform == 'win32': + def send_signal(self, signal): + self._check_proc() + self._proc.send_signal(signal) + + def terminate(self): + self._check_proc() + self._proc.terminate() + + def kill(self): + self._check_proc() + self._proc.kill() + else: + def send_signal(self, signal): + self._check_proc() + try: + os.kill(self._proc.pid, signal) + except ProcessLookupError: + pass - def terminate(self): - self._check_proc() - self._proc.terminate() + def terminate(self): + self.send_signal(signal.SIGTERM) - def kill(self): - self._check_proc() - self._proc.kill() + def kill(self): + self.send_signal(signal.SIGKILL) async def _connect_pipes(self, waiter): try: @@ -190,6 +214,7 @@ async def _connect_pipes(self, waiter): else: if waiter is not None and not waiter.cancelled(): waiter.set_result(None) + self._pipes_connected = True def _call(self, cb, *data): if self._pending_calls is not None: @@ -233,6 +258,15 @@ def _try_finish(self): assert not self._finished if self._returncode is None: return + if not self._pipes_connected: + # self._pipes_connected can be False if not all pipes were connected + # because either the process failed to start or the self._connect_pipes task + # got cancelled. In this broken state we consider all pipes disconnected and + # to avoid hanging forever in self._wait as otherwise _exit_waiters + # would never be woken up, we wake them up here. + for waiter in self._exit_waiters: + if not waiter.cancelled(): + waiter.set_result(self._returncode) if all(p is not None and p.disconnected for p in self._pipes.values()): self._finished = True diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py index 016852880ca..3b740b9b905 100644 --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -54,7 +54,8 @@ def _repr_info(self): info.append('cancelled') if self._callback is not None: info.append(format_helpers._format_callback_source( - self._callback, self._args)) + self._callback, self._args, + debug=self._loop.get_debug())) if self._source_traceback: frame = self._source_traceback[-1] info.append(f'created at {frame[0]}:{frame[1]}') @@ -90,7 +91,8 @@ def _run(self): raise except BaseException as exc: cb = format_helpers._format_callback_source( - self._callback, self._args) + self._callback, self._args, + debug=self._loop.get_debug()) msg = f'Exception in callback {cb}' context = { 'message': msg, @@ -173,6 +175,14 @@ def close(self): """Stop serving. This leaves existing connections open.""" raise NotImplementedError + def close_clients(self): + """Close all active connections.""" + raise NotImplementedError + + def abort_clients(self): + """Close all active connections immediately.""" + raise NotImplementedError + def get_loop(self): """Get the event loop the Server object is attached to.""" raise NotImplementedError @@ -282,7 +292,7 @@ def create_future(self): # Method scheduling a coroutine object: create a task. - def create_task(self, coro, *, name=None, context=None): + def create_task(self, coro, **kwargs): raise NotImplementedError # Methods for interacting with threads. 
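
Since loop.create_task() now forwards arbitrary keyword arguments, a task
factory set via set_task_factory() is expected to accept '(loop, coro,
**kwargs)', as documented in the base_events.py hunks earlier in this patch.
A minimal sketch of a conforming factory (the names and the logging are
purely illustrative):

    import asyncio

    def logging_factory(loop, coro, **kwargs):
        # kwargs may carry 'context' and any future Task keyword arguments;
        # forward them unchanged. The loop applies the task name afterwards
        # via task.set_name(), so 'name' is not part of kwargs here.
        task = asyncio.Task(coro, loop=loop, **kwargs)
        print(f"task created: {task!r}")
        return task

    async def main():
        asyncio.get_running_loop().set_task_factory(logging_factory)
        await asyncio.create_task(asyncio.sleep(0), name="demo")

    asyncio.run(main())
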
@@ -320,6 +330,7 @@ async def create_server( *, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None, reuse_port=None, + keep_alive=None, ssl_handshake_timeout=None, ssl_shutdown_timeout=None, start_serving=True): @@ -358,6 +369,9 @@ async def create_server( they all set this flag when being created. This option is not supported on Windows. + keep_alive set to True keeps connections active by enabling the + periodic transmission of messages. + ssl_handshake_timeout is the time in seconds that an SSL server will wait for completion of the SSL handshake before aborting the connection. Default is 60s. diff --git a/Lib/asyncio/format_helpers.py b/Lib/asyncio/format_helpers.py index 27d11fd4fa9..93737b7708a 100644 --- a/Lib/asyncio/format_helpers.py +++ b/Lib/asyncio/format_helpers.py @@ -19,19 +19,26 @@ def _get_function_source(func): return None -def _format_callback_source(func, args): - func_repr = _format_callback(func, args, None) +def _format_callback_source(func, args, *, debug=False): + func_repr = _format_callback(func, args, None, debug=debug) source = _get_function_source(func) if source: func_repr += f' at {source[0]}:{source[1]}' return func_repr -def _format_args_and_kwargs(args, kwargs): +def _format_args_and_kwargs(args, kwargs, *, debug=False): """Format function arguments and keyword arguments. Special case for a single parameter: ('hello',) is formatted as ('hello'). + + Note that this function only returns argument details when + debug=True is specified, as arguments may contain sensitive + information. """ + if not debug: + return '()' + # use reprlib to limit the length of the output items = [] if args: @@ -41,10 +48,11 @@ def _format_args_and_kwargs(args, kwargs): return '({})'.format(', '.join(items)) -def _format_callback(func, args, kwargs, suffix=''): +def _format_callback(func, args, kwargs, *, debug=False, suffix=''): if isinstance(func, functools.partial): - suffix = _format_args_and_kwargs(args, kwargs) + suffix - return _format_callback(func.func, func.args, func.keywords, suffix) + suffix = _format_args_and_kwargs(args, kwargs, debug=debug) + suffix + return _format_callback(func.func, func.args, func.keywords, + debug=debug, suffix=suffix) if hasattr(func, '__qualname__') and func.__qualname__: func_repr = func.__qualname__ @@ -53,7 +61,7 @@ def _format_callback(func, args, kwargs, suffix=''): else: func_repr = repr(func) - func_repr += _format_args_and_kwargs(args, kwargs) + func_repr += _format_args_and_kwargs(args, kwargs, debug=debug) if suffix: func_repr += suffix return func_repr diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py index 97fc4e3fcb6..51932639097 100644 --- a/Lib/asyncio/futures.py +++ b/Lib/asyncio/futures.py @@ -138,9 +138,6 @@ def _make_cancelled_error(self): exc = exceptions.CancelledError() else: exc = exceptions.CancelledError(self._cancel_message) - exc.__context__ = self._cancelled_exc - # Remove the reference since we don't need this anymore. - self._cancelled_exc = None return exc def cancel(self, msg=None): @@ -194,8 +191,7 @@ def result(self): the future is done and has an exception set, this exception is raised. """ if self._state == _CANCELLED: - exc = self._make_cancelled_error() - raise exc + raise self._make_cancelled_error() if self._state != _FINISHED: raise exceptions.InvalidStateError('Result is not ready.') self.__log_traceback = False @@ -212,8 +208,7 @@ def exception(self): InvalidStateError. 
""" if self._state == _CANCELLED: - exc = self._make_cancelled_error() - raise exc + raise self._make_cancelled_error() if self._state != _FINISHED: raise exceptions.InvalidStateError('Exception is not set.') self.__log_traceback = False @@ -272,9 +267,13 @@ def set_exception(self, exception): raise exceptions.InvalidStateError(f'{self._state}: {self!r}') if isinstance(exception, type): exception = exception() - if type(exception) is StopIteration: - raise TypeError("StopIteration interacts badly with generators " - "and cannot be raised into a Future") + if isinstance(exception, StopIteration): + new_exc = RuntimeError("StopIteration interacts badly with " + "generators and cannot be raised into a " + "Future") + new_exc.__cause__ = exception + new_exc.__context__ = exception + exception = new_exc self._exception = exception self._exception_tb = exception.__traceback__ self._state = _FINISHED @@ -318,11 +317,9 @@ def _set_result_unless_cancelled(fut, result): def _convert_future_exc(exc): exc_class = type(exc) if exc_class is concurrent.futures.CancelledError: - return exceptions.CancelledError(*exc.args) - elif exc_class is concurrent.futures.TimeoutError: - return exceptions.TimeoutError(*exc.args) + return exceptions.CancelledError(*exc.args).with_traceback(exc.__traceback__) elif exc_class is concurrent.futures.InvalidStateError: - return exceptions.InvalidStateError(*exc.args) + return exceptions.InvalidStateError(*exc.args).with_traceback(exc.__traceback__) else: return exc diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py index ce5d8d5bfb2..3df4c693a91 100644 --- a/Lib/asyncio/locks.py +++ b/Lib/asyncio/locks.py @@ -24,25 +24,23 @@ class Lock(_ContextManagerMixin, mixins._LoopBoundMixin): """Primitive lock objects. A primitive lock is a synchronization primitive that is not owned - by a particular coroutine when locked. A primitive lock is in one + by a particular task when locked. A primitive lock is in one of two states, 'locked' or 'unlocked'. It is created in the unlocked state. It has two basic methods, acquire() and release(). When the state is unlocked, acquire() changes the state to locked and returns immediately. When the state is locked, acquire() blocks until a call to release() in - another coroutine changes it to unlocked, then the acquire() call + another task changes it to unlocked, then the acquire() call resets it to locked and returns. The release() method should only be called in the locked state; it changes the state to unlocked and returns immediately. If an attempt is made to release an unlocked lock, a RuntimeError will be raised. - When more than one coroutine is blocked in acquire() waiting for - the state to turn to unlocked, only one coroutine proceeds when a - release() call resets the state to unlocked; first coroutine which - is blocked in acquire() is being processed. - - acquire() is a coroutine and should be called with 'await'. + When more than one task is blocked in acquire() waiting for + the state to turn to unlocked, only one task proceeds when a + release() call resets the state to unlocked; successive release() + calls will unblock tasks in FIFO order. Locks also support the asynchronous context management protocol. 'async with lock' statement should be used. @@ -95,6 +93,8 @@ async def acquire(self): This method blocks until the lock is unlocked, then sets it to locked and returns True. """ + # Implement fair scheduling, where thread always waits + # its turn. Jumping the queue if all are cancelled is an optimization. 
if (not self._locked and (self._waiters is None or all(w.cancelled() for w in self._waiters))): self._locked = True @@ -105,19 +105,22 @@ async def acquire(self): fut = self._get_loop().create_future() self._waiters.append(fut) - # Finally block should be called before the CancelledError - # handling as we don't want CancelledError to call - # _wake_up_first() and attempt to wake up itself. try: try: await fut finally: self._waiters.remove(fut) except exceptions.CancelledError: + # Currently the only exception designed be able to occur here. + + # Ensure the lock invariant: If lock is not claimed (or about + # to be claimed by us) and there is a Task in waiters, + # ensure that the Task at the head will run. if not self._locked: self._wake_up_first() raise + # assert self._locked is False self._locked = True return True @@ -125,7 +128,7 @@ def release(self): """Release a lock. When the lock is locked, reset it to unlocked, and return. - If any other coroutines are blocked waiting for the lock to become + If any other tasks are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. When invoked on an unlocked lock, a RuntimeError is raised. @@ -139,7 +142,7 @@ def release(self): raise RuntimeError('Lock is not acquired.') def _wake_up_first(self): - """Wake up the first waiter if it isn't done.""" + """Ensure that the first waiter will wake up.""" if not self._waiters: return try: @@ -147,9 +150,7 @@ def _wake_up_first(self): except StopIteration: return - # .done() necessarily means that a waiter will wake up later on and - # either take the lock, or, if it was cancelled and lock wasn't - # taken already, will hit this again and wake up a new waiter. + # .done() means that the waiter is already set to wake up. if not fut.done(): fut.set_result(True) @@ -179,8 +180,8 @@ def is_set(self): return self._value def set(self): - """Set the internal flag to true. All coroutines waiting for it to - become true are awakened. Coroutine that call wait() once the flag is + """Set the internal flag to true. All tasks waiting for it to + become true are awakened. Tasks that call wait() once the flag is true will not block at all. """ if not self._value: @@ -191,7 +192,7 @@ def set(self): fut.set_result(True) def clear(self): - """Reset the internal flag to false. Subsequently, coroutines calling + """Reset the internal flag to false. Subsequently, tasks calling wait() will block until set() is called to set the internal flag to true again.""" self._value = False @@ -200,7 +201,7 @@ async def wait(self): """Block until the internal flag is true. If the internal flag is true on entry, return True - immediately. Otherwise, block until another coroutine calls + immediately. Otherwise, block until another task calls set() to set the flag to true, then return True. """ if self._value: @@ -219,8 +220,8 @@ class Condition(_ContextManagerMixin, mixins._LoopBoundMixin): """Asynchronous equivalent to threading.Condition. This class implements condition variable objects. A condition variable - allows one or more coroutines to wait until they are notified by another - coroutine. + allows one or more tasks to wait until they are notified by another + task. A new Lock object is created and used as the underlying lock. """ @@ -247,45 +248,64 @@ def __repr__(self): async def wait(self): """Wait until notified. - If the calling coroutine has not acquired the lock when this + If the calling task has not acquired the lock when this method is called, a RuntimeError is raised. 
This method releases the underlying lock, and then blocks until it is awakened by a notify() or notify_all() call for - the same condition variable in another coroutine. Once + the same condition variable in another task. Once awakened, it re-acquires the lock and returns True. + + This method may return spuriously, + which is why the caller should always + re-check the state and be prepared to wait() again. """ if not self.locked(): raise RuntimeError('cannot wait on un-acquired lock') + fut = self._get_loop().create_future() self.release() try: - fut = self._get_loop().create_future() - self._waiters.append(fut) try: - await fut - return True - finally: - self._waiters.remove(fut) - - finally: - # Must reacquire lock even if wait is cancelled - cancelled = False - while True: + self._waiters.append(fut) try: - await self.acquire() - break - except exceptions.CancelledError: - cancelled = True + await fut + return True + finally: + self._waiters.remove(fut) - if cancelled: - raise exceptions.CancelledError + finally: + # Must re-acquire lock even if wait is cancelled. + # We only catch CancelledError here, since we don't want any + # other (fatal) errors with the future to cause us to spin. + err = None + while True: + try: + await self.acquire() + break + except exceptions.CancelledError as e: + err = e + + if err is not None: + try: + raise err # Re-raise most recent exception instance. + finally: + err = None # Break reference cycles. + except BaseException: + # Any error raised out of here _may_ have occurred after this Task + # believed to have been successfully notified. + # Make sure to notify another Task instead. This may result + # in a "spurious wakeup", which is allowed as part of the + # Condition Variable protocol. + self._notify(1) + raise async def wait_for(self, predicate): """Wait until a predicate becomes true. - The predicate should be a callable which result will be - interpreted as a boolean value. The final predicate value is + The predicate should be a callable whose result will be + interpreted as a boolean value. The method will repeatedly + wait() until it evaluates to true. The final predicate value is the return value. """ result = predicate() @@ -295,20 +315,22 @@ async def wait_for(self, predicate): return result def notify(self, n=1): - """By default, wake up one coroutine waiting on this condition, if any. - If the calling coroutine has not acquired the lock when this method + """By default, wake up one task waiting on this condition, if any. + If the calling task has not acquired the lock when this method is called, a RuntimeError is raised. - This method wakes up at most n of the coroutines waiting for the - condition variable; it is a no-op if no coroutines are waiting. + This method wakes up n of the tasks waiting for the condition + variable; if fewer than n are waiting, they are all awoken. - Note: an awakened coroutine does not actually return from its + Note: an awakened task does not actually return from its wait() call until it can reacquire the lock. Since notify() does not release the lock, its caller should. """ if not self.locked(): raise RuntimeError('cannot notify on un-acquired lock') + self._notify(n) + def _notify(self, n): idx = 0 for fut in self._waiters: if idx >= n: @@ -357,6 +379,7 @@ def __repr__(self): def locked(self): """Returns True if semaphore cannot be acquired immediately.""" + # Due to state, or FIFO rules (must allow others to run first). 
return self._value == 0 or ( any(not w.cancelled() for w in (self._waiters or ()))) @@ -365,11 +388,12 @@ async def acquire(self): If the internal counter is larger than zero on entry, decrement it by one and return True immediately. If it is - zero on entry, block, waiting until some other coroutine has + zero on entry, block, waiting until some other task has called release() to make it larger than 0, and then return True. """ if not self.locked(): + # Maintain FIFO, wait for others to start even if _value > 0. self._value -= 1 return True @@ -378,29 +402,34 @@ async def acquire(self): fut = self._get_loop().create_future() self._waiters.append(fut) - # Finally block should be called before the CancelledError - # handling as we don't want CancelledError to call - # _wake_up_first() and attempt to wake up itself. try: try: await fut finally: self._waiters.remove(fut) except exceptions.CancelledError: - if not fut.cancelled(): + # Currently the only exception designed be able to occur here. + if fut.done() and not fut.cancelled(): + # Our Future was successfully set to True via _wake_up_next(), + # but we are not about to successfully acquire(). Therefore we + # must undo the bookkeeping already done and attempt to wake + # up someone else. self._value += 1 - self._wake_up_next() raise - if self._value > 0: - self._wake_up_next() + finally: + # New waiters may have arrived but had to wait due to FIFO. + # Wake up as many as are allowed. + while self._value > 0: + if not self._wake_up_next(): + break # There was no-one to wake up. return True def release(self): """Release a semaphore, incrementing the internal counter by one. - When it was zero on entry and another coroutine is waiting for it to - become larger than zero again, wake up that coroutine. + When it was zero on entry and another task is waiting for it to + become larger than zero again, wake up that task. """ self._value += 1 self._wake_up_next() @@ -408,13 +437,15 @@ def release(self): def _wake_up_next(self): """Wake up the first waiter that isn't done.""" if not self._waiters: - return + return False for fut in self._waiters: if not fut.done(): self._value -= 1 fut.set_result(True) - return + # `fut` is now `done()` and not `cancelled()`. 
+ return True + return False class BoundedSemaphore(Semaphore): @@ -454,7 +485,7 @@ class Barrier(mixins._LoopBoundMixin): def __init__(self, parties): """Create a barrier, initialised to 'parties' tasks.""" if parties < 1: - raise ValueError('parties must be > 0') + raise ValueError('parties must be >= 1') self._cond = Condition() # notify all tasks when state changes diff --git a/Lib/asyncio/proactor_events.py b/Lib/asyncio/proactor_events.py index 1e2a730cf36..f404273c3ae 100644 --- a/Lib/asyncio/proactor_events.py +++ b/Lib/asyncio/proactor_events.py @@ -63,7 +63,7 @@ def __init__(self, loop, sock, protocol, waiter=None, self._called_connection_lost = False self._eof_written = False if self._server is not None: - self._server._attach() + self._server._attach(self) self._loop.call_soon(self._protocol.connection_made, self) if waiter is not None: # only wake up the waiter when connection_made() has been called @@ -167,7 +167,7 @@ def _call_connection_lost(self, exc): self._sock = None server = self._server if server is not None: - server._detach() + server._detach(self) self._server = None self._called_connection_lost = True @@ -460,6 +460,8 @@ def _pipe_closed(self, fut): class _ProactorDatagramTransport(_ProactorBasePipeTransport, transports.DatagramTransport): max_size = 256 * 1024 + _header_size = 8 + def __init__(self, loop, sock, protocol, address=None, waiter=None, extra=None): self._address = address @@ -487,9 +489,6 @@ def sendto(self, data, addr=None): raise TypeError('data argument must be bytes-like object (%r)', type(data)) - if not data: - return - if self._address is not None and addr not in (None, self._address): raise ValueError( f'Invalid address: must be None or {self._address}') @@ -502,7 +501,7 @@ def sendto(self, data, addr=None): # Ensure that what we buffer is immutable. self._buffer.append((bytes(data), addr)) - self._buffer_size += len(data) + self._buffer_size += len(data) + self._header_size if self._write_fut is None: # No current write operations are active, kick one off @@ -529,7 +528,7 @@ def _loop_writing(self, fut=None): return data, addr = self._buffer.popleft() - self._buffer_size -= len(data) + self._buffer_size -= len(data) + self._header_size if self._address is not None: self._write_fut = self._loop._proactor.send(self._sock, data) @@ -724,6 +723,8 @@ async def sock_sendto(self, sock, data, address): return await self._proactor.sendto(sock, data, 0, address) async def sock_connect(self, sock, address): + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") return await self._proactor.connect(sock, address) async def sock_accept(self, sock): diff --git a/Lib/asyncio/queues.py b/Lib/asyncio/queues.py index a9656a6df56..084fccaaff2 100644 --- a/Lib/asyncio/queues.py +++ b/Lib/asyncio/queues.py @@ -1,4 +1,11 @@ -__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty') +__all__ = ( + 'Queue', + 'PriorityQueue', + 'LifoQueue', + 'QueueFull', + 'QueueEmpty', + 'QueueShutDown', +) import collections import heapq @@ -18,6 +25,11 @@ class QueueFull(Exception): pass +class QueueShutDown(Exception): + """Raised when putting on to or getting from a shut-down Queue.""" + pass + + class Queue(mixins._LoopBoundMixin): """A queue, useful for coordinating producer and consumer coroutines. @@ -41,6 +53,7 @@ def __init__(self, maxsize=0): self._finished = locks.Event() self._finished.set() self._init(maxsize) + self._is_shutdown = False # These three are overridable in subclasses. 
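
The QueueShutDown exception introduced above is raised from the put()/get()
paths changed in the following hunks, which lets consumers exit cleanly
without sentinel items. A minimal sketch, with illustrative names:

    import asyncio

    async def consumer(q):
        while True:
            try:
                item = await q.get()
            except asyncio.QueueShutDown:
                break  # queue is drained and shut down
            print("processed", item)
            q.task_done()

    async def main():
        q = asyncio.Queue()
        workers = [asyncio.create_task(consumer(q)) for _ in range(2)]
        for i in range(5):
            q.put_nowait(i)
        q.shutdown()  # non-immediate: remaining items are still handed out
        await asyncio.gather(*workers)

    asyncio.run(main())
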
@@ -81,6 +94,8 @@ def _format(self): result += f' _putters[{len(self._putters)}]' if self._unfinished_tasks: result += f' tasks={self._unfinished_tasks}' + if self._is_shutdown: + result += ' shutdown' return result def qsize(self): @@ -112,8 +127,12 @@ async def put(self, item): Put an item into the queue. If the queue is full, wait until a free slot is available before adding item. + + Raises QueueShutDown if the queue has been shut down. """ while self.full(): + if self._is_shutdown: + raise QueueShutDown putter = self._get_loop().create_future() self._putters.append(putter) try: @@ -125,7 +144,7 @@ async def put(self, item): self._putters.remove(putter) except ValueError: # The putter could be removed from self._putters by a - # previous get_nowait call. + # previous get_nowait call or a shutdown call. pass if not self.full() and not putter.cancelled(): # We were woken up by get_nowait(), but can't take @@ -138,7 +157,11 @@ def put_nowait(self, item): """Put an item into the queue without blocking. If no free slot is immediately available, raise QueueFull. + + Raises QueueShutDown if the queue has been shut down. """ + if self._is_shutdown: + raise QueueShutDown if self.full(): raise QueueFull self._put(item) @@ -150,8 +173,13 @@ async def get(self): """Remove and return an item from the queue. If queue is empty, wait until an item is available. + + Raises QueueShutDown if the queue has been shut down and is empty, or + if the queue has been shut down immediately. """ while self.empty(): + if self._is_shutdown and self.empty(): + raise QueueShutDown getter = self._get_loop().create_future() self._getters.append(getter) try: @@ -163,7 +191,7 @@ async def get(self): self._getters.remove(getter) except ValueError: # The getter could be removed from self._getters by a - # previous put_nowait call. + # previous put_nowait call, or a shutdown call. pass if not self.empty() and not getter.cancelled(): # We were woken up by put_nowait(), but can't take @@ -176,8 +204,13 @@ def get_nowait(self): """Remove and return an item from the queue. Return an item if one is immediately available, else raise QueueEmpty. + + Raises QueueShutDown if the queue has been shut down and is empty, or + if the queue has been shut down immediately. """ if self.empty(): + if self._is_shutdown: + raise QueueShutDown raise QueueEmpty item = self._get() self._wakeup_next(self._putters) @@ -214,6 +247,36 @@ async def join(self): if self._unfinished_tasks > 0: await self._finished.wait() + def shutdown(self, immediate=False): + """Shut-down the queue, making queue gets and puts raise QueueShutDown. + + By default, gets will only raise once the queue is empty. Set + 'immediate' to True to make gets raise immediately instead. + + All blocked callers of put() and get() will be unblocked. + + If 'immediate', the queue is drained and unfinished tasks + is reduced by the number of drained tasks. If unfinished tasks + is reduced to zero, callers of Queue.join are unblocked. 
+ """ + self._is_shutdown = True + if immediate: + while not self.empty(): + self._get() + if self._unfinished_tasks > 0: + self._unfinished_tasks -= 1 + if self._unfinished_tasks == 0: + self._finished.set() + # All getters need to re-check queue-empty to raise ShutDown + while self._getters: + getter = self._getters.popleft() + if not getter.done(): + getter.set_result(None) + while self._putters: + putter = self._putters.popleft() + if not putter.done(): + putter.set_result(None) + class PriorityQueue(Queue): """A subclass of Queue; retrieves entries in priority order (lowest first). diff --git a/Lib/asyncio/runners.py b/Lib/asyncio/runners.py index 1b89236599a..102ae78021b 100644 --- a/Lib/asyncio/runners.py +++ b/Lib/asyncio/runners.py @@ -168,6 +168,7 @@ def run(main, *, debug=None, loop_factory=None): running in the same thread. If debug is True, the event loop will be run in debug mode. + If loop_factory is passed, it is used for new event loop creation. This function always creates a new event loop and closes it at the end. It should be used as a main entry point for asyncio programs, and should diff --git a/Lib/asyncio/selector_events.py b/Lib/asyncio/selector_events.py index 790711f8340..8701467d413 100644 --- a/Lib/asyncio/selector_events.py +++ b/Lib/asyncio/selector_events.py @@ -265,22 +265,17 @@ def _ensure_fd_no_transport(self, fd): except (AttributeError, TypeError, ValueError): # This code matches selectors._fileobj_to_fd function. raise ValueError(f"Invalid file object: {fd!r}") from None - try: - transport = self._transports[fileno] - except KeyError: - pass - else: - if not transport.is_closing(): - raise RuntimeError( - f'File descriptor {fd!r} is used by transport ' - f'{transport!r}') + transport = self._transports.get(fileno) + if transport and not transport.is_closing(): + raise RuntimeError( + f'File descriptor {fd!r} is used by transport ' + f'{transport!r}') def _add_reader(self, fd, callback, *args): self._check_closed() handle = events.Handle(callback, args, self, None) - try: - key = self._selector.get_key(fd) - except KeyError: + key = self._selector.get_map().get(fd) + if key is None: self._selector.register(fd, selectors.EVENT_READ, (handle, None)) else: @@ -294,30 +289,27 @@ def _add_reader(self, fd, callback, *args): def _remove_reader(self, fd): if self.is_closed(): return False - try: - key = self._selector.get_key(fd) - except KeyError: + key = self._selector.get_map().get(fd) + if key is None: return False + mask, (reader, writer) = key.events, key.data + mask &= ~selectors.EVENT_READ + if not mask: + self._selector.unregister(fd) else: - mask, (reader, writer) = key.events, key.data - mask &= ~selectors.EVENT_READ - if not mask: - self._selector.unregister(fd) - else: - self._selector.modify(fd, mask, (None, writer)) + self._selector.modify(fd, mask, (None, writer)) - if reader is not None: - reader.cancel() - return True - else: - return False + if reader is not None: + reader.cancel() + return True + else: + return False def _add_writer(self, fd, callback, *args): self._check_closed() handle = events.Handle(callback, args, self, None) - try: - key = self._selector.get_key(fd) - except KeyError: + key = self._selector.get_map().get(fd) + if key is None: self._selector.register(fd, selectors.EVENT_WRITE, (None, handle)) else: @@ -332,24 +324,22 @@ def _remove_writer(self, fd): """Remove a writer callback.""" if self.is_closed(): return False - try: - key = self._selector.get_key(fd) - except KeyError: + key = self._selector.get_map().get(fd) + if 
key is None: return False + mask, (reader, writer) = key.events, key.data + # Remove both writer and connector. + mask &= ~selectors.EVENT_WRITE + if not mask: + self._selector.unregister(fd) else: - mask, (reader, writer) = key.events, key.data - # Remove both writer and connector. - mask &= ~selectors.EVENT_WRITE - if not mask: - self._selector.unregister(fd) - else: - self._selector.modify(fd, mask, (reader, None)) + self._selector.modify(fd, mask, (reader, None)) - if writer is not None: - writer.cancel() - return True - else: - return False + if writer is not None: + writer.cancel() + return True + else: + return False def add_reader(self, fd, callback, *args): """Add a reader callback.""" @@ -801,7 +791,7 @@ def __init__(self, loop, sock, protocol, extra=None, server=None): self._paused = False # Set when pause_reading() called if self._server is not None: - self._server._attach() + self._server._attach(self) loop._transports[self._sock_fd] = self def __repr__(self): @@ -878,6 +868,8 @@ def __del__(self, _warn=warnings.warn): if self._sock is not None: _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) self._sock.close() + if self._server is not None: + self._server._detach(self) def _fatal_error(self, exc, message='Fatal error on transport'): # Should be called from exception handler only. @@ -916,7 +908,7 @@ def _call_connection_lost(self, exc): self._loop = None server = self._server if server is not None: - server._detach() + server._detach(self) self._server = None def get_write_buffer_size(self): @@ -1054,8 +1046,8 @@ def _read_ready__on_eof(self): def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError(f'data argument must be a bytes-like object, ' - f'not {type(data).__name__!r}') + raise TypeError(f'data argument must be a bytes, bytearray, or memoryview ' + f'object, not {type(data).__name__!r}') if self._eof: raise RuntimeError('Cannot call write() after write_eof()') if self._empty_waiter is not None: @@ -1178,20 +1170,31 @@ def writelines(self, list_of_data): raise RuntimeError('unable to writelines; sendfile is in progress') if not list_of_data: return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + self._buffer.extend([memoryview(data) for data in list_of_data]) self._write_ready() # If the entire buffer couldn't be written, register a write handler if self._buffer: self._loop._add_writer(self._sock_fd, self._write_ready) + self._maybe_pause_protocol() def can_write_eof(self): return True def _call_connection_lost(self, exc): - super()._call_connection_lost(exc) - if self._empty_waiter is not None: - self._empty_waiter.set_exception( - ConnectionError("Connection is closed by peer")) + try: + super()._call_connection_lost(exc) + finally: + self._write_ready = None + if self._empty_waiter is not None: + self._empty_waiter.set_exception( + ConnectionError("Connection is closed by peer")) def _make_empty_waiter(self): if self._empty_waiter is not None: @@ -1206,13 +1209,13 @@ def _reset_empty_waiter(self): def close(self): self._read_ready_cb = None - self._write_ready = None super().close() class _SelectorDatagramTransport(_SelectorTransport, transports.DatagramTransport): _buffer_factory = collections.deque + _header_size = 8 def __init__(self, loop, sock, protocol, address=None, waiter=None, extra=None): @@ -1251,8 +1254,6 @@ def sendto(self, data, addr=None): if not 
isinstance(data, (bytes, bytearray, memoryview)): raise TypeError(f'data argument must be a bytes-like object, ' f'not {type(data).__name__!r}') - if not data: - return if self._address: if addr not in (None, self._address): @@ -1288,13 +1289,13 @@ def sendto(self, data, addr=None): # Ensure that what we buffer is immutable. self._buffer.append((bytes(data), addr)) - self._buffer_size += len(data) + self._buffer_size += len(data) + self._header_size self._maybe_pause_protocol() def _sendto_ready(self): while self._buffer: data, addr = self._buffer.popleft() - self._buffer_size -= len(data) + self._buffer_size -= len(data) + self._header_size try: if self._extra['peername']: self._sock.send(data) @@ -1302,7 +1303,7 @@ def _sendto_ready(self): self._sock.sendto(data, addr) except (BlockingIOError, InterruptedError): self._buffer.appendleft((data, addr)) # Try again later. - self._buffer_size += len(data) + self._buffer_size += len(data) + self._header_size break except OSError as exc: self._protocol.error_received(exc) diff --git a/Lib/asyncio/sslproto.py b/Lib/asyncio/sslproto.py index e51669a2ab2..74c5f0d5ca0 100644 --- a/Lib/asyncio/sslproto.py +++ b/Lib/asyncio/sslproto.py @@ -101,7 +101,7 @@ def get_protocol(self): return self._ssl_protocol._app_protocol def is_closing(self): - return self._closed + return self._closed or self._ssl_protocol._is_transport_closing() def close(self): """Close the transport. @@ -379,6 +379,9 @@ def _get_app_transport(self): self._app_transport_created = True return self._app_transport + def _is_transport_closing(self): + return self._transport is not None and self._transport.is_closing() + def connection_made(self, transport): """Called when the low-level connection is made. @@ -542,7 +545,7 @@ def _start_handshake(self): # start handshake timeout count down self._handshake_timeout_handle = \ self._loop.call_later(self._ssl_handshake_timeout, - lambda: self._check_handshake_timeout()) + self._check_handshake_timeout) self._do_handshake() @@ -623,7 +626,7 @@ def _start_shutdown(self): self._set_state(SSLProtocolState.FLUSHING) self._shutdown_timeout_handle = self._loop.call_later( self._ssl_shutdown_timeout, - lambda: self._check_shutdown_timeout() + self._check_shutdown_timeout ) self._do_flush() @@ -762,7 +765,7 @@ def _do_read__buffered(self): else: break else: - self._loop.call_soon(lambda: self._do_read()) + self._loop.call_soon(self._do_read) except SSLAgainErrors: pass if offset > 0: diff --git a/Lib/asyncio/staggered.py b/Lib/asyncio/staggered.py index 451a53a16f3..0afed64fdf9 100644 --- a/Lib/asyncio/staggered.py +++ b/Lib/asyncio/staggered.py @@ -3,7 +3,6 @@ __all__ = 'staggered_race', import contextlib -import typing from . import events from . import exceptions as exceptions_mod @@ -11,16 +10,7 @@ from . import tasks -async def staggered_race( - coro_fns: typing.Iterable[typing.Callable[[], typing.Awaitable]], - delay: typing.Optional[float], - *, - loop: events.AbstractEventLoop = None, -) -> typing.Tuple[ - typing.Any, - typing.Optional[int], - typing.List[typing.Optional[Exception]] -]: +async def staggered_race(coro_fns, delay, *, loop=None): """Run coroutines with staggered start times and take the first to finish. This method takes an iterable of coroutine functions. 
The first one is @@ -76,11 +66,33 @@ async def staggered_race( enum_coro_fns = enumerate(coro_fns) winner_result = None winner_index = None + unhandled_exceptions = [] exceptions = [] - running_tasks = [] + running_tasks = set() + on_completed_fut = None + + def task_done(task): + running_tasks.discard(task) + if ( + on_completed_fut is not None + and not on_completed_fut.done() + and not running_tasks + ): + on_completed_fut.set_result(None) + + if task.cancelled(): + return + + exc = task.exception() + if exc is None: + return + unhandled_exceptions.append(exc) - async def run_one_coro( - previous_failed: typing.Optional[locks.Event]) -> None: + async def run_one_coro(ok_to_start, previous_failed) -> None: + # in eager tasks this waits for the calling task to append this task + # to running_tasks, in regular tasks this wait is a no-op that does + # not yield a future. See gh-124309. + await ok_to_start.wait() # Wait for the previous task to finish, or for delay seconds if previous_failed is not None: with contextlib.suppress(exceptions_mod.TimeoutError): @@ -96,9 +108,13 @@ async def run_one_coro( return # Start task that will run the next coroutine this_failed = locks.Event() - next_task = loop.create_task(run_one_coro(this_failed)) - running_tasks.append(next_task) - assert len(running_tasks) == this_index + 2 + next_ok_to_start = locks.Event() + next_task = loop.create_task(run_one_coro(next_ok_to_start, this_failed)) + running_tasks.add(next_task) + next_task.add_done_callback(task_done) + # next_task has been appended to running_tasks so next_task is ok to + # start. + next_ok_to_start.set() # Prepare place to put this coroutine's exceptions if not won exceptions.append(None) assert len(exceptions) == this_index + 1 @@ -123,27 +139,36 @@ async def run_one_coro( # up as done() == True, cancelled() == False, exception() == # asyncio.CancelledError. This behavior is specified in # https://bugs.python.org/issue30048 - for i, t in enumerate(running_tasks): - if i != this_index: + current_task = tasks.current_task(loop) + for t in running_tasks: + if t is not current_task: t.cancel() - first_task = loop.create_task(run_one_coro(None)) - running_tasks.append(first_task) + propagate_cancellation_error = None try: - # Wait for a growing list of tasks to all finish: poor man's version of - # curio's TaskGroup or trio's nursery - done_count = 0 - while done_count != len(running_tasks): - done, _ = await tasks.wait(running_tasks) - done_count = len(done) + ok_to_start = locks.Event() + first_task = loop.create_task(run_one_coro(ok_to_start, None)) + running_tasks.add(first_task) + first_task.add_done_callback(task_done) + # first_task has been appended to running_tasks so first_task is ok to start. + ok_to_start.set() + propagate_cancellation_error = None + # Make sure no tasks are left running if we leave this function + while running_tasks: + on_completed_fut = loop.create_future() + try: + await on_completed_fut + except exceptions_mod.CancelledError as ex: + propagate_cancellation_error = ex + for task in running_tasks: + task.cancel(*ex.args) + on_completed_fut = None + if __debug__ and unhandled_exceptions: # If run_one_coro raises an unhandled exception, it's probably a # programming error, and I want to see it. 
diff --git a/Lib/asyncio/staggered.py b/Lib/asyncio/staggered.py
index 451a53a16f3..0afed64fdf9 100644
--- a/Lib/asyncio/staggered.py
+++ b/Lib/asyncio/staggered.py
@@ -3,7 +3,6 @@
 __all__ = 'staggered_race',
 
 import contextlib
-import typing
 
 from . import events
 from . import exceptions as exceptions_mod
@@ -11,16 +10,7 @@
 from . import tasks
 
 
-async def staggered_race(
-    coro_fns: typing.Iterable[typing.Callable[[], typing.Awaitable]],
-    delay: typing.Optional[float],
-    *,
-    loop: events.AbstractEventLoop = None,
-) -> typing.Tuple[
-    typing.Any,
-    typing.Optional[int],
-    typing.List[typing.Optional[Exception]]
-]:
+async def staggered_race(coro_fns, delay, *, loop=None):
     """Run coroutines with staggered start times and take the first to finish.
 
     This method takes an iterable of coroutine functions. The first one is
@@ -76,11 +66,33 @@ async def staggered_race(
     enum_coro_fns = enumerate(coro_fns)
     winner_result = None
     winner_index = None
+    unhandled_exceptions = []
     exceptions = []
-    running_tasks = []
+    running_tasks = set()
+    on_completed_fut = None
+
+    def task_done(task):
+        running_tasks.discard(task)
+        if (
+            on_completed_fut is not None
+            and not on_completed_fut.done()
+            and not running_tasks
+        ):
+            on_completed_fut.set_result(None)
+
+        if task.cancelled():
+            return
+
+        exc = task.exception()
+        if exc is None:
+            return
+        unhandled_exceptions.append(exc)
 
-    async def run_one_coro(
-            previous_failed: typing.Optional[locks.Event]) -> None:
+    async def run_one_coro(ok_to_start, previous_failed) -> None:
+        # in eager tasks this waits for the calling task to append this task
+        # to running_tasks, in regular tasks this wait is a no-op that does
+        # not yield a future. See gh-124309.
+        await ok_to_start.wait()
         # Wait for the previous task to finish, or for delay seconds
         if previous_failed is not None:
             with contextlib.suppress(exceptions_mod.TimeoutError):
@@ -96,9 +108,13 @@ async def run_one_coro(
             return
         # Start task that will run the next coroutine
         this_failed = locks.Event()
-        next_task = loop.create_task(run_one_coro(this_failed))
-        running_tasks.append(next_task)
-        assert len(running_tasks) == this_index + 2
+        next_ok_to_start = locks.Event()
+        next_task = loop.create_task(run_one_coro(next_ok_to_start, this_failed))
+        running_tasks.add(next_task)
+        next_task.add_done_callback(task_done)
+        # next_task has been appended to running_tasks so next_task is ok to
+        # start.
+        next_ok_to_start.set()
         # Prepare place to put this coroutine's exceptions if not won
         exceptions.append(None)
         assert len(exceptions) == this_index + 1
@@ -123,27 +139,36 @@ async def run_one_coro(
             # up as done() == True, cancelled() == False, exception() ==
             # asyncio.CancelledError. This behavior is specified in
             # https://bugs.python.org/issue30048
-            for i, t in enumerate(running_tasks):
-                if i != this_index:
+            current_task = tasks.current_task(loop)
+            for t in running_tasks:
+                if t is not current_task:
                     t.cancel()
 
-    first_task = loop.create_task(run_one_coro(None))
-    running_tasks.append(first_task)
+    propagate_cancellation_error = None
     try:
-        # Wait for a growing list of tasks to all finish: poor man's version of
-        # curio's TaskGroup or trio's nursery
-        done_count = 0
-        while done_count != len(running_tasks):
-            done, _ = await tasks.wait(running_tasks)
-            done_count = len(done)
+        ok_to_start = locks.Event()
+        first_task = loop.create_task(run_one_coro(ok_to_start, None))
+        running_tasks.add(first_task)
+        first_task.add_done_callback(task_done)
+        # first_task has been appended to running_tasks so first_task is ok to start.
+        ok_to_start.set()
+        propagate_cancellation_error = None
+        # Make sure no tasks are left running if we leave this function
+        while running_tasks:
+            on_completed_fut = loop.create_future()
+            try:
+                await on_completed_fut
+            except exceptions_mod.CancelledError as ex:
+                propagate_cancellation_error = ex
+                for task in running_tasks:
+                    task.cancel(*ex.args)
+            on_completed_fut = None
+        if __debug__ and unhandled_exceptions:
             # If run_one_coro raises an unhandled exception, it's probably a
             # programming error, and I want to see it.
-            if __debug__:
-                for d in done:
-                    if d.done() and not d.cancelled() and d.exception():
-                        raise d.exception()
+            raise ExceptionGroup("staggered race failed", unhandled_exceptions)
+        if propagate_cancellation_error is not None:
+            raise propagate_cancellation_error
         return winner_result, winner_index, exceptions
     finally:
-        # Make sure no tasks are left running if we leave this function
-        for t in running_tasks:
-            t.cancel()
+        del exceptions, propagate_cancellation_error, unhandled_exceptions
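A usage sketch for the rewritten helper: staggered_race() is an internal utility (it backs happy-eyeballs connection attempts), and the endpoints below are made-up placeholders:

    import asyncio
    from asyncio.staggered import staggered_race

    async def main():
        async def connect_ipv6():
            return await asyncio.open_connection('::1', 8080)

        async def connect_ipv4():
            return await asyncio.open_connection('127.0.0.1', 8080)

        # Start connect_ipv6 first; if it has not won after 0.3s (or it
        # failed), also start connect_ipv4 and keep whichever finishes first.
        winner, index, errors = await staggered_race(
            [connect_ipv6, connect_ipv4], delay=0.3)
        print('winner index:', index, 'recorded failures:', errors)

    asyncio.run(main())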
diff --git a/Lib/asyncio/streams.py b/Lib/asyncio/streams.py
index f310aa2f367..64aac4cc50d 100644
--- a/Lib/asyncio/streams.py
+++ b/Lib/asyncio/streams.py
@@ -201,7 +201,6 @@ def __init__(self, stream_reader, client_connected_cb=None, loop=None):
         # is established.
         self._strong_reader = stream_reader
         self._reject_connection = False
-        self._stream_writer = None
         self._task = None
         self._transport = None
         self._client_connected_cb = client_connected_cb
@@ -214,10 +213,8 @@ def _stream_reader(self):
             return None
         return self._stream_reader_wr()
 
-    def _replace_writer(self, writer):
+    def _replace_transport(self, transport):
         loop = self._loop
-        transport = writer.transport
-        self._stream_writer = writer
         self._transport = transport
         self._over_ssl = transport.get_extra_info('sslcontext') is not None
 
@@ -239,11 +236,8 @@ def connection_made(self, transport):
             reader.set_transport(transport)
         self._over_ssl = transport.get_extra_info('sslcontext') is not None
         if self._client_connected_cb is not None:
-            self._stream_writer = StreamWriter(transport, self,
-                                               reader,
-                                               self._loop)
-            res = self._client_connected_cb(reader,
-                                            self._stream_writer)
+            writer = StreamWriter(transport, self, reader, self._loop)
+            res = self._client_connected_cb(reader, writer)
             if coroutines.iscoroutine(res):
                 def callback(task):
                     if task.cancelled():
@@ -405,9 +399,9 @@ async def start_tls(self, sslcontext, *,
             ssl_handshake_timeout=ssl_handshake_timeout,
             ssl_shutdown_timeout=ssl_shutdown_timeout)
         self._transport = new_transport
-        protocol._replace_writer(self)
+        protocol._replace_transport(new_transport)
 
-    def __del__(self):
+    def __del__(self, warnings=warnings):
         if not self._transport.is_closing():
             if self._loop.is_closed():
                 warnings.warn("loop is closed", ResourceWarning)
@@ -596,20 +590,34 @@ async def readuntil(self, separator=b'\n'):
         If the data cannot be read because of over limit, a
         LimitOverrunError exception will be raised, and the data
         will be left in the internal buffer, so it can be read again.
+
+        The ``separator`` may also be a tuple of separators. In this
+        case the return value will be the shortest possible that has any
+        separator as the suffix. For the purposes of LimitOverrunError,
+        the shortest possible separator is considered to be the one that
+        matched.
         """
-        seplen = len(separator)
-        if seplen == 0:
+        if isinstance(separator, tuple):
+            # Makes sure shortest matches wins
+            separator = sorted(separator, key=len)
+        else:
+            separator = [separator]
+        if not separator:
+            raise ValueError('Separator should contain at least one element')
+        min_seplen = len(separator[0])
+        max_seplen = len(separator[-1])
+        if min_seplen == 0:
             raise ValueError('Separator should be at least one-byte string')
 
         if self._exception is not None:
             raise self._exception
 
         # Consume whole buffer except last bytes, which length is
-        # one less than seplen. Let's check corner cases with
-        # separator='SEPARATOR':
+        # one less than max_seplen. Let's check corner cases with
+        # separator[-1]='SEPARATOR':
         # * we have received almost complete separator (without last
         #   byte). i.e buffer='some textSEPARATO'. In this case we
-        #   can safely consume len(separator) - 1 bytes.
+        #   can safely consume max_seplen - 1 bytes.
         # * last byte of buffer is first byte of separator, i.e.
         #   buffer='abcdefghijklmnopqrS'. We may safely consume
         #   everything except that last byte, but this require to
@@ -622,26 +630,35 @@ async def readuntil(self, separator=b'\n'):
         # messages :)
 
         # `offset` is the number of bytes from the beginning of the buffer
-        # where there is no occurrence of `separator`.
+        # where there is no occurrence of any `separator`.
         offset = 0
 
-        # Loop until we find `separator` in the buffer, exceed the buffer size,
+        # Loop until we find a `separator` in the buffer, exceed the buffer size,
         # or an EOF has happened.
         while True:
             buflen = len(self._buffer)
 
-            # Check if we now have enough data in the buffer for `separator` to
-            # fit.
-            if buflen - offset >= seplen:
-                isep = self._buffer.find(separator, offset)
-
-                if isep != -1:
-                    # `separator` is in the buffer. `isep` will be used later
-                    # to retrieve the data.
+            # Check if we now have enough data in the buffer for shortest
+            # separator to fit.
+            if buflen - offset >= min_seplen:
+                match_start = None
+                match_end = None
+                for sep in separator:
+                    isep = self._buffer.find(sep, offset)
+
+                    if isep != -1:
+                        # `separator` is in the buffer. `match_start` and
+                        # `match_end` will be used later to retrieve the
+                        # data.
+                        end = isep + len(sep)
+                        if match_end is None or end < match_end:
+                            match_end = end
+                            match_start = isep
+                if match_end is not None:
                     break
 
                 # see upper comment for explanation.
-                offset = buflen + 1 - seplen
+                offset = max(0, buflen + 1 - max_seplen)
                 if offset > self._limit:
                     raise exceptions.LimitOverrunError(
                         'Separator is not found, and chunk exceed the limit',
@@ -650,7 +667,7 @@ async def readuntil(self, separator=b'\n'):
             # Complete message (with full separator) may be present in buffer
             # even when EOF flag is set. This may happen when the last chunk
             # adds data which makes separator be found. That's why we check for
-            # EOF *ater* inspecting the buffer.
+            # EOF *after* inspecting the buffer.
             if self._eof:
                 chunk = bytes(self._buffer)
                 self._buffer.clear()
@@ -659,12 +676,12 @@ async def readuntil(self, separator=b'\n'):
             # _wait_for_data() will resume reading if stream was paused.
             await self._wait_for_data('readuntil')
 
-        if isep > self._limit:
+        if match_start > self._limit:
             raise exceptions.LimitOverrunError(
-                'Separator is found, but chunk is longer than limit', isep)
+                'Separator is found, but chunk is longer than limit', match_start)
 
-        chunk = self._buffer[:isep + seplen]
-        del self._buffer[:isep + seplen]
+        chunk = self._buffer[:match_end]
+        del self._buffer[:match_end]
         self._maybe_resume_transport()
         return bytes(chunk)
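A quick sketch of the new tuple form of readuntil() added above; the shortest candidate match wins when several separators could terminate the chunk. Host and port are placeholders:

    import asyncio

    async def handle(reader, writer):
        # Accept a line ended by either CRLF or bare LF; the returned
        # chunk includes whichever separator actually matched.
        line = await reader.readuntil((b'\r\n', b'\n'))
        writer.write(line)
        await writer.drain()
        writer.close()
        await writer.wait_closed()

    async def main():
        server = await asyncio.start_server(handle, '127.0.0.1', 8888)
        async with server:
            await server.serve_forever()

    asyncio.run(main())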
diff --git a/Lib/asyncio/taskgroups.py b/Lib/asyncio/taskgroups.py
index d264e51f1fd..8fda6c8d55e 100644
--- a/Lib/asyncio/taskgroups.py
+++ b/Lib/asyncio/taskgroups.py
@@ -66,6 +66,20 @@ async def __aenter__(self):
         return self
 
     async def __aexit__(self, et, exc, tb):
+        tb = None
+        try:
+            return await self._aexit(et, exc)
+        finally:
+            # Exceptions are heavy objects that can have object
+            # cycles (bad for GC); let's not keep a reference to
+            # a bunch of them. It would be nicer to use a try/finally
+            # in __aexit__ directly but that introduced some diff noise
+            self._parent_task = None
+            self._errors = None
+            self._base_error = None
+            exc = None
+
+    async def _aexit(self, et, exc):
         self._exiting = True
 
         if (exc is not None and
@@ -73,14 +87,10 @@ async def __aexit__(self, et, exc, tb):
                 self._base_error is None):
             self._base_error = exc
 
-        propagate_cancellation_error = \
-            exc if et is exceptions.CancelledError else None
-        if self._parent_cancel_requested:
-            # If this flag is set we *must* call uncancel().
-            if self._parent_task.uncancel() == 0:
-                # If there are no pending cancellations left,
-                # don't propagate CancelledError.
-                propagate_cancellation_error = None
+        if et is not None and issubclass(et, exceptions.CancelledError):
+            propagate_cancellation_error = exc
+        else:
+            propagate_cancellation_error = None
 
         if et is not None:
             if not self._aborting:
@@ -126,25 +136,47 @@ async def __aexit__(self, et, exc, tb):
         assert not self._tasks
 
         if self._base_error is not None:
-            raise self._base_error
+            try:
+                raise self._base_error
+            finally:
+                exc = None
+
+        if self._parent_cancel_requested:
+            # If this flag is set we *must* call uncancel().
+            if self._parent_task.uncancel() == 0:
+                # If there are no pending cancellations left,
+                # don't propagate CancelledError.
+                propagate_cancellation_error = None
 
         # Propagate CancelledError if there is one, except if there
         # are other errors -- those have priority.
-        if propagate_cancellation_error and not self._errors:
-            raise propagate_cancellation_error
-
-        if et is not None and et is not exceptions.CancelledError:
+        try:
+            if propagate_cancellation_error is not None and not self._errors:
+                try:
+                    raise propagate_cancellation_error
+                finally:
+                    exc = None
+        finally:
+            propagate_cancellation_error = None
+
+        if et is not None and not issubclass(et, exceptions.CancelledError):
             self._errors.append(exc)
 
         if self._errors:
-            # Exceptions are heavy objects that can have object
-            # cycles (bad for GC); let's not keep a reference to
-            # a bunch of them.
+            # If the parent task is being cancelled from the outside
+            # of the taskgroup, un-cancel and re-cancel the parent task,
+            # which will keep the cancel count stable.
+            if self._parent_task.cancelling():
+                self._parent_task.uncancel()
+                self._parent_task.cancel()
             try:
-                me = BaseExceptionGroup('unhandled errors in a TaskGroup', self._errors)
-                raise me from None
+                raise BaseExceptionGroup(
+                    'unhandled errors in a TaskGroup',
+                    self._errors,
+                ) from None
             finally:
-                self._errors = None
+                exc = None
+
 
     def create_task(self, coro, *, name=None, context=None):
         """Create a new task in this group and return it.
@@ -152,25 +184,31 @@ def create_task(self, coro, *, name=None, context=None):
         Similar to `asyncio.create_task`.
         """
         if not self._entered:
+            coro.close()
             raise RuntimeError(f"TaskGroup {self!r} has not been entered")
         if self._exiting and not self._tasks:
+            coro.close()
             raise RuntimeError(f"TaskGroup {self!r} is finished")
         if self._aborting:
+            coro.close()
             raise RuntimeError(f"TaskGroup {self!r} is shutting down")
         if context is None:
-            task = self._loop.create_task(coro)
+            task = self._loop.create_task(coro, name=name)
         else:
-            task = self._loop.create_task(coro, context=context)
-        tasks._set_task_name(task, name)
-        # optimization: Immediately call the done callback if the task is
+            task = self._loop.create_task(coro, name=name, context=context)
+
+        # Always schedule the done callback even if the task is
         # already done (e.g. if the coro was able to complete eagerly),
-        # and skip scheduling a done callback
-        if task.done():
-            self._on_task_done(task)
-        else:
-            self._tasks.add(task)
-            task.add_done_callback(self._on_task_done)
-        return task
+        # otherwise if the task completes with an exception then it will cancel
+        # the current task too early. gh-128550, gh-128588
+        self._tasks.add(task)
+        task.add_done_callback(self._on_task_done)
+        try:
+            return task
+        finally:
+            # gh-128552: prevent a refcycle of
+            # task.exception().__traceback__->TaskGroup.create_task->task
+            del task
 
     # Since Python 3.8 Tasks propagate all exceptions correctly,
     # except for KeyboardInterrupt and SystemExit which are
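A short sketch of the stricter create_task() above: submitting work to a TaskGroup that was never entered now raises RuntimeError and closes the coroutine itself, so Python does not also emit a "coroutine ... was never awaited" warning. The worker body is a placeholder:

    import asyncio

    async def worker():
        await asyncio.sleep(1)

    async def main():
        tg = asyncio.TaskGroup()
        try:
            # Never entered: RuntimeError, and the worker() coroutine is
            # closed for us instead of leaking un-awaited.
            tg.create_task(worker())
        except RuntimeError as e:
            print('rejected:', e)

    asyncio.run(main())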
diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py
index 0b22e28d8e0..dadcb5b5f36 100644
--- a/Lib/asyncio/tasks.py
+++ b/Lib/asyncio/tasks.py
@@ -15,8 +15,8 @@
 import functools
 import inspect
 import itertools
+import math
 import types
-import warnings
 import weakref
 from types import GenericAlias
 
@@ -25,6 +25,7 @@
 from . import events
 from . import exceptions
 from . import futures
+from . import queues
 from . import timeouts
 
 # Helper to generate new task names
@@ -67,19 +68,6 @@ def all_tasks(loop=None):
             if futures._get_loop(t) is loop and not t.done()}
 
 
-def _set_task_name(task, name):
-    if name is not None:
-        try:
-            set_name = task.set_name
-        except AttributeError:
-            warnings.warn("Task.set_name() was added in Python 3.8, "
-                          "the method support will be mandatory for third-party "
-                          "task implementations since 3.13.",
-                          DeprecationWarning, stacklevel=3)
-        else:
-            set_name(name)
-
-
 class Task(futures._PyFuture):  # Inherit Python Task implementation
                                 # from a Python Future implementation.
 
@@ -267,6 +255,8 @@ def uncancel(self):
         """
         if self._num_cancels_requested > 0:
             self._num_cancels_requested -= 1
+            if self._num_cancels_requested == 0:
+                self._must_cancel = False
         return self._num_cancels_requested
 
     def __eager_start(self):
@@ -417,11 +407,10 @@ def create_task(coro, *, name=None, context=None):
     loop = events.get_running_loop()
     if context is None:
         # Use legacy API if context is not needed
-        task = loop.create_task(coro)
+        task = loop.create_task(coro, name=name)
     else:
-        task = loop.create_task(coro, context=context)
+        task = loop.create_task(coro, name=name, context=context)
 
-    _set_task_name(task, name)
     return task
 
 
@@ -437,8 +426,6 @@ async def wait(fs, *, timeout=None, return_when=ALL_COMPLETED):
 
     The fs iterable must not be empty.
 
-    Coroutines will be wrapped in Tasks.
-
     Returns two sets of Future: (done, pending).
 
     Usage:
@@ -580,62 +567,125 @@ async def _cancel_and_wait(fut):
         fut.remove_done_callback(cb)
 
 
-# This is *not* a @coroutine! It is just an iterator (yielding Futures).
+class _AsCompletedIterator:
+    """Iterator of awaitables representing tasks of asyncio.as_completed.
+
+    As an asynchronous iterator, iteration yields futures as they finish. As a
+    plain iterator, new coroutines are yielded that will return or raise the
+    result of the next underlying future to complete.
+    """
+    def __init__(self, aws, timeout):
+        self._done = queues.Queue()
+        self._timeout_handle = None
+
+        loop = events.get_event_loop()
+        todo = {ensure_future(aw, loop=loop) for aw in set(aws)}
+        for f in todo:
+            f.add_done_callback(self._handle_completion)
+        if todo and timeout is not None:
+            self._timeout_handle = (
+                loop.call_later(timeout, self._handle_timeout)
+            )
+        self._todo = todo
+        self._todo_left = len(todo)
+
+    def __aiter__(self):
+        return self
+
+    def __iter__(self):
+        return self
+
+    async def __anext__(self):
+        if not self._todo_left:
+            raise StopAsyncIteration
+        assert self._todo_left > 0
+        self._todo_left -= 1
+        return await self._wait_for_one()
+
+    def __next__(self):
+        if not self._todo_left:
+            raise StopIteration
+        assert self._todo_left > 0
+        self._todo_left -= 1
+        return self._wait_for_one(resolve=True)
+
+    def _handle_timeout(self):
+        for f in self._todo:
+            f.remove_done_callback(self._handle_completion)
+            self._done.put_nowait(None)  # Sentinel for _wait_for_one().
+        self._todo.clear()  # Can't do todo.remove(f) in the loop.
+
+    def _handle_completion(self, f):
+        if not self._todo:
+            return  # _handle_timeout() was here first.
+        self._todo.remove(f)
+        self._done.put_nowait(f)
+        if not self._todo and self._timeout_handle is not None:
+            self._timeout_handle.cancel()
+
+    async def _wait_for_one(self, resolve=False):
+        # Wait for the next future to be done and return it unless resolve is
+        # set, in which case return either the result of the future or raise
+        # an exception.
+        f = await self._done.get()
+        if f is None:
+            # Dummy value from _handle_timeout().
+            raise exceptions.TimeoutError
+        return f.result() if resolve else f
+
+
 def as_completed(fs, *, timeout=None):
-    """Return an iterator whose values are coroutines.
+    """Create an iterator of awaitables or their results in completion order.
 
-    When waiting for the yielded coroutines you'll get the results (or
-    exceptions!) of the original Futures (or coroutines), in the order
-    in which and as soon as they complete.
+    Run the supplied awaitables concurrently. The returned object can be
+    iterated to obtain the results of the awaitables as they finish.
 
-    This differs from PEP 3148; the proper way to use this is:
+    The object returned can be iterated as an asynchronous iterator or a plain
+    iterator. When asynchronous iteration is used, the originally-supplied
+    awaitables are yielded if they are tasks or futures. This makes it easy to
+    correlate previously-scheduled tasks with their results:
 
-        for f in as_completed(fs):
-            result = await f  # The 'await' may raise.
-            # Use result.
+        ipv4_connect = create_task(open_connection("127.0.0.1", 80))
+        ipv6_connect = create_task(open_connection("::1", 80))
+        tasks = [ipv4_connect, ipv6_connect]
 
-    If a timeout is specified, the 'await' will raise
-    TimeoutError when the timeout occurs before all Futures are done.
+        async for earliest_connect in as_completed(tasks):
+            # earliest_connect is done. The result can be obtained by
+            # awaiting it or calling earliest_connect.result()
+            reader, writer = await earliest_connect
 
-    Note: The futures 'f' are not necessarily members of fs.
-    """
-    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
-        raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}")
+            if earliest_connect is ipv6_connect:
+                print("IPv6 connection established.")
+            else:
+                print("IPv4 connection established.")
 
-    from .queues import Queue  # Import here to avoid circular import problem.
-    done = Queue()
+    During asynchronous iteration, implicitly-created tasks will be yielded for
+    supplied awaitables that aren't tasks or futures.
 
-    loop = events.get_event_loop()
-    todo = {ensure_future(f, loop=loop) for f in set(fs)}
-    timeout_handle = None
+    When used as a plain iterator, each iteration yields a new coroutine that
+    returns the result or raises the exception of the next completed awaitable.
+    This pattern is compatible with Python versions older than 3.13:
 
-    def _on_timeout():
-        for f in todo:
-            f.remove_done_callback(_on_completion)
-            done.put_nowait(None)  # Queue a dummy value for _wait_for_one().
-        todo.clear()  # Can't do todo.remove(f) in the loop.
+        ipv4_connect = create_task(open_connection("127.0.0.1", 80))
+        ipv6_connect = create_task(open_connection("::1", 80))
+        tasks = [ipv4_connect, ipv6_connect]
 
-    def _on_completion(f):
-        if not todo:
-            return  # _on_timeout() was here first.
-        todo.remove(f)
-        done.put_nowait(f)
-        if not todo and timeout_handle is not None:
-            timeout_handle.cancel()
+        for next_connect in as_completed(tasks):
+            # next_connect is not one of the original task objects. It must be
+            # awaited to obtain the result value or raise the exception of the
+            # awaitable that finishes next.
+            reader, writer = await next_connect
 
-    async def _wait_for_one():
-        f = await done.get()
-        if f is None:
-            # Dummy value from _on_timeout().
-            raise exceptions.TimeoutError
-        return f.result()  # May raise f.exception().
+    A TimeoutError is raised if the timeout occurs before all awaitables are
+    done. This is raised by the async for loop during asynchronous iteration or
+    by the coroutines yielded during plain iteration.
+    """
+    if inspect.isawaitable(fs):
+        raise TypeError(
+            f"expects an iterable of awaitables, not {type(fs).__name__}"
+        )
 
-    for f in todo:
-        f.add_done_callback(_on_completion)
-    if todo and timeout is not None:
-        timeout_handle = loop.call_later(timeout, _on_timeout)
-    for _ in range(len(todo)):
-        yield _wait_for_one()
+    return _AsCompletedIterator(fs, timeout)
 
 
 @types.coroutine
@@ -656,6 +706,9 @@ async def sleep(delay, result=None):
         await __sleep0()
         return result
 
+    if math.isnan(delay):
+        raise ValueError("Invalid delay: NaN (not a number)")
+
     loop = events.get_running_loop()
     future = loop.create_future()
     h = loop.call_later(delay,
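One consequence of the new math import and the check in sleep() above: NaN delays are rejected eagerly instead of producing undefined scheduling behaviour. A tiny sketch:

    import asyncio
    import math

    async def main():
        try:
            await asyncio.sleep(math.nan)
        except ValueError as e:
            print(e)  # "Invalid delay: NaN (not a number)"

    asyncio.run(main())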
diff --git a/Lib/asyncio/timeouts.py b/Lib/asyncio/timeouts.py
index 30042abb3ad..e6f5100691d 100644
--- a/Lib/asyncio/timeouts.py
+++ b/Lib/asyncio/timeouts.py
@@ -109,10 +109,16 @@ async def __aexit__(
 
         if self._state is _State.EXPIRING:
             self._state = _State.EXPIRED
 
-            if self._task.uncancel() <= self._cancelling and exc_type is exceptions.CancelledError:
+            if self._task.uncancel() <= self._cancelling and exc_type is not None:
                 # Since there are no new cancel requests, we're
                 # handling this.
-                raise TimeoutError from exc_val
+                if issubclass(exc_type, exceptions.CancelledError):
+                    raise TimeoutError from exc_val
+                elif exc_val is not None:
+                    self._insert_timeout_error(exc_val)
+                    if isinstance(exc_val, ExceptionGroup):
+                        for exc in exc_val.exceptions:
+                            self._insert_timeout_error(exc)
         elif self._state is _State.ENTERED:
             self._state = _State.EXITED
 
@@ -125,6 +131,16 @@ def _on_timeout(self) -> None:
         # drop the reference early
         self._timeout_handler = None
 
+    @staticmethod
+    def _insert_timeout_error(exc_val: BaseException) -> None:
+        while exc_val.__context__ is not None:
+            if isinstance(exc_val.__context__, exceptions.CancelledError):
+                te = TimeoutError()
+                te.__context__ = te.__cause__ = exc_val.__context__
+                exc_val.__context__ = te
+                break
+            exc_val = exc_val.__context__
+
 
 def timeout(delay: Optional[float]) -> Timeout:
     """Timeout async context manager.
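The net effect of the __aexit__ rework: an expired asyncio.timeout() still converts the internal CancelledError into TimeoutError, and when a different exception (or an ExceptionGroup) races with expiry, a TimeoutError is spliced into its context chain instead of being lost. Basic usage is unchanged:

    import asyncio

    async def main():
        try:
            async with asyncio.timeout(0.1):
                await asyncio.sleep(10)
        except TimeoutError:
            print('timed out')

    asyncio.run(main())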
diff --git a/Lib/asyncio/transports.py b/Lib/asyncio/transports.py
index 30fd41d49af..34c7ad44ffd 100644
--- a/Lib/asyncio/transports.py
+++ b/Lib/asyncio/transports.py
@@ -181,6 +181,8 @@ def sendto(self, data, addr=None):
         to be sent out asynchronously.
         addr is target socket address.
         If addr is None use target address pointed on transport creation.
+        If data is an empty bytes object a zero-length datagram will be
+        sent.
         """
         raise NotImplementedError
diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py
index f2e920ada46..41ccf1b78fb 100644
--- a/Lib/asyncio/unix_events.py
+++ b/Lib/asyncio/unix_events.py
@@ -32,6 +32,7 @@
     'FastChildWatcher', 'PidfdChildWatcher',
     'MultiLoopChildWatcher', 'ThreadedChildWatcher',
     'DefaultEventLoopPolicy',
+    'EventLoop',
 )
 
 
@@ -63,6 +64,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
     def __init__(self, selector=None):
         super().__init__(selector)
         self._signal_handlers = {}
+        self._unix_server_sockets = {}
 
     def close(self):
         super().close()
@@ -283,7 +285,7 @@ async def create_unix_server(
             sock=None, backlog=100, ssl=None,
             ssl_handshake_timeout=None,
             ssl_shutdown_timeout=None,
-            start_serving=True):
+            start_serving=True, cleanup_socket=True):
         if isinstance(ssl, bool):
             raise TypeError('ssl argument must be an SSLContext or None')
 
@@ -339,6 +341,15 @@ async def create_unix_server(
             raise ValueError(
                 f'A UNIX Domain Stream Socket was expected, got {sock!r}')
 
+        if cleanup_socket:
+            path = sock.getsockname()
+            # Check for abstract socket. `str` and `bytes` paths are supported.
+            if path[0] not in (0, '\x00'):
+                try:
+                    self._unix_server_sockets[sock] = os.stat(path).st_ino
+                except FileNotFoundError:
+                    pass
+
         sock.setblocking(False)
         server = base_events.Server(self, [sock], protocol_factory,
                                     ssl, backlog, ssl_handshake_timeout,
@@ -393,6 +404,9 @@ def _sock_sendfile_native_impl(self, fut, registered_fd, sock, fileno,
             fut.set_result(total_sent)
             return
 
+        # On 32-bit architectures truncate to 1GiB to avoid OverflowError
+        blocksize = min(blocksize, sys.maxsize//2 + 1)
+
         try:
             sent = os.sendfile(fd, fileno, offset, blocksize)
         except (BlockingIOError, InterruptedError):
@@ -456,6 +470,27 @@ def cb(fut):
                 self.remove_writer(fd)
         fut.add_done_callback(cb)
 
+    def _stop_serving(self, sock):
+        # Is this a unix socket that needs cleanup?
+        if sock in self._unix_server_sockets:
+            path = sock.getsockname()
+        else:
+            path = None
+
+        super()._stop_serving(sock)
+
+        if path is not None:
+            prev_ino = self._unix_server_sockets[sock]
+            del self._unix_server_sockets[sock]
+            try:
+                if os.stat(path).st_ino == prev_ino:
+                    os.unlink(path)
+            except FileNotFoundError:
+                pass
+            except OSError as err:
+                logger.error('Unable to clean up listening UNIX socket '
+                             '%r: %r', path, err)
+
 
 class _UnixReadPipeTransport(transports.ReadTransport):
 
@@ -1498,3 +1533,4 @@ def set_child_watcher(self, watcher):
 
 SelectorEventLoop = _UnixSelectorEventLoop
 DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
+EventLoop = SelectorEventLoop
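With cleanup_socket=True (the new default), the loop records the bound socket's inode and unlinks the path on shutdown only if it is still the same file. A short sketch; the socket path is an assumption:

    import asyncio

    async def handle(reader, writer):
        writer.close()
        await writer.wait_closed()

    async def main():
        # cleanup_socket=True is the default for Unix servers now.
        server = await asyncio.start_unix_server(handle, path='/tmp/demo.sock')
        async with server:
            await asyncio.sleep(1)
        # On close, /tmp/demo.sock is unlinked only if its inode still
        # matches the socket we bound; a file that replaced it is left alone.

    asyncio.run(main())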
diff --git a/Lib/asyncio/windows_events.py b/Lib/asyncio/windows_events.py
index cb613451a58..bf99bc271c7 100644
--- a/Lib/asyncio/windows_events.py
+++ b/Lib/asyncio/windows_events.py
@@ -30,7 +30,7 @@
 __all__ = (
     'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
     'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy',
-    'WindowsProactorEventLoopPolicy',
+    'WindowsProactorEventLoopPolicy', 'EventLoop',
 )
 
 
@@ -315,24 +315,25 @@ def __init__(self, proactor=None):
             proactor = IocpProactor()
         super().__init__(proactor)
 
-    def run_forever(self):
-        try:
-            assert self._self_reading_future is None
-            self.call_soon(self._loop_self_reading)
-            super().run_forever()
-        finally:
-            if self._self_reading_future is not None:
-                ov = self._self_reading_future._ov
-                self._self_reading_future.cancel()
-                # self_reading_future always uses IOCP, so even though it's
-                # been cancelled, we need to make sure that the IOCP message
-                # is received so that the kernel is not holding on to the
-                # memory, possibly causing memory corruption later. Only
-                # unregister it if IO is complete in all respects. Otherwise
-                # we need another _poll() later to complete the IO.
-                if ov is not None and not ov.pending:
-                    self._proactor._unregister(ov)
-                self._self_reading_future = None
+    def _run_forever_setup(self):
+        assert self._self_reading_future is None
+        self.call_soon(self._loop_self_reading)
+        super()._run_forever_setup()
+
+    def _run_forever_cleanup(self):
+        super()._run_forever_cleanup()
+        if self._self_reading_future is not None:
+            ov = self._self_reading_future._ov
+            self._self_reading_future.cancel()
+            # self_reading_future always uses IOCP, so even though it's
+            # been cancelled, we need to make sure that the IOCP message
+            # is received so that the kernel is not holding on to the
+            # memory, possibly causing memory corruption later. Only
+            # unregister it if IO is complete in all respects. Otherwise
+            # we need another _poll() later to complete the IO.
+            if ov is not None and not ov.pending:
+                self._proactor._unregister(ov)
+            self._self_reading_future = None
 
     async def create_pipe_connection(self, protocol_factory, address):
         f = self._proactor.connect_pipe(address)
@@ -899,3 +900,4 @@ class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
 
 
 DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy
+EventLoop = ProactorEventLoop
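Both platform modules now export an EventLoop alias (SelectorEventLoop on Unix, ProactorEventLoop on Windows), giving portable code a single name to hand to asyncio.Runner as a loop_factory. A sketch, assuming the updated package is importable as asyncio:

    import asyncio

    async def main():
        print(type(asyncio.get_running_loop()).__name__)

    # EventLoop resolves to the platform's default loop class, so the
    # same line works on both Unix and Windows.
    with asyncio.Runner(loop_factory=asyncio.EventLoop) as runner:
        runner.run(main())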
From 32d8e6c1f8e2ea2236ac82c4f5e4c0d76ec555d1 Mon Sep 17 00:00:00 2001
From: Terry Luan
Date: Tue, 6 Jan 2026 21:28:14 -0500
Subject: [PATCH 2/2] Removed expectedFailure from `test_async_case.py`

---
 Lib/test/test_unittest/test_async_case.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Lib/test/test_unittest/test_async_case.py b/Lib/test/test_unittest/test_async_case.py
index f77a5888eeb..8f73f466379 100644
--- a/Lib/test/test_unittest/test_async_case.py
+++ b/Lib/test/test_unittest/test_async_case.py
@@ -486,7 +486,6 @@ async def test_demo1(self):
         result = test.run()
         self.assertTrue(result.wasSuccessful())
 
-    @unittest.expectedFailure # TODO: RUSTPYTHON
     def test_loop_factory(self):
         asyncio.set_event_loop_policy(None)