| 1 | n/a | """Base implementation of event loop. |
|---|
| 2 | n/a | |
|---|
| 3 | n/a | The event loop can be broken up into a multiplexer (the part |
|---|
| 4 | n/a | responsible for notifying us of I/O events) and the event loop proper, |
|---|
| 5 | n/a | which wraps a multiplexer with functionality for scheduling callbacks, |
|---|
| 6 | n/a | immediately or at a given time in the future. |
|---|
| 7 | n/a | |
|---|
| 8 | n/a | Whenever a public API takes a callback, subsequent positional |
|---|
| 9 | n/a | arguments will be passed to the callback if/when it is called. This |
|---|
| 10 | n/a | avoids the proliferation of trivial lambdas implementing closures. |
|---|
| 11 | n/a | Keyword arguments for the callback are not supported; this is a |
|---|
| 12 | n/a | conscious design decision, leaving the door open for keyword arguments |
|---|
| 13 | n/a | to modify the meaning of the API call itself. |
|---|
| 14 | n/a | """ |

import collections
import concurrent.futures
import heapq
import itertools
import logging
import os
import socket
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref

from . import compat
from . import coroutines
from . import events
from . import futures
from . import tasks
from .coroutines import coroutine
from .log import logger


__all__ = ['BaseEventLoop']


# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100

# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5

# Exceptions which must not call the exception handler in fatal error
# methods (_fatal_error())
_FATAL_ERROR_IGNORE = (BrokenPipeError,
                       ConnectionResetError, ConnectionAbortedError)


def _format_handle(handle):
    cb = handle._callback
    if isinstance(getattr(cb, '__self__', None), tasks.Task):
        # format the task
        return repr(cb.__self__)
    else:
        return str(handle)


def _format_pipe(fd):
    if fd == subprocess.PIPE:
        return '<pipe>'
    elif fd == subprocess.STDOUT:
        return '<stdout>'
    else:
        return repr(fd)


def _set_reuseport(sock):
    if not hasattr(socket, 'SO_REUSEPORT'):
        raise ValueError('reuse_port not supported by socket module')
    else:
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        except OSError:
            raise ValueError('reuse_port not supported by socket module, '
                             'SO_REUSEPORT defined but not implemented.')


def _is_stream_socket(sock):
    # Linux's socket.type is a bitmask that can include extra info
    # about the socket, therefore we can't do a simple
    # `sock_type == socket.SOCK_STREAM`.
    return (sock.type & socket.SOCK_STREAM) == socket.SOCK_STREAM


def _is_dgram_socket(sock):
    # Linux's socket.type is a bitmask that can include extra info
    # about the socket, therefore we can't do a simple
    # `sock_type == socket.SOCK_DGRAM`.
    return (sock.type & socket.SOCK_DGRAM) == socket.SOCK_DGRAM

def _ipaddr_info(host, port, family, type, proto):
    # Try to skip getaddrinfo if "host" is already an IP. Users might have
    # handled name resolution in their own code and pass in resolved IPs.
    if not hasattr(socket, 'inet_pton'):
        return

    if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
            host is None:
        return None

    if type == socket.SOCK_STREAM:
        # Linux only:
        # getaddrinfo() can raise when socket.type is a bit mask.
        # So if socket.type is a bit mask of SOCK_STREAM, and say
        # SOCK_NONBLOCK, we simply return None, which will trigger
        # a call to getaddrinfo() letting it process this request.
        proto = socket.IPPROTO_TCP
    elif type == socket.SOCK_DGRAM:
        proto = socket.IPPROTO_UDP
    else:
        return None

    if port is None:
        port = 0
    elif isinstance(port, bytes) and port == b'':
        port = 0
    elif isinstance(port, str) and port == '':
        port = 0
    else:
        # If port's a service name like "http", don't skip getaddrinfo.
        try:
            port = int(port)
        except (TypeError, ValueError):
            return None

    if family == socket.AF_UNSPEC:
        afs = [socket.AF_INET]
        if hasattr(socket, 'AF_INET6'):
            afs.append(socket.AF_INET6)
    else:
        afs = [family]

    if isinstance(host, bytes):
        host = host.decode('idna')
    if '%' in host:
        # Linux's inet_pton doesn't accept an IPv6 zone index after host,
        # like '::1%lo0'.
        return None

    for af in afs:
        try:
            socket.inet_pton(af, host)
            # The host has already been resolved.
            return af, type, proto, '', (host, port)
        except OSError:
            pass

    # "host" is not an IP address.
    return None
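# Illustrative note (not part of the original module): for a literal address,
# _ipaddr_info('127.0.0.1', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)
# returns a ready-made addrinfo tuple,
# (AF_INET, SOCK_STREAM, IPPROTO_TCP, '', ('127.0.0.1', 80)),
# while a name such as 'example.com' yields None and the caller falls
# through to a real getaddrinfo() lookup.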


def _ensure_resolved(address, *, family=0, type=socket.SOCK_STREAM, proto=0,
                     flags=0, loop):
    host, port = address[:2]
    info = _ipaddr_info(host, port, family, type, proto)
    if info is not None:
        # "host" is already a resolved IP.
        fut = loop.create_future()
        fut.set_result([info])
        return fut
    else:
        return loop.getaddrinfo(host, port, family=family, type=type,
                                proto=proto, flags=flags)


def _run_until_complete_cb(fut):
    exc = fut._exception
    if (isinstance(exc, BaseException)
            and not isinstance(exc, Exception)):
        # Issue #22429: run_forever() already finished, no need to
        # stop it.
        return
    fut._loop.stop()


class Server(events.AbstractServer):

    def __init__(self, loop, sockets):
        self._loop = loop
        self.sockets = sockets
        self._active_count = 0
        self._waiters = []

    def __repr__(self):
        return '<%s sockets=%r>' % (self.__class__.__name__, self.sockets)

    def _attach(self):
        assert self.sockets is not None
        self._active_count += 1

    def _detach(self):
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self.sockets is None:
            self._wakeup()

    def close(self):
        sockets = self.sockets
        if sockets is None:
            return
        self.sockets = None
        for sock in sockets:
            self._loop._stop_serving(sock)
        if self._active_count == 0:
            self._wakeup()

    def _wakeup(self):
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)

    @coroutine
    def wait_closed(self):
        if self.sockets is None or self._waiters is None:
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        yield from waiter


class BaseEventLoop(events.AbstractEventLoop):

    def __init__(self):
        self._timer_cancelled_count = 0
        self._closed = False
        self._stopping = False
        self._ready = collections.deque()
        self._scheduled = []
        self._default_executor = None
        self._internal_fds = 0
        # Identifier of the thread running the event loop, or None if the
        # event loop is not running
        self._thread_id = None
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        self.set_debug((not sys.flags.ignore_environment
                        and bool(os.environ.get('PYTHONASYNCIODEBUG'))))
        # In debug mode, if the execution of a callback or a step of a task
        # exceeds this duration in seconds, the slow callback/task is logged.
        self.slow_callback_duration = 0.1
        self._current_handle = None
        self._task_factory = None
        self._coroutine_wrapper_set = False

        if hasattr(sys, 'get_asyncgen_hooks'):
            # Python >= 3.6
            # A weak set of all asynchronous generators that are
            # being iterated by the loop.
            self._asyncgens = weakref.WeakSet()
        else:
            self._asyncgens = None

        # Set to True when `loop.shutdown_asyncgens` is called.
        self._asyncgens_shutdown_called = False

    def __repr__(self):
        return ('<%s running=%s closed=%s debug=%s>'
                % (self.__class__.__name__, self.is_running(),
                   self.is_closed(), self.get_debug()))

    def create_future(self):
        """Create a Future object attached to the loop."""
        return futures.Future(loop=self)

    def create_task(self, coro):
        """Schedule a coroutine object.

        Return a task object.
        """
        self._check_closed()
        if self._task_factory is None:
            task = tasks.Task(coro, loop=self)
            if task._source_traceback:
                del task._source_traceback[-1]
        else:
            task = self._task_factory(self, coro)
        return task

    def set_task_factory(self, factory):
        """Set a task factory that will be used by loop.create_task().

        If factory is None the default task factory will be set.

        If factory is a callable, it should have a signature matching
        '(loop, coro)', where 'loop' will be a reference to the active
        event loop, 'coro' will be a coroutine object. The callable
        must return a Future.
        """
        if factory is not None and not callable(factory):
            raise TypeError('task factory must be a callable or None')
        self._task_factory = factory
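    # Illustrative sketch (not part of the original module): a task factory
    # with the '(loop, coro)' signature described above, reproducing the
    # default behaviour while tagging each task (the attribute name is
    # hypothetical):
    #
    #     def tagging_factory(loop, coro):
    #         task = tasks.Task(coro, loop=loop)
    #         task.custom_tag = 'example'
    #         return task
    #
    #     loop.set_task_factory(tagging_factory)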

    def get_task_factory(self):
        """Return a task factory, or None if the default one is in use."""
        return self._task_factory

    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create socket transport."""
        raise NotImplementedError

    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
                            *, server_side=False, server_hostname=None,
                            extra=None, server=None):
        """Create SSL transport."""
        raise NotImplementedError

    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        """Create datagram transport."""
        raise NotImplementedError

    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        """Create read pipe transport."""
        raise NotImplementedError

    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        """Create write pipe transport."""
        raise NotImplementedError

    @coroutine
    def _make_subprocess_transport(self, protocol, args, shell,
                                   stdin, stdout, stderr, bufsize,
                                   extra=None, **kwargs):
        """Create subprocess transport."""
        raise NotImplementedError

    def _write_to_self(self):
        """Write a byte to self-pipe, to wake up the event loop.

        This may be called from a different thread.

        The subclass is responsible for implementing the self-pipe.
        """
        raise NotImplementedError

    def _process_events(self, event_list):
        """Process selector events."""
        raise NotImplementedError

    def _check_closed(self):
        if self._closed:
            raise RuntimeError('Event loop is closed')

    def _asyncgen_finalizer_hook(self, agen):
        self._asyncgens.discard(agen)
        if not self.is_closed():
            self.create_task(agen.aclose())
            # Wake up the loop if the finalizer was called from
            # a different thread.
            self._write_to_self()

    def _asyncgen_firstiter_hook(self, agen):
        if self._asyncgens_shutdown_called:
            warnings.warn(
                "asynchronous generator {!r} was scheduled after "
                "loop.shutdown_asyncgens() call".format(agen),
                ResourceWarning, source=self)

        self._asyncgens.add(agen)

    @coroutine
    def shutdown_asyncgens(self):
        """Shutdown all active asynchronous generators."""
        self._asyncgens_shutdown_called = True

        if self._asyncgens is None or not len(self._asyncgens):
            # If Python version is <3.6 or we don't have any asynchronous
            # generators alive.
            return

        closing_agens = list(self._asyncgens)
        self._asyncgens.clear()

        shutdown_coro = tasks.gather(
            *[ag.aclose() for ag in closing_agens],
            return_exceptions=True,
            loop=self)

        results = yield from shutdown_coro
        for result, agen in zip(results, closing_agens):
            if isinstance(result, Exception):
                self.call_exception_handler({
                    'message': 'an error occurred during closing of '
                               'asynchronous generator {!r}'.format(agen),
                    'exception': result,
                    'asyncgen': agen
                })

    def run_forever(self):
        """Run until stop() is called."""
        self._check_closed()
        if self.is_running():
            raise RuntimeError('This event loop is already running')
        if events._get_running_loop() is not None:
            raise RuntimeError(
                'Cannot run the event loop while another loop is running')
        self._set_coroutine_wrapper(self._debug)
        self._thread_id = threading.get_ident()
        if self._asyncgens is not None:
            old_agen_hooks = sys.get_asyncgen_hooks()
            sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
                                   finalizer=self._asyncgen_finalizer_hook)
        try:
            events._set_running_loop(self)
            while True:
                self._run_once()
                if self._stopping:
                    break
        finally:
            self._stopping = False
            self._thread_id = None
            events._set_running_loop(None)
            self._set_coroutine_wrapper(False)
            if self._asyncgens is not None:
                sys.set_asyncgen_hooks(*old_agen_hooks)

    def run_until_complete(self, future):
        """Run until the Future is done.

        If the argument is a coroutine, it is wrapped in a Task.

        WARNING: It would be disastrous to call run_until_complete()
        with the same coroutine twice -- it would wrap it in two
        different Tasks and that can't be good.

        Return the Future's result, or raise its exception.
        """
        self._check_closed()

        new_task = not futures.isfuture(future)
        future = tasks.ensure_future(future, loop=self)
        if new_task:
            # An exception is raised if the future didn't complete, so there
            # is no need to log the "destroy pending task" message
            future._log_destroy_pending = False

        future.add_done_callback(_run_until_complete_cb)
        try:
            self.run_forever()
        except:
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException. Consume the exception
                # to not log a warning, the caller doesn't have access to the
                # local task.
                future.exception()
            raise
        future.remove_done_callback(_run_until_complete_cb)
        if not future.done():
            raise RuntimeError('Event loop stopped before Future completed.')

        return future.result()
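    # Illustrative sketch (not part of the original module): typical use from
    # synchronous code, assuming a coroutine named main() defined elsewhere:
    #
    #     loop = asyncio.get_event_loop()
    #     result = loop.run_until_complete(main())
    #     loop.close()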

    def stop(self):
        """Stop running the event loop.

        Every callback already scheduled will still run. This simply informs
        run_forever to stop looping after a complete iteration.
        """
        self._stopping = True

    def close(self):
        """Close the event loop.

        This clears the queues and shuts down the executor,
        but does not wait for the executor to finish.

        The event loop must not be running.
        """
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self._closed:
            return
        if self._debug:
            logger.debug("Close %r", self)
        self._closed = True
        self._ready.clear()
        self._scheduled.clear()
        executor = self._default_executor
        if executor is not None:
            self._default_executor = None
            executor.shutdown(wait=False)

    def is_closed(self):
        """Returns True if the event loop was closed."""
        return self._closed

    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. This is no longer the case on
    # Python 3.4, thanks to PEP 442.
    if compat.PY34:
        def __del__(self):
            if not self.is_closed():
                warnings.warn("unclosed event loop %r" % self, ResourceWarning,
                              source=self)
                if not self.is_running():
                    self.close()

    def is_running(self):
        """Returns True if the event loop is running."""
        return (self._thread_id is not None)

    def time(self):
        """Return the time according to the event loop's clock.

        This is a float expressed in seconds since an epoch, but the
        epoch, precision, accuracy and drift are unspecified and may
        differ per event loop.
        """
        return time.monotonic()

    def call_later(self, delay, callback, *args):
        """Arrange for a callback to be called at a given time.

        Return a Handle: an opaque object with a cancel() method that
        can be used to cancel the call.

        The delay can be an int or float, expressed in seconds. It is
        always relative to the current time.

        Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
        will be called first.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        timer = self.call_at(self.time() + delay, callback, *args)
        if timer._source_traceback:
            del timer._source_traceback[-1]
        return timer

    def call_at(self, when, callback, *args):
        """Like call_later(), but uses an absolute time.

        Absolute time corresponds to the event loop's time() method.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_at')
        timer = events.TimerHandle(when, callback, args, self)
        if timer._source_traceback:
            del timer._source_traceback[-1]
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer
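    # Illustrative sketch (not part of the original module): the TimerHandle
    # returned by call_later()/call_at() can cancel the callback before it
    # runs; the delayed print below never fires:
    #
    #     handle = loop.call_later(5.0, print, 'too late')
    #     handle.cancel()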

    def call_soon(self, callback, *args):
        """Arrange for a callback to be called as soon as possible.

        This operates as a FIFO queue: callbacks are called in the
        order in which they are registered. Each callback will be
        called exactly once.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_soon')
        handle = self._call_soon(callback, args)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        return handle

    def _check_callback(self, callback, method):
        if (coroutines.iscoroutine(callback) or
                coroutines.iscoroutinefunction(callback)):
            raise TypeError(
                "coroutines cannot be used with {}()".format(method))
        if not callable(callback):
            raise TypeError(
                'a callable object was expected by {}(), got {!r}'.format(
                    method, callback))


    def _call_soon(self, callback, args):
        handle = events.Handle(callback, args, self)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._ready.append(handle)
        return handle

    def _check_thread(self):
        """Check that the current thread is the thread running the event loop.

        Non-thread-safe methods of this class make this assumption and will
        likely behave incorrectly when the assumption is violated.

        Should only be called when (self._debug == True). The caller is
        responsible for checking this condition for performance reasons.
        """
        if self._thread_id is None:
            return
        thread_id = threading.get_ident()
        if thread_id != self._thread_id:
            raise RuntimeError(
                "Non-thread-safe operation invoked on an event loop other "
                "than the current one")

    def call_soon_threadsafe(self, callback, *args):
        """Like call_soon(), but thread-safe."""
        self._check_closed()
        if self._debug:
            self._check_callback(callback, 'call_soon_threadsafe')
        handle = self._call_soon(callback, args)
        if handle._source_traceback:
            del handle._source_traceback[-1]
        self._write_to_self()
        return handle
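    # Illustrative sketch (not part of the original module): handing work to
    # the loop from a worker thread; plain call_soon() must not be used
    # across threads:
    #
    #     threading.Thread(
    #         target=loop.call_soon_threadsafe,
    #         args=(print, 'scheduled from another thread')).start()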

    def run_in_executor(self, executor, func, *args):
        self._check_closed()
        if self._debug:
            self._check_callback(func, 'run_in_executor')
        if executor is None:
            executor = self._default_executor
            if executor is None:
                executor = concurrent.futures.ThreadPoolExecutor()
                self._default_executor = executor
        return futures.wrap_future(executor.submit(func, *args), loop=self)

    def set_default_executor(self, executor):
        self._default_executor = executor

    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
        msg = ["%s:%r" % (host, port)]
        if family:
            msg.append('family=%r' % family)
        if type:
            msg.append('type=%r' % type)
        if proto:
            msg.append('proto=%r' % proto)
        if flags:
            msg.append('flags=%r' % flags)
        msg = ', '.join(msg)
        logger.debug('Get address info %s', msg)

        t0 = self.time()
        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
        dt = self.time() - t0

        msg = ('Getting address info %s took %.3f ms: %r'
               % (msg, dt * 1e3, addrinfo))
        if dt >= self.slow_callback_duration:
            logger.info(msg)
        else:
            logger.debug(msg)
        return addrinfo

    def getaddrinfo(self, host, port, *,
                    family=0, type=0, proto=0, flags=0):
        if self._debug:
            return self.run_in_executor(None, self._getaddrinfo_debug,
                                        host, port, family, type, proto, flags)
        else:
            return self.run_in_executor(None, socket.getaddrinfo,
                                        host, port, family, type, proto, flags)

    def getnameinfo(self, sockaddr, flags=0):
        return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)

    @coroutine
    def create_connection(self, protocol_factory, host=None, port=None, *,
                          ssl=None, family=0, proto=0, flags=0, sock=None,
                          local_addr=None, server_hostname=None):
        """Connect to a TCP server.

        Create a streaming transport connection to a given Internet host and
        port: socket family AF_INET or socket.AF_INET6 depending on host (or
        family if specified), socket type SOCK_STREAM. protocol_factory must be
        a callable returning a protocol instance.

        This method is a coroutine which will try to establish the connection
        in the background. When successful, the coroutine returns a
        (transport, protocol) pair.
        """
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')

        if server_hostname is None and ssl:
            # Use host as default for server_hostname. It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given. To avoid this error, you can pass
            # server_hostname='' -- this will bypass the hostname
            # check. (This also means that if host is a numeric
            # IP/IPv6 address, we will attempt to verify that exact
            # address; this will probably fail, but it is possible to
            # create a certificate for a specific IP address, so we
            # don't judge it here.)
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host

        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')

            f1 = _ensure_resolved((host, port), family=family,
                                  type=socket.SOCK_STREAM, proto=proto,
                                  flags=flags, loop=self)
            fs = [f1]
            if local_addr is not None:
                f2 = _ensure_resolved(local_addr, family=family,
                                      type=socket.SOCK_STREAM, proto=proto,
                                      flags=flags, loop=self)
                fs.append(f2)
            else:
                f2 = None

            yield from tasks.wait(fs, loop=self)

            infos = f1.result()
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            if f2 is not None:
                laddr_infos = f2.result()
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')

            exceptions = []
            for family, type, proto, cname, address in infos:
                try:
                    sock = socket.socket(family=family, type=type, proto=proto)
                    sock.setblocking(False)
                    if f2 is not None:
                        for _, _, _, _, laddr in laddr_infos:
                            try:
                                sock.bind(laddr)
                                break
                            except OSError as exc:
                                exc = OSError(
                                    exc.errno, 'error while '
                                    'attempting to bind on address '
                                    '{!r}: {}'.format(
                                        laddr, exc.strerror.lower()))
                                exceptions.append(exc)
                        else:
                            sock.close()
                            sock = None
                            continue
                    if self._debug:
                        logger.debug("connect %r to %r", sock, address)
                    yield from self.sock_connect(sock, address)
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))

        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            if not _is_stream_socket(sock):
                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
                # are SOCK_STREAM.
                # We support passing AF_UNIX sockets even though we have
                # a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method breaks backwards
                # compatibility.
                raise ValueError(
                    'A Stream Socket was expected, got {!r}'.format(sock))

        transport, protocol = yield from self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
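    # Illustrative sketch (not part of the original module): opening a plain
    # TCP connection with a hypothetical MyProtocol protocol class, from
    # inside a coroutine running on this loop:
    #
    #     transport, protocol = yield from loop.create_connection(
    #         MyProtocol, 'example.com', 80)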

    @coroutine
    def _create_connection_transport(self, sock, protocol_factory, ssl,
                                     server_hostname, server_side=False):

        sock.setblocking(False)

        protocol = protocol_factory()
        waiter = self.create_future()
        if ssl:
            sslcontext = None if isinstance(ssl, bool) else ssl
            transport = self._make_ssl_transport(
                sock, protocol, sslcontext, waiter,
                server_side=server_side, server_hostname=server_hostname)
        else:
            transport = self._make_socket_transport(sock, protocol, waiter)

        try:
            yield from waiter
        except:
            transport.close()
            raise

        return transport, protocol

    @coroutine
    def create_datagram_endpoint(self, protocol_factory,
                                 local_addr=None, remote_addr=None, *,
                                 family=0, proto=0, flags=0,
                                 reuse_address=None, reuse_port=None,
                                 allow_broadcast=None, sock=None):
        """Create datagram connection."""
        if sock is not None:
            if not _is_dgram_socket(sock):
                raise ValueError(
                    'A UDP Socket was expected, got {!r}'.format(sock))
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_address or reuse_port or allow_broadcast):
                # show the problematic kwargs in exception msg
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_address=reuse_address, reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(
                    '{}={}'.format(k, v) for k, v in opts.items() if v)
                raise ValueError(
                    'socket modifier keyword arguments can not be used '
                    'when sock is specified. ({})'.format(problems))
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            else:
                # join address by (family, protocol)
                addr_infos = collections.OrderedDict()
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        assert isinstance(addr, tuple) and len(addr) == 2, (
                            '2-tuple is expected')

                        infos = yield from _ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')

                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address

                # each addr has to have info for each (family, proto) pair
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]

                if not addr_pairs_info:
                    raise ValueError('can not get address information')

            exceptions = []

            if reuse_address is None:
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'

            for ((family, proto),
                 (local_address, remote_address)) in addr_pairs_info:
                sock = None
                r_addr = None
                try:
                    sock = socket.socket(
                        family=family, type=socket.SOCK_DGRAM, proto=proto)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    if reuse_port:
                        _set_reuseport(sock)
                    if allow_broadcast:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.setblocking(False)

                    if local_addr:
                        sock.bind(local_address)
                    if remote_addr:
                        yield from self.sock_connect(sock, remote_address)
                        r_addr = remote_address
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                raise exceptions[0]

        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)

        try:
            yield from waiter
        except:
            transport.close()
            raise

        return transport, protocol
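    # Illustrative sketch (not part of the original module): a UDP endpoint
    # bound to a local port, assuming a hypothetical EchoProtocol class that
    # implements asyncio.DatagramProtocol:
    #
    #     transport, protocol = yield from loop.create_datagram_endpoint(
    #         EchoProtocol, local_addr=('127.0.0.1', 9999))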

    @coroutine
    def _create_server_getaddrinfo(self, host, port, family, flags):
        infos = yield from _ensure_resolved((host, port), family=family,
                                            type=socket.SOCK_STREAM,
                                            flags=flags, loop=self)
        if not infos:
            raise OSError('getaddrinfo({!r}) returned empty list'.format(host))
        return infos

    @coroutine
    def create_server(self, protocol_factory, host=None, port=None,
                      *,
                      family=socket.AF_UNSPEC,
                      flags=socket.AI_PASSIVE,
                      sock=None,
                      backlog=100,
                      ssl=None,
                      reuse_address=None,
                      reuse_port=None):
        """Create a TCP server.

        The host parameter can be a string; in that case the TCP server is
        bound to host and port.

        The host parameter can also be a sequence of strings, and in that case
        the TCP server is bound to all hosts of the sequence. If a host
        appears multiple times (possibly indirectly e.g. when hostnames
        resolve to the same IP address), the server is only bound once to that
        host.

        Return a Server object which can be used to stop the service.

        This method is a coroutine.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')

            AF_INET6 = getattr(socket, 'AF_INET6', 0)
            if reuse_address is None:
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            if host == '':
                hosts = [None]
            elif (isinstance(host, str) or
                  not isinstance(host, collections.Iterable)):
                hosts = [host]
            else:
                hosts = host

            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = yield from tasks.gather(*fs, loop=self)
            infos = set(itertools.chain.from_iterable(infos))

            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower()))
                completed = True
            finally:
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if not _is_stream_socket(sock):
                raise ValueError(
                    'A Stream Socket was expected, got {!r}'.format(sock))
            sockets = [sock]

        server = Server(self, sockets)
        for sock in sockets:
            sock.listen(backlog)
            sock.setblocking(False)
            self._start_serving(protocol_factory, sock, ssl, server, backlog)
        if self._debug:
            logger.info("%r is serving", server)
        return server
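    # Illustrative sketch (not part of the original module): serving on two
    # interfaces at once with a hypothetical HttpProtocol class, then shutting
    # the server down cleanly:
    #
    #     server = yield from loop.create_server(
    #         HttpProtocol, ['127.0.0.1', '::1'], 8080)
    #     server.close()
    #     yield from server.wait_closed()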

    @coroutine
    def connect_accepted_socket(self, protocol_factory, sock, *, ssl=None):
        """Handle an accepted connection.

        This is used by servers that accept connections outside of
        asyncio but that use asyncio to handle connections.

        This method is a coroutine. When completed, the coroutine
        returns a (transport, protocol) pair.
        """
        if not _is_stream_socket(sock):
            raise ValueError(
                'A Stream Socket was expected, got {!r}'.format(sock))

        transport, protocol = yield from self._create_connection_transport(
            sock, protocol_factory, ssl, '', server_side=True)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
        return transport, protocol
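    # Illustrative sketch (not part of the original module): wrapping a socket
    # obtained from an external accept() call, assuming a hypothetical
    # MyProtocol class and an already-listening socket:
    #
    #     conn, _ = listening_sock.accept()
    #     transport, protocol = yield from loop.connect_accepted_socket(
    #         MyProtocol, conn)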

    @coroutine
    def connect_read_pipe(self, protocol_factory, pipe):
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_read_pipe_transport(pipe, protocol, waiter)

        try:
            yield from waiter
        except:
            transport.close()
            raise

        if self._debug:
            logger.debug('Read pipe %r connected: (%r, %r)',
                         pipe.fileno(), transport, protocol)
        return transport, protocol

    @coroutine
    def connect_write_pipe(self, protocol_factory, pipe):
        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_write_pipe_transport(pipe, protocol, waiter)

        try:
            yield from waiter
        except:
            transport.close()
            raise

        if self._debug:
            logger.debug('Write pipe %r connected: (%r, %r)',
                         pipe.fileno(), transport, protocol)
        return transport, protocol

    def _log_subprocess(self, msg, stdin, stdout, stderr):
        info = [msg]
        if stdin is not None:
            info.append('stdin=%s' % _format_pipe(stdin))
        if stdout is not None and stderr == subprocess.STDOUT:
            info.append('stdout=stderr=%s' % _format_pipe(stdout))
        else:
            if stdout is not None:
                info.append('stdout=%s' % _format_pipe(stdout))
            if stderr is not None:
                info.append('stderr=%s' % _format_pipe(stderr))
        logger.debug(' '.join(info))

|---|
| 1139 | n/a | @coroutine |
|---|
| 1140 | n/a | def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE, |
|---|
| 1141 | n/a | stdout=subprocess.PIPE, stderr=subprocess.PIPE, |
|---|
| 1142 | n/a | universal_newlines=False, shell=True, bufsize=0, |
|---|
| 1143 | n/a | **kwargs): |
|---|
| 1144 | n/a | if not isinstance(cmd, (bytes, str)): |
|---|
| 1145 | n/a | raise ValueError("cmd must be a string") |
|---|
| 1146 | n/a | if universal_newlines: |
|---|
| 1147 | n/a | raise ValueError("universal_newlines must be False") |
|---|
| 1148 | n/a | if not shell: |
|---|
| 1149 | n/a | raise ValueError("shell must be True") |
|---|
| 1150 | n/a | if bufsize != 0: |
|---|
| 1151 | n/a | raise ValueError("bufsize must be 0") |
|---|
| 1152 | n/a | protocol = protocol_factory() |
|---|
| 1153 | n/a | if self._debug: |
|---|
| 1154 | n/a | # don't log parameters: they may contain sensitive information |
|---|
| 1155 | n/a | # (password) and may be too long |
|---|
| 1156 | n/a | debug_log = 'run shell command %r' % cmd |
|---|
| 1157 | n/a | self._log_subprocess(debug_log, stdin, stdout, stderr) |
|---|
| 1158 | n/a | transport = yield from self._make_subprocess_transport( |
|---|
| 1159 | n/a | protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs) |
|---|
| 1160 | n/a | if self._debug: |
|---|
| 1161 | n/a | logger.info('%s: %r', debug_log, transport) |
|---|
| 1162 | n/a | return transport, protocol |
|---|
| 1163 | n/a | |
|---|
| 1164 | n/a | @coroutine |
|---|
| 1165 | n/a | def subprocess_exec(self, protocol_factory, program, *args, |
|---|
| 1166 | n/a | stdin=subprocess.PIPE, stdout=subprocess.PIPE, |
|---|
| 1167 | n/a | stderr=subprocess.PIPE, universal_newlines=False, |
|---|
| 1168 | n/a | shell=False, bufsize=0, **kwargs): |
|---|
| 1169 | n/a | if universal_newlines: |
|---|
| 1170 | n/a | raise ValueError("universal_newlines must be False") |
|---|
| 1171 | n/a | if shell: |
|---|
| 1172 | n/a | raise ValueError("shell must be False") |
|---|
| 1173 | n/a | if bufsize != 0: |
|---|
| 1174 | n/a | raise ValueError("bufsize must be 0") |
|---|
| 1175 | n/a | popen_args = (program,) + args |
|---|
| 1176 | n/a | for arg in popen_args: |
|---|
| 1177 | n/a | if not isinstance(arg, (str, bytes)): |
|---|
| 1178 | n/a | raise TypeError("program arguments must be " |
|---|
| 1179 | n/a | "a bytes or text string, not %s" |
|---|
| 1180 | n/a | % type(arg).__name__) |
|---|
| 1181 | n/a | protocol = protocol_factory() |
|---|
| 1182 | n/a | if self._debug: |
|---|
| 1183 | n/a | # don't log parameters: they may contain sensitive information |
|---|
| 1184 | n/a | # (password) and may be too long |
|---|
| 1185 | n/a | debug_log = 'execute program %r' % program |
|---|
| 1186 | n/a | self._log_subprocess(debug_log, stdin, stdout, stderr) |
|---|
| 1187 | n/a | transport = yield from self._make_subprocess_transport( |
|---|
| 1188 | n/a | protocol, popen_args, False, stdin, stdout, stderr, |
|---|
| 1189 | n/a | bufsize, **kwargs) |
|---|
| 1190 | n/a | if self._debug: |
|---|
| 1191 | n/a | logger.info('%s: %r', debug_log, transport) |
|---|
| 1192 | n/a | return transport, protocol |
|---|
| 1193 | n/a | |
|---|
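| n/a | n/a | # subprocess_shell() and subprocess_exec() above only validate their |
|---|
| n/a | n/a | # arguments and build the protocol; the real work happens in the |
|---|
| n/a | n/a | # platform-specific _make_subprocess_transport().  A minimal sketch of |
|---|
| n/a | n/a | # driving subprocess_exec() directly (most code uses the higher-level |
|---|
| n/a | n/a | # asyncio.create_subprocess_exec() instead; ExitProtocol and run() are |
|---|
| n/a | n/a | # illustrative names, not part of this module): |
|---|
| n/a | n/a | # |
|---|
| n/a | n/a | #   import asyncio |
|---|
| n/a | n/a | # |
|---|
| n/a | n/a | #   class ExitProtocol(asyncio.SubprocessProtocol): |
|---|
| n/a | n/a | #       def __init__(self, exited): |
|---|
| n/a | n/a | #           self.exited = exited |
|---|
| n/a | n/a | #       def process_exited(self): |
|---|
| n/a | n/a | #           self.exited.set_result(True) |
|---|
| n/a | n/a | # |
|---|
| n/a | n/a | #   async def run(loop): |
|---|
| n/a | n/a | #       exited = loop.create_future() |
|---|
| n/a | n/a | #       transport, protocol = await loop.subprocess_exec( |
|---|
| n/a | n/a | #           lambda: ExitProtocol(exited), 'echo', 'hello') |
|---|
| n/a | n/a | #       await exited |
|---|
| n/a | n/a | #       transport.close() |
|---|
| n/a | n/a | |
|---|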
| 1194 | n/a | def get_exception_handler(self): |
|---|
| 1195 | n/a | """Return an exception handler, or None if the default one is in use. |
|---|
| 1196 | n/a | """ |
|---|
| 1197 | n/a | return self._exception_handler |
|---|
| 1198 | n/a | |
|---|
| 1199 | n/a | def set_exception_handler(self, handler): |
|---|
| 1200 | n/a | """Set handler as the new event loop exception handler. |
|---|
| 1201 | n/a | |
|---|
| 1202 | n/a | If handler is None, the default exception handler will |
|---|
| 1203 | n/a | be set. |
|---|
| 1204 | n/a | |
|---|
| 1205 | n/a | If handler is a callable object, it should have a |
|---|
| 1206 | n/a | signature matching '(loop, context)', where 'loop' |
|---|
| 1207 | n/a | will be a reference to the active event loop and 'context' |
|---|
| 1208 | n/a | will be a dict object (see `call_exception_handler()` |
|---|
| 1209 | n/a | documentation for details about context). |
|---|
| 1210 | n/a | """ |
|---|
| 1211 | n/a | if handler is not None and not callable(handler): |
|---|
| 1212 | n/a | raise TypeError('A callable object or None is expected, ' |
|---|
| 1213 | n/a | 'got {!r}'.format(handler)) |
|---|
| 1214 | n/a | self._exception_handler = handler |
|---|
| 1215 | n/a | |
|---|
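| n/a | n/a | # A handler installed via set_exception_handler() receives the loop and |
|---|
| n/a | n/a | # the context dict described in call_exception_handler() below.  A |
|---|
| n/a | n/a | # minimal sketch of a custom handler that falls back to the default |
|---|
| n/a | n/a | # behavior (my_handler is an illustrative name): |
|---|
| n/a | n/a | # |
|---|
| n/a | n/a | #   def my_handler(loop, context): |
|---|
| n/a | n/a | #       if isinstance(context.get('exception'), ConnectionResetError): |
|---|
| n/a | n/a | #           return   # swallow this particular error |
|---|
| n/a | n/a | #       loop.default_exception_handler(context) |
|---|
| n/a | n/a | # |
|---|
| n/a | n/a | #   loop.set_exception_handler(my_handler) |
|---|
| n/a | n/a | #   loop.set_exception_handler(None)   # restore the default handler |
|---|
| n/a | n/a | |
|---|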
| 1216 | n/a | def default_exception_handler(self, context): |
|---|
| 1217 | n/a | """Default exception handler. |
|---|
| 1218 | n/a | |
|---|
| 1219 | n/a | This is called when an exception occurs and no exception |
|---|
| 1220 | n/a | handler is set, and can be called by a custom exception |
|---|
| 1221 | n/a | handler that wants to defer to the default behavior. |
|---|
| 1222 | n/a | |
|---|
| 1223 | n/a | The context parameter has the same meaning as in |
|---|
| 1224 | n/a | `call_exception_handler()`. |
|---|
| 1225 | n/a | """ |
|---|
| 1226 | n/a | message = context.get('message') |
|---|
| 1227 | n/a | if not message: |
|---|
| 1228 | n/a | message = 'Unhandled exception in event loop' |
|---|
| 1229 | n/a | |
|---|
| 1230 | n/a | exception = context.get('exception') |
|---|
| 1231 | n/a | if exception is not None: |
|---|
| 1232 | n/a | exc_info = (type(exception), exception, exception.__traceback__) |
|---|
| 1233 | n/a | else: |
|---|
| 1234 | n/a | exc_info = False |
|---|
| 1235 | n/a | |
|---|
| 1236 | n/a | if ('source_traceback' not in context |
|---|
| 1237 | n/a | and self._current_handle is not None |
|---|
| 1238 | n/a | and self._current_handle._source_traceback): |
|---|
| 1239 | n/a | context['handle_traceback'] = self._current_handle._source_traceback |
|---|
| 1240 | n/a | |
|---|
| 1241 | n/a | log_lines = [message] |
|---|
| 1242 | n/a | for key in sorted(context): |
|---|
| 1243 | n/a | if key in {'message', 'exception'}: |
|---|
| 1244 | n/a | continue |
|---|
| 1245 | n/a | value = context[key] |
|---|
| 1246 | n/a | if key == 'source_traceback': |
|---|
| 1247 | n/a | tb = ''.join(traceback.format_list(value)) |
|---|
| 1248 | n/a | value = 'Object created at (most recent call last):\n' |
|---|
| 1249 | n/a | value += tb.rstrip() |
|---|
| 1250 | n/a | elif key == 'handle_traceback': |
|---|
| 1251 | n/a | tb = ''.join(traceback.format_list(value)) |
|---|
| 1252 | n/a | value = 'Handle created at (most recent call last):\n' |
|---|
| 1253 | n/a | value += tb.rstrip() |
|---|
| 1254 | n/a | else: |
|---|
| 1255 | n/a | value = repr(value) |
|---|
| 1256 | n/a | log_lines.append('{}: {}'.format(key, value)) |
|---|
| 1257 | n/a | |
|---|
| 1258 | n/a | logger.error('\n'.join(log_lines), exc_info=exc_info) |
|---|
| 1259 | n/a | |
|---|
| 1260 | n/a | def call_exception_handler(self, context): |
|---|
| 1261 | n/a | """Call the current event loop's exception handler. |
|---|
| 1262 | n/a | |
|---|
| 1263 | n/a | The context argument is a dict containing the following keys: |
|---|
| 1264 | n/a | |
|---|
| 1265 | n/a | - 'message': Error message; |
|---|
| 1266 | n/a | - 'exception' (optional): Exception object; |
|---|
| 1267 | n/a | - 'future' (optional): Future instance; |
|---|
| 1268 | n/a | - 'handle' (optional): Handle instance; |
|---|
| 1269 | n/a | - 'protocol' (optional): Protocol instance; |
|---|
| 1270 | n/a | - 'transport' (optional): Transport instance; |
|---|
| 1271 | n/a | - 'socket' (optional): Socket instance; |
|---|
| 1272 | n/a | - 'asyncgen' (optional): Asynchronous generator that caused |
|---|
| 1273 | n/a | the exception. |
|---|
| 1274 | n/a | |
|---|
| 1275 | n/a | New keys may be introduced in the future. |
|---|
| 1276 | n/a | |
|---|
| 1277 | n/a | Note: do not override this method in an event loop subclass. |
|---|
| 1278 | n/a | For custom exception handling, use the |
|---|
| 1279 | n/a | `set_exception_handler()` method. |
|---|
| 1280 | n/a | """ |
|---|
| 1281 | n/a | if self._exception_handler is None: |
|---|
| 1282 | n/a | try: |
|---|
| 1283 | n/a | self.default_exception_handler(context) |
|---|
| 1284 | n/a | except Exception: |
|---|
| 1285 | n/a | # Second protection layer for unexpected errors |
|---|
| 1286 | n/a | # in the default implementation, as well as for subclassed |
|---|
| 1287 | n/a | # event loops with an overridden "default_exception_handler". |
|---|
| 1288 | n/a | logger.error('Exception in default exception handler', |
|---|
| 1289 | n/a | exc_info=True) |
|---|
| 1290 | n/a | else: |
|---|
| 1291 | n/a | try: |
|---|
| 1292 | n/a | self._exception_handler(self, context) |
|---|
| 1293 | n/a | except Exception as exc: |
|---|
| 1294 | n/a | # Exception in the user-set custom exception handler. |
|---|
| 1295 | n/a | try: |
|---|
| 1296 | n/a | # Let's try default handler. |
|---|
| 1297 | n/a | self.default_exception_handler({ |
|---|
| 1298 | n/a | 'message': 'Unhandled error in exception handler', |
|---|
| 1299 | n/a | 'exception': exc, |
|---|
| 1300 | n/a | 'context': context, |
|---|
| 1301 | n/a | }) |
|---|
| 1302 | n/a | except Exception: |
|---|
| 1303 | n/a | # Guard 'default_exception_handler' in case it is |
|---|
| 1304 | n/a | # overridden. |
|---|
| 1305 | n/a | logger.error('Exception in default exception handler ' |
|---|
| 1306 | n/a | 'while handling an unexpected error ' |
|---|
| 1307 | n/a | 'in custom exception handler', |
|---|
| 1308 | n/a | exc_info=True) |
|---|
| 1309 | n/a | |
|---|
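| n/a | n/a | # call_exception_handler() is the producer side of the handler API: |
|---|
| n/a | n/a | # transports and other loop internals report failures as a context dict |
|---|
| n/a | n/a | # instead of letting the exception escape the loop.  A minimal sketch |
|---|
| n/a | n/a | # using only the documented keys (do_something() is a placeholder): |
|---|
| n/a | n/a | # |
|---|
| n/a | n/a | #   try: |
|---|
| n/a | n/a | #       do_something() |
|---|
| n/a | n/a | #   except Exception as exc: |
|---|
| n/a | n/a | #       loop.call_exception_handler({ |
|---|
| n/a | n/a | #           'message': 'Unexpected error in do_something()', |
|---|
| n/a | n/a | #           'exception': exc, |
|---|
| n/a | n/a | #       }) |
|---|
| n/a | n/a | |
|---|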
| 1310 | n/a | def _add_callback(self, handle): |
|---|
| 1311 | n/a | """Add a Handle to _scheduled (TimerHandle) or _ready.""" |
|---|
| 1312 | n/a | assert isinstance(handle, events.Handle), 'A Handle is required here' |
|---|
| 1313 | n/a | if handle._cancelled: |
|---|
| 1314 | n/a | return |
|---|
| 1315 | n/a | assert not isinstance(handle, events.TimerHandle) |
|---|
| 1316 | n/a | self._ready.append(handle) |
|---|
| 1317 | n/a | |
|---|
| 1318 | n/a | def _add_callback_signalsafe(self, handle): |
|---|
| 1319 | n/a | """Like _add_callback() but called from a signal handler.""" |
|---|
| 1320 | n/a | self._add_callback(handle) |
|---|
| 1321 | n/a | self._write_to_self() |
|---|
| 1322 | n/a | |
|---|
| 1323 | n/a | def _timer_handle_cancelled(self, handle): |
|---|
| 1324 | n/a | """Notification that a TimerHandle has been cancelled.""" |
|---|
| 1325 | n/a | if handle._scheduled: |
|---|
| 1326 | n/a | self._timer_cancelled_count += 1 |
|---|
| 1327 | n/a | |
|---|
| 1328 | n/a | def _run_once(self): |
|---|
| 1329 | n/a | """Run one full iteration of the event loop. |
|---|
| 1330 | n/a | |
|---|
| 1331 | n/a | This calls all currently ready callbacks, polls for I/O, |
|---|
| 1332 | n/a | schedules the resulting callbacks, and finally schedules |
|---|
| 1333 | n/a | 'call_later' callbacks. |
|---|
| 1334 | n/a | """ |
|---|
| 1335 | n/a | |
|---|
| 1336 | n/a | sched_count = len(self._scheduled) |
|---|
| 1337 | n/a | if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and |
|---|
| 1338 | n/a | self._timer_cancelled_count / sched_count > |
|---|
| 1339 | n/a | _MIN_CANCELLED_TIMER_HANDLES_FRACTION): |
|---|
| 1340 | n/a | # Remove delayed calls that were cancelled if their number |
|---|
| 1341 | n/a | # is too high |
|---|
| 1342 | n/a | new_scheduled = [] |
|---|
| 1343 | n/a | for handle in self._scheduled: |
|---|
| 1344 | n/a | if handle._cancelled: |
|---|
| 1345 | n/a | handle._scheduled = False |
|---|
| 1346 | n/a | else: |
|---|
| 1347 | n/a | new_scheduled.append(handle) |
|---|
| 1348 | n/a | |
|---|
| 1349 | n/a | heapq.heapify(new_scheduled) |
|---|
| 1350 | n/a | self._scheduled = new_scheduled |
|---|
| 1351 | n/a | self._timer_cancelled_count = 0 |
|---|
| 1352 | n/a | else: |
|---|
| 1353 | n/a | # Remove delayed calls that were cancelled from the head of the queue. |
|---|
| 1354 | n/a | while self._scheduled and self._scheduled[0]._cancelled: |
|---|
| 1355 | n/a | self._timer_cancelled_count -= 1 |
|---|
| 1356 | n/a | handle = heapq.heappop(self._scheduled) |
|---|
| 1357 | n/a | handle._scheduled = False |
|---|
| 1358 | n/a | |
|---|
| 1359 | n/a | timeout = None |
|---|
| 1360 | n/a | if self._ready or self._stopping: |
|---|
| 1361 | n/a | timeout = 0 |
|---|
| 1362 | n/a | elif self._scheduled: |
|---|
| 1363 | n/a | # Compute the desired timeout. |
|---|
| 1364 | n/a | when = self._scheduled[0]._when |
|---|
| 1365 | n/a | timeout = max(0, when - self.time()) |
|---|
| 1366 | n/a | |
|---|
| 1367 | n/a | if self._debug and timeout != 0: |
|---|
| 1368 | n/a | t0 = self.time() |
|---|
| 1369 | n/a | event_list = self._selector.select(timeout) |
|---|
| 1370 | n/a | dt = self.time() - t0 |
|---|
| 1371 | n/a | if dt >= 1.0: |
|---|
| 1372 | n/a | level = logging.INFO |
|---|
| 1373 | n/a | else: |
|---|
| 1374 | n/a | level = logging.DEBUG |
|---|
| 1375 | n/a | nevent = len(event_list) |
|---|
| 1376 | n/a | if timeout is None: |
|---|
| 1377 | n/a | logger.log(level, 'poll took %.3f ms: %s events', |
|---|
| 1378 | n/a | dt * 1e3, nevent) |
|---|
| 1379 | n/a | elif nevent: |
|---|
| 1380 | n/a | logger.log(level, |
|---|
| 1381 | n/a | 'poll %.3f ms took %.3f ms: %s events', |
|---|
| 1382 | n/a | timeout * 1e3, dt * 1e3, nevent) |
|---|
| 1383 | n/a | elif dt >= 1.0: |
|---|
| 1384 | n/a | logger.log(level, |
|---|
| 1385 | n/a | 'poll %.3f ms took %.3f ms: timeout', |
|---|
| 1386 | n/a | timeout * 1e3, dt * 1e3) |
|---|
| 1387 | n/a | else: |
|---|
| 1388 | n/a | event_list = self._selector.select(timeout) |
|---|
| 1389 | n/a | self._process_events(event_list) |
|---|
| 1390 | n/a | |
|---|
| 1391 | n/a | # Handle 'later' callbacks that are ready. |
|---|
| 1392 | n/a | end_time = self.time() + self._clock_resolution |
|---|
| 1393 | n/a | while self._scheduled: |
|---|
| 1394 | n/a | handle = self._scheduled[0] |
|---|
| 1395 | n/a | if handle._when >= end_time: |
|---|
| 1396 | n/a | break |
|---|
| 1397 | n/a | handle = heapq.heappop(self._scheduled) |
|---|
| 1398 | n/a | handle._scheduled = False |
|---|
| 1399 | n/a | self._ready.append(handle) |
|---|
| 1400 | n/a | |
|---|
| 1401 | n/a | # This is the only place where callbacks are actually *called*. |
|---|
| 1402 | n/a | # All other places just add them to ready. |
|---|
| 1403 | n/a | # Note: We run all currently scheduled callbacks, but not any |
|---|
| 1404 | n/a | # callbacks scheduled by callbacks run this time around -- |
|---|
| 1405 | n/a | # they will be run the next time (after another I/O poll). |
|---|
| 1406 | n/a | # Use an idiom that is thread-safe without using locks. |
|---|
| 1407 | n/a | ntodo = len(self._ready) |
|---|
| 1408 | n/a | for i in range(ntodo): |
|---|
| 1409 | n/a | handle = self._ready.popleft() |
|---|
| 1410 | n/a | if handle._cancelled: |
|---|
| 1411 | n/a | continue |
|---|
| 1412 | n/a | if self._debug: |
|---|
| 1413 | n/a | try: |
|---|
| 1414 | n/a | self._current_handle = handle |
|---|
| 1415 | n/a | t0 = self.time() |
|---|
| 1416 | n/a | handle._run() |
|---|
| 1417 | n/a | dt = self.time() - t0 |
|---|
| 1418 | n/a | if dt >= self.slow_callback_duration: |
|---|
| 1419 | n/a | logger.warning('Executing %s took %.3f seconds', |
|---|
| 1420 | n/a | _format_handle(handle), dt) |
|---|
| 1421 | n/a | finally: |
|---|
| 1422 | n/a | self._current_handle = None |
|---|
| 1423 | n/a | else: |
|---|
| 1424 | n/a | handle._run() |
|---|
| 1425 | n/a | handle = None # Needed to break cycles when an exception occurs. |
|---|
| 1426 | n/a | |
|---|
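| n/a | n/a | # A worked example of the cancelled-timer cleanup heuristic at the top |
|---|
| n/a | n/a | # of _run_once(): with 200 handles in _scheduled of which 120 are |
|---|
| n/a | n/a | # cancelled, the count exceeds _MIN_SCHEDULED_TIMER_HANDLES and the |
|---|
| n/a | n/a | # cancelled fraction is 120 / 200 = 0.6, above |
|---|
| n/a | n/a | # _MIN_CANCELLED_TIMER_HANDLES_FRACTION, so the heap is rebuilt without |
|---|
| n/a | n/a | # the cancelled entries and _timer_cancelled_count is reset to 0.  With |
|---|
| n/a | n/a | # only 30 cancelled handles (fraction 0.15) the cheap path is taken |
|---|
| n/a | n/a | # instead: cancelled handles are popped lazily, and only while they sit |
|---|
| n/a | n/a | # at the head of the heap. |
|---|
| n/a | n/a | |
|---|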
| 1427 | n/a | def _set_coroutine_wrapper(self, enabled): |
|---|
| 1428 | n/a | try: |
|---|
| 1429 | n/a | set_wrapper = sys.set_coroutine_wrapper |
|---|
| 1430 | n/a | get_wrapper = sys.get_coroutine_wrapper |
|---|
| 1431 | n/a | except AttributeError: |
|---|
| 1432 | n/a | return |
|---|
| 1433 | n/a | |
|---|
| 1434 | n/a | enabled = bool(enabled) |
|---|
| 1435 | n/a | if self._coroutine_wrapper_set == enabled: |
|---|
| 1436 | n/a | return |
|---|
| 1437 | n/a | |
|---|
| 1438 | n/a | wrapper = coroutines.debug_wrapper |
|---|
| 1439 | n/a | current_wrapper = get_wrapper() |
|---|
| 1440 | n/a | |
|---|
| 1441 | n/a | if enabled: |
|---|
| 1442 | n/a | if current_wrapper not in (None, wrapper): |
|---|
| 1443 | n/a | warnings.warn( |
|---|
| 1444 | n/a | "loop.set_debug(True): cannot set debug coroutine " |
|---|
| 1445 | n/a | "wrapper; another wrapper is already set %r" % |
|---|
| 1446 | n/a | current_wrapper, RuntimeWarning) |
|---|
| 1447 | n/a | else: |
|---|
| 1448 | n/a | set_wrapper(wrapper) |
|---|
| 1449 | n/a | self._coroutine_wrapper_set = True |
|---|
| 1450 | n/a | else: |
|---|
| 1451 | n/a | if current_wrapper not in (None, wrapper): |
|---|
| 1452 | n/a | warnings.warn( |
|---|
| 1453 | n/a | "loop.set_debug(False): cannot unset debug coroutine " |
|---|
| 1454 | n/a | "wrapper; another wrapper was set %r" % |
|---|
| 1455 | n/a | current_wrapper, RuntimeWarning) |
|---|
| 1456 | n/a | else: |
|---|
| 1457 | n/a | set_wrapper(None) |
|---|
| 1458 | n/a | self._coroutine_wrapper_set = False |
|---|
| 1459 | n/a | |
|---|
| 1460 | n/a | def get_debug(self): |
|---|
| 1461 | n/a | return self._debug |
|---|
| 1462 | n/a | |
|---|
| 1463 | n/a | def set_debug(self, enabled): |
|---|
| 1464 | n/a | self._debug = enabled |
|---|
| 1465 | n/a | |
|---|
| 1466 | n/a | if self.is_running(): |
|---|
| 1467 | n/a | self._set_coroutine_wrapper(enabled) |
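|---|
| n/a | n/a | # Debug mode ties the pieces above together: set_debug(True) makes |
|---|
| n/a | n/a | # _run_once() time each callback and log the slow ones, and, on Python |
|---|
| n/a | n/a | # versions that still provide sys.set_coroutine_wrapper, installs the |
|---|
| n/a | n/a | # coroutine debug wrapper so never-awaited coroutines report where they |
|---|
| n/a | n/a | # were created.  A minimal sketch; the 0.05 threshold is an arbitrary |
|---|
| n/a | n/a | # example value, not a module default: |
|---|
| n/a | n/a | # |
|---|
| n/a | n/a | #   import asyncio, logging |
|---|
| n/a | n/a | # |
|---|
| n/a | n/a | #   logging.basicConfig(level=logging.DEBUG) |
|---|
| n/a | n/a | #   loop = asyncio.get_event_loop() |
|---|
| n/a | n/a | #   loop.set_debug(True) |
|---|
| n/a | n/a | #   loop.slow_callback_duration = 0.05   # warn on callbacks >= 50 ms |
|---|
| n/a | n/a | #   loop.run_until_complete(asyncio.sleep(0.1)) |
|---|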
|---|