"""High-level stream API: StreamReader/StreamWriter and helper functions built on transports and protocols."""
---|
2 | n/a | |
---|
3 | n/a | __all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol', |
---|
4 | n/a | 'open_connection', 'start_server', |
---|
5 | n/a | 'IncompleteReadError', |
---|
6 | n/a | 'LimitOverrunError', |
---|
7 | n/a | ] |
---|
8 | n/a | |
---|
9 | n/a | import socket |
---|
10 | n/a | |
---|
11 | n/a | if hasattr(socket, 'AF_UNIX'): |
---|
12 | n/a | __all__.extend(['open_unix_connection', 'start_unix_server']) |
---|
13 | n/a | |
---|
14 | n/a | from . import coroutines |
---|
15 | n/a | from . import compat |
---|
16 | n/a | from . import events |
---|
17 | n/a | from . import protocols |
---|
18 | n/a | from .coroutines import coroutine |
---|
19 | n/a | from .log import logger |
---|
20 | n/a | |
---|
21 | n/a | |
---|
22 | n/a | _DEFAULT_LIMIT = 2 ** 16 |
---|
23 | n/a | |
---|
24 | n/a | |
---|
class IncompleteReadError(EOFError):
    """
    Incomplete read error. Attributes:

    - partial: read bytes string before the end of stream was reached
    - expected: total number of expected bytes (or None if unknown)
    """
    def __init__(self, partial, expected):
        super().__init__("%d bytes read on a total of %r expected bytes"
                         % (len(partial), expected))
        self.partial = partial
        self.expected = expected

    def __reduce__(self):
        # Make the exception picklable/copyable: the default reduction
        # would call IncompleteReadError(message) with the single
        # formatted message stored in `args`, which does not match
        # __init__'s (partial, expected) signature.
        return type(self), (self.partial, self.expected)
---|
37 | n/a | |
---|
38 | n/a | |
---|
class LimitOverrunError(Exception):
    """Reached the buffer limit while looking for a separator.

    Attributes:
    - consumed: total number of to be consumed bytes.
    """
    def __init__(self, message, consumed):
        super().__init__(message)
        self.consumed = consumed

    def __reduce__(self):
        # Make the exception picklable/copyable: `args` only holds the
        # message, so the default reduction would drop the `consumed`
        # attribute (and call __init__ with a missing argument).
        return type(self), (self.args[0], self.consumed)
---|
48 | n/a | |
---|
49 | n/a | |
---|
@coroutine
def open_connection(host=None, port=None, *,
                    loop=None, limit=_DEFAULT_LIMIT, **kwds):
    """Connect to (host, port) and return a (reader, writer) pair.

    This is a convenience wrapper around loop.create_connection(): the
    returned reader is a StreamReader and the writer a StreamWriter.

    All arguments other than protocol_factory are forwarded to
    create_connection(); the most common are the positional host and
    port.  Two extra keyword-only arguments are accepted: `loop`
    selects the event loop and `limit` sets the StreamReader buffer
    limit.

    (If you want to customize the StreamReader and/or
    StreamReaderProtocol classes, just copy the code -- there's
    really nothing special here except some convenience.)
    """
    event_loop = loop if loop is not None else events.get_event_loop()
    stream_reader = StreamReader(limit=limit, loop=event_loop)
    stream_protocol = StreamReaderProtocol(stream_reader, loop=event_loop)
    transport, _ = yield from event_loop.create_connection(
        lambda: stream_protocol, host, port, **kwds)
    stream_writer = StreamWriter(transport, stream_protocol,
                                 stream_reader, event_loop)
    return stream_reader, stream_writer
---|
78 | n/a | |
---|
79 | n/a | |
---|
@coroutine
def start_server(client_connected_cb, host=None, port=None, *,
                 loop=None, limit=_DEFAULT_LIMIT, **kwds):
    """Start a socket server and invoke a callback for each client.

    `client_connected_cb` receives two arguments: a StreamReader and a
    StreamWriter for the accepted connection.  It may be a plain
    callable or a coroutine function; if it returns a coroutine, that
    coroutine is automatically wrapped in a Task.

    The remaining arguments are forwarded to loop.create_server()
    (except protocol_factory); the most common are the positional host
    and port.  Two extra keyword-only arguments are accepted: `loop`
    selects the event loop and `limit` sets the StreamReader buffer
    limit.

    The return value is the same as loop.create_server(), i.e. a
    Server object which can be used to stop the service.
    """
    if loop is None:
        loop = events.get_event_loop()

    def protocol_factory():
        # Build a fresh reader/protocol pair for every accepted
        # connection.
        stream_reader = StreamReader(limit=limit, loop=loop)
        return StreamReaderProtocol(stream_reader, client_connected_cb,
                                    loop=loop)

    server = yield from loop.create_server(protocol_factory, host, port,
                                           **kwds)
    return server
---|
114 | n/a | |
---|
115 | n/a | |
---|
if hasattr(socket, 'AF_UNIX'):
    # UNIX Domain Sockets are supported on this platform

    @coroutine
    def open_unix_connection(path=None, *,
                             loop=None, limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `open_connection` but works with UNIX Domain Sockets."""
        event_loop = loop if loop is not None else events.get_event_loop()
        stream_reader = StreamReader(limit=limit, loop=event_loop)
        stream_protocol = StreamReaderProtocol(stream_reader, loop=event_loop)
        transport, _ = yield from event_loop.create_unix_connection(
            lambda: stream_protocol, path, **kwds)
        stream_writer = StreamWriter(transport, stream_protocol,
                                     stream_reader, event_loop)
        return stream_reader, stream_writer

    @coroutine
    def start_unix_server(client_connected_cb, path=None, *,
                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `start_server` but works with UNIX Domain Sockets."""
        if loop is None:
            loop = events.get_event_loop()

        def protocol_factory():
            # A fresh reader/protocol pair for every accepted connection.
            stream_reader = StreamReader(limit=limit, loop=loop)
            return StreamReaderProtocol(stream_reader, client_connected_cb,
                                        loop=loop)

        server = yield from loop.create_unix_server(protocol_factory, path,
                                                    **kwds)
        return server
---|
146 | n/a | |
---|
147 | n/a | |
---|
class FlowControlMixin(protocols.Protocol):
    """Reusable flow control logic for StreamWriter.drain().

    This implements the protocol methods pause_writing(),
    resume_writing() and connection_lost().  If the subclass overrides
    these it must call the super methods.

    StreamWriter.drain() must wait for _drain_helper() coroutine.
    """

    def __init__(self, loop=None):
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._paused = False           # True between pause_writing() and resume_writing().
        self._drain_waiter = None      # Future completed when writing may resume.
        self._connection_lost = False  # Set once connection_lost() has been called.

    def pause_writing(self):
        # Called by the transport when its write buffer goes over the
        # high-water mark.
        assert not self._paused
        self._paused = True
        if self._loop.get_debug():
            logger.debug("%r pauses writing", self)

    def resume_writing(self):
        # Called by the transport when its write buffer drains below
        # the low-water mark; wake up a pending drain() call, if any.
        assert self._paused
        self._paused = False
        if self._loop.get_debug():
            logger.debug("%r resumes writing", self)

        waiter = self._drain_waiter
        if waiter is not None:
            self._drain_waiter = None
            if not waiter.done():
                waiter.set_result(None)

    def connection_lost(self, exc):
        self._connection_lost = True
        # Wake up the writer if currently paused.
        if not self._paused:
            return
        waiter = self._drain_waiter
        if waiter is None:
            return
        self._drain_waiter = None
        if waiter.done():
            return
        # Propagate the close: a clean close resolves the waiter, an
        # error close makes the pending drain() raise that error.
        if exc is None:
            waiter.set_result(None)
        else:
            waiter.set_exception(exc)

    @coroutine
    def _drain_helper(self):
        if self._connection_lost:
            raise ConnectionResetError('Connection lost')
        if not self._paused:
            return
        waiter = self._drain_waiter
        # Only one drain() may wait at a time; any previous waiter must
        # already have been cancelled.
        assert waiter is None or waiter.cancelled()
        waiter = self._loop.create_future()
        self._drain_waiter = waiter
        yield from waiter
---|
212 | n/a | |
---|
213 | n/a | |
---|
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
    """Helper class to adapt between Protocol and StreamReader.

    (This is a helper class instead of making StreamReader itself a
    Protocol subclass, because the StreamReader has other potential
    uses, and to prevent the user of the StreamReader to accidentally
    call inappropriate methods of the protocol.)
    """

    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
        super().__init__(loop=loop)
        self._stream_reader = stream_reader
        self._stream_writer = None
        # Optional server-side callback invoked in connection_made().
        self._client_connected_cb = client_connected_cb
        self._over_ssl = False  # Set in connection_made() from transport info.

    def connection_made(self, transport):
        self._stream_reader.set_transport(transport)
        self._over_ssl = transport.get_extra_info('sslcontext') is not None
        if self._client_connected_cb is not None:
            # Server mode: build the writer and hand both ends of the
            # stream to the user callback.
            self._stream_writer = StreamWriter(transport, self,
                                               self._stream_reader,
                                               self._loop)
            res = self._client_connected_cb(self._stream_reader,
                                            self._stream_writer)
            if coroutines.iscoroutine(res):
                # A coroutine callback is scheduled as a Task.
                self._loop.create_task(res)

    def connection_lost(self, exc):
        if self._stream_reader is not None:
            if exc is None:
                # Clean close: signal EOF so pending reads complete.
                self._stream_reader.feed_eof()
            else:
                self._stream_reader.set_exception(exc)
        super().connection_lost(exc)
        # Drop references so reader/writer can be garbage collected.
        self._stream_reader = None
        self._stream_writer = None

    def data_received(self, data):
        self._stream_reader.feed_data(data)

    def eof_received(self):
        self._stream_reader.feed_eof()
        if self._over_ssl:
            # Prevent a warning in SSLProtocol.eof_received:
            # "returning true from eof_received()
            # has no effect when using ssl"
            return False
        # Returning True keeps the transport open (half-closed).
        return True
---|
263 | n/a | |
---|
264 | n/a | |
---|
class StreamWriter:
    """Wraps a Transport.

    This exposes write(), writelines(), [can_]write_eof(),
    get_extra_info() and close().  It adds drain() which returns an
    optional Future on which you can wait for flow control.  It also
    adds a transport property which references the Transport
    directly.
    """

    def __init__(self, transport, protocol, reader, loop):
        self._transport = transport
        self._protocol = protocol
        # drain() expects that the reader has an exception() method
        assert reader is None or isinstance(reader, StreamReader)
        self._reader = reader
        self._loop = loop

    def __repr__(self):
        parts = [self.__class__.__name__]
        parts.append('transport=%r' % self._transport)
        reader = self._reader
        if reader is not None:
            parts.append('reader=%r' % reader)
        return '<%s>' % ' '.join(parts)

    @property
    def transport(self):
        return self._transport

    def write(self, data):
        self._transport.write(data)

    def writelines(self, data):
        self._transport.writelines(data)

    def write_eof(self):
        return self._transport.write_eof()

    def can_write_eof(self):
        return self._transport.can_write_eof()

    def close(self):
        return self._transport.close()

    def get_extra_info(self, name, default=None):
        return self._transport.get_extra_info(name, default)

    @coroutine
    def drain(self):
        """Flush the write buffer.

        The intended use is to write

          w.write(data)
          yield from w.drain()
        """
        reader = self._reader
        if reader is not None:
            # Surface any error already recorded on the reader.
            pending_exc = reader.exception()
            if pending_exc is not None:
                raise pending_exc
        transport = self._transport
        if transport is not None and transport.is_closing():
            # Yield to the event loop so connection_lost() may be
            # called.  Without this, _drain_helper() would return
            # immediately, and code that calls
            #     write(...); yield from drain()
            # in a loop would never call connection_lost(), so it
            # would not see an error when the socket is closed.
            yield
        yield from self._protocol._drain_helper()
---|
334 | n/a | |
---|
335 | n/a | |
---|
class StreamReader:
    """Buffered reader over a byte stream.

    A protocol pushes data in through feed_data() and signals the end
    of the stream with feed_eof(); coroutines consume it through
    read(), readline(), readexactly() and readuntil().  `limit` bounds
    the chunk returned by readline()/readuntil(); feed_data() pauses
    the transport once the internal buffer exceeds twice that limit.
    """

    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
        # The line length limit is a security feature;
        # it also doubles as half the buffer limit.

        if limit <= 0:
            raise ValueError('Limit cannot be <= 0')

        self._limit = limit
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._buffer = bytearray()  # Received data not yet consumed.
        self._eof = False    # Whether we're done (feed_eof() was called).
        self._waiter = None  # A future used by _wait_for_data()
        self._exception = None   # Error to raise from the read methods.
        self._transport = None   # Used only for flow control (pause/resume).
        self._paused = False     # True while transport reading is paused.

    def __repr__(self):
        info = ['StreamReader']
        if self._buffer:
            info.append('%d bytes' % len(self._buffer))
        if self._eof:
            info.append('eof')
        if self._limit != _DEFAULT_LIMIT:
            info.append('l=%d' % self._limit)
        if self._waiter:
            info.append('w=%r' % self._waiter)
        if self._exception:
            info.append('e=%r' % self._exception)
        if self._transport:
            info.append('t=%r' % self._transport)
        if self._paused:
            info.append('paused')
        return '<%s>' % ' '.join(info)

    def exception(self):
        """Return the exception set by set_exception(), or None."""
        return self._exception

    def set_exception(self, exc):
        """Record *exc* and wake up a pending read*() call with it."""
        self._exception = exc

        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_exception(exc)

    def _wakeup_waiter(self):
        """Wakeup read*() functions waiting for data or EOF."""
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(None)

    def set_transport(self, transport):
        """Attach the transport used for flow control (set only once)."""
        assert self._transport is None, 'Transport already set'
        self._transport = transport

    def _maybe_resume_transport(self):
        # Resume reading once the buffer has drained back under the limit.
        if self._paused and len(self._buffer) <= self._limit:
            self._paused = False
            self._transport.resume_reading()

    def feed_eof(self):
        """Mark the end of the stream and wake up any pending read*()."""
        self._eof = True
        self._wakeup_waiter()

    def at_eof(self):
        """Return True if the buffer is empty and 'feed_eof' was called."""
        return self._eof and not self._buffer

    def feed_data(self, data):
        """Append *data* to the buffer and wake up any pending read*().

        Pauses the transport when the buffer grows beyond twice the
        configured limit (if the transport supports pausing).
        """
        assert not self._eof, 'feed_data after feed_eof'

        if not data:
            return

        self._buffer.extend(data)
        self._wakeup_waiter()

        if (self._transport is not None and
                not self._paused and
                len(self._buffer) > 2 * self._limit):
            try:
                self._transport.pause_reading()
            except NotImplementedError:
                # The transport can't be paused.
                # We'll just have to buffer all data.
                # Forget the transport so we don't keep trying.
                self._transport = None
            else:
                self._paused = True

    @coroutine
    def _wait_for_data(self, func_name):
        """Wait until feed_data() or feed_eof() is called.

        If stream was paused, automatically resume it.
        """
        # StreamReader uses a future to link the protocol feed_data() method
        # to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not possible to know
        # which coroutine would get the next data.
        if self._waiter is not None:
            raise RuntimeError('%s() called while another coroutine is '
                               'already waiting for incoming data' % func_name)

        assert not self._eof, '_wait_for_data after EOF'

        # Waiting for data while paused will make deadlock, so prevent it.
        # This is essential for readexactly(n) for case when n > self._limit.
        if self._paused:
            self._paused = False
            self._transport.resume_reading()

        self._waiter = self._loop.create_future()
        try:
            yield from self._waiter
        finally:
            self._waiter = None

    @coroutine
    def readline(self):
        """Read chunk of data from the stream until newline (b'\n') is found.

        On success, return chunk that ends with newline. If only partial
        line can be read due to EOF, return incomplete line without
        terminating newline. When EOF was reached while no bytes read, empty
        bytes object is returned.

        If limit is reached, ValueError will be raised. In that case, if
        newline was found, complete line including newline will be removed
        from internal buffer. Else, internal buffer will be cleared. Limit is
        compared against part of the line without newline.

        If stream was paused, this function will automatically resume it if
        needed.
        """
        sep = b'\n'
        seplen = len(sep)
        try:
            line = yield from self.readuntil(sep)
        except IncompleteReadError as e:
            # EOF before a newline: return whatever was read.
            return e.partial
        except LimitOverrunError as e:
            # Translate the overrun into ValueError, consuming the
            # offending line (or clearing the buffer) first.
            if self._buffer.startswith(sep, e.consumed):
                del self._buffer[:e.consumed + seplen]
            else:
                self._buffer.clear()
            self._maybe_resume_transport()
            raise ValueError(e.args[0])
        return line

    @coroutine
    def readuntil(self, separator=b'\n'):
        """Read data from the stream until ``separator`` is found.

        On success, the data and separator will be removed from the
        internal buffer (consumed). Returned data will include the
        separator at the end.

        Configured stream limit is used to check result. Limit sets the
        maximal length of data that can be returned, not counting the
        separator.

        If an EOF occurs and the complete separator is still not found,
        an IncompleteReadError exception will be raised, and the internal
        buffer will be reset.  The IncompleteReadError.partial attribute
        may contain the separator partially.

        If the data cannot be read because of over limit, a
        LimitOverrunError exception  will be raised, and the data
        will be left in the internal buffer, so it can be read again.
        """
        seplen = len(separator)
        if seplen == 0:
            raise ValueError('Separator should be at least one-byte string')

        if self._exception is not None:
            raise self._exception

        # Consume whole buffer except last bytes, which length is
        # one less than seplen. Let's check corner cases with
        # separator='SEPARATOR':
        # * we have received almost complete separator (without last
        #   byte). i.e buffer='some textSEPARATO'. In this case we
        #   can safely consume len(separator) - 1 bytes.
        # * last byte of buffer is first byte of separator, i.e.
        #   buffer='abcdefghijklmnopqrS'. We may safely consume
        #   everything except that last byte, but this require to
        #   analyze bytes of buffer that match partial separator.
        #   This is slow and/or require FSM. For this case our
        #   implementation is not optimal, since require rescanning
        #   of data that is known to not belong to separator. In
        #   real world, separator will not be so long to notice
        #   performance problems. Even when reading MIME-encoded
        #   messages :)

        # `offset` is the number of bytes from the beginning of the buffer
        # where there is no occurrence of `separator`.
        offset = 0

        # Loop until we find `separator` in the buffer, exceed the buffer size,
        # or an EOF has happened.
        while True:
            buflen = len(self._buffer)

            # Check if we now have enough data in the buffer for `separator` to
            # fit.
            if buflen - offset >= seplen:
                isep = self._buffer.find(separator, offset)

                if isep != -1:
                    # `separator` is in the buffer. `isep` will be used later
                    # to retrieve the data.
                    break

                # see upper comment for explanation.
                offset = buflen + 1 - seplen
                if offset > self._limit:
                    raise LimitOverrunError(
                        'Separator is not found, and chunk exceed the limit',
                        offset)

            # Complete message (with full separator) may be present in buffer
            # even when EOF flag is set. This may happen when the last chunk
            # adds data which makes separator be found. That's why we check for
            # EOF *after* inspecting the buffer.
            if self._eof:
                chunk = bytes(self._buffer)
                self._buffer.clear()
                raise IncompleteReadError(chunk, None)

            # _wait_for_data() will resume reading if stream was paused.
            yield from self._wait_for_data('readuntil')

        if isep > self._limit:
            raise LimitOverrunError(
                'Separator is found, but chunk is longer than limit', isep)

        chunk = self._buffer[:isep + seplen]
        del self._buffer[:isep + seplen]
        self._maybe_resume_transport()
        return bytes(chunk)

    @coroutine
    def read(self, n=-1):
        """Read up to `n` bytes from the stream.

        If n is not provided, or set to -1, read until EOF and return all read
        bytes. If the EOF was received and the internal buffer is empty, return
        an empty bytes object.

        If n is zero, return empty bytes object immediately.

        If n is positive, this function tries to read `n` bytes, and may
        return less or equal bytes than requested, but at least one byte. If
        EOF was received before any byte is read, this function returns empty
        byte object.

        Returned value is not limited with limit, configured at stream
        creation.

        If stream was paused, this function will automatically resume it if
        needed.
        """

        if self._exception is not None:
            raise self._exception

        if n == 0:
            return b''

        if n < 0:
            # This used to just loop creating a new waiter hoping to
            # collect everything in self._buffer, but that would
            # deadlock if the subprocess sends more than self.limit
            # bytes.  So just call self.read(self._limit) until EOF.
            blocks = []
            while True:
                block = yield from self.read(self._limit)
                if not block:
                    break
                blocks.append(block)
            return b''.join(blocks)

        if not self._buffer and not self._eof:
            yield from self._wait_for_data('read')

        # This will work right even if buffer is less than n bytes
        data = bytes(self._buffer[:n])
        del self._buffer[:n]

        self._maybe_resume_transport()
        return data

    @coroutine
    def readexactly(self, n):
        """Read exactly `n` bytes.

        Raise an IncompleteReadError if EOF is reached before `n` bytes can be
        read. The IncompleteReadError.partial attribute of the exception will
        contain the partial read bytes.

        if n is zero, return empty bytes object.

        Returned value is not limited with limit, configured at stream
        creation.

        If stream was paused, this function will automatically resume it if
        needed.
        """
        if n < 0:
            raise ValueError('readexactly size can not be less than zero')

        if self._exception is not None:
            raise self._exception

        if n == 0:
            return b''

        while len(self._buffer) < n:
            if self._eof:
                incomplete = bytes(self._buffer)
                self._buffer.clear()
                raise IncompleteReadError(incomplete, n)

            yield from self._wait_for_data('readexactly')

        if len(self._buffer) == n:
            # Exact fit: hand over the whole buffer without slicing.
            data = bytes(self._buffer)
            self._buffer.clear()
        else:
            data = bytes(self._buffer[:n])
            del self._buffer[:n]
        self._maybe_resume_transport()
        return data

    if compat.PY35:
        @coroutine
        def __aiter__(self):
            return self

        @coroutine
        def __anext__(self):
            val = yield from self.readline()
            if val == b'':
                raise StopAsyncIteration
            return val

        if compat.PY352:
            # In Python 3.5.2 and greater, __aiter__ should return
            # the asynchronous iterator directly.
            def __aiter__(self):
                return self
---|