1 | n/a | # Copyright 2009 Brian Quinlan. All Rights Reserved. |
---|
2 | n/a | # Licensed to PSF under a Contributor Agreement. |
---|
3 | n/a | |
---|
4 | n/a | """Implements ProcessPoolExecutor. |
---|
5 | n/a | |
---|
The following diagram and text describe the data-flow through the system:
---|
7 | n/a | |
---|
8 | n/a | |======================= In-process =====================|== Out-of-process ==| |
---|
9 | n/a | |
---|
10 | n/a | +----------+ +----------+ +--------+ +-----------+ +---------+ |
---|
11 | n/a | | | => | Work Ids | => | | => | Call Q | => | | |
---|
12 | n/a | | | +----------+ | | +-----------+ | | |
---|
13 | n/a | | | | ... | | | | ... | | | |
---|
14 | n/a | | | | 6 | | | | 5, call() | | | |
---|
15 | n/a | | | | 7 | | | | ... | | | |
---|
16 | n/a | | Process | | ... | | Local | +-----------+ | Process | |
---|
17 | n/a | | Pool | +----------+ | Worker | | #1..n | |
---|
18 | n/a | | Executor | | Thread | | | |
---|
19 | n/a | | | +----------- + | | +-----------+ | | |
---|
20 | n/a | | | <=> | Work Items | <=> | | <= | Result Q | <= | | |
---|
21 | n/a | | | +------------+ | | +-----------+ | | |
---|
22 | n/a | | | | 6: call() | | | | ... | | | |
---|
23 | n/a | | | | future | | | | 4, result | | | |
---|
24 | n/a | | | | ... | | | | 3, except | | | |
---|
25 | n/a | +----------+ +------------+ +--------+ +-----------+ +---------+ |
---|
26 | n/a | |
---|
27 | n/a | Executor.submit() called: |
---|
28 | n/a | - creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict |
---|
29 | n/a | - adds the id of the _WorkItem to the "Work Ids" queue |
---|
30 | n/a | |
---|
31 | n/a | Local worker thread: |
---|
32 | n/a | - reads work ids from the "Work Ids" queue and looks up the corresponding |
---|
33 | n/a | WorkItem from the "Work Items" dict: if the work item has been cancelled then |
---|
34 | n/a | it is simply removed from the dict, otherwise it is repackaged as a |
---|
35 | n/a | _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" |
---|
36 | n/a | until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because |
---|
37 | n/a | calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). |
---|
38 | n/a | - reads _ResultItems from "Result Q", updates the future stored in the |
---|
39 | n/a | "Work Items" dict and deletes the dict entry |
---|
40 | n/a | |
---|
41 | n/a | Process #1..n: |
---|
42 | n/a | - reads _CallItems from "Call Q", executes the calls, and puts the resulting |
---|
43 | n/a | _ResultItems in "Result Q" |
---|
44 | n/a | """ |
---|
45 | n/a | |
---|
46 | n/a | __author__ = 'Brian Quinlan (brian@sweetapp.com)' |
---|
47 | n/a | |
---|
48 | n/a | import atexit |
---|
49 | n/a | import os |
---|
50 | n/a | from concurrent.futures import _base |
---|
51 | n/a | import queue |
---|
52 | n/a | from queue import Full |
---|
53 | n/a | import multiprocessing |
---|
54 | n/a | from multiprocessing import SimpleQueue |
---|
55 | n/a | from multiprocessing.connection import wait |
---|
56 | n/a | import threading |
---|
57 | n/a | import weakref |
---|
58 | n/a | from functools import partial |
---|
59 | n/a | import itertools |
---|
60 | n/a | import traceback |
---|
61 | n/a | |
---|
62 | n/a | # Workers are created as daemon threads and processes. This is done to allow the |
---|
63 | n/a | # interpreter to exit when there are still idle processes in a |
---|
64 | n/a | # ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, |
---|
65 | n/a | # allowing workers to die with the interpreter has two undesirable properties: |
---|
66 | n/a | # - The workers would still be running during interpreter shutdown, |
---|
67 | n/a | # meaning that they would fail in unpredictable ways. |
---|
68 | n/a | # - The workers could be killed while evaluating a work item, which could |
---|
69 | n/a | # be bad if the callable being evaluated has external side-effects e.g. |
---|
70 | n/a | # writing to a file. |
---|
71 | n/a | # |
---|
72 | n/a | # To work around this problem, an exit handler is installed which tells the |
---|
73 | n/a | # workers to exit when their work queues are empty and then waits until the |
---|
74 | n/a | # threads/processes finish. |
---|
75 | n/a | |
---|
76 | n/a | _threads_queues = weakref.WeakKeyDictionary() |
---|
77 | n/a | _shutdown = False |
---|
78 | n/a | |
---|
79 | n/a | def _python_exit(): |
---|
80 | n/a | global _shutdown |
---|
81 | n/a | _shutdown = True |
---|
82 | n/a | items = list(_threads_queues.items()) |
---|
83 | n/a | for t, q in items: |
---|
84 | n/a | q.put(None) |
---|
85 | n/a | for t, q in items: |
---|
86 | n/a | t.join() |
---|
87 | n/a | |
---|
88 | n/a | # Controls how many more calls than processes will be queued in the call queue. |
---|
89 | n/a | # A smaller number will mean that processes spend more time idle waiting for |
---|
90 | n/a | # work while a larger number will make Future.cancel() succeed less frequently |
---|
91 | n/a | # (Futures in the call queue cannot be cancelled). |
---|
92 | n/a | EXTRA_QUEUED_CALLS = 1 |
---|
93 | n/a | |
---|
94 | n/a | # Hack to embed stringification of remote traceback in local traceback |
---|
95 | n/a | |
---|
96 | n/a | class _RemoteTraceback(Exception): |
---|
97 | n/a | def __init__(self, tb): |
---|
98 | n/a | self.tb = tb |
---|
99 | n/a | def __str__(self): |
---|
100 | n/a | return self.tb |
---|
101 | n/a | |
---|
102 | n/a | class _ExceptionWithTraceback: |
---|
103 | n/a | def __init__(self, exc, tb): |
---|
104 | n/a | tb = traceback.format_exception(type(exc), exc, tb) |
---|
105 | n/a | tb = ''.join(tb) |
---|
106 | n/a | self.exc = exc |
---|
107 | n/a | self.tb = '\n"""\n%s"""' % tb |
---|
108 | n/a | def __reduce__(self): |
---|
109 | n/a | return _rebuild_exc, (self.exc, self.tb) |
---|
110 | n/a | |
---|
111 | n/a | def _rebuild_exc(exc, tb): |
---|
112 | n/a | exc.__cause__ = _RemoteTraceback(tb) |
---|
113 | n/a | return exc |
---|
114 | n/a | |
---|
115 | n/a | class _WorkItem(object): |
---|
116 | n/a | def __init__(self, future, fn, args, kwargs): |
---|
117 | n/a | self.future = future |
---|
118 | n/a | self.fn = fn |
---|
119 | n/a | self.args = args |
---|
120 | n/a | self.kwargs = kwargs |
---|
121 | n/a | |
---|
122 | n/a | class _ResultItem(object): |
---|
123 | n/a | def __init__(self, work_id, exception=None, result=None): |
---|
124 | n/a | self.work_id = work_id |
---|
125 | n/a | self.exception = exception |
---|
126 | n/a | self.result = result |
---|
127 | n/a | |
---|
128 | n/a | class _CallItem(object): |
---|
129 | n/a | def __init__(self, work_id, fn, args, kwargs): |
---|
130 | n/a | self.work_id = work_id |
---|
131 | n/a | self.fn = fn |
---|
132 | n/a | self.args = args |
---|
133 | n/a | self.kwargs = kwargs |
---|
134 | n/a | |
---|
135 | n/a | def _get_chunks(*iterables, chunksize): |
---|
136 | n/a | """ Iterates over zip()ed iterables in chunks. """ |
---|
137 | n/a | it = zip(*iterables) |
---|
138 | n/a | while True: |
---|
139 | n/a | chunk = tuple(itertools.islice(it, chunksize)) |
---|
140 | n/a | if not chunk: |
---|
141 | n/a | return |
---|
142 | n/a | yield chunk |
---|
143 | n/a | |
---|
144 | n/a | def _process_chunk(fn, chunk): |
---|
145 | n/a | """ Processes a chunk of an iterable passed to map. |
---|
146 | n/a | |
---|
147 | n/a | Runs the function passed to map() on a chunk of the |
---|
148 | n/a | iterable passed to map. |
---|
149 | n/a | |
---|
150 | n/a | This function is run in a separate process. |
---|
151 | n/a | |
---|
152 | n/a | """ |
---|
153 | n/a | return [fn(*args) for args in chunk] |
---|
154 | n/a | |
---|
def _process_worker(call_queue, result_queue):
    """Evaluate _CallItems from call_queue; post _ResultItems to result_queue.

    This worker is run in a separate process. Reading a ``None`` from
    call_queue is the shutdown signal: the worker answers with its own pid
    (so the queue management thread can reap it) and exits.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read
            and evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be
            written to by the worker.
    """
    for call_item in iter(lambda: call_queue.get(block=True), None):
        try:
            value = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException as e:
            # Traceback objects don't pickle; wrap so the text survives
            # the trip back to the parent process.
            wrapped = _ExceptionWithTraceback(e, e.__traceback__)
            result_queue.put(_ResultItem(call_item.work_id, exception=wrapped))
        else:
            result_queue.put(_ResultItem(call_item.work_id, result=value))
    # Received the None sentinel: wake up the queue management thread by
    # reporting our pid, then let the process exit.
    result_queue.put(os.getpid())
---|
182 | n/a | |
---|
183 | n/a | def _add_call_item_to_queue(pending_work_items, |
---|
184 | n/a | work_ids, |
---|
185 | n/a | call_queue): |
---|
186 | n/a | """Fills call_queue with _WorkItems from pending_work_items. |
---|
187 | n/a | |
---|
188 | n/a | This function never blocks. |
---|
189 | n/a | |
---|
190 | n/a | Args: |
---|
191 | n/a | pending_work_items: A dict mapping work ids to _WorkItems e.g. |
---|
192 | n/a | {5: <_WorkItem...>, 6: <_WorkItem...>, ...} |
---|
193 | n/a | work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids |
---|
194 | n/a | are consumed and the corresponding _WorkItems from |
---|
195 | n/a | pending_work_items are transformed into _CallItems and put in |
---|
196 | n/a | call_queue. |
---|
197 | n/a | call_queue: A multiprocessing.Queue that will be filled with _CallItems |
---|
198 | n/a | derived from _WorkItems. |
---|
199 | n/a | """ |
---|
200 | n/a | while True: |
---|
201 | n/a | if call_queue.full(): |
---|
202 | n/a | return |
---|
203 | n/a | try: |
---|
204 | n/a | work_id = work_ids.get(block=False) |
---|
205 | n/a | except queue.Empty: |
---|
206 | n/a | return |
---|
207 | n/a | else: |
---|
208 | n/a | work_item = pending_work_items[work_id] |
---|
209 | n/a | |
---|
210 | n/a | if work_item.future.set_running_or_notify_cancel(): |
---|
211 | n/a | call_queue.put(_CallItem(work_id, |
---|
212 | n/a | work_item.fn, |
---|
213 | n/a | work_item.args, |
---|
214 | n/a | work_item.kwargs), |
---|
215 | n/a | block=True) |
---|
216 | n/a | else: |
---|
217 | n/a | del pending_work_items[work_id] |
---|
218 | n/a | continue |
---|
219 | n/a | |
---|
def _queue_management_worker(executor_reference,
                             processes,
                             pending_work_items,
                             work_ids_queue,
                             call_queue,
                             result_queue):
    """Manages the communication between this process and the worker processes.

    This function is run in a local thread.

    Args:
        executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
            this thread. Used to determine if the ProcessPoolExecutor has been
            garbage collected and that this function can exit.
        processes: A dict mapping pids to the multiprocessing.Process
            instances used as workers.
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems for processing by the process workers.
        result_queue: A multiprocessing.Queue of _ResultItems generated by the
            process workers.
    """
    executor = None

    def shutting_down():
        # True once no new work can arrive: the interpreter is exiting, the
        # executor was garbage-collected, or shutdown() was called on it.
        return _shutdown or executor is None or executor._shutdown_thread

    def shutdown_worker():
        # Send one None sentinel per live worker so each exits its loop.
        # This is an upper bound
        nb_children_alive = sum(p.is_alive() for p in processes.values())
        for i in range(0, nb_children_alive):
            call_queue.put_nowait(None)
        # Release the queue's resources as soon as possible.
        call_queue.close()
        # If .join() is not called on the created processes then
        # some multiprocessing.Queue methods may deadlock on Mac OS X.
        for p in processes.values():
            p.join()

    # The SimpleQueue's underlying Connection; used directly so results and
    # worker-process sentinels can be multiplexed in one wait() call.
    reader = result_queue._reader

    while True:
        # Schedule as many pending work items as fit in the call queue.
        _add_call_item_to_queue(pending_work_items,
                                work_ids_queue,
                                call_queue)

        # Block until either a result arrives or a worker process exits
        # (a process sentinel becomes ready when the process dies).
        sentinels = [p.sentinel for p in processes.values()]
        assert sentinels
        ready = wait([reader] + sentinels)
        if reader in ready:
            result_item = reader.recv()
        else:
            # A worker died without reporting back: the pool is broken.
            # Mark the process pool broken so that submits fail right now.
            executor = executor_reference()
            if executor is not None:
                executor._broken = True
                executor._shutdown_thread = True
                executor = None
            # All futures in flight must be marked failed
            for work_id, work_item in pending_work_items.items():
                work_item.future.set_exception(
                    BrokenProcessPool(
                        "A process in the process pool was "
                        "terminated abruptly while the future was "
                        "running or pending."
                    ))
                # Delete references to object. See issue16284
                del work_item
            pending_work_items.clear()
            # Terminate remaining workers forcibly: the queues or their
            # locks may be in a dirty state and block forever.
            for p in processes.values():
                p.terminate()
            shutdown_worker()
            return
        if isinstance(result_item, int):
            # Clean shutdown of a worker using its PID
            # (avoids marking the executor broken)
            assert shutting_down()
            p = processes.pop(result_item)
            p.join()
            if not processes:
                # Last worker has exited; finish shutting down ourselves.
                shutdown_worker()
                return
        elif result_item is not None:
            work_item = pending_work_items.pop(result_item.work_id, None)
            # work_item can be None if another process terminated (see above)
            if work_item is not None:
                if result_item.exception:
                    work_item.future.set_exception(result_item.exception)
                else:
                    work_item.future.set_result(result_item.result)
                # Delete references to object. See issue16284
                del work_item
        # Check whether we should start shutting down.
        executor = executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this worker has been collected OR
        #   - The executor that owns this worker has been shutdown.
        if shutting_down():
            try:
                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not pending_work_items:
                    shutdown_worker()
                    return
            except Full:
                # This is not a problem: we will eventually be woken up (in
                # result_queue.get()) and be able to send a sentinel again.
                pass
        # Drop the strong reference so the executor can be garbage collected
        # while this thread blocks in wait().
        executor = None
---|
334 | n/a | |
---|
335 | n/a | _system_limits_checked = False |
---|
336 | n/a | _system_limited = None |
---|
337 | n/a | def _check_system_limits(): |
---|
338 | n/a | global _system_limits_checked, _system_limited |
---|
339 | n/a | if _system_limits_checked: |
---|
340 | n/a | if _system_limited: |
---|
341 | n/a | raise NotImplementedError(_system_limited) |
---|
342 | n/a | _system_limits_checked = True |
---|
343 | n/a | try: |
---|
344 | n/a | nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") |
---|
345 | n/a | except (AttributeError, ValueError): |
---|
346 | n/a | # sysconf not available or setting not available |
---|
347 | n/a | return |
---|
348 | n/a | if nsems_max == -1: |
---|
349 | n/a | # indetermined limit, assume that limit is determined |
---|
350 | n/a | # by available memory only |
---|
351 | n/a | return |
---|
352 | n/a | if nsems_max >= 256: |
---|
353 | n/a | # minimum number of semaphores available |
---|
354 | n/a | # according to POSIX |
---|
355 | n/a | return |
---|
356 | n/a | _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max |
---|
357 | n/a | raise NotImplementedError(_system_limited) |
---|
358 | n/a | |
---|
359 | n/a | |
---|
class BrokenProcessPool(RuntimeError):
    """The process pool is no longer usable.

    Raised when a process belonging to a ProcessPoolExecutor terminated
    abruptly while a future was running or pending.
    """
---|
365 | n/a | |
---|
366 | n/a | |
---|
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.

        Raises:
            ValueError: if max_workers is given and is not positive.
            NotImplementedError: if the platform lacks enough semaphores
                (see _check_system_limits).
        """
        _check_system_limits()

        if max_workers is None:
            # Fall back to 1 when cpu_count() cannot determine a count.
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        # Results flow back from the workers over this queue.
        self._result_queue = SimpleQueue()
        # Ids of submitted-but-not-yet-queued work items.
        self._work_ids = queue.Queue()
        # Created lazily on first submit(); see
        # _start_queue_management_thread.
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        # Set by the queue management thread if a worker dies abruptly.
        self._broken = False
        # Monotonically increasing id assigned to each submitted work item.
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        """Lazily start worker processes and the queue management thread."""
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            # Daemonized so an un-shutdown executor cannot block interpreter
            # exit; the atexit hook (_python_exit) joins it cleanly instead.
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        """Spawn worker processes until _max_workers of them exist."""
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes[p.pid] = p

    # Docstring inherited from _base.Executor.submit via the assignment
    # below the method.
    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def map(self, fn, *iterables, timeout=None, chunksize=1):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: If greater than one, the iterables will be chopped into
                chunks of size chunksize and submitted to the process pool.
                If set to one, the items in the list will be sent one at a time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        if chunksize < 1:
            raise ValueError("chunksize must be >= 1.")

        # Chunking amortizes the per-item IPC cost; results arrive as lists
        # of chunk results and are flattened back into a single stream.
        results = super().map(partial(_process_chunk, fn),
                              _get_chunks(*iterables, chunksize=chunksize),
                              timeout=timeout)
        return itertools.chain.from_iterable(results)

    # Docstring inherited from _base.Executor.shutdown via the assignment
    # below the method.
    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
---|
502 | n/a | |
---|
# Ensure _python_exit runs at interpreter shutdown so daemonized management
# threads (and their worker processes) are stopped and joined cleanly.
atexit.register(_python_exit)
---|