#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['Pool', 'ThreadPool']

#
# Imports
#

import threading
import queue
import itertools
import collections
import os
import time
import traceback

# If threading is available then ThreadPool should be provided.  Therefore
# we avoid top-level imports which are liable to fail on some systems.
from . import util
from . import get_context, TimeoutError

#
# Constants representing the state of a pool
#

RUN = 0
CLOSE = 1
TERMINATE = 2

#
# Miscellaneous
#

job_counter = itertools.count()

def mapstar(args):
    return list(map(*args))

def starmapstar(args):
    return list(itertools.starmap(args[0], args[1]))
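
# Both helpers receive a single pre-packed argument so a chunk of work can
# travel through the task queue as one ordinary (func, args) task.
# Illustrative behaviour (not executed here):
#
#     mapstar((abs, (-1, -2, 3)))            # -> [1, 2, 3]
#     starmapstar((pow, [(2, 3), (3, 2)]))   # -> [8, 9]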

#
# Hack to embed stringification of remote traceback in local traceback
#

class RemoteTraceback(Exception):
    def __init__(self, tb):
        self.tb = tb
    def __str__(self):
        return self.tb

class ExceptionWithTraceback:
    def __init__(self, exc, tb):
        tb = traceback.format_exception(type(exc), exc, tb)
        tb = ''.join(tb)
        self.exc = exc
        self.tb = '\n"""\n%s"""' % tb
    def __reduce__(self):
        return rebuild_exc, (self.exc, self.tb)

def rebuild_exc(exc, tb):
    exc.__cause__ = RemoteTraceback(tb)
    return exc
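
# How the hack above works: a worker wraps a raised exception in
# ExceptionWithTraceback, whose __reduce__ makes unpickling in the parent
# call rebuild_exc().  The original exception therefore arrives with the
# formatted remote traceback attached as its __cause__.  Illustrative
# round-trip, relying only on pickle semantics (not executed here):
#
#     import pickle
#     try:
#         1 / 0
#     except Exception as e:
#         wrapped = ExceptionWithTraceback(e, e.__traceback__)
#     exc = pickle.loads(pickle.dumps(wrapped))   # invokes rebuild_exc()
#     # str(exc.__cause__) now contains the remote traceback text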

#
# Code run by worker processes
#

class MaybeEncodingError(Exception):
    """Wraps possibly unpicklable errors so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        self.exc = repr(exc)
        self.value = repr(value)
        super(MaybeEncodingError, self).__init__(self.exc, self.value)

    def __str__(self):
        return "Error sending result: '%s'. Reason: '%s'" % (self.value,
                                                             self.exc)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)


def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
           wrap_exception=False):
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, OSError):
            util.debug('worker got EOFError or OSError -- exiting')
            break

        if task is None:
            util.debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            if wrap_exception:
                e = ExceptionWithTraceback(e, e.__traceback__)
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            wrapped = MaybeEncodingError(e, result[1])
            util.debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))
        completed += 1
    util.debug('worker exiting after %d tasks' % completed)
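
# Wire format used between the pool and its workers: each task is a
# (job, i, func, args, kwds) tuple and each result a (job, i, (success,
# value)) tuple, where `job` identifies the ApplyResult/MapResult in the
# parent's cache and `i` is the chunk index within that job.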

#
# Class representing a process pool
#

class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    _wrap_exception = True

    def Process(self, *args, **kwds):
        return self._ctx.Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None, context=None):
        self._ctx = context or get_context()
        self._setup_queues()
        self._taskqueue = queue.Queue()
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            processes = os.cpu_count() or 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()

        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue,
                  self._pool, self._cache)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = util.Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )
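
    # The pool is driven by three daemon threads: the worker handler keeps
    # the pool of processes topped up, the task handler moves tasks from
    # _taskqueue onto the shared inqueue, and the result handler dispatches
    # results from the outqueue back into _cache.  util.Finalize registers
    # _terminate_pool so the same shutdown path runs on terminate() and at
    # interpreter exit.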

    def _join_exited_workers(self):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        """
        cleaned = False
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                util.debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del self._pool[i]
        return cleaned

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=worker,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild,
                                   self._wrap_exception)
                            )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            util.debug('added worker')

    def _maintain_pool(self):
        """Clean up any exited workers and start replacements for them.
        """
        if self._join_exited_workers():
            self._repopulate_pool()

    def _setup_queues(self):
        self._inqueue = self._ctx.SimpleQueue()
        self._outqueue = self._ctx.SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv
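
    # _quick_put/_quick_get talk to the underlying pipe connections
    # directly, skipping SimpleQueue's locking; this is presumably safe
    # because, on the parent side, only the task handler thread writes to
    # the inqueue and only the result handler thread reads the outqueue.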

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        '''
        assert self._state == RUN
        return self.apply_async(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments.  Hence
        `func` and (a, b) becomes func(a, b).
        '''
        return self._map_async(func, iterable, starmapstar, chunksize).get()

    def starmap_async(self, func, iterable, chunksize=None, callback=None,
                      error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                                  for i, x in enumerate(iterable)),
                                 result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                                  for i, x in enumerate(task_batches)),
                                 result._set_length))
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                                  for i, x in enumerate(iterable)),
                                 result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                                  for i, x in enumerate(task_batches)),
                                 result._set_length))
            return (item for chunk in result for item in chunk)
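
    # Illustrative difference between the map variants (not executed here):
    #
    #     pool.map(f, xs)             # blocks, returns a list in input order
    #     pool.imap(f, xs)            # returns an iterator, yields in order
    #     pool.imap_unordered(f, xs)  # yields results as workers finish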

    def apply_async(self, func, args=(), kwds={}, callback=None,
                    error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        result = ApplyResult(self._cache, callback, error_callback)
        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
        return result

    def map_async(self, func, iterable, chunksize=None, callback=None,
                  error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        return self._map_async(func, iterable, mapstar, chunksize, callback,
                               error_callback)

    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
                   error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)

        if chunksize is None:
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        if len(iterable) == 0:
            chunksize = 0
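
        # Worked example of the heuristic above: with 4 workers and 100
        # items, divmod(100, 16) == (6, 4), so chunksize becomes 7 and the
        # input is cut into ceil(100/7) == 15 batches.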

        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback,
                           error_callback=error_callback)
        self._taskqueue.put((((result._job, i, mapper, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result

    @staticmethod
    def _handle_workers(pool):
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the
        # pool is terminated.
        while thread._state == RUN or (pool._cache and
                                       thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        util.debug('worker handler exiting')

    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool, cache):
        thread = threading.current_thread()

        for taskseq, set_length in iter(taskqueue.get, None):
            task = None
            i = -1
            try:
                for i, task in enumerate(taskseq):
                    if thread._state:
                        util.debug('task handler found thread._state != RUN')
                        break
                    try:
                        put(task)
                    except Exception as e:
                        job, ind = task[:2]
                        try:
                            cache[job]._set(ind, (False, e))
                        except KeyError:
                            pass
                else:
                    if set_length:
                        util.debug('doing set_length()')
                        set_length(i+1)
                    continue
                break
            except Exception as ex:
                job, ind = task[:2] if task else (0, 0)
                if job in cache:
                    cache[job]._set(ind + 1, (False, ex))
                if set_length:
                    util.debug('doing set_length()')
                    set_length(i+1)
        else:
            util.debug('task handler got sentinel')

        try:
            # tell result handler to finish when cache is empty
            util.debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            util.debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except OSError:
            util.debug('task handler got OSError when sending sentinels')

        util.debug('task handler exiting')
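
    # Entries on _taskqueue are (taskseq, set_length) pairs: taskseq is an
    # iterable of task tuples and set_length, when not None, is called with
    # the final task count so iterator results know when to stop.  A bare
    # None on the queue is the shutdown sentinel consumed by iter() above.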

    @staticmethod
    def _handle_results(outqueue, get, cache):
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (OSError, EOFError):
                util.debug('result handler got EOFError/OSError -- exiting')
                return

            if thread._state:
                assert thread._state == TERMINATE
                util.debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                util.debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (OSError, EOFError):
                util.debug('result handler got EOFError/OSError -- exiting')
                return

            if task is None:
                util.debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        if hasattr(outqueue, '_reader'):
            util.debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (OSError, EOFError):
                pass

        util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
                   len(cache), thread._state)

    @staticmethod
    def _get_tasks(func, it, size):
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)
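
    # _get_tasks chunks an iterable into (func, batch) pairs, e.g.
    # list(Pool._get_tasks(f, range(5), 2)) yields
    # [(f, (0, 1)), (f, (2, 3)), (f, (4,))].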

    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        util.debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE

    def terminate(self):
        util.debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()

    def join(self):
        util.debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()
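
    # Shutdown protocol: close() stops new submissions but lets queued work
    # drain, terminate() stops workers immediately via _terminate_pool, and
    # join() may only be called after one of the two, e.g. (illustrative):
    #
    #     pool.close()        # or pool.terminate()
    #     pool.join()         # wait for handler threads and workers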

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        util.debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        util.debug('finalizing pool')

        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE

        util.debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        assert result_handler.is_alive() or len(cache) == 0

        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our
        # back.
        util.debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            util.debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        util.debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join()

        util.debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            util.debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    util.debug('cleaning up worker %d' % p.pid)
                    p.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()
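
    # Note that __exit__ calls terminate(), not close(): leaving a
    # `with Pool() as p:` block kills any work still in flight rather than
    # waiting for it to drain.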

#
# Class whose instances are returned by `Pool.apply_async()`
#

class ApplyResult(object):

    def __init__(self, cache, callback, error_callback):
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = cache
        self._callback = callback
        self._error_callback = error_callback
        cache[self._job] = self

    def ready(self):
        return self._event.is_set()

    def successful(self):
        assert self.ready()
        return self._success

    def wait(self, timeout=None):
        self._event.wait(timeout)

    def get(self, timeout=None):
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        else:
            raise self._value

    def _set(self, i, obj):
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        if self._error_callback and not self._success:
            self._error_callback(self._value)
        self._event.set()
        del self._cache[self._job]

AsyncResult = ApplyResult       # create alias -- see #17805
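
# Illustrative use of the result object (not executed here):
#
#     res = pool.apply_async(pow, (2, 10), callback=print)
#     res.wait(1.0)               # block for up to 1 second
#     if res.ready() and res.successful():
#         value = res.get()       # -> 1024; also printed by the callback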

#
# Class whose instances are returned by `Pool.map_async()`
#

class MapResult(ApplyResult):

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            self._number_left = 0
            self._event.set()
            del cache[self._job]
        else:
            self._number_left = length//chunksize + bool(length % chunksize)
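            # i.e. the number of chunks, rounded up: for length=10 and
            # chunksize=3 this is 10//3 + bool(10 % 3) == 4.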

    def _set(self, i, success_result):
        self._number_left -= 1
        success, result = success_result
        if success and self._success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._event.set()
        else:
            if not success and self._success:
                # only store first exception
                self._success = False
                self._value = result
            if self._number_left == 0:
                # only consider the result ready once all jobs are done
                if self._error_callback:
                    self._error_callback(self._value)
                del self._cache[self._job]
                self._event.set()

#
# Class whose instances are returned by `Pool.imap()`
#

class IMapIterator(object):

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()
        self._index = 0
        self._length = None
        self._unsorted = {}
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        with self._cond:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError

        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        with self._cond:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj

            if self._index == self._length:
                del self._cache[self._job]

    def _set_length(self, length):
        with self._cond:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
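
# _set() above preserves input order: a result arriving ahead of its turn
# is parked in _unsorted until every earlier index has been delivered, so
# consumers of imap() always see results in submission order.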

#
# Class whose instances are returned by `Pool.imap_unordered()`
#

class IMapUnorderedIterator(IMapIterator):

    def _set(self, i, obj):
        with self._cond:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]

#
#
#

class ThreadPool(Pool):
    _wrap_exception = False

    @staticmethod
    def Process(*args, **kwds):
        from .dummy import Process
        return Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        with inqueue.not_empty:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
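
# Illustrative end-to-end usage of the process pool (guard with __main__
# so child processes can re-import the calling module safely):
#
#     from multiprocessing import Pool
#
#     def square(x):
#         return x * x
#
#     if __name__ == '__main__':
#         with Pool(processes=4) as pool:
#             print(pool.map(square, range(10)))
#             for n in pool.imap_unordered(square, range(10)):
#                 print(n)            # arbitrary order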
|---|