# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ThreadPoolExecutor."""

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

import atexit
from concurrent.futures import _base
import queue
import threading
import weakref
import os

# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.

_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False

def _python_exit():
    global _shutdown
    _shutdown = True
    items = list(_threads_queues.items())
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join()

atexit.register(_python_exit)

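# A minimal usage sketch of the behaviour the exit handler above provides
# (example only, not part of the original module; the function name `save`
# and the file name are illustrative): even without an explicit shutdown(),
# queued work finishes before the interpreter exits, because _python_exit()
# pushes a None sentinel onto each worker queue and then joins the threads.
#
#     from concurrent.futures import ThreadPoolExecutor
#
#     def save(path, data):
#         with open(path, 'w') as f:
#             f.write(data)
#
#     pool = ThreadPoolExecutor()
#     pool.submit(save, 'out.txt', 'payload')
#     # No pool.shutdown() here: the atexit hook still waits for the queued
#     # write to complete before the worker threads are torn down.
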
---|
class _WorkItem(object):
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        if not self.future.set_running_or_notify_cancel():
            return

        try:
            result = self.fn(*self.args, **self.kwargs)
        except BaseException as e:
            self.future.set_exception(e)
        else:
            self.future.set_result(result)

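# Hedged illustration of what _WorkItem.run() means for callers (example
# only, not part of the original source): a callable that raises is not
# re-raised inside the worker thread; the exception is stored on the Future
# and surfaces when result() is called.
#
#     from concurrent.futures import ThreadPoolExecutor
#
#     def fail():
#         raise ValueError('boom')
#
#     with ThreadPoolExecutor(max_workers=1) as pool:
#         fut = pool.submit(fail)
#         try:
#             fut.result()
#         except ValueError as exc:
#             print('propagated from worker:', exc)
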
---|
def _worker(executor_reference, work_queue):
    try:
        while True:
            work_item = work_queue.get(block=True)
            if work_item is not None:
                work_item.run()
                # Delete references to object. See issue16284
                del work_item
                continue
            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Notify other workers
                work_queue.put(None)
                return
            del executor
    except BaseException:
        _base.LOGGER.critical('Exception in worker', exc_info=True)

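# Sketch of the sentinel protocol implemented by _worker() above (assumed
# example, not part of the module): a worker blocks on get(), a None entry
# means "consider exiting", and an exiting worker re-posts None so sibling
# workers receive the same wake-up.
#
#     import queue
#
#     q = queue.Queue()
#     q.put(None)               # what shutdown()/_python_exit() do
#     item = q.get(block=True)  # a worker wakes up holding the sentinel
#     if item is None:
#         q.put(None)           # pass the wake-up along before returning
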
---|
class ThreadPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None, thread_name_prefix=''):
        """Initializes a new ThreadPoolExecutor instance.

        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
            thread_name_prefix: An optional name prefix to give our threads.
        """
        if max_workers is None:
            # Use this number because ThreadPoolExecutor is often
            # used to overlap I/O instead of CPU work.
            max_workers = (os.cpu_count() or 1) * 5
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")

        self._max_workers = max_workers
        self._work_queue = queue.Queue()
        self._threads = set()
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        self._thread_name_prefix = thread_name_prefix

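    # Worked example of the default computed above, assuming an 8-core
    # machine (example only): os.cpu_count() returns 8, so
    # max_workers = (8 or 1) * 5 = 40 threads. The multiplier reflects that
    # these threads are expected to spend most of their time blocked on I/O
    # rather than competing for CPU.
    #
    #     import os
    #     default_workers = (os.cpu_count() or 1) * 5
    #     print(default_workers)   # e.g. 40 on an 8-core machine
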
---|
    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

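    # Hedged usage sketch for submit() (example only, not part of the class):
    # the returned Future is the caller's handle to the work item queued
    # above.
    #
    #     from concurrent.futures import ThreadPoolExecutor
    #
    #     pool = ThreadPoolExecutor(max_workers=2, thread_name_prefix='demo')
    #     fut = pool.submit(pow, 2, 10)
    #     print(fut.result())   # 1024
    #     pool.shutdown(wait=True)
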
---|
    def _adjust_thread_count(self):
        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)
        # TODO(bquinlan): Should avoid creating new threads if there are more
        # idle threads than items in the work queue.
        num_threads = len(self._threads)
        if num_threads < self._max_workers:
            thread_name = '%s_%d' % (self._thread_name_prefix or self,
                                     num_threads)
            t = threading.Thread(name=thread_name, target=_worker,
                                 args=(weakref.ref(self, weakref_cb),
                                       self._work_queue))
            t.daemon = True
            t.start()
            self._threads.add(t)
            _threads_queues[t] = self._work_queue

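    # Illustrative sketch of the lazy spawning above (example only; it peeks
    # at the private _threads set): the pool starts with zero threads and
    # adds at most one thread per submit() until max_workers is reached.
    #
    #     import time
    #     from concurrent.futures import ThreadPoolExecutor
    #
    #     pool = ThreadPoolExecutor(max_workers=4)
    #     print(len(pool._threads))   # 0: nothing spawned yet
    #     for _ in range(2):
    #         pool.submit(time.sleep, 0.1)
    #     print(len(pool._threads))   # 2: one thread per submit so far
    #     pool.shutdown(wait=True)
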
---|
    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown = True
            self._work_queue.put(None)
        if wait:
            for t in self._threads:
                t.join()
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
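
# Hedged usage note (example only): _base.Executor.__exit__ calls
# shutdown(wait=True), so the context-manager form below blocks until every
# queued work item has finished before the with-block exits.
#
#     from concurrent.futures import ThreadPoolExecutor
#
#     with ThreadPoolExecutor(max_workers=2) as pool:
#         futures = [pool.submit(pow, n, 2) for n in range(4)]
#     # shutdown(wait=True) has already run here; all futures are done.
#     print([f.result() for f in futures])   # [0, 1, 4, 9]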